repo | path | func_name | language | sha | url | partition |
---|---|---|---|---|---|---|
yamcs/yamcs-python | yamcs-client/yamcs/storage/model.py | ObjectInfo.upload | python | 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/model.py#L151-L158 | train |

def upload(self, file_obj):
    """
    Replace the content of this object.

    :param file file_obj: The file (or file-like object) to upload.
    """
    return self._client.upload_object(
        self._instance, self._bucket, self.name, file_obj)

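A minimal sketch of the call shape; `FakeStorageClient` and all argument values are stand-ins, since a live Yamcs server is not assumed:

```python
import io

# Stand-in for the yamcs-client storage Client: it only mimics the
# upload_object signature that ObjectInfo.upload delegates to.
class FakeStorageClient:
    def upload_object(self, instance, bucket, name, file_obj):
        data = file_obj.read()
        print("uploading %s to %s/%s (%d bytes)" % (name, instance, bucket, len(data)))

client = FakeStorageClient()
# ObjectInfo.upload forwards exactly these four arguments:
client.upload_object("simulator", "my-bucket", "object.bin", io.BytesIO(b"payload"))
```
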
raymondEhlers/pachyderm | pachyderm/utils.py | moving_average | python | aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L27-L40 | train |

def moving_average(arr: np.ndarray, n: int = 3) -> np.ndarray:
    """ Calculate the moving average over an array.

    Algorithm from: https://stackoverflow.com/a/14314054

    Args:
        arr (np.ndarray): Array over which to calculate the moving average.
        n (int): Number of elements over which to calculate the moving average. Default: 3
    Returns:
        np.ndarray: Moving average calculated over n.
    """
    ret = np.cumsum(arr, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n

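A quick check of the cumulative-sum trick (assuming only that `numpy` is installed):

```python
import numpy as np

def moving_average(arr: np.ndarray, n: int = 3) -> np.ndarray:
    ret = np.cumsum(arr, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]  # each slot from index n on now holds the sum of n values
    return ret[n - 1:] / n        # first valid window ends at index n-1

print(moving_average(np.array([1, 2, 3, 4, 5]), n=3))  # -> [2. 3. 4.]
```
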
raymondEhlers/pachyderm | pachyderm/utils.py | recursive_getattr | python | aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L42-L58 | train |

def recursive_getattr(obj: Any, attr: str, *args) -> Any:
    """ Recursive ``getattr``.

    This can be used as a drop-in for the standard ``getattr(...)``. Credit to:
    https://stackoverflow.com/a/31174427

    Args:
        obj: Object to retrieve the attribute from.
        attr: Name of the attribute, with each successive attribute separated by a ".".
    Returns:
        The requested attribute. (Same as ``getattr``).
    Raises:
        AttributeError: If the attribute was not found and no default was provided. (Same as ``getattr``).
    """
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split('.'))

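A self-contained sketch of the dotted-path lookup, including the optional default:

```python
import functools
from types import SimpleNamespace

def recursive_getattr(obj, attr, *args):
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split('.'))

# Nested namespaces stand in for any object with attributes.
cfg = SimpleNamespace(db=SimpleNamespace(host="localhost", port=5432))
print(recursive_getattr(cfg, "db.port"))           # -> 5432
print(recursive_getattr(cfg, "db.user", "admin"))  # missing attribute, default used -> 'admin'
```
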
raymondEhlers/pachyderm | pachyderm/utils.py | recursive_setattr | python | aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L60-L76 | train |

def recursive_setattr(obj: Any, attr: str, val: Any) -> Any:
    """ Recursive ``setattr``.

    This can be used as a drop-in for the standard ``setattr(...)``. Credit to:
    https://stackoverflow.com/a/31174427

    Args:
        obj: Object to set the attribute on.
        attr: Name of the attribute, with each successive attribute separated by a ".".
        val: Value to set the attribute to.
    Returns:
        None. (Same as ``setattr``).
    Raises:
        AttributeError: If an intermediate attribute in the path was not found.
    """
    pre, _, post = attr.rpartition('.')
    return setattr(recursive_getattr(obj, pre) if pre else obj, post, val)

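A self-contained sketch pairing it with the getter above:

```python
import functools
from types import SimpleNamespace

def recursive_getattr(obj, attr, *args):
    return functools.reduce(lambda o, a: getattr(o, a, *args),
                            [obj] + attr.split('.'))

def recursive_setattr(obj, attr, val):
    # Resolve everything before the last dot, then set the final attribute.
    pre, _, post = attr.rpartition('.')
    return setattr(recursive_getattr(obj, pre) if pre else obj, post, val)

cfg = SimpleNamespace(db=SimpleNamespace(host="localhost"))
recursive_setattr(cfg, "db.host", "db.internal")
print(cfg.db.host)  # -> 'db.internal'
```
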
raymondEhlers/pachyderm | pachyderm/utils.py | recursive_getitem | python | aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L78-L95 | train |

def recursive_getitem(d: Mapping[str, Any], keys: Union[str, Sequence[str]]) -> Any:
    """ Recursively retrieve an item from a nested dict.

    Credit to: https://stackoverflow.com/a/52260663

    Args:
        d: Mapping of strings to objects.
        keys: Names of the keys under which the object is stored. Can also just be a single string.
    Returns:
        The object stored under the keys.
    Raises:
        KeyError: If one of the keys isn't found.
    """
    # If only a string, then just return the item
    if isinstance(keys, str):
        return d[keys]
    else:
        return functools.reduce(operator.getitem, keys, d)

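A self-contained sketch of both branches:

```python
import functools
import operator

def recursive_getitem(d, keys):
    if isinstance(keys, str):
        return d[keys]                                  # single key: plain lookup
    return functools.reduce(operator.getitem, keys, d)  # sequence: walk the nesting

settings = {"jet": {"pt": {"bins": [20, 40, 60]}}}
print(recursive_getitem(settings, ["jet", "pt", "bins"]))  # -> [20, 40, 60]
print(recursive_getitem(settings, "jet"))                  # -> {'pt': {'bins': [20, 40, 60]}}
```
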
raymondEhlers/pachyderm | pachyderm/utils.py | get_array_for_fit | python | aaa1d8374fd871246290ce76f1796f2f7582b01d | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/utils.py#L97-L115 | train |

def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:
    """ Get a Histogram1D associated with the selected jet and track pt bins.

    This is often used to retrieve data for fitting.

    Args:
        observables (dict): The observables from which the hist should be retrieved.
        track_pt_bin (int): Track pt bin of the desired hist.
        jet_pt_bin (int): Jet pt bin of the desired hist.
    Returns:
        Histogram1D: Converted TH1 or uproot histogram.
    Raises:
        ValueError: If the requested observable couldn't be found.
    """
    for name, observable in observables.items():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return histogram.Histogram1D.from_existing_hist(observable.hist)
    raise ValueError(f"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}")

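The search logic is independent of the histogram conversion; a sketch with stand-in observables (`SimpleNamespace` objects replace the real observable class, and the `histogram` conversion step is skipped):

```python
from types import SimpleNamespace

def find_observable(observables, track_pt_bin, jet_pt_bin):
    # Same linear search as above, minus the Histogram1D conversion.
    for name, observable in observables.items():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return observable.hist
    raise ValueError(f"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}")

observables = {
    "obs1": SimpleNamespace(track_pt_bin=1, jet_pt_bin=2, hist="hist_1_2"),
    "obs2": SimpleNamespace(track_pt_bin=2, jet_pt_bin=2, hist="hist_2_2"),
}
print(find_observable(observables, track_pt_bin=2, jet_pt_bin=2))  # -> 'hist_2_2'
```
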
lowandrew/OLCTools | spadespipeline/CHAS.py | CHAS.epcrparsethreads | python | 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/CHAS.py#L125-L154 | train |

def epcrparsethreads(self):
    """
    Parse the ePCR results, and run BLAST on the parsed results
    """
    from Bio import SeqIO
    # Create the threads for the BLAST analysis
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            threads = Thread(target=self.epcrparse, args=())
            threads.setDaemon(True)
            threads.start()
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            if sample[self.analysistype].primers != 'NA':
                # Initialise a dictionary to store the SeqIO records of each assembly
                record = dict()
                # Initialise dictionaries to store results in the object
                sample[self.analysistype].blastresults = dict()
                sample[self.analysistype].rawblastresults = dict()
                # Load the records from the assembly into the dictionary
                for rec in SeqIO.parse(sample.general.bestassemblyfile, 'fasta'):
                    record[rec.id] = str(rec.seq)
                # Iterate through the ePCR results
                for line in sample[self.analysistype].epcrresults:
                    # The data of interest is in the lines that do not start with a #
                    # TLH	2016-SEQ-0359_4_length_321195_cov_28.6354_ID_3773	+	227879	228086	0	0	208/1000-1000
                    if not line.startswith('#'):
                        # Add the variables to the queue
                        self.epcrparsequeue.put((sample, record, line))
    self.epcrparsequeue.join()

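The threading skeleton above is the standard daemon-worker/`Queue.join()` pattern; a self-contained Python 3 sketch of just that pattern (the Biopython and BLAST specifics are omitted):

```python
import queue
import threading

work_queue = queue.Queue()

def worker():
    while True:
        item = work_queue.get()   # blocks until a work item arrives
        print("processed", item)
        work_queue.task_done()    # tells join() this item is finished

for _ in range(4):                # start a few daemon workers
    threading.Thread(target=worker, daemon=True).start()

for line in ["result_1", "result_2", "result_3"]:
    work_queue.put(line)          # producer side: enqueue parsed lines

work_queue.join()                 # returns once every queued item is task_done()
```
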
lowandrew/OLCTools | spadespipeline/CHAS.py | CHAS.epcrparse | python | 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/CHAS.py#L156-L200 | train |

def epcrparse(self):
    """
    Run BLAST, and record results to the object
    """
    from Bio.Blast.Applications import NcbiblastnCommandline
    while True:
        sample, record, line = self.epcrparsequeue.get()
        # Split the data on tabs
        gene, chromosome, strand, start, end, m_match, gaps, act_len_exp_len = line.split('\t')
        # Extract the gene sequence from the contigs
        # The record dictionary has the contig name, and the sequence. Splice out the data using the start and
        # end coordinates specified by ePCR
        genesequence = record[chromosome][int(start) - 1:int(end)]
        # Set up BLASTn using blastn-short, as the probe sequences tend to be very short
        blastn = NcbiblastnCommandline(db=sample[self.analysistype].probes.split('.')[0],
                                       num_threads=12,
                                       task='blastn-short',
                                       num_alignments=1,
                                       outfmt="'6 qseqid sseqid positive mismatch gaps "
                                              "evalue bitscore slen length'")
        # Run the BLASTn, with the gene sequence as stdin
        out, err = blastn(stdin=genesequence)
        # Split the output string on tabs
        results = out.rstrip().split('\t')
        # Populate the raw blast results
        sample[self.analysistype].rawblastresults[gene] = results
        # Create named variables from the list
        positives = float(results[2])
        mismatches = float(results[3])
        gaps = float(results[4])
        subjectlength = float(results[7])
        # Calculate the percent identity
        percentidentity = float('{:0.2f}'.format((positives - gaps) / subjectlength * 100))
        # Create a dictionary with the desired values to store in the metadata object
        resultdict = {
            'matches': positives,
            'mismatches': mismatches,
            'gaps': gaps,
            'subject_length': subjectlength,
            'percent_identity': percentidentity,
            'match_length': results[8].split('\n')[0]
        }
        # Populate the metadata object with the dictionary
        sample[self.analysistype].blastresults[gene] = resultdict
        self.epcrparsequeue.task_done()

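The coordinate slicing is worth checking in isolation: ePCR reports 1-based inclusive coordinates, while Python slices are 0-based and half-open. A sketch with a fabricated contig and result line:

```python
# Fake contig dictionary: contig name -> sequence.
record = {"contig_1": "AACCGGTTAACCGGTT"}

# A fabricated ePCR-style line: gene, contig, strand, start, end, ...
line = "tlh\tcontig_1\t+\t3\t6\t0\t0\t4/4-4"
gene, chromosome, strand, start, end, *_ = line.split('\t')

# 1-based inclusive [3, 6] becomes the Python slice [2:6].
genesequence = record[chromosome][int(start) - 1:int(end)]
print(genesequence)  # -> 'CCGG'
```
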
lowandrew/OLCTools | spadespipeline/CHAS.py | CHAS.report | python | 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/CHAS.py#L219-L254 | train |

def report(self):
    """
    Create reports of the findings
    """
    # Initialise a variable to store the results
    data = ''
    for sample in self.metadata:
        if sample[self.analysistype].primers != 'NA':
            # Set the name of the strain-specific report
            sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir,
                                                            '{}_{}.csv'.format(sample.name, self.analysistype))
            # Populate the strain-specific string with header, and strain name
            strainspecific = 'Strain,{},\n{},'.format(','.join(sorted(sample[self.analysistype].targets)),
                                                      sample.name)
            # Iterate through all the genes in the organism-specific analysis
            for gene in sorted(sample[self.analysistype].targets):
                try:
                    # Extract the percent identity
                    percentidentity = sample[self.analysistype].blastresults[gene]['percent_identity']
                    # If the % identity is greater than the cutoff of 50%, the gene is considered to be present
                    if percentidentity > 50:
                        strainspecific += '{},'.format(percentidentity)
                    else:
                        strainspecific += '-,'
                # If there are no BLAST results, then the gene is absent
                except KeyError:
                    strainspecific += '-,'
            strainspecific += '\n'
            # Open and write the data to the strain-specific report
            with open(sample[self.analysistype].report, 'w') as specificreport:
                specificreport.write(strainspecific)
            # Add all the data from each strain to the cumulative data string
            data += strainspecific
    # Open and write the cumulative data to the cumulative report
    with open(os.path.join(self.reportdir, '{}.csv'.format(self.analysistype)), 'w') as report:
        report.write(data)

JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/mayapylauncher.py | setup_environment | python | c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/mayapylauncher.py#L28-L45 | train |

def setup_environment():
    """Set up necessary environment variables

    This appends all paths of sys.path to the python path
    so mayapy will find all installed modules.
    We have to make sure that we use maya libs instead of
    libs of the virtual env. So we insert all the libs for mayapy
    first.

    :returns: None
    :rtype: None
    :raises: None
    """
    osinter = ostool.get_interface()
    pypath = osinter.get_maya_envpath()
    for p in sys.path:
        pypath = os.pathsep.join((pypath, p))
    os.environ['PYTHONPATH'] = pypath

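The path-joining loop is plain stdlib; a runnable sketch in which a placeholder string stands in for `osinter.get_maya_envpath()`:

```python
import os
import sys

maya_envpath = "/usr/autodesk/maya/lib"  # placeholder for osinter.get_maya_envpath()
pypath = maya_envpath
for p in sys.path:
    pypath = os.pathsep.join((pypath, p))  # Maya libs stay first in the search order

os.environ['PYTHONPATH'] = pypath
print(os.environ['PYTHONPATH'].split(os.pathsep)[0])  # -> the Maya lib path
```
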
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/mayapylauncher.py | execute_mayapy | python | c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/mayapylauncher.py#L48-L71 | train |

def execute_mayapy(args, wait=True):
    """Execute mayapy with the given arguments and return the returncode or the process

    :param args: arguments for the maya python interpreter
    :type args: list
    :param wait: If True, waits for the process to finish and returns the returncode.
                 If False, just returns the process
    :type wait: bool
    :returns: if wait is True, the returncode, else the process
    :rtype: int|:class:`subprocess.Popen`
    :raises: None
    """
    osinter = ostool.get_interface()
    mayapy = osinter.get_maya_python()
    allargs = [mayapy]
    allargs.extend(args)
    print "Executing mayapy with: %s" % allargs
    mayapyprocess = subprocess.Popen(allargs)
    if wait:
        rc = mayapyprocess.wait()
        print "Process mayapy finished!"
        return rc
    else:
        return mayapyprocess

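The launch-and-optionally-wait pattern, sketched in Python 3 with `sys.executable` standing in for the mayapy path:

```python
import subprocess
import sys

def execute(args, wait=True):
    # sys.executable stands in for the mayapy interpreter here.
    process = subprocess.Popen([sys.executable] + args)
    if wait:
        return process.wait()   # block until done, hand back the returncode
    return process              # caller manages the still-running process

rc = execute(["-c", "print('hello from the child')"])
print("returncode:", rc)        # -> 0
```
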
portfors-lab/sparkle | sparkle/gui/stim/stim_detail.py | StimDetailWidget.setDoc | python | 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stim_detail.py#L24-L36 | train |

def setDoc(self, doc):
    """Presents the documentation

    :param doc: documentation for StimulusModel. i.e. returned from
        :meth:`componentDoc<sparkle.stim.stimulus_model.StimulusModel.componentDoc>`
        or :meth:`templateDoc<sparkle.stim.stimulus_model.StimulusModel.templateDoc>`
    """
    self.ui.overAtten.setNum(doc['overloaded_attenuation'])
    # also set composite stim type
    # self.ui.traceType.setText(doc['testtype'])
    self.ui.componentDetails.clearDoc()
    self.ui.componentDetails.setDoc(doc['components'])

sirfoga/pyhal | hal/cvs/versioning.py | Version.increase_by_changes | python | 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/cvs/versioning.py#L218-L226 | train |

def increase_by_changes(self, changes_amount, ratio):
    """Increase version by amount of changes

    :param changes_amount: Number of changes done
    :param ratio: Ratio of version increases per change
    :return: Increases version according to changes
    """
    increases = round(changes_amount * ratio)
    return self.increase(int(increases))

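The bump arithmetic on its own (numbers are illustrative):

```python
# Sketch of the arithmetic: 12 changes at 0.25 increases per change.
changes_amount, ratio = 12, 0.25
increases = int(round(changes_amount * ratio))
print(increases)  # -> 3, i.e. self.increase(3) would be called
```
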
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/gui/main.py | wrap_maya_ui | python | c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/gui/main.py#L6-L23 | train |

def wrap_maya_ui(mayaname):
    """Given the name of a Maya UI element of any type,
    return the corresponding QWidget or QAction.
    If the object does not exist, returns None

    :param mayaname: the maya ui element
    :type mayaname: str
    :returns: the wrapped object
    :rtype: QObject | None
    :raises: None
    """
    ptr = apiUI.MQtUtil.findControl(mayaname)
    if ptr is None:
        ptr = apiUI.MQtUtil.findLayout(mayaname)
    if ptr is None:
        ptr = apiUI.MQtUtil.findMenuItem(mayaname)
    if ptr is not None:
        return wrap(long(ptr))

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.query_args | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L100-L121 | train |

def query_args(self, name):
    """Query the return type and argument list of the specified
    function in the specified database.
    """
    sql = 'select type, id from code_items ' \
          'where kind = 22 and name = ?'
    logging.debug('%s %s', sql, (name,))
    self.cursor.execute(sql, (name,))
    func = self.cursor.fetchone()
    if func:
        sql = 'select param_number, type, name ' \
              'from code_items where parent_id = ?'
        logging.debug('%s %s', sql, (func[1],))
        self.cursor.execute(sql, (func[1],))
        args = self.cursor.fetchall()
        ret_type = clean_ret_type(func[0])
        args = [
            (arg_number, sanitize_type(arg_type), arg_name)
            for arg_number, arg_type, arg_name in args
        ]
        return ret_type, name, args
    return None

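The two-step parent/child lookup can be reproduced against an in-memory SQLite database. The schema below is reduced to just the columns the query touches, and the `kind` value for parameters (23) is an illustrative guess — only `kind = 22` for functions is taken from the code above:

```python
import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('create table code_items '
            '(id integer, parent_id integer, kind integer, '
            ' param_number integer, type text, name text)')
# kind = 22 marks a function; its parameters point back via parent_id.
cur.execute("insert into code_items values (1, null, 22, null, 'BOOL WINAPI', 'CloseHandle')")
cur.execute("insert into code_items values (2, 1, 23, 1, '_In_ HANDLE', 'hObject')")

cur.execute('select type, id from code_items where kind = 22 and name = ?',
            ('CloseHandle',))
func = cur.fetchone()
cur.execute('select param_number, type, name from code_items where parent_id = ?',
            (func[1],))
print(func[0], cur.fetchall())  # -> BOOL WINAPI [(1, '_In_ HANDLE', 'hObject')]
```
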
NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.query_info | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L124-L146 | train |

def query_info(self, name, like, kind):
    """Query the information of the name in the database."""
    kind = self._make_kind_id(kind)
    # Database from VS2015 does not have assoc_text.
    #
    # sql = 'select name, kind, file_id, type, assoc_text ' \
    #       'from code_items ' \
    #       'where name {} ?'.format('like' if like else '=')
    sql = 'select name, kind, file_id, type ' \
          'from code_items ' \
          'where name {} ?'.format('like' if like else '=')
    args = (name,)
    if like:
        sql += ' escape ?'
        args = (name, '\\')
    if kind:
        sql += ' and kind = ?'
        args = (name, kind)
    if like and kind:
        args = (name, '\\', kind)
    logging.debug('%s %s', sql, args)
    self.cursor.execute(sql, args)
    return self.cursor.fetchall(), self

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.query_names | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L149-L174 | train |

def query_names(self, name, like, kind):
    """
    Query function declarations in the files.
    """
    kind = self._make_kind_id(kind)
    sql = 'select id, name from files ' \
          'where leaf_name {} ?'.format('like' if like else '=')
    args = (name,)
    if like:
        sql += ' escape ?'
        args = (name, '\\')
    logging.debug('%s %s', sql, args)
    self.cursor.execute(sql, args)
    ids = self.cursor.fetchall()
    files = []
    for file_id, header in ids:
        sql = 'select name from code_items ' \
              'where file_id = ?'
        args = (file_id,)
        if kind:
            # Note the leading space; without it the SQL would read "?and kind".
            sql += ' and kind = ?'
            args = (file_id, kind)
        logging.debug('%s %s', sql, args)
        self.cursor.execute(sql, args)
        files.append((header, self.cursor.fetchall()))
    return files

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.query_struct | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L177-L189 | train |

def query_struct(self, name):
    """Query struct."""
    sql = 'select id, file_id, name from code_items '\
          'where name = ?'
    self.cursor.execute(sql, (name,))
    for i in self.cursor.fetchall():
        sql = 'select id, type, name from code_items ' \
              'where parent_id = ?'
        self.cursor.execute(sql, (i[0],))
        members = self.cursor.fetchall()
        if members:
            print(self.file_id_to_name(i[1]), i[2])
            print(members)

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.file_id_to_name | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L191-L199 | train |

def file_id_to_name(self, file_id):
    """Convert a file id to the file name."""
    sql = 'select name from files where id = ?'
    logging.debug('%s %s', sql, (file_id,))
    self.cursor.execute(sql, (file_id,))
    name = self.cursor.fetchone()
    if name:
        return name[0]
    return ''

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense._make_kind_id | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L201-L207 | train |

def _make_kind_id(self, name_or_id):
    """Make kind_id from kind_name or kind_id."""
    if not name_or_id:
        return None
    if name_or_id.isdigit():
        return name_or_id
    return self.kind_name_to_id(name_or_id)

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense.query_kinds | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L210-L229 | train |

def query_kinds(self, kind):
    """Query kinds."""
    logging.debug(_('querying %s'), kind)
    if kind is None:
        return self._kind_id_to_name.items()
    if kind.isdigit():
        kind_name = self.kind_id_to_name(int(kind))
        if kind_name:
            kind = (kind, kind_name)
        else:
            logging.warning(_('id not found: %s'), kind)
            kind = None
    else:
        kind_id = self.kind_name_to_id(kind)
        if kind_id:
            kind = (kind_id, kind)
        else:
            logging.warning(_('name not found: %s'), kind)
            kind = None
    return [kind]

NoviceLive/intellicoder | intellicoder/intellisense/database.py | IntelliSense._init_kind_converter | python | 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L239-L247 | train |

def _init_kind_converter(self):
    """Make a dictionary mapping kind ids to the names."""
    from ..utils import invert_dict
    kinds = self.session.query(Kind).all()
    self._kind_id_to_name = {
        kind.id: kind.name for kind in kinds
    }
    self._kind_name_to_id = invert_dict(self._kind_id_to_name)

NoviceLive/intellicoder | intellicoder/intellisense/database.py | SenseWithExport.make_export | def make_export(self, exports):
"""Populate library exported function data."""
sql = 'drop table if exists export'
logging.debug(sql)
self.cursor.execute(sql)
sql = 'create table if not exists export ' \
'(func text unique, module text)'
logging.debug(sql)
self.cursor.execute(sql)
for module in exports:
logging.debug(_('inserting exports from %s'), module)
sql = 'insert into export values (?, ?)'
for func in exports[module]:
if func:
try:
self.cursor.execute(sql, (func, module))
except sqlite3.IntegrityError:
pass
self.con.commit() | python | def make_export(self, exports):
"""Populate library exported function data."""
sql = 'drop table if exists export'
logging.debug(sql)
self.cursor.execute(sql)
sql = 'create table if not exists export ' \
'(func text unique, module text)'
logging.debug(sql)
self.cursor.execute(sql)
for module in exports:
logging.debug(_('inserting exports from %s'), module)
sql = 'insert into export values (?, ?)'
for func in exports[module]:
if func:
try:
self.cursor.execute(sql, (func, module))
except sqlite3.IntegrityError:
pass
self.con.commit() | [
"def",
"make_export",
"(",
"self",
",",
"exports",
")",
":",
"sql",
"=",
"'drop table if exists export'",
"logging",
".",
"debug",
"(",
"sql",
")",
"self",
".",
"cursor",
".",
"execute",
"(",
"sql",
")",
"sql",
"=",
"'create table if not exists export '",
"'(func text unique, module text)'",
"logging",
".",
"debug",
"(",
"sql",
")",
"self",
".",
"cursor",
".",
"execute",
"(",
"sql",
")",
"for",
"module",
"in",
"exports",
":",
"logging",
".",
"debug",
"(",
"_",
"(",
"'insering exports from %s'",
")",
",",
"module",
")",
"sql",
"=",
"'insert into export values (?, ?)'",
"for",
"func",
"in",
"exports",
"[",
"module",
"]",
":",
"if",
"func",
":",
"try",
":",
"self",
".",
"cursor",
".",
"execute",
"(",
"sql",
",",
"(",
"func",
",",
"module",
")",
")",
"except",
"sqlite3",
".",
"IntegrityError",
":",
"pass",
"self",
".",
"con",
".",
"commit",
"(",
")"
]
| Populate library exported function data. | [
"Populate",
"library",
"exported",
"function",
"data",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L267-L285 | train |
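make_export expects a mapping from module names to iterables of exported function names; a sketch with made-up names, assuming `sense` is a SenseWithExport instance:

exports = {
    'kernel32.dll': ['CreateFileA', 'CreateFileW'],
    'user32.dll': ['MessageBoxA'],
}
sense.make_export(exports)  # duplicate function names are skipped via the IntegrityError handler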
NoviceLive/intellicoder | intellicoder/intellisense/database.py | SenseWithExport.query_func_module | def query_func_module(self, func):
"""Query the module name of the specified function."""
exp = self.session.query(Export).filter_by(
func=func).first()
if exp:
return exp
logging.debug(_('Function not found: %s'), func)
alt = func + 'A'
exp = self.session.query(Export).filter_by(
func=alt).first()
if exp:
logging.warning(_('Using ANSI version: %s'), alt)
return exp
logging.warning(_('Not handled: %s or %s'), func, alt)
return None | python | def query_func_module(self, func):
"""Query the module name of the specified function."""
exp = self.session.query(Export).filter_by(
func=func).first()
if exp:
return exp
logging.debug(_('Function not found: %s'), func)
alt = func + 'A'
exp = self.session.query(Export).filter_by(
func=alt).first()
if exp:
logging.warning(_('Using ANSI version: %s'), alt)
return exp
logging.warning(_('Not handled: %s or %s'), func, alt)
return None | [
"def",
"query_func_module",
"(",
"self",
",",
"func",
")",
":",
"exp",
"=",
"self",
".",
"session",
".",
"query",
"(",
"Export",
")",
".",
"filter_by",
"(",
"func",
"=",
"func",
")",
".",
"first",
"(",
")",
"if",
"exp",
":",
"return",
"exp",
"logging",
".",
"debug",
"(",
"_",
"(",
"'Function not found: %s'",
")",
",",
"func",
")",
"alt",
"=",
"func",
"+",
"'A'",
"exp",
"=",
"self",
".",
"session",
".",
"query",
"(",
"Export",
")",
".",
"filter_by",
"(",
"func",
"=",
"alt",
")",
".",
"first",
"(",
")",
"if",
"exp",
":",
"logging",
".",
"warning",
"(",
"_",
"(",
"'Using ANSI version: %s'",
")",
",",
"alt",
")",
"return",
"exp",
"logging",
".",
"warning",
"(",
"_",
"(",
"'Not handled: %s or %s'",
")",
",",
"func",
",",
"alt",
")",
"return",
"None"
]
| Query the module name of the specified function. | [
"Query",
"the",
"module",
"name",
"of",
"the",
"specified",
"function",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L287-L301 | train |
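A hypothetical lookup illustrating the ANSI fallback; the function name is made up and `sense` is assumed to be a SenseWithExport instance:

exp = sense.query_func_module('MessageBox')  # may fall back to 'MessageBoxA'
if exp:
    print(exp.module)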
NoviceLive/intellicoder | intellicoder/intellisense/database.py | SenseWithExport.query_module_funcs | def query_module_funcs(self, module):
"""Query the functions in the specified module."""
funcs = self.session.query(Export).filter_by(
module=module).all()
return funcs | python | def query_module_funcs(self, module):
"""Query the functions in the specified module."""
funcs = self.session.query(Export).filter_by(
module=module).all()
return funcs | [
"def",
"query_module_funcs",
"(",
"self",
",",
"module",
")",
":",
"funcs",
"=",
"self",
".",
"session",
".",
"query",
"(",
"Export",
")",
".",
"filter_by",
"(",
"module",
"=",
"module",
")",
".",
"all",
"(",
")",
"return",
"funcs"
]
| Query the functions in the specified module. | [
"Query",
"the",
"functions",
"in",
"the",
"specified",
"module",
"."
]
| 6cac5ebfce65c370dbebe47756a1789b120ef982 | https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/database.py#L303-L307 | train |
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | _build_named_object_ids | def _build_named_object_ids(parameters):
"""Builds a list of NamedObjectId."""
if isinstance(parameters, str):
return [_build_named_object_id(parameters)]
return [_build_named_object_id(parameter) for parameter in parameters] | python | def _build_named_object_ids(parameters):
"""Builds a list of NamedObjectId."""
if isinstance(parameters, str):
return [_build_named_object_id(parameters)]
return [_build_named_object_id(parameter) for parameter in parameters] | [
"def",
"_build_named_object_ids",
"(",
"parameters",
")",
":",
"if",
"isinstance",
"(",
"parameters",
",",
"str",
")",
":",
"return",
"[",
"_build_named_object_id",
"(",
"parameters",
")",
"]",
"return",
"[",
"_build_named_object_id",
"(",
"parameter",
")",
"for",
"parameter",
"in",
"parameters",
"]"
]
| Builds a list of NamedObjectId. | [
"Builds",
"a",
"list",
"of",
"NamedObjectId",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L102-L106 | train |
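Both a single name and a list of names are accepted; a sketch with illustrative parameter names:

ids = _build_named_object_ids('/YSS/SIMULATOR/BatteryVoltage1')  # one-element list
ids = _build_named_object_ids(['/YSS/SIMULATOR/BatteryVoltage1',
                               '/YSS/SIMULATOR/BatteryVoltage2'])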
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | _build_command_ids | def _build_command_ids(issued_commands):
"""Builds a list of CommandId."""
if isinstance(issued_commands, IssuedCommand):
entry = issued_commands._proto.commandQueueEntry
return [entry.cmdId]
else:
return [issued_command._proto.commandQueueEntry.cmdId
for issued_command in issued_commands] | python | def _build_command_ids(issued_commands):
"""Builds a list of CommandId."""
if isinstance(issued_commands, IssuedCommand):
entry = issued_commands._proto.commandQueueEntry
return [entry.cmdId]
else:
return [issued_command._proto.commandQueueEntry.cmdId
for issued_command in issued_commands] | [
"def",
"_build_command_ids",
"(",
"issued_commands",
")",
":",
"if",
"isinstance",
"(",
"issued_commands",
",",
"IssuedCommand",
")",
":",
"entry",
"=",
"issued_commands",
".",
"_proto",
".",
"commandQueueEntry",
"return",
"[",
"entry",
".",
"cmdId",
"]",
"else",
":",
"return",
"[",
"issued_command",
".",
"_proto",
".",
"commandQueueEntry",
".",
"cmdId",
"for",
"issued_command",
"in",
"issued_commands",
"]"
]
| Builds a list of CommandId. | [
"Builds",
"a",
"list",
"of",
"CommandId",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L110-L117 | train |
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | CommandHistorySubscription._cache_key | def _cache_key(cmd_id):
"""commandId is a tuple. Make a 'unique' key for it."""
return '{}__{}__{}__{}'.format(
cmd_id.generationTime, cmd_id.origin, cmd_id.sequenceNumber,
cmd_id.commandName) | python | def _cache_key(cmd_id):
"""commandId is a tuple. Make a 'unique' key for it."""
return '{}__{}__{}__{}'.format(
cmd_id.generationTime, cmd_id.origin, cmd_id.sequenceNumber,
cmd_id.commandName) | [
"def",
"_cache_key",
"(",
"cmd_id",
")",
":",
"return",
"'{}__{}__{}__{}'",
".",
"format",
"(",
"cmd_id",
".",
"generationTime",
",",
"cmd_id",
".",
"origin",
",",
"cmd_id",
".",
"sequenceNumber",
",",
"cmd_id",
".",
"commandName",
")"
]
| commandId is a tuple. Make a 'unique' key for it. | [
"commandId",
"is",
"a",
"tuple",
".",
"Make",
"a",
"unique",
"key",
"for",
"it",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L203-L207 | train |
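The key simply joins the four CommandId fields with double underscores; for example, with illustrative values:

key = '{}__{}__{}__{}'.format(1548765000000, 'myhost', 5,
                              '/YSS/SIMULATOR/SWITCH_VOLTAGE_ON')
# -> '1548765000000__myhost__5__/YSS/SIMULATOR/SWITCH_VOLTAGE_ON'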
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | CommandHistorySubscription.get_command_history | def get_command_history(self, issued_command):
"""
Gets locally cached CommandHistory for the specified command.
:param .IssuedCommand issued_command: object representing a
previously issued command.
:rtype: .CommandHistory
"""
#pylint: disable=protected-access
entry = issued_command._proto.commandQueueEntry
key = self._cache_key(entry.cmdId)
if key in self._cache:
return self._cache[key]
return None | python | def get_command_history(self, issued_command):
"""
Gets locally cached CommandHistory for the specified command.
:param .IssuedCommand issued_command: object representing a
previously issued command.
:rtype: .CommandHistory
"""
#pylint: disable=protected-access
entry = issued_command._proto.commandQueueEntry
key = self._cache_key(entry.cmdId)
if key in self._cache:
return self._cache[key]
return None | [
"def",
"get_command_history",
"(",
"self",
",",
"issued_command",
")",
":",
"#pylint: disable=protected-access",
"entry",
"=",
"issued_command",
".",
"_proto",
".",
"commandQueueEntry",
"key",
"=",
"self",
".",
"_cache_key",
"(",
"entry",
".",
"cmdId",
")",
"if",
"key",
"in",
"self",
".",
"_cache",
":",
"return",
"self",
".",
"_cache",
"[",
"key",
"]",
"return",
"None"
]
| Gets locally cached CommandHistory for the specified command.
:param .IssuedCommand issued_command: object representing a
previously issued command.
:rtype: .CommandHistory | [
"Gets",
"locally",
"cached",
"CommandHistory",
"for",
"the",
"specified",
"command",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L219-L232 | train |
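A hypothetical lookup, assuming `subscription` is an active CommandHistorySubscription and `issued` was returned by issue_command:

history = subscription.get_command_history(issued)
if history is None:
    print('no update cached yet for this command')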
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ParameterSubscription.add | def add(self,
parameters,
abort_on_invalid=True,
send_from_cache=True):
"""
Add one or more parameters to this subscription.
:param parameters: Parameter(s) to be added
:type parameters: Union[str, str[]]
:param bool abort_on_invalid: If ``True`` one invalid parameter
means any other parameter in the
request will also not be added
to the subscription.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received.
"""
# Verify that we already know our assigned subscription_id
assert self.subscription_id != -1
if not parameters:
return
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = self.subscription_id
options.abortOnInvalid = abort_on_invalid
options.sendFromCache = send_from_cache
options.id.extend(_build_named_object_ids(parameters))
self._manager.send('subscribe', options) | python | def add(self,
parameters,
abort_on_invalid=True,
send_from_cache=True):
"""
Add one or more parameters to this subscription.
:param parameters: Parameter(s) to be added
:type parameters: Union[str, str[]]
:param bool abort_on_invalid: If ``True`` one invalid parameter
means any other parameter in the
request will also not be added
to the subscription.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received.
"""
# Verify that we already know our assigned subscription_id
assert self.subscription_id != -1
if not parameters:
return
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = self.subscription_id
options.abortOnInvalid = abort_on_invalid
options.sendFromCache = send_from_cache
options.id.extend(_build_named_object_ids(parameters))
self._manager.send('subscribe', options) | [
"def",
"add",
"(",
"self",
",",
"parameters",
",",
"abort_on_invalid",
"=",
"True",
",",
"send_from_cache",
"=",
"True",
")",
":",
"# Verify that we already know our assigned subscription_id",
"assert",
"self",
".",
"subscription_id",
"!=",
"-",
"1",
"if",
"not",
"parameters",
":",
"return",
"options",
"=",
"web_pb2",
".",
"ParameterSubscriptionRequest",
"(",
")",
"options",
".",
"subscriptionId",
"=",
"self",
".",
"subscription_id",
"options",
".",
"abortOnInvalid",
"=",
"abort_on_invalid",
"options",
".",
"sendFromCache",
"=",
"send_from_cache",
"options",
".",
"id",
".",
"extend",
"(",
"_build_named_object_ids",
"(",
"parameters",
")",
")",
"self",
".",
"_manager",
".",
"send",
"(",
"'subscribe'",
",",
"options",
")"
]
| Add one or more parameters to this subscription.
:param parameters: Parameter(s) to be added
:type parameters: Union[str, str[]]
:param bool abort_on_invalid: If ``True`` one invalid parameter
means any other parameter in the
request will also not be added
to the subscription.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received. | [
"Add",
"one",
"or",
"more",
"parameters",
"to",
"this",
"subscription",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L269-L300 | train |
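A hypothetical call on an established subscription; the parameter name is illustrative:

subscription.add(['/YSS/SIMULATOR/BatteryVoltage2'],
                 abort_on_invalid=False,  # skip invalid names instead of failing
                 send_from_cache=False)   # deliver only newly processed values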
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ParameterSubscription.remove | def remove(self, parameters):
"""
Remove one or more parameters from this subscription.
:param parameters: Parameter(s) to be removed
:type parameters: Union[str, str[]]
"""
# Verify that we already know our assigned subscription_id
assert self.subscription_id != -1
if not parameters:
return
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = self.subscription_id
options.id.extend(_build_named_object_ids(parameters))
self._manager.send('unsubscribe', options) | python | def remove(self, parameters):
"""
Remove one or more parameters from this subscription.
:param parameters: Parameter(s) to be removed
:type parameters: Union[str, str[]]
"""
# Verify that we already know our assigned subscription_id
assert self.subscription_id != -1
if not parameters:
return
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = self.subscription_id
options.id.extend(_build_named_object_ids(parameters))
self._manager.send('unsubscribe', options) | [
"def",
"remove",
"(",
"self",
",",
"parameters",
")",
":",
"# Verify that we already know our assigned subscription_id",
"assert",
"self",
".",
"subscription_id",
"!=",
"-",
"1",
"if",
"not",
"parameters",
":",
"return",
"options",
"=",
"web_pb2",
".",
"ParameterSubscriptionRequest",
"(",
")",
"options",
".",
"subscriptionId",
"=",
"self",
".",
"subscription_id",
"options",
".",
"id",
".",
"extend",
"(",
"_build_named_object_ids",
"(",
"parameters",
")",
")",
"self",
".",
"_manager",
".",
"send",
"(",
"'unsubscribe'",
",",
"options",
")"
]
| Remove one or more parameters from this subscription.
:param parameters: Parameter(s) to be removed
:type parameters: Union[str, str[]] | [
"Remove",
"one",
"or",
"more",
"parameters",
"from",
"this",
"subscription",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L302-L320 | train |
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.set_parameter_value | def set_parameter_value(self, parameter, value):
"""
Sets the value of the specified parameter.
:param str parameter: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param value: The value to set
"""
parameter = adapt_name_for_rest(parameter)
url = '/processors/{}/{}/parameters{}'.format(
self._instance, self._processor, parameter)
req = _build_value_proto(value)
self._client.put_proto(url, data=req.SerializeToString()) | python | def set_parameter_value(self, parameter, value):
"""
Sets the value of the specified parameter.
:param str parameter: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param value: The value to set
"""
parameter = adapt_name_for_rest(parameter)
url = '/processors/{}/{}/parameters{}'.format(
self._instance, self._processor, parameter)
req = _build_value_proto(value)
self._client.put_proto(url, data=req.SerializeToString()) | [
"def",
"set_parameter_value",
"(",
"self",
",",
"parameter",
",",
"value",
")",
":",
"parameter",
"=",
"adapt_name_for_rest",
"(",
"parameter",
")",
"url",
"=",
"'/processors/{}/{}/parameters{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"parameter",
")",
"req",
"=",
"_build_value_proto",
"(",
"value",
")",
"self",
".",
"_client",
".",
"put_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Sets the value of the specified parameter.
:param str parameter: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param value: The value to set | [
"Sets",
"the",
"value",
"of",
"the",
"specified",
"parameter",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L459-L471 | train |
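A hypothetical write, assuming `processor` is a ProcessorClient for a running processor; the parameter name and value are illustrative:

processor.set_parameter_value('/YSS/SIMULATOR/AllowCriticalTC1', True)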
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.set_parameter_values | def set_parameter_values(self, values):
"""
Sets the value of multiple parameters.
:param dict values: Values keyed by parameter name. This name can be either
a fully-qualified XTCE name or an alias in the format
``NAMESPACE/NAME``.
"""
req = rest_pb2.BulkSetParameterValueRequest()
for key in values:
item = req.request.add()
item.id.MergeFrom(_build_named_object_id(key))
item.value.MergeFrom(_build_value_proto(values[key]))
url = '/processors/{}/{}/parameters/mset'.format(
self._instance, self._processor)
self._client.post_proto(url, data=req.SerializeToString()) | python | def set_parameter_values(self, values):
"""
Sets the value of multiple parameters.
:param dict values: Values keyed by parameter name. This name can be either
a fully-qualified XTCE name or an alias in the format
``NAMESPACE/NAME``.
"""
req = rest_pb2.BulkSetParameterValueRequest()
for key in values:
item = req.request.add()
item.id.MergeFrom(_build_named_object_id(key))
item.value.MergeFrom(_build_value_proto(values[key]))
url = '/processors/{}/{}/parameters/mset'.format(
self._instance, self._processor)
self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"set_parameter_values",
"(",
"self",
",",
"values",
")",
":",
"req",
"=",
"rest_pb2",
".",
"BulkSetParameterValueRequest",
"(",
")",
"for",
"key",
"in",
"values",
":",
"item",
"=",
"req",
".",
"request",
".",
"add",
"(",
")",
"item",
".",
"id",
".",
"MergeFrom",
"(",
"_build_named_object_id",
"(",
"key",
")",
")",
"item",
".",
"value",
".",
"MergeFrom",
"(",
"_build_value_proto",
"(",
"values",
"[",
"key",
"]",
")",
")",
"url",
"=",
"'/processors/{}/{}/parameters/mset'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
")",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Sets the value of multiple parameters.
:param dict values: Values keyed by parameter name. This name can be either
a fully-qualified XTCE name or an alias in the format
``NAMESPACE/NAME``. | [
"Sets",
"the",
"value",
"of",
"multiple",
"parameters",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L473-L488 | train |
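A hypothetical bulk write with illustrative parameter names:

processor.set_parameter_values({
    '/YSS/SIMULATOR/AllowCriticalTC1': True,
    '/YSS/SIMULATOR/AllowCriticalTC2': False,
})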
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.issue_command | def issue_command(self, command, args=None, dry_run=False, comment=None):
"""
Issue the given command.
:param str command: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param dict args: named arguments (if the command requires these)
:param bool dry_run: If ``True`` the command is not actually issued. This
can be used to check if the server would generate
errors when preparing the command (for example
because an argument is missing).
:param str comment: Comment attached to the command.
:return: An object providing access to properties of the newly issued
command.
:rtype: .IssuedCommand
"""
req = rest_pb2.IssueCommandRequest()
req.sequenceNumber = SequenceGenerator.next()
req.origin = socket.gethostname()
req.dryRun = dry_run
if comment:
req.comment = comment
if args:
for key in args:
assignment = req.assignment.add()
assignment.name = key
assignment.value = str(args[key])
command = adapt_name_for_rest(command)
url = '/processors/{}/{}/commands{}'.format(
self._instance, self._processor, command)
response = self._client.post_proto(url, data=req.SerializeToString())
proto = rest_pb2.IssueCommandResponse()
proto.ParseFromString(response.content)
return IssuedCommand(proto, self) | python | def issue_command(self, command, args=None, dry_run=False, comment=None):
"""
Issue the given command.
:param str command: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param dict args: named arguments (if the command requires these)
:param bool dry_run: If ``True`` the command is not actually issued. This
can be used to check if the server would generate
errors when preparing the command (for example
because an argument is missing).
:param str comment: Comment attached to the command.
:return: An object providing access to properties of the newly issued
command.
:rtype: .IssuedCommand
"""
req = rest_pb2.IssueCommandRequest()
req.sequenceNumber = SequenceGenerator.next()
req.origin = socket.gethostname()
req.dryRun = dry_run
if comment:
req.comment = comment
if args:
for key in args:
assignment = req.assignment.add()
assignment.name = key
assignment.value = str(args[key])
command = adapt_name_for_rest(command)
url = '/processors/{}/{}/commands{}'.format(
self._instance, self._processor, command)
response = self._client.post_proto(url, data=req.SerializeToString())
proto = rest_pb2.IssueCommandResponse()
proto.ParseFromString(response.content)
return IssuedCommand(proto, self) | [
"def",
"issue_command",
"(",
"self",
",",
"command",
",",
"args",
"=",
"None",
",",
"dry_run",
"=",
"False",
",",
"comment",
"=",
"None",
")",
":",
"req",
"=",
"rest_pb2",
".",
"IssueCommandRequest",
"(",
")",
"req",
".",
"sequenceNumber",
"=",
"SequenceGenerator",
".",
"next",
"(",
")",
"req",
".",
"origin",
"=",
"socket",
".",
"gethostname",
"(",
")",
"req",
".",
"dryRun",
"=",
"dry_run",
"if",
"comment",
":",
"req",
".",
"comment",
"=",
"comment",
"if",
"args",
":",
"for",
"key",
"in",
"args",
":",
"assignment",
"=",
"req",
".",
"assignment",
".",
"add",
"(",
")",
"assignment",
".",
"name",
"=",
"key",
"assignment",
".",
"value",
"=",
"str",
"(",
"args",
"[",
"key",
"]",
")",
"command",
"=",
"adapt_name_for_rest",
"(",
"command",
")",
"url",
"=",
"'/processors/{}/{}/commands{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"command",
")",
"response",
"=",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")",
"proto",
"=",
"rest_pb2",
".",
"IssueCommandResponse",
"(",
")",
"proto",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"IssuedCommand",
"(",
"proto",
",",
"self",
")"
]
| Issue the given command.
:param str command: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:param dict args: named arguments (if the command requires these)
:param bool dry_run: If ``True`` the command is not actually issued. This
can be used to check if the server would generate
errors when preparing the command (for example
because an argument is missing).
:param str comment: Comment attached to the command.
:return: An object providing access to properties of the newly issued
command.
:rtype: .IssuedCommand | [
"Issue",
"the",
"given",
"command"
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L490-L524 | train |
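A hypothetical command issue; the command name and argument are illustrative (argument values are stringified internally):

issued = processor.issue_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_ON',
                                 args={'voltage_num': 1},
                                 comment='issued from a maintenance script')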
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.list_alarms | def list_alarms(self, start=None, stop=None):
"""
Lists the active alarms.
Note that this does not query the archive. Only active alarms on the
current processor are returned.
:param ~datetime.datetime start: Minimum trigger time of the returned alarms (inclusive)
:param ~datetime.datetime stop: Maximum trigger time of the returned alarms (exclusive)
:rtype: ~collections.Iterable[.Alarm]
"""
# TODO implement continuation token on server
params = {
'order': 'asc'
}
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
url = '/processors/{}/{}/alarms'.format(self._instance, self._processor)
response = self._client.get_proto(path=url, params=params)
message = rest_pb2.ListAlarmsResponse()
message.ParseFromString(response.content)
alarms = getattr(message, 'alarm')
return iter([Alarm(alarm) for alarm in alarms]) | python | def list_alarms(self, start=None, stop=None):
"""
Lists the active alarms.
Note that this does not query the archive. Only active alarms on the
current processor are returned.
:param ~datetime.datetime start: Minimum trigger time of the returned alarms (inclusive)
:param ~datetime.datetime stop: Maximum trigger time of the returned alarms (exclusive)
:rtype: ~collections.Iterable[.Alarm]
"""
# TODO implement continuation token on server
params = {
'order': 'asc'
}
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
url = '/processors/{}/{}/alarms'.format(self._instance, self._processor)
response = self._client.get_proto(path=url, params=params)
message = rest_pb2.ListAlarmsResponse()
message.ParseFromString(response.content)
alarms = getattr(message, 'alarm')
return iter([Alarm(alarm) for alarm in alarms]) | [
"def",
"list_alarms",
"(",
"self",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
")",
":",
"# TODO implement continuation token on server",
"params",
"=",
"{",
"'order'",
":",
"'asc'",
"}",
"if",
"start",
"is",
"not",
"None",
":",
"params",
"[",
"'start'",
"]",
"=",
"to_isostring",
"(",
"start",
")",
"if",
"stop",
"is",
"not",
"None",
":",
"params",
"[",
"'stop'",
"]",
"=",
"to_isostring",
"(",
"stop",
")",
"# Server does not do pagination on listings of this resource.",
"# Return an iterator anyway for similarity with other API methods",
"url",
"=",
"'/processors/{}/{}/alarms'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"path",
"=",
"url",
",",
"params",
"=",
"params",
")",
"message",
"=",
"rest_pb2",
".",
"ListAlarmsResponse",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"alarms",
"=",
"getattr",
"(",
"message",
",",
"'alarm'",
")",
"return",
"iter",
"(",
"[",
"Alarm",
"(",
"alarm",
")",
"for",
"alarm",
"in",
"alarms",
"]",
")"
]
| Lists the active alarms.
Note that this does not query the archive. Only active alarms on the
current processor are returned.
:param ~datetime.datetime start: Minimum trigger time of the returned alarms (inclusive)
:param ~datetime.datetime stop: Maximum trigger time of the returned alarms (exclusive)
:rtype: ~collections.Iterable[.Alarm] | [
"Lists",
"the",
"active",
"alarms",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L526-L552 | train |
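A hypothetical query for alarms triggered during the last hour:

from datetime import datetime, timedelta

now = datetime.utcnow()
for alarm in processor.list_alarms(start=now - timedelta(hours=1), stop=now):
    print(alarm)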
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.set_default_calibrator | def set_default_calibrator(self, parameter, type, data): # pylint: disable=W0622
"""
Apply a calibrator while processing raw values of the specified
parameter. If there is already a default calibrator associated
to this parameter, that calibrator gets replaced.
.. note::
Contextual calibrators take precedence over the default calibrator
See :meth:`set_calibrators` for setting contextual calibrators.
Two types of calibrators can be applied:
* Polynomial calibrators apply a polynomial expression of the form:
`y = a + bx + cx^2 + ...`.
The `data` argument must be an array of floats ``[a, b, c, ...]``.
* Spline calibrators interpolate the raw value between a set of points
which represent a linear curve.
The `data` argument must be an array of ``[x, y]`` points.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param str type: One of ``polynomial`` or ``spline``.
:param data: Calibration definition for the selected type.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_CALIBRATOR
if type:
_add_calib(req.defaultCalibrator, type, data)
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | python | def set_default_calibrator(self, parameter, type, data): # pylint: disable=W0622
"""
Apply a calibrator while processing raw values of the specified
parameter. If there is already a default calibrator associated
to this parameter, that calibrator gets replaced.
.. note::
Contextual calibrators take precedence over the default calibrator
See :meth:`set_calibrators` for setting contextual calibrators.
Two types of calibrators can be applied:
* Polynomial calibrators apply a polynomial expression of the form:
`y = a + bx + cx^2 + ...`.
The `data` argument must be an array of floats ``[a, b, c, ...]``.
* Spline calibrators interpolate the raw value between a set of points
which represent a linear curve.
The `data` argument must be an array of ``[x, y]`` points.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param str type: One of ``polynomial`` or ``spline``.
:param data: Calibration definition for the selected type.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_CALIBRATOR
if type:
_add_calib(req.defaultCalibrator, type, data)
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"set_default_calibrator",
"(",
"self",
",",
"parameter",
",",
"type",
",",
"data",
")",
":",
"# pylint: disable=W0622",
"req",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
"(",
")",
"req",
".",
"action",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
".",
"SET_DEFAULT_CALIBRATOR",
"if",
"type",
":",
"_add_calib",
"(",
"req",
".",
"defaultCalibrator",
",",
"type",
",",
"data",
")",
"url",
"=",
"'/mdb/{}/{}/parameters/{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"parameter",
")",
"response",
"=",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Apply a calibrator while processing raw values of the specified
parameter. If there is already a default calibrator associated
to this parameter, that calibrator gets replaced.
.. note::
Contextual calibrators take precedence over the default calibrator
See :meth:`set_calibrators` for setting contextual calibrators.
Two types of calibrators can be applied:
* Polynomial calibrators apply a polynomial expression of the form:
`y = a + bx + cx^2 + ...`.
The `data` argument must be an array of floats ``[a, b, c, ...]``.
* Spline calibrators interpolate the raw value between a set of points
which represent a linear curve.
The `data` argument must be an array of ``[x, y]`` points.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param str type: One of ``polynomial`` or ``spline``.
:param data: Calibration definition for the selected type. | [
"Apply",
"a",
"calibrator",
"while",
"processing",
"raw",
"values",
"of",
"the",
"specified",
"parameter",
".",
"If",
"there",
"is",
"already",
"a",
"default",
"calibrator",
"associated",
"to",
"this",
"parameter",
"that",
"calibrator",
"gets",
"replaced",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L554-L589 | train |
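Sketches of both calibrator types; the parameter name and numbers are illustrative:

# Polynomial y = 0.5 + 2x, passed as the coefficient list [a, b, ...]
processor.set_default_calibrator('/YSS/SIMULATOR/BatteryVoltage1',
                                 'polynomial', [0.5, 2])
# Spline interpolating between the points (0, 0) and (100, 5)
processor.set_default_calibrator('/YSS/SIMULATOR/BatteryVoltage1',
                                 'spline', [[0, 0], [100, 5]])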
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.reset_calibrators | def reset_calibrators(self, parameter):
"""
Reset all calibrators for the specified parameter to their original MDB value.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS
calib_info = req.defaultCalibrator
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | python | def reset_calibrators(self, parameter):
"""
Reset all calibrators for the specified parameter to their original MDB value.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS
calib_info = req.defaultCalibrator
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"reset_calibrators",
"(",
"self",
",",
"parameter",
")",
":",
"req",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
"(",
")",
"req",
".",
"action",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
".",
"RESET_CALIBRATORS",
"calib_info",
"=",
"req",
".",
"defaultCalibrator",
"url",
"=",
"'/mdb/{}/{}/parameters/{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"parameter",
")",
"response",
"=",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Reset all calibrators for the specified parameter to their original MDB value. | [
"Reset",
"all",
"calibrators",
"for",
"the",
"specified",
"parameter",
"to",
"their",
"original",
"MDB",
"value",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L642-L651 | train |
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.set_default_alarm_ranges | def set_default_alarm_ranges(self, parameter, watch=None, warning=None,
distress=None, critical=None, severe=None,
min_violations=1):
"""
Generate out-of-limit alarms for a parameter using the specified
alarm ranges.
This replaces any previous default alarms on this parameter.
.. note::
Contextual range sets take precedence over the default alarm
ranges. See :meth:`set_alarm_range_sets` for setting contextual
range sets.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param (float,float) watch: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) warning: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) distress: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) critical: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) severe: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param int min_violations: Minimum violations before an alarm is
generated.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_ALARMS
if(watch or warning or distress or critical or severe):
_add_alarms(req.defaultAlarm, watch, warning, distress,
            critical, severe, min_violations)
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | python | def set_default_alarm_ranges(self, parameter, watch=None, warning=None,
distress=None, critical=None, severe=None,
min_violations=1):
"""
Generate out-of-limit alarms for a parameter using the specified
alarm ranges.
This replaces any previous default alarms on this parameter.
.. note::
Contextual range sets take precedence over the default alarm
ranges. See :meth:`set_alarm_range_sets` for setting contextual
range sets.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param (float,float) watch: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) warning: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) distress: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) critical: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) severe: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param int min_violations: Minimum violations before an alarm is
generated.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.SET_DEFAULT_ALARMS
if(watch or warning or distress or critical or severe):
_add_alarms(req.defaultAlarm, watch, warning, distress,
            critical, severe, min_violations)
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"set_default_alarm_ranges",
"(",
"self",
",",
"parameter",
",",
"watch",
"=",
"None",
",",
"warning",
"=",
"None",
",",
"distress",
"=",
"None",
",",
"critical",
"=",
"None",
",",
"severe",
"=",
"None",
",",
"min_violations",
"=",
"1",
")",
":",
"req",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
"(",
")",
"req",
".",
"action",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
".",
"SET_DEFAULT_ALARMS",
"if",
"(",
"watch",
"or",
"warning",
"or",
"distress",
"or",
"critical",
"or",
"severe",
")",
":",
"_add_alarms",
"(",
"req",
".",
"defaultAlarm",
",",
"watch",
",",
"warning",
",",
"distress",
",",
"critical",
",",
"severe",
",",
"min_violations",
")",
"url",
"=",
"'/mdb/{}/{}/parameters/{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"parameter",
")",
"response",
"=",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Generate out-of-limit alarms for a parameter using the specified
alarm ranges.
This replaces any previous default alarms on this parameter.
.. note::
Contextual range sets take precedence over the default alarm
ranges. See :meth:`set_alarm_range_sets` for setting contextual
range sets.
:param str parameter: Either a fully-qualified XTCE name or an alias
in the format ``NAMESPACE/NAME``.
:param (float,float) watch: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) warning: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) distress: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) critical: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param (float,float) severe: Range expressed as a tuple ``(lo, hi)``
where lo and hi are assumed exclusive.
:param int min_violations: Minimum violations before an alarm is
generated. | [
"Generate",
"out",
"-",
"of",
"-",
"limit",
"alarms",
"for",
"a",
"parameter",
"using",
"the",
"specified",
"alarm",
"ranges",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L654-L691 | train |
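A hypothetical range assignment; each range is a (lo, hi) tuple with both bounds exclusive, and the values are illustrative:

processor.set_default_alarm_ranges('/YSS/SIMULATOR/BatteryVoltage1',
                                   warning=(10, 30),
                                   critical=(5, 35),
                                   min_violations=2)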
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.reset_alarm_ranges | def reset_alarm_ranges(self, parameter):
"""
Reset all alarm limits for the specified parameter to their original MDB value.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.RESET_ALARMS
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | python | def reset_alarm_ranges(self, parameter):
"""
Reset all alarm limits for the specified parameter to their original MDB value.
"""
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.RESET_ALARMS
url = '/mdb/{}/{}/parameters/{}'.format(
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"reset_alarm_ranges",
"(",
"self",
",",
"parameter",
")",
":",
"req",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
"(",
")",
"req",
".",
"action",
"=",
"mdb_pb2",
".",
"ChangeParameterRequest",
".",
"RESET_ALARMS",
"url",
"=",
"'/mdb/{}/{}/parameters/{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"parameter",
")",
"response",
"=",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Reset all alarm limits for the specified parameter to their original MDB value. | [
"Reset",
"all",
"alarm",
"limits",
"for",
"the",
"specified",
"parameter",
"to",
"their",
"original",
"MDB",
"value",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L742-L751 | train |
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.acknowledge_alarm | def acknowledge_alarm(self, alarm, comment=None):
"""
Acknowledges a specific alarm associated with a parameter.
:param alarm: Alarm instance
:type alarm: :class:`.Alarm`
:param str comment: Optional comment to associate with the state
change.
"""
url = '/processors/{}/{}/parameters{}/alarms/{}'.format(
self._instance, self._processor, alarm.name, alarm.sequence_number)
req = rest_pb2.EditAlarmRequest()
req.state = 'acknowledged'
if comment is not None:
req.comment = comment
self._client.put_proto(url, data=req.SerializeToString()) | python | def acknowledge_alarm(self, alarm, comment=None):
"""
Acknowledges a specific alarm associated with a parameter.
:param alarm: Alarm instance
:type alarm: :class:`.Alarm`
:param str comment: Optional comment to associate with the state
change.
"""
url = '/processors/{}/{}/parameters{}/alarms/{}'.format(
self._instance, self._processor, alarm.name, alarm.sequence_number)
req = rest_pb2.EditAlarmRequest()
req.state = 'acknowledged'
if comment is not None:
req.comment = comment
self._client.put_proto(url, data=req.SerializeToString()) | [
"def",
"acknowledge_alarm",
"(",
"self",
",",
"alarm",
",",
"comment",
"=",
"None",
")",
":",
"url",
"=",
"'/processors/{}/{}/parameters{}/alarms/{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"_processor",
",",
"alarm",
".",
"name",
",",
"alarm",
".",
"sequence_number",
")",
"req",
"=",
"rest_pb2",
".",
"EditAlarmRequest",
"(",
")",
"req",
".",
"state",
"=",
"'acknowledged'",
"if",
"comment",
"is",
"not",
"None",
":",
"req",
".",
"comment",
"=",
"comment",
"self",
".",
"_client",
".",
"put_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Acknowledges a specific alarm associated with a parameter.
:param alarm: Alarm instance
:type alarm: :class:`.Alarm`
:param str comment: Optional comment to associate with the state
change. | [
"Acknowledges",
"a",
"specific",
"alarm",
"associated",
"with",
"a",
"parameter",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L758-L773 | train |
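A hypothetical acknowledgement of every currently active alarm:

for alarm in processor.list_alarms():
    processor.acknowledge_alarm(alarm, comment='reviewed by operator')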
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.create_command_history_subscription | def create_command_history_subscription(self,
issued_command=None,
on_data=None,
timeout=60):
"""
Create a new command history subscription.
:param .IssuedCommand[] issued_command: (Optional) Previously issued
commands. If not provided updates
from any command are received.
:param on_data: Function that gets called with :class:`.CommandHistory`
updates.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: Future that can be used to manage the background websocket
subscription
:rtype: .CommandHistorySubscription
"""
options = web_pb2.CommandHistorySubscriptionRequest()
options.ignorePastCommands = True
if issued_command:
options.commandId.extend(_build_command_ids(issued_command))
manager = WebSocketSubscriptionManager(
self._client, resource='cmdhistory', options=options)
# Represent subscription as a future
subscription = CommandHistorySubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_cmdhist_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | python | def create_command_history_subscription(self,
issued_command=None,
on_data=None,
timeout=60):
"""
Create a new command history subscription.
:param .IssuedCommand[] issued_command: (Optional) Previously issued
commands. If not provided updates
from any command are received.
:param on_data: Function that gets called with :class:`.CommandHistory`
updates.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: Future that can be used to manage the background websocket
subscription
:rtype: .CommandHistorySubscription
"""
options = web_pb2.CommandHistorySubscriptionRequest()
options.ignorePastCommands = True
if issued_command:
options.commandId.extend(_build_command_ids(issued_command))
manager = WebSocketSubscriptionManager(
self._client, resource='cmdhistory', options=options)
# Represent subscription as a future
subscription = CommandHistorySubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_cmdhist_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | [
"def",
"create_command_history_subscription",
"(",
"self",
",",
"issued_command",
"=",
"None",
",",
"on_data",
"=",
"None",
",",
"timeout",
"=",
"60",
")",
":",
"options",
"=",
"web_pb2",
".",
"CommandHistorySubscriptionRequest",
"(",
")",
"options",
".",
"ignorePastCommands",
"=",
"True",
"if",
"issued_command",
":",
"options",
".",
"commandId",
".",
"extend",
"(",
"_build_command_ids",
"(",
"issued_command",
")",
")",
"manager",
"=",
"WebSocketSubscriptionManager",
"(",
"self",
".",
"_client",
",",
"resource",
"=",
"'cmdhistory'",
",",
"options",
"=",
"options",
")",
"# Represent subscription as a future",
"subscription",
"=",
"CommandHistorySubscription",
"(",
"manager",
")",
"wrapped_callback",
"=",
"functools",
".",
"partial",
"(",
"_wrap_callback_parse_cmdhist_data",
",",
"subscription",
",",
"on_data",
")",
"manager",
".",
"open",
"(",
"wrapped_callback",
",",
"instance",
"=",
"self",
".",
"_instance",
",",
"processor",
"=",
"self",
".",
"_processor",
")",
"# Wait until a reply or exception is received",
"subscription",
".",
"reply",
"(",
"timeout",
"=",
"timeout",
")",
"return",
"subscription"
]
| Create a new command history subscription.
:param .IssuedCommand[] issued_command: (Optional) Previously issued
commands. If not provided updates
from any command are received.
:param on_data: Function that gets called with :class:`.CommandHistory`
updates.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: Future that can be used to manage the background websocket
subscription
:rtype: .CommandHistorySubscription | [
"Create",
"a",
"new",
"command",
"history",
"subscription",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L775-L813 | train |
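A hypothetical callback-driven subscription covering updates for all commands:

def on_cmdhist(cmdhist):
    print('command history update:', cmdhist)

subscription = processor.create_command_history_subscription(on_data=on_cmdhist)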
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.create_parameter_subscription | def create_parameter_subscription(self,
parameters,
on_data=None,
abort_on_invalid=True,
update_on_expiration=False,
send_from_cache=True,
timeout=60):
"""
Create a new parameter subscription.
:param str[] parameters: Parameter names (or aliases).
:param on_data: Function that gets called with :class:`.ParameterData`
updates.
:param bool abort_on_invalid: If ``True`` an error is generated when
invalid parameters are specified.
:param bool update_on_expiration: If ``True`` an update is received
when a parameter value has become
expired. This update holds the
same value as the last known valid
value, but with status set to
``EXPIRED``.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .ParameterSubscription
"""
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = -1 # This means 'create a new subscription'
options.abortOnInvalid = abort_on_invalid
options.updateOnExpiration = update_on_expiration
options.sendFromCache = send_from_cache
options.id.extend(_build_named_object_ids(parameters))
manager = WebSocketSubscriptionManager(
self._client, resource='parameter', options=options)
# Represent subscription as a future
subscription = ParameterSubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_parameter_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | python | def create_parameter_subscription(self,
parameters,
on_data=None,
abort_on_invalid=True,
update_on_expiration=False,
send_from_cache=True,
timeout=60):
"""
Create a new parameter subscription.
:param str[] parameters: Parameter names (or aliases).
:param on_data: Function that gets called with :class:`.ParameterData`
updates.
:param bool abort_on_invalid: If ``True`` an error is generated when
invalid parameters are specified.
:param bool update_on_expiration: If ``True`` an update is received
when a parameter value has become
expired. This update holds the
same value as the last known valid
value, but with status set to
``EXPIRED``.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .ParameterSubscription
"""
options = web_pb2.ParameterSubscriptionRequest()
options.subscriptionId = -1 # This means 'create a new subscription'
options.abortOnInvalid = abort_on_invalid
options.updateOnExpiration = update_on_expiration
options.sendFromCache = send_from_cache
options.id.extend(_build_named_object_ids(parameters))
manager = WebSocketSubscriptionManager(
self._client, resource='parameter', options=options)
# Represent subscription as a future
subscription = ParameterSubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_parameter_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | [
"def",
"create_parameter_subscription",
"(",
"self",
",",
"parameters",
",",
"on_data",
"=",
"None",
",",
"abort_on_invalid",
"=",
"True",
",",
"update_on_expiration",
"=",
"False",
",",
"send_from_cache",
"=",
"True",
",",
"timeout",
"=",
"60",
")",
":",
"options",
"=",
"web_pb2",
".",
"ParameterSubscriptionRequest",
"(",
")",
"options",
".",
"subscriptionId",
"=",
"-",
"1",
"# This means 'create a new subscription'",
"options",
".",
"abortOnInvalid",
"=",
"abort_on_invalid",
"options",
".",
"updateOnExpiration",
"=",
"update_on_expiration",
"options",
".",
"sendFromCache",
"=",
"send_from_cache",
"options",
".",
"id",
".",
"extend",
"(",
"_build_named_object_ids",
"(",
"parameters",
")",
")",
"manager",
"=",
"WebSocketSubscriptionManager",
"(",
"self",
".",
"_client",
",",
"resource",
"=",
"'parameter'",
",",
"options",
"=",
"options",
")",
"# Represent subscription as a future",
"subscription",
"=",
"ParameterSubscription",
"(",
"manager",
")",
"wrapped_callback",
"=",
"functools",
".",
"partial",
"(",
"_wrap_callback_parse_parameter_data",
",",
"subscription",
",",
"on_data",
")",
"manager",
".",
"open",
"(",
"wrapped_callback",
",",
"instance",
"=",
"self",
".",
"_instance",
",",
"processor",
"=",
"self",
".",
"_processor",
")",
"# Wait until a reply or exception is received",
"subscription",
".",
"reply",
"(",
"timeout",
"=",
"timeout",
")",
"return",
"subscription"
]
| Create a new parameter subscription.
:param str[] parameters: Parameter names (or aliases).
:param on_data: Function that gets called with :class:`.ParameterData`
updates.
:param bool abort_on_invalid: If ``True`` an error is generated when
invalid parameters are specified.
:param bool update_on_expiration: If ``True`` an update is received
when a parameter value has become
expired. This update holds the
same value as the last known valid
value, but with status set to
``EXPIRED``.
:param bool send_from_cache: If ``True`` the last processed parameter
value is sent from parameter cache.
When ``False`` only newly processed
parameters are received.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .ParameterSubscription | [
"Create",
"a",
"new",
"parameter",
"subscription",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L815-L869 | train |
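A hypothetical subscription to a single parameter (name illustrative); the callback receives ParameterData updates:

def on_data(data):
    print(data)

subscription = processor.create_parameter_subscription(
    ['/YSS/SIMULATOR/BatteryVoltage1'], on_data=on_data)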
yamcs/yamcs-python | yamcs-client/yamcs/tmtc/client.py | ProcessorClient.create_alarm_subscription | def create_alarm_subscription(self,
on_data=None,
timeout=60):
"""
Create a new alarm subscription.
:param on_data: Function that gets called with :class:`.AlarmEvent`
updates.
:param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .AlarmSubscription
"""
manager = WebSocketSubscriptionManager(self._client, resource='alarms')
# Represent subscription as a future
subscription = AlarmSubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_alarm_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | python | def create_alarm_subscription(self,
on_data=None,
timeout=60):
"""
Create a new alarm subscription.
:param on_data: Function that gets called with :class:`.AlarmEvent`
updates.
        :param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .AlarmSubscription
"""
manager = WebSocketSubscriptionManager(self._client, resource='alarms')
# Represent subscription as a future
subscription = AlarmSubscription(manager)
wrapped_callback = functools.partial(
_wrap_callback_parse_alarm_data, subscription, on_data)
manager.open(wrapped_callback, instance=self._instance,
processor=self._processor)
# Wait until a reply or exception is received
subscription.reply(timeout=timeout)
return subscription | [
"def",
"create_alarm_subscription",
"(",
"self",
",",
"on_data",
"=",
"None",
",",
"timeout",
"=",
"60",
")",
":",
"manager",
"=",
"WebSocketSubscriptionManager",
"(",
"self",
".",
"_client",
",",
"resource",
"=",
"'alarms'",
")",
"# Represent subscription as a future",
"subscription",
"=",
"AlarmSubscription",
"(",
"manager",
")",
"wrapped_callback",
"=",
"functools",
".",
"partial",
"(",
"_wrap_callback_parse_alarm_data",
",",
"subscription",
",",
"on_data",
")",
"manager",
".",
"open",
"(",
"wrapped_callback",
",",
"instance",
"=",
"self",
".",
"_instance",
",",
"processor",
"=",
"self",
".",
"_processor",
")",
"# Wait until a reply or exception is received",
"subscription",
".",
"reply",
"(",
"timeout",
"=",
"timeout",
")",
"return",
"subscription"
]
| Create a new alarm subscription.
:param on_data: Function that gets called with :class:`.AlarmEvent`
updates.
        :param float timeout: The number of seconds to wait for the request
to complete.
:return: A Future that can be used to manage the background websocket
subscription.
:rtype: .AlarmSubscription | [
"Create",
"a",
"new",
"alarm",
"subscription",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/client.py#L871-L900 | train |
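The alarm variant follows the same future-returning pattern; a sketch under the same placeholder assumptions as the parameter example above.

from yamcs.client import YamcsClient

client = YamcsClient('localhost:8090')  # placeholder server address
processor = client.get_processor(instance='simulator', processor='realtime')

def log_alarm(alarm_event):
    # Called once per AlarmEvent pushed by the server
    print(alarm_event)

subscription = processor.create_alarm_subscription(on_data=log_alarm)
subscription.cancel()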
RedHatQE/Sentaku | examples/todo_example/api.py | get_by | def get_by(self, name):
"""get element by name"""
return next((item for item in self if item.name == name), None) | python | def get_by(self, name):
"""get element by name"""
return next((item for item in self if item.name == name), None) | [
"def",
"get_by",
"(",
"self",
",",
"name",
")",
":",
"return",
"next",
"(",
"(",
"item",
"for",
"item",
"in",
"self",
"if",
"item",
".",
"name",
"==",
"name",
")",
",",
"None",
")"
]
| get element by name | [
"get",
"element",
"by",
"name"
]
| b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c | https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/api.py#L14-L16 | train |
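The body above is the standard next()-with-default idiom: lazily scan a generator expression and fall back to None when nothing matches. A self-contained illustration with a made-up item type:

from collections import namedtuple

Item = namedtuple('Item', ['name', 'done'])
items = [Item('write docs', False), Item('ship release', True)]

def get_by(collection, name):
    # Returns the first element whose .name matches, or None when absent;
    # scanning stops as soon as a match is found
    return next((item for item in collection if item.name == name), None)

assert get_by(items, 'ship release').done is True
assert get_by(items, 'missing') is None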
lowandrew/OLCTools | spadespipeline/quality.py | Quality.fastqc | def fastqc(self):
"""Run fastqc system calls"""
while True: # while daemon
threadlock = threading.Lock()
# Unpack the variables from the queue
(sample, systemcall, outputdir, fastqcreads) = self.qcqueue.get()
# Check to see if the output HTML file already exists
try:
_ = glob(os.path.join(outputdir, '*.html'))[0]
except IndexError:
# Make the output directory
make_path(outputdir)
# Run the system calls
outstr = str()
errstr = str()
out, err = run_subprocess(systemcall)
outstr += out
errstr += err
out, err = run_subprocess(fastqcreads)
outstr += out
errstr += err
# Acquire thread lock, and write the logs to file
threadlock.acquire()
write_to_logfile(systemcall, systemcall, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(fastqcreads, fastqcreads, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(outstr, errstr, self.logfile, sample.general.logout, sample.general.logerr, None, None)
threadlock.release()
# Rename the outputs
try:
shutil.move(os.path.join(outputdir, 'stdin_fastqc.html'),
os.path.join(outputdir, '{}_fastqc.html'.format(sample.name)))
shutil.move(os.path.join(outputdir, 'stdin_fastqc.zip'),
os.path.join(outputdir, '{}_fastqc.zip'.format(sample.name)))
except IOError:
pass
# Signal to qcqueue that job is done
self.qcqueue.task_done() | python | def fastqc(self):
"""Run fastqc system calls"""
while True: # while daemon
threadlock = threading.Lock()
# Unpack the variables from the queue
(sample, systemcall, outputdir, fastqcreads) = self.qcqueue.get()
# Check to see if the output HTML file already exists
try:
_ = glob(os.path.join(outputdir, '*.html'))[0]
except IndexError:
# Make the output directory
make_path(outputdir)
# Run the system calls
outstr = str()
errstr = str()
out, err = run_subprocess(systemcall)
outstr += out
errstr += err
out, err = run_subprocess(fastqcreads)
outstr += out
errstr += err
# Acquire thread lock, and write the logs to file
threadlock.acquire()
write_to_logfile(systemcall, systemcall, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(fastqcreads, fastqcreads, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(outstr, errstr, self.logfile, sample.general.logout, sample.general.logerr, None, None)
threadlock.release()
# Rename the outputs
try:
shutil.move(os.path.join(outputdir, 'stdin_fastqc.html'),
os.path.join(outputdir, '{}_fastqc.html'.format(sample.name)))
shutil.move(os.path.join(outputdir, 'stdin_fastqc.zip'),
os.path.join(outputdir, '{}_fastqc.zip'.format(sample.name)))
except IOError:
pass
# Signal to qcqueue that job is done
self.qcqueue.task_done() | [
"def",
"fastqc",
"(",
"self",
")",
":",
"while",
"True",
":",
"# while daemon",
"threadlock",
"=",
"threading",
".",
"Lock",
"(",
")",
"# Unpack the variables from the queue",
"(",
"sample",
",",
"systemcall",
",",
"outputdir",
",",
"fastqcreads",
")",
"=",
"self",
".",
"qcqueue",
".",
"get",
"(",
")",
"# Check to see if the output HTML file already exists",
"try",
":",
"_",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'*.html'",
")",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"# Make the output directory",
"make_path",
"(",
"outputdir",
")",
"# Run the system calls",
"outstr",
"=",
"str",
"(",
")",
"errstr",
"=",
"str",
"(",
")",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"systemcall",
")",
"outstr",
"+=",
"out",
"errstr",
"+=",
"err",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"fastqcreads",
")",
"outstr",
"+=",
"out",
"errstr",
"+=",
"err",
"# Acquire thread lock, and write the logs to file",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"systemcall",
",",
"systemcall",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"fastqcreads",
",",
"fastqcreads",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"outstr",
",",
"errstr",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"threadlock",
".",
"release",
"(",
")",
"# Rename the outputs",
"try",
":",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'stdin_fastqc.html'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'{}_fastqc.html'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
")",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'stdin_fastqc.zip'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'{}_fastqc.zip'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
")",
"except",
"IOError",
":",
"pass",
"# Signal to qcqueue that job is done",
"self",
".",
"qcqueue",
".",
"task_done",
"(",
")"
]
| Run fastqc system calls | [
"Run",
"fastqc",
"system",
"calls"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L228-L266 | train |
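A standalone sketch of the producer/consumer pattern fastqc() implements: jobs are queued, daemon threads drain the queue, and task_done()/join() coordinate completion. One caveat: the sketch creates a single Lock shared by all workers, which is what makes acquisition meaningful; in the record above each worker constructs a fresh Lock inside its own loop, so the acquire/release there never contends with another thread. The FastQC commands and logfile calls are replaced with stand-ins.

import threading
from queue import Queue

log_lock = threading.Lock()  # one lock shared by every worker
tasks = Queue()

def worker():
    while True:  # while daemon
        name, command = tasks.get()
        result = 'ran {} for {}'.format(command, name)  # stand-in for run_subprocess()
        with log_lock:  # serialise output, as write_to_logfile() requires
            print(result)
        tasks.task_done()  # lets tasks.join() account for this job

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()

for sample in ('sampleA', 'sampleB', 'sampleC'):
    tasks.put((sample, 'fastqc ...'))
tasks.join()  # block until every queued job has been processed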
lowandrew/OLCTools | spadespipeline/quality.py | Quality.trimquality | def trimquality(self):
"""Uses bbduk from the bbmap tool suite to quality and adapter trim"""
logging.info("Trimming fastq files")
# Iterate through strains with fastq files
with progressbar(self.metadata) as bar:
for sample in bar:
# As the metadata can be populated with 'NA' (string) if there are no fastq files, only process if
# :fastqfiles is a list
if type(sample.general.fastqfiles) is list:
# Check to see if the fastq files exist
fastqfiles = sorted(sample.general.fastqfiles)
# Define the output directory
outputdir = sample.general.outputdirectory
# Define the name of the trimmed fastq files
cleanforward = os.path.join(outputdir, '{}_R1_trimmed.fastq.gz'.format(sample.name))
cleanreverse = os.path.join(outputdir, '{}_R2_trimmed.fastq.gz'.format(sample.name))
# Incorporate read length into the minlength parameter - set it to 50 unless one or more of the
# reads has a lower calculated length than 50
try:
lesser_length = min(int(sample.run.forwardlength), int(sample.run.reverselength))
except ValueError:
lesser_length = int(sample.run.forwardlength)
min_len = 50 if lesser_length >= 50 else lesser_length
# Initialise a variable to store the number of bases to automatically trim from the beginning of
                    # each read, as these bases tend to have lower quality scores. Currently set to zero, so no bases are removed
trim_left = 0
# If, for some reason, only the reverse reads are present, use the appropriate output file name
try:
if 'R2' in fastqfiles[0]:
if not os.path.isfile(cleanreverse):
out, \
err, \
bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
reverse_in=None,
forward_out=cleanreverse,
trimq=10,
minlength=min_len,
forcetrimleft=trim_left,
returncmd=True)
else:
bbdukcall = str()
out = str()
err = str()
else:
if not os.path.isfile(cleanforward):
out, \
err, \
bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
forward_out=cleanforward,
trimq=10,
minlength=min_len,
forcetrimleft=trim_left,
returncmd=True)
else:
bbdukcall = str()
out = str()
err = str()
except (IndexError, CalledProcessError):
bbdukcall = str()
out = str()
err = str()
# Write the command, stdout, and stderr to the logfile
write_to_logfile(bbdukcall, bbdukcall, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
# Add the trimmed fastq files to a list
trimmedfastqfiles = sorted(glob(os.path.join(sample.general.outputdirectory, '*trimmed.fastq.gz')))
# Populate the metadata if the files exist
sample.general.trimmedfastqfiles = trimmedfastqfiles if trimmedfastqfiles else list()
# Add all the trimmed files to the metadata
logging.info('Fastq files trimmed') | python | def trimquality(self):
"""Uses bbduk from the bbmap tool suite to quality and adapter trim"""
logging.info("Trimming fastq files")
# Iterate through strains with fastq files
with progressbar(self.metadata) as bar:
for sample in bar:
# As the metadata can be populated with 'NA' (string) if there are no fastq files, only process if
# :fastqfiles is a list
if type(sample.general.fastqfiles) is list:
# Check to see if the fastq files exist
fastqfiles = sorted(sample.general.fastqfiles)
# Define the output directory
outputdir = sample.general.outputdirectory
# Define the name of the trimmed fastq files
cleanforward = os.path.join(outputdir, '{}_R1_trimmed.fastq.gz'.format(sample.name))
cleanreverse = os.path.join(outputdir, '{}_R2_trimmed.fastq.gz'.format(sample.name))
# Incorporate read length into the minlength parameter - set it to 50 unless one or more of the
# reads has a lower calculated length than 50
try:
lesser_length = min(int(sample.run.forwardlength), int(sample.run.reverselength))
except ValueError:
lesser_length = int(sample.run.forwardlength)
min_len = 50 if lesser_length >= 50 else lesser_length
# Initialise a variable to store the number of bases to automatically trim from the beginning of
                    # each read, as these bases tend to have lower quality scores. Currently set to zero, so no bases are removed
trim_left = 0
# If, for some reason, only the reverse reads are present, use the appropriate output file name
try:
if 'R2' in fastqfiles[0]:
if not os.path.isfile(cleanreverse):
out, \
err, \
bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
reverse_in=None,
forward_out=cleanreverse,
trimq=10,
minlength=min_len,
forcetrimleft=trim_left,
returncmd=True)
else:
bbdukcall = str()
out = str()
err = str()
else:
if not os.path.isfile(cleanforward):
out, \
err, \
bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
forward_out=cleanforward,
trimq=10,
minlength=min_len,
forcetrimleft=trim_left,
returncmd=True)
else:
bbdukcall = str()
out = str()
err = str()
except (IndexError, CalledProcessError):
bbdukcall = str()
out = str()
err = str()
# Write the command, stdout, and stderr to the logfile
write_to_logfile(bbdukcall, bbdukcall, self.logfile, sample.general.logout, sample.general.logerr,
None, None)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
# Add the trimmed fastq files to a list
trimmedfastqfiles = sorted(glob(os.path.join(sample.general.outputdirectory, '*trimmed.fastq.gz')))
# Populate the metadata if the files exist
sample.general.trimmedfastqfiles = trimmedfastqfiles if trimmedfastqfiles else list()
# Add all the trimmed files to the metadata
logging.info('Fastq files trimmed') | [
"def",
"trimquality",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"\"Trimming fastq files\"",
")",
"# Iterate through strains with fastq files",
"with",
"progressbar",
"(",
"self",
".",
"metadata",
")",
"as",
"bar",
":",
"for",
"sample",
"in",
"bar",
":",
"# As the metadata can be populated with 'NA' (string) if there are no fastq files, only process if",
"# :fastqfiles is a list",
"if",
"type",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"is",
"list",
":",
"# Check to see if the fastq files exist",
"fastqfiles",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"# Define the output directory",
"outputdir",
"=",
"sample",
".",
"general",
".",
"outputdirectory",
"# Define the name of the trimmed fastq files",
"cleanforward",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'{}_R1_trimmed.fastq.gz'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"cleanreverse",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outputdir",
",",
"'{}_R2_trimmed.fastq.gz'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"# Incorporate read length into the minlength parameter - set it to 50 unless one or more of the",
"# reads has a lower calculated length than 50",
"try",
":",
"lesser_length",
"=",
"min",
"(",
"int",
"(",
"sample",
".",
"run",
".",
"forwardlength",
")",
",",
"int",
"(",
"sample",
".",
"run",
".",
"reverselength",
")",
")",
"except",
"ValueError",
":",
"lesser_length",
"=",
"int",
"(",
"sample",
".",
"run",
".",
"forwardlength",
")",
"min_len",
"=",
"50",
"if",
"lesser_length",
">=",
"50",
"else",
"lesser_length",
"# Initialise a variable to store the number of bases to automatically trim from the beginning of",
"# each read, as these bases tend to have lower quality scores. If trimming the reads will cause",
"trim_left",
"=",
"0",
"# If, for some reason, only the reverse reads are present, use the appropriate output file name",
"try",
":",
"if",
"'R2'",
"in",
"fastqfiles",
"[",
"0",
"]",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"cleanreverse",
")",
":",
"out",
",",
"err",
",",
"bbdukcall",
"=",
"bbtools",
".",
"bbduk_trim",
"(",
"forward_in",
"=",
"fastqfiles",
"[",
"0",
"]",
",",
"reverse_in",
"=",
"None",
",",
"forward_out",
"=",
"cleanreverse",
",",
"trimq",
"=",
"10",
",",
"minlength",
"=",
"min_len",
",",
"forcetrimleft",
"=",
"trim_left",
",",
"returncmd",
"=",
"True",
")",
"else",
":",
"bbdukcall",
"=",
"str",
"(",
")",
"out",
"=",
"str",
"(",
")",
"err",
"=",
"str",
"(",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"cleanforward",
")",
":",
"out",
",",
"err",
",",
"bbdukcall",
"=",
"bbtools",
".",
"bbduk_trim",
"(",
"forward_in",
"=",
"fastqfiles",
"[",
"0",
"]",
",",
"forward_out",
"=",
"cleanforward",
",",
"trimq",
"=",
"10",
",",
"minlength",
"=",
"min_len",
",",
"forcetrimleft",
"=",
"trim_left",
",",
"returncmd",
"=",
"True",
")",
"else",
":",
"bbdukcall",
"=",
"str",
"(",
")",
"out",
"=",
"str",
"(",
")",
"err",
"=",
"str",
"(",
")",
"except",
"(",
"IndexError",
",",
"CalledProcessError",
")",
":",
"bbdukcall",
"=",
"str",
"(",
")",
"out",
"=",
"str",
"(",
")",
"err",
"=",
"str",
"(",
")",
"# Write the command, stdout, and stderr to the logfile",
"write_to_logfile",
"(",
"bbdukcall",
",",
"bbdukcall",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"# Add the trimmed fastq files to a list",
"trimmedfastqfiles",
"=",
"sorted",
"(",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'*trimmed.fastq.gz'",
")",
")",
")",
"# Populate the metadata if the files exist",
"sample",
".",
"general",
".",
"trimmedfastqfiles",
"=",
"trimmedfastqfiles",
"if",
"trimmedfastqfiles",
"else",
"list",
"(",
")",
"# Add all the trimmed files to the metadata",
"logging",
".",
"info",
"(",
"'Fastq files trimmed'",
")"
]
| Uses bbduk from the bbmap tool suite to quality and adapter trim | [
"Uses",
"bbduk",
"from",
"the",
"bbmap",
"tool",
"suite",
"to",
"quality",
"and",
"adapter",
"trim"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L268-L338 | train |
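The key guard in trimquality() is the minimum-length calculation: cap bbduk's minlength at 50, but never demand more than the sequencer produced. A standalone sketch; the TypeError branch is an addition so a missing (None) reverse length is also tolerated.

def minimum_read_length(forward_length, reverse_length=None, cap=50):
    try:
        lesser = min(int(forward_length), int(reverse_length))
    except (TypeError, ValueError):
        # Mirrors the record's ValueError branch: fall back to the forward
        # read length when the reverse length is absent or non-numeric
        lesser = int(forward_length)
    return cap if lesser >= cap else lesser

assert minimum_read_length(251, 251) == 50
assert minimum_read_length(35, 'NA') == 35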
lowandrew/OLCTools | spadespipeline/quality.py | Quality.contamination_finder | def contamination_finder(self, input_path=None, report_path=None):
"""
Helper function to get confindr integrated into the assembly pipeline
"""
logging.info('Calculating contamination in reads')
if input_path is not None:
input_dir = input_path
else:
input_dir = self.path
if report_path is not None:
reportpath = report_path
else:
reportpath = os.path.join(input_dir, 'confindr')
confindr_report = os.path.join(input_dir, 'confindr', 'confindr_report.csv')
pipeline_report = os.path.join(reportpath, 'confindr_report.csv')
# Only proceed if the confindr report doesn't exist
if not os.path.isfile(confindr_report):
            # # Create an object to store attributes to pass to confindr
# Clear and recreate the output folder
try:
shutil.rmtree(reportpath)
except IOError:
pass
make_path(reportpath)
# Run confindr
systemcall = 'confindr.py -i {input_dir} -o {output_dir} -d {database_dir} -bf 0.05'\
.format(input_dir=input_dir,
output_dir=os.path.join(input_dir, 'confindr'),
database_dir=os.path.join(self.reffilepath, 'ConFindr', 'databases'))
# Run the call
out, err = run_subprocess(systemcall)
write_to_logfile(systemcall, systemcall, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
logging.info('Contamination detection complete!')
# Load the confindr report into a dictionary using pandas
# https://stackoverflow.com/questions/33620982/reading-csv-file-as-dictionary-using-pandas
confindr_results = pandas.read_csv(confindr_report, index_col=0).T.to_dict()
# Find the results for each of the samples
for sample in self.metadata:
# Create a GenObject to store the results
sample.confindr = GenObject()
# Iterate through the dictionary to find the outputs for each sample
for line in confindr_results:
# If the current line corresponds to the sample of interest
if sample.name in line:
# Set the values using the appropriate keys as the attributes
sample.confindr.genus = confindr_results[line]['Genus'] if type(confindr_results[line]['Genus']) \
is not float else 'ND'
sample.confindr.num_contaminated_snvs = confindr_results[line]['NumContamSNVs']
sample.confindr.contam_status = confindr_results[line]['ContamStatus']
# Don't break parsing previous ConFindr reports that lack the percent contamination calculations
try:
sample.confindr.percent_contam = confindr_results[line]['PercentContam'] if \
str(confindr_results[line]['PercentContam']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam = 'ND'
try:
sample.confindr.percent_contam_std = \
confindr_results[line]['PercentContamStandardDeviation'] if \
str(confindr_results[line]['PercentContamStandardDeviation']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam_std = 'ND'
if sample.confindr.contam_status is True:
sample.confindr.contam_status = 'Contaminated'
elif sample.confindr.contam_status is False:
sample.confindr.contam_status = 'Clean'
# Re-write the output to be consistent with the rest of the pipeline
with open(pipeline_report, 'w') as csv:
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
for sample in self.metadata:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
csv.write(data) | python | def contamination_finder(self, input_path=None, report_path=None):
"""
Helper function to get confindr integrated into the assembly pipeline
"""
logging.info('Calculating contamination in reads')
if input_path is not None:
input_dir = input_path
else:
input_dir = self.path
if report_path is not None:
reportpath = report_path
else:
reportpath = os.path.join(input_dir, 'confindr')
confindr_report = os.path.join(input_dir, 'confindr', 'confindr_report.csv')
pipeline_report = os.path.join(reportpath, 'confindr_report.csv')
# Only proceed if the confindr report doesn't exist
if not os.path.isfile(confindr_report):
            # # Create an object to store attributes to pass to confindr
# Clear and recreate the output folder
try:
shutil.rmtree(reportpath)
except IOError:
pass
make_path(reportpath)
# Run confindr
systemcall = 'confindr.py -i {input_dir} -o {output_dir} -d {database_dir} -bf 0.05'\
.format(input_dir=input_dir,
output_dir=os.path.join(input_dir, 'confindr'),
database_dir=os.path.join(self.reffilepath, 'ConFindr', 'databases'))
# Run the call
out, err = run_subprocess(systemcall)
write_to_logfile(systemcall, systemcall, self.logfile, None, None, None, None)
write_to_logfile(out, err, self.logfile, None, None, None, None)
logging.info('Contamination detection complete!')
# Load the confindr report into a dictionary using pandas
# https://stackoverflow.com/questions/33620982/reading-csv-file-as-dictionary-using-pandas
confindr_results = pandas.read_csv(confindr_report, index_col=0).T.to_dict()
# Find the results for each of the samples
for sample in self.metadata:
# Create a GenObject to store the results
sample.confindr = GenObject()
# Iterate through the dictionary to find the outputs for each sample
for line in confindr_results:
# If the current line corresponds to the sample of interest
if sample.name in line:
# Set the values using the appropriate keys as the attributes
sample.confindr.genus = confindr_results[line]['Genus'] if type(confindr_results[line]['Genus']) \
is not float else 'ND'
sample.confindr.num_contaminated_snvs = confindr_results[line]['NumContamSNVs']
sample.confindr.contam_status = confindr_results[line]['ContamStatus']
# Don't break parsing previous ConFindr reports that lack the percent contamination calculations
try:
sample.confindr.percent_contam = confindr_results[line]['PercentContam'] if \
str(confindr_results[line]['PercentContam']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam = 'ND'
try:
sample.confindr.percent_contam_std = \
confindr_results[line]['PercentContamStandardDeviation'] if \
str(confindr_results[line]['PercentContamStandardDeviation']) != 'nan' else 0
except KeyError:
sample.confindr.percent_contam_std = 'ND'
if sample.confindr.contam_status is True:
sample.confindr.contam_status = 'Contaminated'
elif sample.confindr.contam_status is False:
sample.confindr.contam_status = 'Clean'
# Re-write the output to be consistent with the rest of the pipeline
with open(pipeline_report, 'w') as csv:
data = 'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\n'
for sample in self.metadata:
data += '{str},{genus},{numcontamsnv},{status},{pc},{pcs}\n'.format(
str=sample.name,
genus=sample.confindr.genus,
numcontamsnv=sample.confindr.num_contaminated_snvs,
status=sample.confindr.contam_status,
pc=sample.confindr.percent_contam,
pcs=sample.confindr.percent_contam_std
)
csv.write(data) | [
"def",
"contamination_finder",
"(",
"self",
",",
"input_path",
"=",
"None",
",",
"report_path",
"=",
"None",
")",
":",
"logging",
".",
"info",
"(",
"'Calculating contamination in reads'",
")",
"if",
"input_path",
"is",
"not",
"None",
":",
"input_dir",
"=",
"input_path",
"else",
":",
"input_dir",
"=",
"self",
".",
"path",
"if",
"report_path",
"is",
"not",
"None",
":",
"reportpath",
"=",
"report_path",
"else",
":",
"reportpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"'confindr'",
")",
"confindr_report",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"'confindr'",
",",
"'confindr_report.csv'",
")",
"pipeline_report",
"=",
"os",
".",
"path",
".",
"join",
"(",
"reportpath",
",",
"'confindr_report.csv'",
")",
"# Only proceed if the confindr report doesn't exist",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"confindr_report",
")",
":",
"# # Create an object to store attributes to pass to confinder",
"# Clear and recreate the output folder",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"reportpath",
")",
"except",
"IOError",
":",
"pass",
"make_path",
"(",
"reportpath",
")",
"# Run confindr",
"systemcall",
"=",
"'confindr.py -i {input_dir} -o {output_dir} -d {database_dir} -bf 0.05'",
".",
"format",
"(",
"input_dir",
"=",
"input_dir",
",",
"output_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"'confindr'",
")",
",",
"database_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"reffilepath",
",",
"'ConFindr'",
",",
"'databases'",
")",
")",
"# Run the call",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"systemcall",
")",
"write_to_logfile",
"(",
"systemcall",
",",
"systemcall",
",",
"self",
".",
"logfile",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
")",
"logging",
".",
"info",
"(",
"'Contamination detection complete!'",
")",
"# Load the confindr report into a dictionary using pandas",
"# https://stackoverflow.com/questions/33620982/reading-csv-file-as-dictionary-using-pandas",
"confindr_results",
"=",
"pandas",
".",
"read_csv",
"(",
"confindr_report",
",",
"index_col",
"=",
"0",
")",
".",
"T",
".",
"to_dict",
"(",
")",
"# Find the results for each of the samples",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Create a GenObject to store the results",
"sample",
".",
"confindr",
"=",
"GenObject",
"(",
")",
"# Iterate through the dictionary to find the outputs for each sample",
"for",
"line",
"in",
"confindr_results",
":",
"# If the current line corresponds to the sample of interest",
"if",
"sample",
".",
"name",
"in",
"line",
":",
"# Set the values using the appropriate keys as the attributes",
"sample",
".",
"confindr",
".",
"genus",
"=",
"confindr_results",
"[",
"line",
"]",
"[",
"'Genus'",
"]",
"if",
"type",
"(",
"confindr_results",
"[",
"line",
"]",
"[",
"'Genus'",
"]",
")",
"is",
"not",
"float",
"else",
"'ND'",
"sample",
".",
"confindr",
".",
"num_contaminated_snvs",
"=",
"confindr_results",
"[",
"line",
"]",
"[",
"'NumContamSNVs'",
"]",
"sample",
".",
"confindr",
".",
"contam_status",
"=",
"confindr_results",
"[",
"line",
"]",
"[",
"'ContamStatus'",
"]",
"# Don't break parsing previous ConFindr reports that lack the percent contamination calculations",
"try",
":",
"sample",
".",
"confindr",
".",
"percent_contam",
"=",
"confindr_results",
"[",
"line",
"]",
"[",
"'PercentContam'",
"]",
"if",
"str",
"(",
"confindr_results",
"[",
"line",
"]",
"[",
"'PercentContam'",
"]",
")",
"!=",
"'nan'",
"else",
"0",
"except",
"KeyError",
":",
"sample",
".",
"confindr",
".",
"percent_contam",
"=",
"'ND'",
"try",
":",
"sample",
".",
"confindr",
".",
"percent_contam_std",
"=",
"confindr_results",
"[",
"line",
"]",
"[",
"'PercentContamStandardDeviation'",
"]",
"if",
"str",
"(",
"confindr_results",
"[",
"line",
"]",
"[",
"'PercentContamStandardDeviation'",
"]",
")",
"!=",
"'nan'",
"else",
"0",
"except",
"KeyError",
":",
"sample",
".",
"confindr",
".",
"percent_contam_std",
"=",
"'ND'",
"if",
"sample",
".",
"confindr",
".",
"contam_status",
"is",
"True",
":",
"sample",
".",
"confindr",
".",
"contam_status",
"=",
"'Contaminated'",
"elif",
"sample",
".",
"confindr",
".",
"contam_status",
"is",
"False",
":",
"sample",
".",
"confindr",
".",
"contam_status",
"=",
"'Clean'",
"# Re-write the output to be consistent with the rest of the pipeline",
"with",
"open",
"(",
"pipeline_report",
",",
"'w'",
")",
"as",
"csv",
":",
"data",
"=",
"'Strain,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamSTD\\n'",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"data",
"+=",
"'{str},{genus},{numcontamsnv},{status},{pc},{pcs}\\n'",
".",
"format",
"(",
"str",
"=",
"sample",
".",
"name",
",",
"genus",
"=",
"sample",
".",
"confindr",
".",
"genus",
",",
"numcontamsnv",
"=",
"sample",
".",
"confindr",
".",
"num_contaminated_snvs",
",",
"status",
"=",
"sample",
".",
"confindr",
".",
"contam_status",
",",
"pc",
"=",
"sample",
".",
"confindr",
".",
"percent_contam",
",",
"pcs",
"=",
"sample",
".",
"confindr",
".",
"percent_contam_std",
")",
"csv",
".",
"write",
"(",
"data",
")"
]
| Helper function to get confindr integrated into the assembly pipeline | [
"Helper",
"function",
"to",
"get",
"confindr",
"integrated",
"into",
"the",
"assembly",
"pipeline"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L340-L418 | train |
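The report parsing above leans on pandas' transpose-then-to_dict trick to turn a sample-indexed CSV into nested dictionaries. A self-contained sketch with fabricated report content:

import io

import pandas

report = io.StringIO(
    'Sample,Genus,NumContamSNVs,ContamStatus\n'
    '2018-SEQ-0001,Escherichia,0,False\n'
    '2018-SEQ-0002,Listeria,17,True\n'
)
# index_col=0 keys the frame by sample name; transposing and calling
# to_dict() yields {sample: {column: value, ...}, ...}
results = pandas.read_csv(report, index_col=0).T.to_dict()
assert results['2018-SEQ-0002']['ContamStatus']
assert results['2018-SEQ-0001']['NumContamSNVs'] == 0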
lowandrew/OLCTools | spadespipeline/quality.py | Quality.estimate_genome_size | def estimate_genome_size(self):
"""
Use kmercountexact from the bbmap suite of tools to estimate the size of the genome
"""
logging.info('Estimating genome size using kmercountexact')
for sample in self.metadata:
# Initialise the name of the output file
sample[self.analysistype].peaksfile = os.path.join(sample[self.analysistype].outputdir, 'peaks.txt')
# Run the kmer counting command
out, err, cmd = bbtools.kmercountexact(forward_in=sorted(sample.general.fastqfiles)[0],
peaks=sample[self.analysistype].peaksfile,
returncmd=True,
threads=self.cpus)
# Set the command in the object
sample[self.analysistype].kmercountexactcmd = cmd
# Extract the genome size from the peaks file
sample[self.analysistype].genomesize = bbtools.genome_size(sample[self.analysistype].peaksfile)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) | python | def estimate_genome_size(self):
"""
Use kmercountexact from the bbmap suite of tools to estimate the size of the genome
"""
logging.info('Estimating genome size using kmercountexact')
for sample in self.metadata:
# Initialise the name of the output file
sample[self.analysistype].peaksfile = os.path.join(sample[self.analysistype].outputdir, 'peaks.txt')
# Run the kmer counting command
out, err, cmd = bbtools.kmercountexact(forward_in=sorted(sample.general.fastqfiles)[0],
peaks=sample[self.analysistype].peaksfile,
returncmd=True,
threads=self.cpus)
# Set the command in the object
sample[self.analysistype].kmercountexactcmd = cmd
# Extract the genome size from the peaks file
sample[self.analysistype].genomesize = bbtools.genome_size(sample[self.analysistype].peaksfile)
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) | [
"def",
"estimate_genome_size",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Estimating genome size using kmercountexact'",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise the name of the output file",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"peaksfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"outputdir",
",",
"'peaks.txt'",
")",
"# Run the kmer counting command",
"out",
",",
"err",
",",
"cmd",
"=",
"bbtools",
".",
"kmercountexact",
"(",
"forward_in",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"[",
"0",
"]",
",",
"peaks",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"peaksfile",
",",
"returncmd",
"=",
"True",
",",
"threads",
"=",
"self",
".",
"cpus",
")",
"# Set the command in the object",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"kmercountexactcmd",
"=",
"cmd",
"# Extract the genome size from the peaks file",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genomesize",
"=",
"bbtools",
".",
"genome_size",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"peaksfile",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")"
]
| Use kmercountexact from the bbmap suite of tools to estimate the size of the genome | [
"Use",
"kmercountexact",
"from",
"the",
"bbmap",
"suite",
"of",
"tools",
"to",
"estimate",
"the",
"size",
"of",
"the",
"genome"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L420-L437 | train |
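bbtools.genome_size() presumably pulls the estimate out of the peaks.txt header that kmercountexact writes; a hedged sketch of that parse, with the '#genome_size' key taken from BBMap's documented output rather than from this repository's bbtools module.

def genome_size(peaks_file):
    with open(peaks_file) as peaks:
        for line in peaks:
            # Header lines look like '#genome_size<TAB>4641652'
            if line.startswith('#genome_size'):
                return int(line.rstrip().split('\t')[1])
    return 0  # no estimate found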
lowandrew/OLCTools | spadespipeline/quality.py | Quality.error_correction | def error_correction(self):
"""
Use tadpole from the bbmap suite of tools to perform error correction of the reads
"""
logging.info('Error correcting reads')
for sample in self.metadata:
sample.general.trimmedcorrectedfastqfiles = [fastq.split('.fastq.gz')[0] + '_trimmed_corrected.fastq.gz'
for fastq in sorted(sample.general.fastqfiles)]
try:
if not os.path.isfile(sample.general.trimmedcorrectedfastqfiles[0]):
try:
out, err, cmd = bbtools.tadpole(forward_in=sorted(sample.general.trimmedfastqfiles)[0],
forward_out=sample.general.trimmedcorrectedfastqfiles[0],
returncmd=True,
mode='correct',
threads=self.cpus)
# Set the command in the object
sample[self.analysistype].errorcorrectcmd = cmd
write_to_logfile(out=out,
err=err,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=None,
analysiserr=None)
except IndexError:
sample.general.trimmedcorrectedfastqfiles = list()
except CalledProcessError:
sample.general.trimmedcorrectedfastqfiles = sample.general.trimmedfastqfiles
except AttributeError:
sample.general.trimmedcorrectedfastqfiles = list()
except IndexError:
sample.general.trimmedcorrectedfastqfiles = list() | python | def error_correction(self):
"""
Use tadpole from the bbmap suite of tools to perform error correction of the reads
"""
logging.info('Error correcting reads')
for sample in self.metadata:
sample.general.trimmedcorrectedfastqfiles = [fastq.split('.fastq.gz')[0] + '_trimmed_corrected.fastq.gz'
for fastq in sorted(sample.general.fastqfiles)]
try:
if not os.path.isfile(sample.general.trimmedcorrectedfastqfiles[0]):
try:
out, err, cmd = bbtools.tadpole(forward_in=sorted(sample.general.trimmedfastqfiles)[0],
forward_out=sample.general.trimmedcorrectedfastqfiles[0],
returncmd=True,
mode='correct',
threads=self.cpus)
# Set the command in the object
sample[self.analysistype].errorcorrectcmd = cmd
write_to_logfile(out=out,
err=err,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=None,
analysiserr=None)
except IndexError:
sample.general.trimmedcorrectedfastqfiles = list()
except CalledProcessError:
sample.general.trimmedcorrectedfastqfiles = sample.general.trimmedfastqfiles
except AttributeError:
sample.general.trimmedcorrectedfastqfiles = list()
except IndexError:
sample.general.trimmedcorrectedfastqfiles = list() | [
"def",
"error_correction",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Error correcting reads'",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"=",
"[",
"fastq",
".",
"split",
"(",
"'.fastq.gz'",
")",
"[",
"0",
"]",
"+",
"'_trimmed_corrected.fastq.gz'",
"for",
"fastq",
"in",
"sorted",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"]",
"try",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"[",
"0",
"]",
")",
":",
"try",
":",
"out",
",",
"err",
",",
"cmd",
"=",
"bbtools",
".",
"tadpole",
"(",
"forward_in",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"trimmedfastqfiles",
")",
"[",
"0",
"]",
",",
"forward_out",
"=",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"[",
"0",
"]",
",",
"returncmd",
"=",
"True",
",",
"mode",
"=",
"'correct'",
",",
"threads",
"=",
"self",
".",
"cpus",
")",
"# Set the command in the object",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"errorcorrectcmd",
"=",
"cmd",
"write_to_logfile",
"(",
"out",
"=",
"out",
",",
"err",
"=",
"err",
",",
"logfile",
"=",
"self",
".",
"logfile",
",",
"samplelog",
"=",
"sample",
".",
"general",
".",
"logout",
",",
"sampleerr",
"=",
"sample",
".",
"general",
".",
"logerr",
",",
"analysislog",
"=",
"None",
",",
"analysiserr",
"=",
"None",
")",
"except",
"IndexError",
":",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"=",
"list",
"(",
")",
"except",
"CalledProcessError",
":",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"=",
"sample",
".",
"general",
".",
"trimmedfastqfiles",
"except",
"AttributeError",
":",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"=",
"list",
"(",
")",
"except",
"IndexError",
":",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
"=",
"list",
"(",
")"
]
| Use tadpole from the bbmap suite of tools to perform error correction of the reads | [
"Use",
"tadpole",
"from",
"the",
"bbmap",
"suite",
"of",
"tools",
"to",
"perform",
"error",
"correction",
"of",
"the",
"reads"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L439-L471 | train |
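A hedged sketch of the shell command the bbtools.tadpole() wrapper is expected to assemble for mode='correct'; the flag spellings follow BBMap's tadpole.sh usage and may differ from the wrapper's exact output.

import subprocess

def tadpole_correct(forward_in, forward_out, threads=4):
    cmd = 'tadpole.sh in={fin} out={fout} mode=correct threads={t}'.format(
        fin=forward_in, fout=forward_out, t=threads)
    # Capture stdout/stderr so the caller can hand them to write_to_logfile()
    completed = subprocess.run(cmd, shell=True, capture_output=True,
                               text=True, check=True)
    return completed.stdout, completed.stderr, cmd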
lowandrew/OLCTools | spadespipeline/quality.py | Quality.normalise_reads | def normalise_reads(self):
"""
Use bbnorm from the bbmap suite of tools to perform read normalisation
"""
logging.info('Normalising reads to a kmer depth of 100')
for sample in self.metadata:
# Set the name of the normalised read files
sample.general.normalisedreads = [fastq.split('.fastq.gz')[0] + '_normalised.fastq.gz'
for fastq in sorted(sample.general.fastqfiles)]
try:
# Run the normalisation command
out, err, cmd = bbtools.bbnorm(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
forward_out=sample.general.normalisedreads[0],
returncmd=True,
threads=self.cpus)
sample[self.analysistype].normalisecmd = cmd
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
except CalledProcessError:
sample.general.normalisedreads = sample.general.trimmedfastqfiles
except IndexError:
sample.general.normalisedreads = list() | python | def normalise_reads(self):
"""
Use bbnorm from the bbmap suite of tools to perform read normalisation
"""
logging.info('Normalising reads to a kmer depth of 100')
for sample in self.metadata:
# Set the name of the normalised read files
sample.general.normalisedreads = [fastq.split('.fastq.gz')[0] + '_normalised.fastq.gz'
for fastq in sorted(sample.general.fastqfiles)]
try:
# Run the normalisation command
out, err, cmd = bbtools.bbnorm(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
forward_out=sample.general.normalisedreads[0],
returncmd=True,
threads=self.cpus)
sample[self.analysistype].normalisecmd = cmd
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
except CalledProcessError:
sample.general.normalisedreads = sample.general.trimmedfastqfiles
except IndexError:
sample.general.normalisedreads = list() | [
"def",
"normalise_reads",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Normalising reads to a kmer depth of 100'",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Set the name of the normalised read files",
"sample",
".",
"general",
".",
"normalisedreads",
"=",
"[",
"fastq",
".",
"split",
"(",
"'.fastq.gz'",
")",
"[",
"0",
"]",
"+",
"'_normalised.fastq.gz'",
"for",
"fastq",
"in",
"sorted",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"]",
"try",
":",
"# Run the normalisation command",
"out",
",",
"err",
",",
"cmd",
"=",
"bbtools",
".",
"bbnorm",
"(",
"forward_in",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
")",
"[",
"0",
"]",
",",
"forward_out",
"=",
"sample",
".",
"general",
".",
"normalisedreads",
"[",
"0",
"]",
",",
"returncmd",
"=",
"True",
",",
"threads",
"=",
"self",
".",
"cpus",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"normalisecmd",
"=",
"cmd",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"except",
"CalledProcessError",
":",
"sample",
".",
"general",
".",
"normalisedreads",
"=",
"sample",
".",
"general",
".",
"trimmedfastqfiles",
"except",
"IndexError",
":",
"sample",
".",
"general",
".",
"normalisedreads",
"=",
"list",
"(",
")"
]
| Use bbnorm from the bbmap suite of tools to perform read normalisation | [
"Use",
"bbnorm",
"from",
"the",
"bbmap",
"suite",
"of",
"tools",
"to",
"perform",
"read",
"normalisation"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L473-L493 | train |
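normalise_reads() shows the fallback convention used throughout these read-preparation steps: when a stage fails, reuse the previous stage's files rather than aborting the pipeline. A distilled sketch of that control flow:

from subprocess import CalledProcessError

def normalised_or_fallback(run_bbnorm, normalised, trimmed):
    try:
        run_bbnorm()       # stand-in for the bbtools.bbnorm() call
        return normalised  # normalisation succeeded
    except CalledProcessError:
        return trimmed     # the tool failed: fall back to trimmed reads
    except IndexError:
        return []          # no input reads at all: empty list downstream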
lowandrew/OLCTools | spadespipeline/quality.py | Quality.merge_pairs | def merge_pairs(self):
"""
Use bbmerge from the bbmap suite of tools to merge paired-end reads
"""
logging.info('Merging paired reads')
for sample in self.metadata:
# Can only merge paired-end
if len(sample.general.fastqfiles) == 2:
# Set the name of the merged, and unmerged files
sample.general.mergedreads = \
os.path.join(sample.general.outputdirectory, '{}_paired.fastq.gz'.format(sample.name))
sample.general.unmergedforward = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R1.fastq.gz'.format(sample.name))
sample.general.unmergedreverse = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R2.fastq.gz'.format(sample.name))
try:
# Run the merging command - forward_in=sample.general.normalisedreads[0],
out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
merged_reads=sample.general.mergedreads,
returncmd=True,
outu1=sample.general.unmergedforward,
outu2=sample.general.unmergedreverse,
threads=self.cpus)
sample[self.analysistype].bbmergecmd = cmd
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
except CalledProcessError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
except IndexError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
else:
sample.general.mergedreads = sorted(sample.general.trimmedcorrectedfastqfiles)[0] | python | def merge_pairs(self):
"""
Use bbmerge from the bbmap suite of tools to merge paired-end reads
"""
logging.info('Merging paired reads')
for sample in self.metadata:
# Can only merge paired-end
if len(sample.general.fastqfiles) == 2:
# Set the name of the merged, and unmerged files
sample.general.mergedreads = \
os.path.join(sample.general.outputdirectory, '{}_paired.fastq.gz'.format(sample.name))
sample.general.unmergedforward = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R1.fastq.gz'.format(sample.name))
sample.general.unmergedreverse = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R2.fastq.gz'.format(sample.name))
try:
# Run the merging command - forward_in=sample.general.normalisedreads[0],
out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
merged_reads=sample.general.mergedreads,
returncmd=True,
outu1=sample.general.unmergedforward,
outu2=sample.general.unmergedreverse,
threads=self.cpus)
sample[self.analysistype].bbmergecmd = cmd
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
except CalledProcessError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
except IndexError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
else:
sample.general.mergedreads = sorted(sample.general.trimmedcorrectedfastqfiles)[0] | [
"def",
"merge_pairs",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Merging paired reads'",
")",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Can only merge paired-end",
"if",
"len",
"(",
"sample",
".",
"general",
".",
"fastqfiles",
")",
"==",
"2",
":",
"# Set the name of the merged, and unmerged files",
"sample",
".",
"general",
".",
"mergedreads",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'{}_paired.fastq.gz'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"sample",
".",
"general",
".",
"unmergedforward",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'{}_unpaired_R1.fastq.gz'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"sample",
".",
"general",
".",
"unmergedreverse",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sample",
".",
"general",
".",
"outputdirectory",
",",
"'{}_unpaired_R2.fastq.gz'",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"try",
":",
"# Run the merging command - forward_in=sample.general.normalisedreads[0],",
"out",
",",
"err",
",",
"cmd",
"=",
"bbtools",
".",
"bbmerge",
"(",
"forward_in",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
")",
"[",
"0",
"]",
",",
"merged_reads",
"=",
"sample",
".",
"general",
".",
"mergedreads",
",",
"returncmd",
"=",
"True",
",",
"outu1",
"=",
"sample",
".",
"general",
".",
"unmergedforward",
",",
"outu2",
"=",
"sample",
".",
"general",
".",
"unmergedreverse",
",",
"threads",
"=",
"self",
".",
"cpus",
")",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"bbmergecmd",
"=",
"cmd",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
",",
"sample",
".",
"general",
".",
"logout",
",",
"sample",
".",
"general",
".",
"logerr",
",",
"None",
",",
"None",
")",
"except",
"CalledProcessError",
":",
"delattr",
"(",
"sample",
".",
"general",
",",
"'mergedreads'",
")",
"delattr",
"(",
"sample",
".",
"general",
",",
"'unmergedforward'",
")",
"delattr",
"(",
"sample",
".",
"general",
",",
"'unmergedreverse'",
")",
"except",
"IndexError",
":",
"delattr",
"(",
"sample",
".",
"general",
",",
"'mergedreads'",
")",
"delattr",
"(",
"sample",
".",
"general",
",",
"'unmergedforward'",
")",
"delattr",
"(",
"sample",
".",
"general",
",",
"'unmergedreverse'",
")",
"else",
":",
"sample",
".",
"general",
".",
"mergedreads",
"=",
"sorted",
"(",
"sample",
".",
"general",
".",
"trimmedcorrectedfastqfiles",
")",
"[",
"0",
"]"
]
| Use bbmerge from the bbmap suite of tools to merge paired-end reads | [
"Use",
"bbmerge",
"from",
"the",
"bbmap",
"suite",
"of",
"tools",
"to",
"merge",
"paired",
"-",
"end",
"reads"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L495-L529 | train |
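merge_pairs() branches on read layout: only paired-end samples are merged, and bbmerge splits its input into joined reads plus the pairs it could not join. A sketch of the output naming for both branches:

import os

def merge_targets(output_dir, name, fastq_files):
    if len(fastq_files) == 2:
        # Paired-end: merged reads plus the unjoined forward/reverse reads
        return {
            'merged': os.path.join(output_dir, '{}_paired.fastq.gz'.format(name)),
            'unmerged_r1': os.path.join(output_dir, '{}_unpaired_R1.fastq.gz'.format(name)),
            'unmerged_r2': os.path.join(output_dir, '{}_unpaired_R2.fastq.gz'.format(name)),
        }
    # Single-end: nothing to merge, so the single input file is used as-is
    return {'merged': sorted(fastq_files)[0]}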
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.main | def main(self):
"""
Run all the methods required for pipeline outputs
"""
self.fasta_records()
self.fasta_stats()
self.find_largest_contig()
self.find_genome_length()
self.find_num_contigs()
self.find_n50()
self.perform_pilon()
self.clear_attributes() | python | def main(self):
"""
Run all the methods required for pipeline outputs
"""
self.fasta_records()
self.fasta_stats()
self.find_largest_contig()
self.find_genome_length()
self.find_num_contigs()
self.find_n50()
self.perform_pilon()
self.clear_attributes() | [
"def",
"main",
"(",
"self",
")",
":",
"self",
".",
"fasta_records",
"(",
")",
"self",
".",
"fasta_stats",
"(",
")",
"self",
".",
"find_largest_contig",
"(",
")",
"self",
".",
"find_genome_length",
"(",
")",
"self",
".",
"find_num_contigs",
"(",
")",
"self",
".",
"find_n50",
"(",
")",
"self",
".",
"perform_pilon",
"(",
")",
"self",
".",
"clear_attributes",
"(",
")"
]
| Run all the methods required for pipeline outputs | [
"Run",
"all",
"the",
"methods",
"required",
"for",
"pipeline",
"outputs"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L560-L571 | train |
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.fasta_records | def fasta_records(self):
"""
Use SeqIO to create dictionaries of all records for each FASTA file
"""
for sample in self.metadata:
# Create the analysis-type specific attribute
setattr(sample, self.analysistype, GenObject())
# Create a dictionary of records for each file
try:
record_dict = SeqIO.to_dict(SeqIO.parse(sample.general.bestassemblyfile, "fasta"))
except FileNotFoundError:
record_dict = dict()
# Set the records dictionary as the attribute for the object
sample[self.analysistype].record_dict = record_dict | python | def fasta_records(self):
"""
Use SeqIO to create dictionaries of all records for each FASTA file
"""
for sample in self.metadata:
# Create the analysis-type specific attribute
setattr(sample, self.analysistype, GenObject())
# Create a dictionary of records for each file
try:
record_dict = SeqIO.to_dict(SeqIO.parse(sample.general.bestassemblyfile, "fasta"))
except FileNotFoundError:
record_dict = dict()
# Set the records dictionary as the attribute for the object
sample[self.analysistype].record_dict = record_dict | [
"def",
"fasta_records",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Create the analysis-type specific attribute",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"# Create a dictionary of records for each file",
"try",
":",
"record_dict",
"=",
"SeqIO",
".",
"to_dict",
"(",
"SeqIO",
".",
"parse",
"(",
"sample",
".",
"general",
".",
"bestassemblyfile",
",",
"\"fasta\"",
")",
")",
"except",
"FileNotFoundError",
":",
"record_dict",
"=",
"dict",
"(",
")",
"# Set the records dictionary as the attribute for the object",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"record_dict",
"=",
"record_dict"
]
| Use SeqIO to create dictionaries of all records for each FASTA file | [
"Use",
"SeqIO",
"to",
"create",
"dictionaries",
"of",
"all",
"records",
"for",
"each",
"FASTA",
"file"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L573-L586 | train |
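SeqIO.to_dict() builds a {record id: SeqRecord} mapping in one pass; a self-contained sketch using an in-memory FASTA in place of a bestassemblyfile on disk:

import io

from Bio import SeqIO

fasta = io.StringIO('>contig_1\nATGCATGC\n>contig_2\nGGGCCC\n')
record_dict = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
assert sorted(record_dict) == ['contig_1', 'contig_2']
assert len(record_dict['contig_1'].seq) == 8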
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.fasta_stats | def fasta_stats(self):
"""
Parse the lengths of all contigs for each sample, as well as the total GC%
"""
for sample in self.metadata:
# Initialise variables to store appropriate values parsed from contig records
contig_lengths = list()
fasta_sequence = str()
for contig, record in sample[self.analysistype].record_dict.items():
# Append the length of the contig to the list
contig_lengths.append(len(record.seq))
# Add the contig sequence to the string
fasta_sequence += record.seq
# Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value
sample[self.analysistype].contig_lengths = sorted(contig_lengths, reverse=True)
try:
# Calculate the GC% of the total genome sequence using GC - format to have two decimal places
sample[self.analysistype].gc = float('{:0.2f}'.format(GC(fasta_sequence)))
except TypeError:
sample[self.analysistype].gc = 'NA' | python | def fasta_stats(self):
"""
Parse the lengths of all contigs for each sample, as well as the total GC%
"""
for sample in self.metadata:
# Initialise variables to store appropriate values parsed from contig records
contig_lengths = list()
fasta_sequence = str()
for contig, record in sample[self.analysistype].record_dict.items():
# Append the length of the contig to the list
contig_lengths.append(len(record.seq))
# Add the contig sequence to the string
fasta_sequence += record.seq
            # Set the reverse sorted (i.e. largest to smallest) list of contig sizes as the value
sample[self.analysistype].contig_lengths = sorted(contig_lengths, reverse=True)
try:
# Calculate the GC% of the total genome sequence using GC - format to have two decimal places
sample[self.analysistype].gc = float('{:0.2f}'.format(GC(fasta_sequence)))
except TypeError:
sample[self.analysistype].gc = 'NA' | [
"def",
"fasta_stats",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise variables to store appropriate values parsed from contig records",
"contig_lengths",
"=",
"list",
"(",
")",
"fasta_sequence",
"=",
"str",
"(",
")",
"for",
"contig",
",",
"record",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"record_dict",
".",
"items",
"(",
")",
":",
"# Append the length of the contig to the list",
"contig_lengths",
".",
"append",
"(",
"len",
"(",
"record",
".",
"seq",
")",
")",
"# Add the contig sequence to the string",
"fasta_sequence",
"+=",
"record",
".",
"seq",
"# Set the reverse sorted (e.g. largest to smallest) list of contig sizes as the value",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"contig_lengths",
"=",
"sorted",
"(",
"contig_lengths",
",",
"reverse",
"=",
"True",
")",
"try",
":",
"# Calculate the GC% of the total genome sequence using GC - format to have two decimal places",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"gc",
"=",
"float",
"(",
"'{:0.2f}'",
".",
"format",
"(",
"GC",
"(",
"fasta_sequence",
")",
")",
")",
"except",
"TypeError",
":",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"gc",
"=",
"'NA'"
]
| Parse the lengths of all contigs for each sample, as well as the total GC% | [
"Parse",
"the",
"lengths",
"of",
"all",
"contigs",
"for",
"each",
"sample",
"as",
"well",
"as",
"the",
"total",
"GC%"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L588-L607 | train |
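A worked example of the bookkeeping in fasta_stats(): collect contig lengths, sort them largest-first, and compute the GC% of the concatenated sequence. GC() is the function this module uses; newer Biopython releases replace it with gc_fraction.

from Bio.SeqUtils import GC

contigs = {'contig_1': 'ATGC', 'contig_2': 'GGGGCC'}
contig_lengths = sorted((len(seq) for seq in contigs.values()), reverse=True)
total_sequence = ''.join(contigs.values())
gc_percent = float('{:0.2f}'.format(GC(total_sequence)))

assert contig_lengths == [6, 4]
assert gc_percent == 80.0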
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.find_largest_contig | def find_largest_contig(self):
"""
Determine the largest contig for each strain
"""
# for file_name, contig_lengths in contig_lengths_dict.items():
for sample in self.metadata:
# As the list is sorted in descending order, the largest contig is the first entry in the list
            sample[self.analysistype].longest_contig = sample[self.analysistype].contig_lengths[0] | python | def find_largest_contig(self):
"""
Determine the largest contig for each strain
"""
# for file_name, contig_lengths in contig_lengths_dict.items():
for sample in self.metadata:
# As the list is sorted in descending order, the largest contig is the first entry in the list
            sample[self.analysistype].longest_contig = sample[self.analysistype].contig_lengths[0] | [
"def",
"find_largest_contig",
"(",
"self",
")",
":",
"# for file_name, contig_lengths in contig_lengths_dict.items():",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# As the list is sorted in descending order, the largest contig is the first entry in the list",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"longest_contig",
"=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"contig_lengths"
]
| Determine the largest contig for each strain | [
"Determine",
"the",
"largest",
"contig",
"for",
"each",
"strain"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L609-L616 | train |
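Because fasta_stats() stores the lengths in descending order, the first entry of the list is the maximum, equivalent to max() without another pass over the data:

contig_lengths = sorted([150, 5000, 320], reverse=True)
assert contig_lengths[0] == max([150, 5000, 320]) == 5000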
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.find_genome_length | def find_genome_length(self):
"""
Determine the total length of all the contigs for each strain
"""
for sample in self.metadata:
# Use the sum() method to add all the contig lengths in the list
sample[self.analysistype].genome_length = sum(sample[self.analysistype].contig_lengths) | python | def find_genome_length(self):
"""
Determine the total length of all the contigs for each strain
"""
for sample in self.metadata:
# Use the sum() method to add all the contig lengths in the list
sample[self.analysistype].genome_length = sum(sample[self.analysistype].contig_lengths) | [
"def",
"find_genome_length",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Use the sum() method to add all the contig lengths in the list",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genome_length",
"=",
"sum",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"contig_lengths",
")"
]
| Determine the total length of all the contigs for each strain | [
"Determine",
"the",
"total",
"length",
"of",
"all",
"the",
"contigs",
"for",
"each",
"strain"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L618-L624 | train |
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.find_num_contigs | def find_num_contigs(self):
"""
Count the total number of contigs for each strain
"""
for sample in self.metadata:
# Use the len() method to count the number of entries in the list
sample[self.analysistype].num_contigs = len(sample[self.analysistype].contig_lengths) | python | def find_num_contigs(self):
"""
Count the total number of contigs for each strain
"""
for sample in self.metadata:
# Use the len() method to count the number of entries in the list
sample[self.analysistype].num_contigs = len(sample[self.analysistype].contig_lengths) | [
"def",
"find_num_contigs",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Use the len() method to count the number of entries in the list",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"num_contigs",
"=",
"len",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"contig_lengths",
")"
]
| Count the total number of contigs for each strain | [
"Count",
"the",
"total",
"number",
"of",
"contigs",
"for",
"each",
"strain"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L626-L632 | train |
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.find_n50 | def find_n50(self):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
"""
for sample in self.metadata:
# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop
sample[self.analysistype].n50 = '-'
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in sample[self.analysistype].contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= sample[self.analysistype].genome_length * 0.5:
# Populate the dictionary, and break the loop
sample[self.analysistype].n50 = contig_length
break | python | def find_n50(self):
"""
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
"""
for sample in self.metadata:
# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop
sample[self.analysistype].n50 = '-'
# Initialise a variable to store a running total of contig lengths
currentlength = 0
for contig_length in sample[self.analysistype].contig_lengths:
# Increment the current length with the length of the current contig
currentlength += contig_length
# If the current length is now greater than the total genome / 2, the current contig length is the N50
if currentlength >= sample[self.analysistype].genome_length * 0.5:
# Populate the dictionary, and break the loop
sample[self.analysistype].n50 = contig_length
break | [
"def",
"find_n50",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"n50",
"=",
"'-'",
"# Initialise a variable to store a running total of contig lengths",
"currentlength",
"=",
"0",
"for",
"contig_length",
"in",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"contig_lengths",
":",
"# Increment the current length with the length of the current contig",
"currentlength",
"+=",
"contig_length",
"# If the current length is now greater than the total genome / 2, the current contig length is the N50",
"if",
"currentlength",
">=",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"genome_length",
"*",
"0.5",
":",
"# Populate the dictionary, and break the loop",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"n50",
"=",
"contig_length",
"break"
]
| Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig | [
"Calculate",
"the",
"N50",
"for",
"each",
"strain",
".",
"N50",
"is",
"defined",
"as",
"the",
"largest",
"contig",
"such",
"that",
"at",
"least",
"half",
"of",
"the",
"total",
"genome",
"size",
"is",
"contained",
"in",
"contigs",
"equal",
"to",
"or",
"larger",
"than",
"this",
"contig"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L634-L651 | train |
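The N50 loop is easy to verify by hand; a worked example with invented, already-descending contig lengths:

```python
# genome_length = 250, so the N50 threshold is 125
contig_lengths = [100, 80, 40, 20, 10]
genome_length = sum(contig_lengths)

n50 = '-'
currentlength = 0
for contig_length in contig_lengths:
    currentlength += contig_length
    # 100 < 125, then 100 + 80 = 180 >= 125, so the loop stops at 80
    if currentlength >= genome_length * 0.5:
        n50 = contig_length
        break

print(n50)  # 80
```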
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.perform_pilon | def perform_pilon(self):
"""
Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs
"""
for sample in self.metadata:
try:
if sample[self.analysistype].num_contigs > 500 or sample.confindr.contam_status == 'Contaminated':
sample.general.polish = False
else:
sample.general.polish = True
except AttributeError:
sample.general.polish = True | python | def perform_pilon(self):
"""
Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs
"""
for sample in self.metadata:
try:
if sample[self.analysistype].num_contigs > 500 or sample.confindr.contam_status == 'Contaminated':
sample.general.polish = False
else:
sample.general.polish = True
except AttributeError:
sample.general.polish = True | [
"def",
"perform_pilon",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"try",
":",
"if",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"num_contigs",
">",
"500",
"or",
"sample",
".",
"confindr",
".",
"contam_status",
"==",
"'Contaminated'",
":",
"sample",
".",
"general",
".",
"polish",
"=",
"False",
"else",
":",
"sample",
".",
"general",
".",
"polish",
"=",
"True",
"except",
"AttributeError",
":",
"sample",
".",
"general",
".",
"polish",
"=",
"True"
]
| Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs | [
"Determine",
"if",
"pilon",
"polishing",
"should",
"be",
"attempted",
".",
"Do",
"not",
"perform",
"polishing",
"if",
"confindr",
"determines",
"that",
"the",
"sample",
"is",
"contaminated",
"or",
"if",
"there",
"are",
">",
"500",
"contigs"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L653-L665 | train |
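The gate reduces to a small predicate; a sketch using the thresholds from the method (the 'Contaminated' label comes from the code above, the rest is invented):

```python
# Skip pilon polishing for fragmented (> 500 contigs) or contaminated samples.
# Missing inputs default to polishing, mirroring the AttributeError fallback.
def should_polish(num_contigs, contam_status=None):
    if num_contigs is None or contam_status is None:
        return True
    return not (num_contigs > 500 or contam_status == 'Contaminated')

print(should_polish(120, 'Clean'))         # True
print(should_polish(900, 'Clean'))         # False
print(should_polish(120, 'Contaminated'))  # False
```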
lowandrew/OLCTools | spadespipeline/quality.py | QualityFeatures.clear_attributes | def clear_attributes(self):
"""
Remove the record_dict attribute from the object, as SeqRecords are not JSON-serializable. Also remove
the contig_lengths and longest_contig attributes, as they are large lists that make the .json file ugly
"""
for sample in self.metadata:
try:
delattr(sample[self.analysistype], 'record_dict')
delattr(sample[self.analysistype], 'contig_lengths')
delattr(sample[self.analysistype], 'longest_contig')
except AttributeError:
pass | python | def clear_attributes(self):
"""
Remove the record_dict attribute from the object, as SeqRecords are not JSON-serializable. Also remove
the contig_lengths and longest_contig attributes, as they are large lists that make the .json file ugly
"""
for sample in self.metadata:
try:
delattr(sample[self.analysistype], 'record_dict')
delattr(sample[self.analysistype], 'contig_lengths')
delattr(sample[self.analysistype], 'longest_contig')
except AttributeError:
pass | [
"def",
"clear_attributes",
"(",
"self",
")",
":",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"try",
":",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'record_dict'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'contig_lengths'",
")",
"delattr",
"(",
"sample",
"[",
"self",
".",
"analysistype",
"]",
",",
"'longest_contig'",
")",
"except",
"AttributeError",
":",
"pass"
]
| Remove the record_dict attribute from the object, as SeqRecords are not JSON-serializable. Also remove
the contig_lengths and longest_contig attributes, as they are large lists that make the .json file ugly | [
"Remove",
"the",
"record_dict",
"attribute",
"from",
"the",
"object",
"as",
"SeqRecords",
"are",
"not",
"JSON",
"-",
"serializable",
".",
"Also",
"remove",
"the",
"contig_lengths",
"and",
"longest_contig",
"attributes",
"as",
"they",
"are",
"large",
"lists",
"that",
"make",
"the",
".",
"json",
"file",
"ugly"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L667-L678 | train |
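The delattr calls exist because SeqRecord objects cannot pass through json.dumps; a toy reproduction with a plain stand-in object:

```python
import json

class Bag:  # stand-in for the pipeline's GenObject
    pass

sample = Bag()
sample.gc = 50.25
sample.record_dict = object()  # not JSON-serializable, like a dict of SeqRecords

try:
    json.dumps(vars(sample))
except TypeError as exc:
    print('dump failed:', exc)

delattr(sample, 'record_dict')  # same cleanup as clear_attributes()
print(json.dumps(vars(sample)))  # {"gc": 50.25}
```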
lowandrew/OLCTools | spadespipeline/quality.py | GenomeQAML.run_qaml | def run_qaml(self):
"""
Create and run the GenomeQAML system call
"""
logging.info('Running GenomeQAML quality assessment')
qaml_call = 'classify.py -t {tf} -r {rf}'\
.format(tf=self.qaml_path,
rf=self.qaml_report)
make_path(self.reportpath)
# Only attempt to assess assemblies if the report doesn't already exist
if not os.path.isfile(self.qaml_report):
# Run the system calls
out, err = run_subprocess(qaml_call)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(qaml_call, qaml_call, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() | python | def run_qaml(self):
"""
Create and run the GenomeQAML system call
"""
logging.info('Running GenomeQAML quality assessment')
qaml_call = 'classify.py -t {tf} -r {rf}'\
.format(tf=self.qaml_path,
rf=self.qaml_report)
make_path(self.reportpath)
# Only attempt to assess assemblies if the report doesn't already exist
if not os.path.isfile(self.qaml_report):
# Run the system calls
out, err = run_subprocess(qaml_call)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(qaml_call, qaml_call, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() | [
"def",
"run_qaml",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Running GenomeQAML quality assessment'",
")",
"qaml_call",
"=",
"'classify.py -t {tf} -r {rf}'",
".",
"format",
"(",
"tf",
"=",
"self",
".",
"qaml_path",
",",
"rf",
"=",
"self",
".",
"qaml_report",
")",
"make_path",
"(",
"self",
".",
"reportpath",
")",
"# Only attempt to assess assemblies if the report doesn't already exist",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"qaml_report",
")",
":",
"# Run the system calls",
"out",
",",
"err",
"=",
"run_subprocess",
"(",
"qaml_call",
")",
"# Acquire thread lock, and write the logs to file",
"self",
".",
"threadlock",
".",
"acquire",
"(",
")",
"write_to_logfile",
"(",
"qaml_call",
",",
"qaml_call",
",",
"self",
".",
"logfile",
")",
"write_to_logfile",
"(",
"out",
",",
"err",
",",
"self",
".",
"logfile",
")",
"self",
".",
"threadlock",
".",
"release",
"(",
")"
]
| Create and run the GenomeQAML system call | [
"Create",
"and",
"run",
"the",
"GenomeQAML",
"system",
"call"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L695-L712 | train |
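The run pattern (build a shell command, skip it if the report already exists, serialize log writes behind a lock) can be sketched without the pipeline helpers; subprocess and plain file appends stand in for run_subprocess and write_to_logfile, and the paths are invented:

```python
import os
import subprocess
import threading

threadlock = threading.Lock()

def run_once(cmd, report, logfile):
    # Only run the tool if its report is not already on disk
    if os.path.isfile(report):
        return
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    # Hold the lock so concurrent workers do not interleave log lines
    with threadlock:
        with open(logfile, 'a') as log:
            log.write(cmd + '\n' + proc.stdout + proc.stderr)

run_once('classify.py -t /tmp/qaml -r /tmp/qaml_report.csv',
         '/tmp/qaml_report.csv', '/tmp/pipeline.log')
```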
lowandrew/OLCTools | spadespipeline/quality.py | GenomeQAML.parse_qaml | def parse_qaml(self):
"""
Parse the GenomeQAML report, and populate metadata objects
"""
logging.info('Parsing GenomeQAML outputs')
        # A dictionary to store the parsed CSV file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())
dictionary = pandas.read_csv(self.qaml_report).to_dict()
# Iterate through the dictionary - each header from the CSV file
for header in dictionary:
# Sample is the primary key, and value is the value of the cell for that primary key + header combination
for sample, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[sample].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[sample] = dict()
nesteddictionary[sample].update({header: value})
# Get the results into the metadata object
for sample in self.metadata:
            # Initialise the GenomeQAML genobject
setattr(sample, self.analysistype, GenObject())
            # Initialise the prediction attribute
sample[self.analysistype].prediction = str()
# Iterate through the dictionary of results
for line in nesteddictionary:
# Extract the sample name from the dictionary
name = nesteddictionary[line]['Sample']
# Ensure that the names match
if name == sample.name:
                    # Set the predicted class extracted from the dictionary
sample[self.analysistype].prediction = nesteddictionary[line]['Predicted_Class'] | python | def parse_qaml(self):
"""
Parse the GenomeQAML report, and populate metadata objects
"""
logging.info('Parsing GenomeQAML outputs')
        # A dictionary to store the parsed CSV file in a more readable format
nesteddictionary = dict()
# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())
dictionary = pandas.read_csv(self.qaml_report).to_dict()
# Iterate through the dictionary - each header from the CSV file
for header in dictionary:
# Sample is the primary key, and value is the value of the cell for that primary key + header combination
for sample, value in dictionary[header].items():
# Update the dictionary with the new data
try:
nesteddictionary[sample].update({header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
nesteddictionary[sample] = dict()
nesteddictionary[sample].update({header: value})
# Get the results into the metadata object
for sample in self.metadata:
            # Initialise the GenomeQAML genobject
setattr(sample, self.analysistype, GenObject())
            # Initialise the prediction attribute
sample[self.analysistype].prediction = str()
# Iterate through the dictionary of results
for line in nesteddictionary:
# Extract the sample name from the dictionary
name = nesteddictionary[line]['Sample']
# Ensure that the names match
if name == sample.name:
                    # Set the predicted class extracted from the dictionary
sample[self.analysistype].prediction = nesteddictionary[line]['Predicted_Class'] | [
"def",
"parse_qaml",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Parsing GenomeQAML outputs'",
")",
"# A dictionary to store the parsed excel file in a more readable format",
"nesteddictionary",
"=",
"dict",
"(",
")",
"# Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict())",
"dictionary",
"=",
"pandas",
".",
"read_csv",
"(",
"self",
".",
"qaml_report",
")",
".",
"to_dict",
"(",
")",
"# Iterate through the dictionary - each header from the CSV file",
"for",
"header",
"in",
"dictionary",
":",
"# Sample is the primary key, and value is the value of the cell for that primary key + header combination",
"for",
"sample",
",",
"value",
"in",
"dictionary",
"[",
"header",
"]",
".",
"items",
"(",
")",
":",
"# Update the dictionary with the new data",
"try",
":",
"nesteddictionary",
"[",
"sample",
"]",
".",
"update",
"(",
"{",
"header",
":",
"value",
"}",
")",
"# Create the nested dictionary if it hasn't been created yet",
"except",
"KeyError",
":",
"nesteddictionary",
"[",
"sample",
"]",
"=",
"dict",
"(",
")",
"nesteddictionary",
"[",
"sample",
"]",
".",
"update",
"(",
"{",
"header",
":",
"value",
"}",
")",
"# Get the results into the metadata object",
"for",
"sample",
"in",
"self",
".",
"metadata",
":",
"# Initialise the plasmid extractor genobject",
"setattr",
"(",
"sample",
",",
"self",
".",
"analysistype",
",",
"GenObject",
"(",
")",
")",
"# Initialise the list of all plasmids",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"prediction",
"=",
"str",
"(",
")",
"# Iterate through the dictionary of results",
"for",
"line",
"in",
"nesteddictionary",
":",
"# Extract the sample name from the dictionary",
"name",
"=",
"nesteddictionary",
"[",
"line",
"]",
"[",
"'Sample'",
"]",
"# Ensure that the names match",
"if",
"name",
"==",
"sample",
".",
"name",
":",
"# Append the plasmid name extracted from the dictionary to the list of plasmids",
"sample",
"[",
"self",
".",
"analysistype",
"]",
".",
"prediction",
"=",
"nesteddictionary",
"[",
"line",
"]",
"[",
"'Predicted_Class'",
"]"
]
| Parse the GenomeQAML report, and populate metadata objects | [
"Parse",
"the",
"GenomeQAML",
"report",
"and",
"populate",
"metadata",
"objects"
]
| 88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a | https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/quality.py#L714-L747 | train |
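pandas' to_dict() returns a column-oriented mapping ({header: {row: value}}), which is why the method re-nests it by row; pandas can also produce the row-oriented form directly with orient='index'. A sketch with invented data:

```python
import pandas as pd

df = pd.DataFrame({'Sample': ['A', 'B'], 'Predicted_Class': ['Pass', 'Fail']})

# Column-oriented, as used above: {'Sample': {0: 'A', 1: 'B'}, ...}
by_column = df.to_dict()

# Manual re-nesting, mirroring parse_qaml
nested = {}
for header, column in by_column.items():
    for row, value in column.items():
        nested.setdefault(row, {})[header] = value

# Equivalent one-liner
assert nested == df.to_dict(orient='index')
print(nested[0]['Predicted_Class'])  # Pass
```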
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/addons/mayagenesis/mayagenesis.py | MayaGenesis.init | def init(self, ):
"""Initialize the plugin. Do nothing.
This function gets called when the plugin is loaded by the plugin manager.
:returns:
:rtype:
:raises:
"""
self.gw = None
pm = MayaPluginManager.get()
genesis = pm.get_plugin("Genesis")
self.GenesisWin = self.subclass_genesis(genesis.GenesisWin) | python | def init(self, ):
"""Initialize the plugin. Do nothing.
This function gets called when the plugin is loaded by the plugin manager.
:returns:
:rtype:
:raises:
"""
self.gw = None
pm = MayaPluginManager.get()
genesis = pm.get_plugin("Genesis")
self.GenesisWin = self.subclass_genesis(genesis.GenesisWin) | [
"def",
"init",
"(",
"self",
",",
")",
":",
"self",
".",
"gw",
"=",
"None",
"pm",
"=",
"MayaPluginManager",
".",
"get",
"(",
")",
"genesis",
"=",
"pm",
".",
"get_plugin",
"(",
"\"Genesis\"",
")",
"self",
".",
"GenesisWin",
"=",
"self",
".",
"subclass_genesis",
"(",
"genesis",
".",
"GenesisWin",
")"
]
| Initialize the plugin. Do nothing.
This function gets called when the plugin is loaded by the plugin manager.
:returns:
:rtype:
:raises: | [
"Initialize",
"the",
"plugin",
".",
"Do",
"nothing",
"."
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/mayagenesis/mayagenesis.py#L34-L46 | train |
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/addons/mayagenesis/mayagenesis.py | MayaGenesis.save_lastfile | def save_lastfile(self, tfi):
"""Save the taskfile in the config
:param tfi: the last selected taskfileinfo
:type tfi: class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None
"""
tf = models.TaskFile.objects.get(task=tfi.task, version=tfi.version, releasetype=tfi.releasetype,
descriptor=tfi.descriptor, typ=tfi.typ)
c = self.get_config()
c['lastfile'] = tf.pk
c.write() | python | def save_lastfile(self, tfi):
"""Save the taskfile in the config
:param tfi: the last selected taskfileinfo
:type tfi: class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None
"""
tf = models.TaskFile.objects.get(task=tfi.task, version=tfi.version, releasetype=tfi.releasetype,
descriptor=tfi.descriptor, typ=tfi.typ)
c = self.get_config()
c['lastfile'] = tf.pk
c.write() | [
"def",
"save_lastfile",
"(",
"self",
",",
"tfi",
")",
":",
"tf",
"=",
"models",
".",
"TaskFile",
".",
"objects",
".",
"get",
"(",
"task",
"=",
"tfi",
".",
"task",
",",
"version",
"=",
"tfi",
".",
"version",
",",
"releasetype",
"=",
"tfi",
".",
"releasetype",
",",
"descriptor",
"=",
"tfi",
".",
"descriptor",
",",
"typ",
"=",
"tfi",
".",
"typ",
")",
"c",
"=",
"self",
".",
"get_config",
"(",
")",
"c",
"[",
"'lastfile'",
"]",
"=",
"tf",
".",
"pk",
"c",
".",
"write",
"(",
")"
]
| Save the taskfile in the config
:param tfi: the last selected taskfileinfo
:type tfi: class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None | [
"Save",
"the",
"taskfile",
"in",
"the",
"config"
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/mayagenesis/mayagenesis.py#L101-L114 | train |
JukeboxPipeline/jukeboxmaya | src/jukeboxmaya/addons/mayagenesis/mayagenesis.py | MayaGenesis.subclass_genesis | def subclass_genesis(self, genesisclass):
"""Subclass the given genesis class and implement all abstract methods
:param genesisclass: the GenesisWin class to subclass
:type genesisclass: :class:`GenesisWin`
:returns: the subclass
:rtype: subclass of :class:`GenesisWin`
:raises: None
"""
class MayaGenesisWin(genesisclass):
"""Implementation of Genesis for maya
"""
def open_shot(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the shot
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: none
"""
return self.open_file(taskfile)
def save_shot(self, jbfile, tf):
"""Save the shot to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def open_asset(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
return self.open_file(taskfile)
def save_asset(self, jbfile, tf):
"""Save the asset to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: NotImplementedError
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def save_file(self, jbfile):
"""Physically save current scene to jbfile\'s location
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:returns: None
:rtype: None
:raises: None
"""
p = jbfile.get_fullpath()
p = os.path.expanduser(p)
typ = 'mayaBinary'
if jbfile.get_ext() == 'ma':
typ = 'mayaAscii'
cmds.file(rename = p)
cmds.file(save=True, defaultExtensions=False, type=typ)
def open_file(self, taskfile):
"""Open the given jbfile in maya
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
r = self.check_modified()
if r is False:
return False
cmds.file(taskfile.path, open=True, force=True, ignoreVersion=True)
return True
def get_current_file(self, ):
"""Return the taskfile that is currently open or None if no taskfile is open
:returns: the open taskfile or None if no taskfile is open
:rtype: :class:`djadapter.models.TaskFile` | None
:raises: None
"""
node = jbscene.get_current_scene_node()
if not node:
return
tfid = cmds.getAttr('%s.taskfile_id' % node)
try:
return djadapter.taskfiles.get(id=tfid)
except djadapter.models.TaskFile.DoesNotExist:
log.error("No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \'%s\'." % (tfid, node))
return
def get_scene_node(self, ):
"""Return the current scenen node or create one if it does not exist
:returns: Name of the scene node
:rtype: str
:raises: None
"""
scenenodes = cmds.ls(':jb_sceneNode*')
if len(scenenodes) > 1:
cmds.delete(scenenodes)
node = jbscene.get_current_scene_node()
if node is None:
cmds.namespace(set=':')
node = cmds.createNode('jb_sceneNode')
return node
def update_scene_node(self, tf):
"""Update the current scene node
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
node = self.get_scene_node()
cmds.setAttr('%s.taskfile_id' % node, lock=False)
cmds.setAttr('%s.taskfile_id' % node, tf.id)
cmds.setAttr('%s.taskfile_id' % node, lock=True)
def check_modified(self, ):
"""Check if the current scene was modified and ask the user to continue
This might save the scene if the user accepts to save before continuing.
:returns: True if the user accepted to continue.
:rtype: bool
:raises: None
"""
if not cmds.file(q=1, modified=1):
return True
curfile = cmds.file(q=1, sceneName=1)
r = cmds.confirmDialog( title='Save Changes', message='Save changes to %s?' % curfile,
button=['Save', 'Don\'t Save' ,'Cancel'],
defaultButton='Save', cancelButton='Cancel',
dismissString='Cancel')
if r == 'Cancel':
return False
if r == 'Save':
cmds.file(save=True, force=True)
return True
MayaGenesisWin.set_filetype(djadapter.FILETYPES['mayamainscene'],)
return MayaGenesisWin | python | def subclass_genesis(self, genesisclass):
"""Subclass the given genesis class and implement all abstract methods
:param genesisclass: the GenesisWin class to subclass
:type genesisclass: :class:`GenesisWin`
:returns: the subclass
:rtype: subclass of :class:`GenesisWin`
:raises: None
"""
class MayaGenesisWin(genesisclass):
"""Implementation of Genesis for maya
"""
def open_shot(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the shot
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: none
"""
return self.open_file(taskfile)
def save_shot(self, jbfile, tf):
"""Save the shot to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def open_asset(self, taskfile):
"""Open the given taskfile
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
return self.open_file(taskfile)
def save_asset(self, jbfile, tf):
"""Save the asset to the location of jbfile
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: NotImplementedError
"""
self.update_scene_node(tf)
self.save_file(jbfile)
def save_file(self, jbfile):
"""Physically save current scene to jbfile\'s location
:param jbfile: the jbfile that can be used to query the location
:type jbfile: :class:`jukebox.core.filesys.JB_File`
:returns: None
:rtype: None
:raises: None
"""
p = jbfile.get_fullpath()
p = os.path.expanduser(p)
typ = 'mayaBinary'
if jbfile.get_ext() == 'ma':
typ = 'mayaAscii'
cmds.file(rename = p)
cmds.file(save=True, defaultExtensions=False, type=typ)
def open_file(self, taskfile):
"""Open the given jbfile in maya
:param taskfile: the taskfile for the asset
:type taskfile: :class:`djadapter.models.TaskFile`
:returns: True if opening was successful
:rtype: bool
:raises: None
"""
r = self.check_modified()
if r is False:
return False
cmds.file(taskfile.path, open=True, force=True, ignoreVersion=True)
return True
def get_current_file(self, ):
"""Return the taskfile that is currently open or None if no taskfile is open
:returns: the open taskfile or None if no taskfile is open
:rtype: :class:`djadapter.models.TaskFile` | None
:raises: None
"""
node = jbscene.get_current_scene_node()
if not node:
return
tfid = cmds.getAttr('%s.taskfile_id' % node)
try:
return djadapter.taskfiles.get(id=tfid)
except djadapter.models.TaskFile.DoesNotExist:
log.error("No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \'%s\'." % (tfid, node))
return
def get_scene_node(self, ):
"""Return the current scenen node or create one if it does not exist
:returns: Name of the scene node
:rtype: str
:raises: None
"""
scenenodes = cmds.ls(':jb_sceneNode*')
if len(scenenodes) > 1:
cmds.delete(scenenodes)
node = jbscene.get_current_scene_node()
if node is None:
cmds.namespace(set=':')
node = cmds.createNode('jb_sceneNode')
return node
def update_scene_node(self, tf):
"""Update the current scene node
:param tf: the taskfile that is saved
:type tf: :class:`djadapter.models.TaskFile`
:returns: None
:rtype: None
:raises: None
"""
node = self.get_scene_node()
cmds.setAttr('%s.taskfile_id' % node, lock=False)
cmds.setAttr('%s.taskfile_id' % node, tf.id)
cmds.setAttr('%s.taskfile_id' % node, lock=True)
def check_modified(self, ):
"""Check if the current scene was modified and ask the user to continue
This might save the scene if the user accepts to save before continuing.
:returns: True if the user accepted to continue.
:rtype: bool
:raises: None
"""
if not cmds.file(q=1, modified=1):
return True
curfile = cmds.file(q=1, sceneName=1)
r = cmds.confirmDialog( title='Save Changes', message='Save changes to %s?' % curfile,
button=['Save', 'Don\'t Save' ,'Cancel'],
defaultButton='Save', cancelButton='Cancel',
dismissString='Cancel')
if r == 'Cancel':
return False
if r == 'Save':
cmds.file(save=True, force=True)
return True
MayaGenesisWin.set_filetype(djadapter.FILETYPES['mayamainscene'],)
return MayaGenesisWin | [
"def",
"subclass_genesis",
"(",
"self",
",",
"genesisclass",
")",
":",
"class",
"MayaGenesisWin",
"(",
"genesisclass",
")",
":",
"\"\"\"Implementation of Genesis for maya\n \"\"\"",
"def",
"open_shot",
"(",
"self",
",",
"taskfile",
")",
":",
"\"\"\"Open the given taskfile\n\n :param taskfile: the taskfile for the shot\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: none\n \"\"\"",
"return",
"self",
".",
"open_file",
"(",
"taskfile",
")",
"def",
"save_shot",
"(",
"self",
",",
"jbfile",
",",
"tf",
")",
":",
"\"\"\"Save the shot to the location of jbfile\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"",
"self",
".",
"update_scene_node",
"(",
"tf",
")",
"self",
".",
"save_file",
"(",
"jbfile",
")",
"def",
"open_asset",
"(",
"self",
",",
"taskfile",
")",
":",
"\"\"\"Open the given taskfile\n\n :param taskfile: the taskfile for the asset\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: None\n \"\"\"",
"return",
"self",
".",
"open_file",
"(",
"taskfile",
")",
"def",
"save_asset",
"(",
"self",
",",
"jbfile",
",",
"tf",
")",
":",
"\"\"\"Save the asset to the location of jbfile\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: NotImplementedError\n \"\"\"",
"self",
".",
"update_scene_node",
"(",
"tf",
")",
"self",
".",
"save_file",
"(",
"jbfile",
")",
"def",
"save_file",
"(",
"self",
",",
"jbfile",
")",
":",
"\"\"\"Physically save current scene to jbfile\\'s location\n\n :param jbfile: the jbfile that can be used to query the location\n :type jbfile: :class:`jukebox.core.filesys.JB_File`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"",
"p",
"=",
"jbfile",
".",
"get_fullpath",
"(",
")",
"p",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"p",
")",
"typ",
"=",
"'mayaBinary'",
"if",
"jbfile",
".",
"get_ext",
"(",
")",
"==",
"'ma'",
":",
"typ",
"=",
"'mayaAscii'",
"cmds",
".",
"file",
"(",
"rename",
"=",
"p",
")",
"cmds",
".",
"file",
"(",
"save",
"=",
"True",
",",
"defaultExtensions",
"=",
"False",
",",
"type",
"=",
"typ",
")",
"def",
"open_file",
"(",
"self",
",",
"taskfile",
")",
":",
"\"\"\"Open the given jbfile in maya\n\n :param taskfile: the taskfile for the asset\n :type taskfile: :class:`djadapter.models.TaskFile`\n :returns: True if opening was successful\n :rtype: bool\n :raises: None\n \"\"\"",
"r",
"=",
"self",
".",
"check_modified",
"(",
")",
"if",
"r",
"is",
"False",
":",
"return",
"False",
"cmds",
".",
"file",
"(",
"taskfile",
".",
"path",
",",
"open",
"=",
"True",
",",
"force",
"=",
"True",
",",
"ignoreVersion",
"=",
"True",
")",
"return",
"True",
"def",
"get_current_file",
"(",
"self",
",",
")",
":",
"\"\"\"Return the taskfile that is currently open or None if no taskfile is open\n\n :returns: the open taskfile or None if no taskfile is open\n :rtype: :class:`djadapter.models.TaskFile` | None\n :raises: None\n \"\"\"",
"node",
"=",
"jbscene",
".",
"get_current_scene_node",
"(",
")",
"if",
"not",
"node",
":",
"return",
"tfid",
"=",
"cmds",
".",
"getAttr",
"(",
"'%s.taskfile_id'",
"%",
"node",
")",
"try",
":",
"return",
"djadapter",
".",
"taskfiles",
".",
"get",
"(",
"id",
"=",
"tfid",
")",
"except",
"djadapter",
".",
"models",
".",
"TaskFile",
".",
"DoesNotExist",
":",
"log",
".",
"error",
"(",
"\"No taskfile with id %s was found. Get current scene failed. Check your jb_sceneNode \\'%s\\'.\"",
"%",
"(",
"tfid",
",",
"node",
")",
")",
"return",
"def",
"get_scene_node",
"(",
"self",
",",
")",
":",
"\"\"\"Return the current scenen node or create one if it does not exist\n\n :returns: Name of the scene node\n :rtype: str\n :raises: None\n \"\"\"",
"scenenodes",
"=",
"cmds",
".",
"ls",
"(",
"':jb_sceneNode*'",
")",
"if",
"len",
"(",
"scenenodes",
")",
">",
"1",
":",
"cmds",
".",
"delete",
"(",
"scenenodes",
")",
"node",
"=",
"jbscene",
".",
"get_current_scene_node",
"(",
")",
"if",
"node",
"is",
"None",
":",
"cmds",
".",
"namespace",
"(",
"set",
"=",
"':'",
")",
"node",
"=",
"cmds",
".",
"createNode",
"(",
"'jb_sceneNode'",
")",
"return",
"node",
"def",
"update_scene_node",
"(",
"self",
",",
"tf",
")",
":",
"\"\"\"Update the current scene node\n\n :param tf: the taskfile that is saved\n :type tf: :class:`djadapter.models.TaskFile`\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"",
"node",
"=",
"self",
".",
"get_scene_node",
"(",
")",
"cmds",
".",
"setAttr",
"(",
"'%s.taskfile_id'",
"%",
"node",
",",
"lock",
"=",
"False",
")",
"cmds",
".",
"setAttr",
"(",
"'%s.taskfile_id'",
"%",
"node",
",",
"tf",
".",
"id",
")",
"cmds",
".",
"setAttr",
"(",
"'%s.taskfile_id'",
"%",
"node",
",",
"lock",
"=",
"True",
")",
"def",
"check_modified",
"(",
"self",
",",
")",
":",
"\"\"\"Check if the current scene was modified and ask the user to continue\n\n This might save the scene if the user accepts to save before continuing.\n\n :returns: True if the user accepted to continue.\n :rtype: bool\n :raises: None\n \"\"\"",
"if",
"not",
"cmds",
".",
"file",
"(",
"q",
"=",
"1",
",",
"modified",
"=",
"1",
")",
":",
"return",
"True",
"curfile",
"=",
"cmds",
".",
"file",
"(",
"q",
"=",
"1",
",",
"sceneName",
"=",
"1",
")",
"r",
"=",
"cmds",
".",
"confirmDialog",
"(",
"title",
"=",
"'Save Changes'",
",",
"message",
"=",
"'Save changes to %s?'",
"%",
"curfile",
",",
"button",
"=",
"[",
"'Save'",
",",
"'Don\\'t Save'",
",",
"'Cancel'",
"]",
",",
"defaultButton",
"=",
"'Save'",
",",
"cancelButton",
"=",
"'Cancel'",
",",
"dismissString",
"=",
"'Cancel'",
")",
"if",
"r",
"==",
"'Cancel'",
":",
"return",
"False",
"if",
"r",
"==",
"'Save'",
":",
"cmds",
".",
"file",
"(",
"save",
"=",
"True",
",",
"force",
"=",
"True",
")",
"return",
"True",
"MayaGenesisWin",
".",
"set_filetype",
"(",
"djadapter",
".",
"FILETYPES",
"[",
"'mayamainscene'",
"]",
",",
")",
"return",
"MayaGenesisWin"
]
| Subclass the given genesis class and implement all abstract methods
:param genesisclass: the GenesisWin class to subclass
:type genesisclass: :class:`GenesisWin`
:returns: the subclass
:rtype: subclass of :class:`GenesisWin`
:raises: None | [
"Subclass",
"the",
"given",
"genesis",
"class",
"and",
"implement",
"all",
"abstract",
"methods"
]
| c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c | https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/mayagenesis/mayagenesis.py#L116-L281 | train |
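The pattern here, receiving a base class at runtime from another plugin and returning a concrete subclass, is plain Python; a stripped-down sketch with an invented abstract base:

```python
# Runtime subclassing: the base arrives as an argument and the factory
# returns a subclass that fills in the abstract hooks.
def subclass_base(baseclass):
    class ConcreteWin(baseclass):
        def open_file(self, path):
            print('opening', path)
            return True
    return ConcreteWin

class AbstractWin:
    def open_file(self, path):
        raise NotImplementedError

Win = subclass_base(AbstractWin)
Win().open_file('/tmp/scene.mb')  # opening /tmp/scene.mb
```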
portfors-lab/sparkle | sparkle/run/calibration_runner.py | AbstractCalibrationRunner.stash_calibration | def stash_calibration(self, attenuations, freqs, frange, calname):
"""Save it for later"""
self.calibration_vector = attenuations
self.calibration_freqs = freqs
self.calibration_frange = frange
self.calname = calname | python | def stash_calibration(self, attenuations, freqs, frange, calname):
"""Save it for later"""
self.calibration_vector = attenuations
self.calibration_freqs = freqs
self.calibration_frange = frange
self.calname = calname | [
"def",
"stash_calibration",
"(",
"self",
",",
"attenuations",
",",
"freqs",
",",
"frange",
",",
"calname",
")",
":",
"self",
".",
"calibration_vector",
"=",
"attenuations",
"self",
".",
"calibration_freqs",
"=",
"freqs",
"self",
".",
"calibration_frange",
"=",
"frange",
"self",
".",
"calname",
"=",
"calname"
]
| Save it for later | [
"Save",
"it",
"for",
"later"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L20-L25 | train |
portfors-lab/sparkle | sparkle/run/calibration_runner.py | CalibrationRunner.set_stim_by_index | def set_stim_by_index(self, index):
"""Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int
"""
# remove any current components
self.stimulus.clearComponents()
        # insert the component referenced by index
self.stimulus.insertComponent(self.stim_components[index]) | python | def set_stim_by_index(self, index):
"""Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int
"""
# remove any current components
self.stimulus.clearComponents()
        # insert the component referenced by index
self.stimulus.insertComponent(self.stim_components[index]) | [
"def",
"set_stim_by_index",
"(",
"self",
",",
"index",
")",
":",
"# remove any current components",
"self",
".",
"stimulus",
".",
"clearComponents",
"(",
")",
"# add one to index because of tone curve",
"self",
".",
"stimulus",
".",
"insertComponent",
"(",
"self",
".",
"stim_components",
"[",
"index",
"]",
")"
]
| Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int | [
"Sets",
"the",
"stimulus",
"to",
"be",
"generated",
"to",
"the",
"one",
"referenced",
"by",
"index"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L92-L101 | train |
portfors-lab/sparkle | sparkle/run/calibration_runner.py | CalibrationRunner.process_calibration | def process_calibration(self, save=True):
"""processes calibration control signal. Determines transfer function
of speaker to get frequency vs. attenuation curve.
:param save: Whether to save this calibration data to file
:type save: bool
:returns: numpy.ndarray, str, int, float -- frequency response (in dB), dataset name, calibration reference frequency, reference intensity
"""
if not self.save_data:
raise Exception("Cannot process an unsaved calibration")
avg_signal = np.mean(self.datafile.get_data(self.current_dataset_name + '/signal'), axis=0)
diffdB = attenuation_curve(self.stimulus.signal()[0], avg_signal,
self.stimulus.samplerate(), self.calf)
logger = logging.getLogger('main')
logger.debug('The maximum dB attenuation is {}, caldB {}'.format(max(diffdB), self.caldb))
# save a vector of only the calibration intensity results
self.datafile.init_data(self.current_dataset_name, mode='calibration',
dims=diffdB.shape,
nested_name='calibration_intensities')
self.datafile.append(self.current_dataset_name, diffdB,
nested_name='calibration_intensities')
relevant_info = {'frequencies': 'all', 'calibration_dB':self.caldb,
'calibration_voltage': self.calv, 'calibration_frequency': self.calf,
}
self.datafile.set_metadata('/'.join([self.current_dataset_name, 'calibration_intensities']),
relevant_info)
mean_reftone = np.mean(self.datafile.get_data(self.current_dataset_name + '/reference_tone'), axis=0)
tone_amp = signal_amplitude(mean_reftone, self.player.get_aifs())
db = calc_db(tone_amp, self.mphonesens, self.mphonedb)
# remove the reference tone from protocol
self.protocol_model.remove(0)
return diffdB, self.current_dataset_name, self.calf, db | python | def process_calibration(self, save=True):
"""processes calibration control signal. Determines transfer function
of speaker to get frequency vs. attenuation curve.
:param save: Whether to save this calibration data to file
:type save: bool
:returns: numpy.ndarray, str, int, float -- frequency response (in dB), dataset name, calibration reference frequency, reference intensity
"""
if not self.save_data:
raise Exception("Cannot process an unsaved calibration")
avg_signal = np.mean(self.datafile.get_data(self.current_dataset_name + '/signal'), axis=0)
diffdB = attenuation_curve(self.stimulus.signal()[0], avg_signal,
self.stimulus.samplerate(), self.calf)
logger = logging.getLogger('main')
logger.debug('The maximum dB attenuation is {}, caldB {}'.format(max(diffdB), self.caldb))
# save a vector of only the calibration intensity results
self.datafile.init_data(self.current_dataset_name, mode='calibration',
dims=diffdB.shape,
nested_name='calibration_intensities')
self.datafile.append(self.current_dataset_name, diffdB,
nested_name='calibration_intensities')
relevant_info = {'frequencies': 'all', 'calibration_dB':self.caldb,
'calibration_voltage': self.calv, 'calibration_frequency': self.calf,
}
self.datafile.set_metadata('/'.join([self.current_dataset_name, 'calibration_intensities']),
relevant_info)
mean_reftone = np.mean(self.datafile.get_data(self.current_dataset_name + '/reference_tone'), axis=0)
tone_amp = signal_amplitude(mean_reftone, self.player.get_aifs())
db = calc_db(tone_amp, self.mphonesens, self.mphonedb)
# remove the reference tone from protocol
self.protocol_model.remove(0)
return diffdB, self.current_dataset_name, self.calf, db | [
"def",
"process_calibration",
"(",
"self",
",",
"save",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"save_data",
":",
"raise",
"Exception",
"(",
"\"Cannot process an unsaved calibration\"",
")",
"avg_signal",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"datafile",
".",
"get_data",
"(",
"self",
".",
"current_dataset_name",
"+",
"'/signal'",
")",
",",
"axis",
"=",
"0",
")",
"diffdB",
"=",
"attenuation_curve",
"(",
"self",
".",
"stimulus",
".",
"signal",
"(",
")",
"[",
"0",
"]",
",",
"avg_signal",
",",
"self",
".",
"stimulus",
".",
"samplerate",
"(",
")",
",",
"self",
".",
"calf",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'main'",
")",
"logger",
".",
"debug",
"(",
"'The maximum dB attenuation is {}, caldB {}'",
".",
"format",
"(",
"max",
"(",
"diffdB",
")",
",",
"self",
".",
"caldb",
")",
")",
"# save a vector of only the calibration intensity results",
"self",
".",
"datafile",
".",
"init_data",
"(",
"self",
".",
"current_dataset_name",
",",
"mode",
"=",
"'calibration'",
",",
"dims",
"=",
"diffdB",
".",
"shape",
",",
"nested_name",
"=",
"'calibration_intensities'",
")",
"self",
".",
"datafile",
".",
"append",
"(",
"self",
".",
"current_dataset_name",
",",
"diffdB",
",",
"nested_name",
"=",
"'calibration_intensities'",
")",
"relevant_info",
"=",
"{",
"'frequencies'",
":",
"'all'",
",",
"'calibration_dB'",
":",
"self",
".",
"caldb",
",",
"'calibration_voltage'",
":",
"self",
".",
"calv",
",",
"'calibration_frequency'",
":",
"self",
".",
"calf",
",",
"}",
"self",
".",
"datafile",
".",
"set_metadata",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"current_dataset_name",
",",
"'calibration_intensities'",
"]",
")",
",",
"relevant_info",
")",
"mean_reftone",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"datafile",
".",
"get_data",
"(",
"self",
".",
"current_dataset_name",
"+",
"'/reference_tone'",
")",
",",
"axis",
"=",
"0",
")",
"tone_amp",
"=",
"signal_amplitude",
"(",
"mean_reftone",
",",
"self",
".",
"player",
".",
"get_aifs",
"(",
")",
")",
"db",
"=",
"calc_db",
"(",
"tone_amp",
",",
"self",
".",
"mphonesens",
",",
"self",
".",
"mphonedb",
")",
"# remove the reference tone from protocol",
"self",
".",
"protocol_model",
".",
"remove",
"(",
"0",
")",
"return",
"diffdB",
",",
"self",
".",
"current_dataset_name",
",",
"self",
".",
"calf",
",",
"db"
]
| processes calibration control signal. Determines transfer function
of speaker to get frequency vs. attenuation curve.
:param save: Whether to save this calibration data to file
:type save: bool
:returns: numpy.ndarray, str, int, float -- frequency response (in dB), dataset name, calibration reference frequency, reference intensity | [
"processes",
"calibration",
"control",
"signal",
".",
"Determines",
"transfer",
"function",
"of",
"speaker",
"to",
"get",
"frequency",
"vs",
".",
"attenuation",
"curve",
"."
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L161-L198 | train |
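The final intensity check averages the reference-tone recordings and converts an amplitude to decibels against the microphone calibration; the exact signatures of signal_amplitude and calc_db are not shown in this record, but the underlying arithmetic is the standard 20*log10 ratio. A worked sketch with invented values:

```python
import numpy as np

# Hypothetical stand-in for calc_db: dB relative to the microphone's
# sensitivity, offset by its stated reference level.
def amplitude_to_db(amplitude, mic_sensitivity, mic_ref_db):
    return 20 * np.log10(amplitude / mic_sensitivity) + mic_ref_db

reps = np.array([[0.9, 1.1, 1.0], [1.1, 0.9, 1.0]])  # invented recordings
mean_tone = np.mean(reps, axis=0)                     # average across repetitions
amp = np.max(np.abs(mean_tone))                       # crude peak amplitude
print(round(amplitude_to_db(amp, mic_sensitivity=0.004, mic_ref_db=94), 1))
```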
portfors-lab/sparkle | sparkle/gui/stim/explore_stim_editor.py | ExploreStimulusEditor.setModel | def setModel(self, model):
"Sets the StimulusModel for this editor"
self._model = model
self.ui.aofsSpnbx.setValue(model.samplerate()) | python | def setModel(self, model):
"Sets the StimulusModel for this editor"
self._model = model
self.ui.aofsSpnbx.setValue(model.samplerate()) | [
"def",
"setModel",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"_model",
"=",
"model",
"self",
".",
"ui",
".",
"aofsSpnbx",
".",
"setValue",
"(",
"model",
".",
"samplerate",
"(",
")",
")"
]
| Sets the StimulusModel for this editor | [
"Sets",
"the",
"StimulusModel",
"for",
"this",
"editor"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/explore_stim_editor.py#L31-L34 | train |
portfors-lab/sparkle | sparkle/gui/stim/explore_stim_editor.py | ExploreStimulusEditor.setStimIndex | def setStimIndex(self, row, stimIndex):
"Change out the component type in row to the one indexed by stimIndex"
newcomp = self._allComponents[row][stimIndex]
self._model.removeComponent(row, 1)
self._model.insertComponent(newcomp, row, 1) | python | def setStimIndex(self, row, stimIndex):
"Change out the component type in row to the one indexed by stimIndex"
newcomp = self._allComponents[row][stimIndex]
self._model.removeComponent(row, 1)
self._model.insertComponent(newcomp, row, 1) | [
"def",
"setStimIndex",
"(",
"self",
",",
"row",
",",
"stimIndex",
")",
":",
"newcomp",
"=",
"self",
".",
"_allComponents",
"[",
"row",
"]",
"[",
"stimIndex",
"]",
"self",
".",
"_model",
".",
"removeComponent",
"(",
"row",
",",
"1",
")",
"self",
".",
"_model",
".",
"insertComponent",
"(",
"newcomp",
",",
"row",
",",
"1",
")"
]
| Change out the component type in row to the one indexed by stimIndex | [
"Change",
"out",
"the",
"component",
"type",
"in",
"row",
"to",
"the",
"one",
"indexed",
"by",
"stimIndex"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/explore_stim_editor.py#L44-L48 | train |
portfors-lab/sparkle | sparkle/gui/stim/explore_stim_editor.py | ExploreStimulusEditor.addComponentEditor | def addComponentEditor(self):
"""Adds a new component to the model, and an editor for this component to this editor"""
row = self._model.rowCount()
comp_stack_editor = ExploreComponentEditor()
self.ui.trackStack.addWidget(comp_stack_editor)
idx_button = IndexButton(row)
idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex)
self.trackBtnGroup.addButton(idx_button)
self.ui.trackBtnLayout.addWidget(idx_button)
self.ui.trackStack.setCurrentIndex(row)
comp_stack_editor.closePlease.connect(self.removeComponentEditor)
delay = Silence()
comp_stack_editor.delaySpnbx.setValue(delay.duration())
self._model.insertComponent(delay, row,0)
self._allComponents.append([x() for x in self.stimuli_types if x.explore])
for stim in self._allComponents[row]:
editor = wrapComponent(stim).showEditor()
comp_stack_editor.addWidget(editor, stim.name)
exvocal = comp_stack_editor.widgetForName("Vocalization")
if exvocal is not None:
exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
initcomp = self._allComponents[row][0]
self._model.insertComponent(initcomp, row, 1)
self.buttons.append(idx_button)
comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x : self.setStimIndex(row, x))
comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x : self.setDelay(row, x))
comp_stack_editor.valueChanged.connect(self.valueChanged.emit)
return comp_stack_editor | python | def addComponentEditor(self):
"""Adds a new component to the model, and an editor for this component to this editor"""
row = self._model.rowCount()
comp_stack_editor = ExploreComponentEditor()
self.ui.trackStack.addWidget(comp_stack_editor)
idx_button = IndexButton(row)
idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex)
self.trackBtnGroup.addButton(idx_button)
self.ui.trackBtnLayout.addWidget(idx_button)
self.ui.trackStack.setCurrentIndex(row)
comp_stack_editor.closePlease.connect(self.removeComponentEditor)
delay = Silence()
comp_stack_editor.delaySpnbx.setValue(delay.duration())
self._model.insertComponent(delay, row,0)
self._allComponents.append([x() for x in self.stimuli_types if x.explore])
for stim in self._allComponents[row]:
editor = wrapComponent(stim).showEditor()
comp_stack_editor.addWidget(editor, stim.name)
exvocal = comp_stack_editor.widgetForName("Vocalization")
if exvocal is not None:
exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
initcomp = self._allComponents[row][0]
self._model.insertComponent(initcomp, row, 1)
self.buttons.append(idx_button)
comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x : self.setStimIndex(row, x))
comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x : self.setDelay(row, x))
comp_stack_editor.valueChanged.connect(self.valueChanged.emit)
return comp_stack_editor | [
"def",
"addComponentEditor",
"(",
"self",
")",
":",
"row",
"=",
"self",
".",
"_model",
".",
"rowCount",
"(",
")",
"comp_stack_editor",
"=",
"ExploreComponentEditor",
"(",
")",
"self",
".",
"ui",
".",
"trackStack",
".",
"addWidget",
"(",
"comp_stack_editor",
")",
"idx_button",
"=",
"IndexButton",
"(",
"row",
")",
"idx_button",
".",
"pickMe",
".",
"connect",
"(",
"self",
".",
"ui",
".",
"trackStack",
".",
"setCurrentIndex",
")",
"self",
".",
"trackBtnGroup",
".",
"addButton",
"(",
"idx_button",
")",
"self",
".",
"ui",
".",
"trackBtnLayout",
".",
"addWidget",
"(",
"idx_button",
")",
"self",
".",
"ui",
".",
"trackStack",
".",
"setCurrentIndex",
"(",
"row",
")",
"comp_stack_editor",
".",
"closePlease",
".",
"connect",
"(",
"self",
".",
"removeComponentEditor",
")",
"delay",
"=",
"Silence",
"(",
")",
"comp_stack_editor",
".",
"delaySpnbx",
".",
"setValue",
"(",
"delay",
".",
"duration",
"(",
")",
")",
"self",
".",
"_model",
".",
"insertComponent",
"(",
"delay",
",",
"row",
",",
"0",
")",
"self",
".",
"_allComponents",
".",
"append",
"(",
"[",
"x",
"(",
")",
"for",
"x",
"in",
"self",
".",
"stimuli_types",
"if",
"x",
".",
"explore",
"]",
")",
"for",
"stim",
"in",
"self",
".",
"_allComponents",
"[",
"row",
"]",
":",
"editor",
"=",
"wrapComponent",
"(",
"stim",
")",
".",
"showEditor",
"(",
")",
"comp_stack_editor",
".",
"addWidget",
"(",
"editor",
",",
"stim",
".",
"name",
")",
"exvocal",
"=",
"comp_stack_editor",
".",
"widgetForName",
"(",
"\"Vocalization\"",
")",
"if",
"exvocal",
"is",
"not",
"None",
":",
"exvocal",
".",
"filelistView",
".",
"setSelectionMode",
"(",
"QtGui",
".",
"QAbstractItemView",
".",
"SingleSelection",
")",
"initcomp",
"=",
"self",
".",
"_allComponents",
"[",
"row",
"]",
"[",
"0",
"]",
"self",
".",
"_model",
".",
"insertComponent",
"(",
"initcomp",
",",
"row",
",",
"1",
")",
"self",
".",
"buttons",
".",
"append",
"(",
"idx_button",
")",
"comp_stack_editor",
".",
"exploreStimTypeCmbbx",
".",
"currentIndexChanged",
".",
"connect",
"(",
"lambda",
"x",
":",
"self",
".",
"setStimIndex",
"(",
"row",
",",
"x",
")",
")",
"comp_stack_editor",
".",
"delaySpnbx",
".",
"valueChanged",
".",
"connect",
"(",
"lambda",
"x",
":",
"self",
".",
"setDelay",
"(",
"row",
",",
"x",
")",
")",
"comp_stack_editor",
".",
"valueChanged",
".",
"connect",
"(",
"self",
".",
"valueChanged",
".",
"emit",
")",
"return",
"comp_stack_editor"
]
| Adds a new component to the model, and an editor for this component to this editor | [
"Adds",
"a",
"new",
"component",
"to",
"the",
"model",
"and",
"an",
"editor",
"for",
"this",
"component",
"to",
"this",
"editor"
]
| 5fad1cf2bec58ec6b15d91da20f6236a74826110 | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/explore_stim_editor.py#L63-L99 | train |
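The lambdas connected above close over the local row; because each call to addComponentEditor gets its own row, that is safe here, but the same pattern inside a loop hits Python's late-binding closures. The usual fix, shown without Qt, is a default argument:

```python
# Late-binding pitfall when building per-index callbacks in a loop.
callbacks_bad = [lambda x: (row, x) for row in range(3)]
print(callbacks_bad[0]('v'))   # (2, 'v'): every lambda sees the final row

callbacks_good = [lambda x, row=row: (row, x) for row in range(3)]
print(callbacks_good[0]('v'))  # (0, 'v'): the default argument freezes row
```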
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.list_space_systems | def list_space_systems(self, page_size=None):
"""
Lists the space systems visible to this client.
Space systems are returned in lexicographical order.
:rtype: :class:`.SpaceSystem` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/space-systems'.format(self._instance),
params=params,
response_class=mdb_pb2.ListSpaceSystemsResponse,
items_key='spaceSystem',
item_mapper=SpaceSystem,
) | python | def list_space_systems(self, page_size=None):
"""
Lists the space systems visible to this client.
Space systems are returned in lexicographical order.
:rtype: :class:`.SpaceSystem` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/space-systems'.format(self._instance),
params=params,
response_class=mdb_pb2.ListSpaceSystemsResponse,
items_key='spaceSystem',
item_mapper=SpaceSystem,
) | [
"def",
"list_space_systems",
"(",
"self",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/space-systems'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListSpaceSystemsResponse",
",",
"items_key",
"=",
"'spaceSystem'",
",",
"item_mapper",
"=",
"SpaceSystem",
",",
")"
]
| Lists the space systems visible to this client.
Space systems are returned in lexicographical order.
:rtype: :class:`.SpaceSystem` iterator | [
"Lists",
"the",
"space",
"systems",
"visible",
"to",
"this",
"client",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L15-L35 | train |
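Typical client-side use of the returned iterator; the server address and instance name are placeholders, and YamcsClient/get_mdb are assumed as the usual entry points to this client:

```python
from yamcs.client import YamcsClient

client = YamcsClient('localhost:8090')      # placeholder address
mdb = client.get_mdb(instance='simulator')  # placeholder instance

# Pages of 25 are fetched lazily as the iterator advances
for space_system in mdb.list_space_systems(page_size=25):
    print(space_system)
```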
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.get_space_system | def get_space_system(self, name):
"""
Gets a single space system by its unique name.
:param str name: A fully-qualified XTCE name
:rtype: .SpaceSystem
"""
url = '/mdb/{}/space-systems{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.SpaceSystemInfo()
message.ParseFromString(response.content)
return SpaceSystem(message) | python | def get_space_system(self, name):
"""
Gets a single space system by its unique name.
:param str name: A fully-qualified XTCE name
:rtype: .SpaceSystem
"""
url = '/mdb/{}/space-systems{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.SpaceSystemInfo()
message.ParseFromString(response.content)
return SpaceSystem(message) | [
"def",
"get_space_system",
"(",
"self",
",",
"name",
")",
":",
"url",
"=",
"'/mdb/{}/space-systems{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"name",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"url",
")",
"message",
"=",
"mdb_pb2",
".",
"SpaceSystemInfo",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"SpaceSystem",
"(",
"message",
")"
]
| Gets a single space system by its unique name.
:param str name: A fully-qualified XTCE name
:rtype: .SpaceSystem | [
"Gets",
"a",
"single",
"space",
"system",
"by",
"its",
"unique",
"name",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L37-L48 | train |
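The two MDB records above (list_space_systems / get_space_system) read more easily next to a usage sketch. Everything below is hypothetical: it assumes a Yamcs server on localhost:8090 with an instance named 'simulator', and uses only calls that appear in these records plus the documented `YamcsClient`/`get_mdb` entry points of the same yamcs-client package.

```python
# Hypothetical usage of the MDBClient space-system methods shown above.
# Assumptions: server at localhost:8090, instance 'simulator', space system '/YSS'.
from yamcs.client import YamcsClient

client = YamcsClient('localhost:8090')
mdb = client.get_mdb(instance='simulator')

for space_system in mdb.list_space_systems(page_size=50):
    print(space_system)

yss = mdb.get_space_system('/YSS')  # fully-qualified XTCE name
```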
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.list_parameters | def list_parameters(self, parameter_type=None, page_size=None):
"""Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator
"""
params = {'details': True}
if parameter_type is not None:
params['type'] = parameter_type
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/parameters'.format(self._instance),
params=params,
response_class=mdb_pb2.ListParametersResponse,
items_key='parameter',
item_mapper=Parameter,
) | python | def list_parameters(self, parameter_type=None, page_size=None):
"""Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator
"""
params = {'details': True}
if parameter_type is not None:
params['type'] = parameter_type
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/parameters'.format(self._instance),
params=params,
response_class=mdb_pb2.ListParametersResponse,
items_key='parameter',
item_mapper=Parameter,
) | [
"def",
"list_parameters",
"(",
"self",
",",
"parameter_type",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'details'",
":",
"True",
"}",
"if",
"parameter_type",
"is",
"not",
"None",
":",
"params",
"[",
"'type'",
"]",
"=",
"parameter_type",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/parameters'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListParametersResponse",
",",
"items_key",
"=",
"'parameter'",
",",
"item_mapper",
"=",
"Parameter",
",",
")"
]
| Lists the parameters visible to this client.
Parameters are returned in lexicographical order.
:param str parameter_type: The type of parameter
:rtype: :class:`.Parameter` iterator | [
"Lists",
"the",
"parameters",
"visible",
"to",
"this",
"client",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L50-L72 | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.get_parameter | def get_parameter(self, name):
"""
Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/parameters{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ParameterInfo()
message.ParseFromString(response.content)
return Parameter(message) | python | def get_parameter(self, name):
"""
Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/parameters{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ParameterInfo()
message.ParseFromString(response.content)
return Parameter(message) | [
"def",
"get_parameter",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"adapt_name_for_rest",
"(",
"name",
")",
"url",
"=",
"'/mdb/{}/parameters{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"name",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"url",
")",
"message",
"=",
"mdb_pb2",
".",
"ParameterInfo",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"Parameter",
"(",
"message",
")"
]
| Gets a single parameter by its name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Parameter | [
"Gets",
"a",
"single",
"parameter",
"by",
"its",
"name",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L74-L87 | train |
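A parameter-lookup sketch for the two records above, under the same assumptions as the previous one; '/YSS/SIMULATOR/BatteryVoltage1' is a made-up parameter name and 'float' is just one plausible `parameter_type` value.

```python
# Hypothetical parameter lookup via the list/get methods shown above.
from yamcs.client import YamcsClient

mdb = YamcsClient('localhost:8090').get_mdb(instance='simulator')

for parameter in mdb.list_parameters(parameter_type='float'):
    print(parameter)

battery = mdb.get_parameter('/YSS/SIMULATOR/BatteryVoltage1')
```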
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.list_containers | def list_containers(self, page_size=None):
"""
Lists the containers visible to this client.
Containers are returned in lexicographical order.
:rtype: :class:`.Container` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/containers'.format(self._instance),
params=params,
response_class=mdb_pb2.ListContainersResponse,
items_key='container',
item_mapper=Container,
) | python | def list_containers(self, page_size=None):
"""
Lists the containers visible to this client.
Containers are returned in lexicographical order.
:rtype: :class:`.Container` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/containers'.format(self._instance),
params=params,
response_class=mdb_pb2.ListContainersResponse,
items_key='container',
item_mapper=Container,
) | [
"def",
"list_containers",
"(",
"self",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/containers'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListContainersResponse",
",",
"items_key",
"=",
"'container'",
",",
"item_mapper",
"=",
"Container",
",",
")"
]
| Lists the containers visible to this client.
Containers are returned in lexicographical order.
:rtype: :class:`.Container` iterator | [
"Lists",
"the",
"containers",
"visible",
"to",
"this",
"client",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L89-L109 | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.get_container | def get_container(self, name):
"""
Gets a single container by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Container
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/containers{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ContainerInfo()
message.ParseFromString(response.content)
return Container(message) | python | def get_container(self, name):
"""
Gets a single container by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Container
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/containers{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.ContainerInfo()
message.ParseFromString(response.content)
return Container(message) | [
"def",
"get_container",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"adapt_name_for_rest",
"(",
"name",
")",
"url",
"=",
"'/mdb/{}/containers{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"name",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"url",
")",
"message",
"=",
"mdb_pb2",
".",
"ContainerInfo",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"Container",
"(",
"message",
")"
]
| Gets a single container by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Container | [
"Gets",
"a",
"single",
"container",
"by",
"its",
"unique",
"name",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L111-L124 | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.list_commands | def list_commands(self, page_size=None):
"""
Lists the commands visible to this client.
Commands are returned in lexicographical order.
:rtype: :class:`.Command` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/commands'.format(self._instance),
params=params,
response_class=mdb_pb2.ListCommandsResponse,
items_key='command',
item_mapper=Command,
) | python | def list_commands(self, page_size=None):
"""
Lists the commands visible to this client.
Commands are returned in lexicographical order.
:rtype: :class:`.Command` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/commands'.format(self._instance),
params=params,
response_class=mdb_pb2.ListCommandsResponse,
items_key='command',
item_mapper=Command,
) | [
"def",
"list_commands",
"(",
"self",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/commands'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListCommandsResponse",
",",
"items_key",
"=",
"'command'",
",",
"item_mapper",
"=",
"Command",
",",
")"
]
| Lists the commands visible to this client.
Commands are returned in lexicographical order.
:rtype: :class:`.Command` iterator | [
"Lists",
"the",
"commands",
"visible",
"to",
"this",
"client",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L126-L146 | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.get_command | def get_command(self, name):
"""
Gets a single command by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Command
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/commands{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.CommandInfo()
message.ParseFromString(response.content)
return Command(message) | python | def get_command(self, name):
"""
Gets a single command by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Command
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/commands{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.CommandInfo()
message.ParseFromString(response.content)
return Command(message) | [
"def",
"get_command",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"adapt_name_for_rest",
"(",
"name",
")",
"url",
"=",
"'/mdb/{}/commands{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"name",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"url",
")",
"message",
"=",
"mdb_pb2",
".",
"CommandInfo",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"Command",
"(",
"message",
")"
]
| Gets a single command by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Command | [
"Gets",
"a",
"single",
"command",
"by",
"its",
"unique",
"name",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L148-L161 | train |
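The command methods mirror the parameter ones; this sketch keeps the same hypothetical server and instance, and the command name is invented for illustration.

```python
# Hypothetical command lookup, mirroring the parameter sketch above.
from yamcs.client import YamcsClient

mdb = YamcsClient('localhost:8090').get_mdb(instance='simulator')

for command in mdb.list_commands(page_size=100):
    print(command)

switch_on = mdb.get_command('/YSS/SIMULATOR/SWITCH_VOLTAGE_ON')
```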
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.list_algorithms | def list_algorithms(self, page_size=None):
"""
Lists the algorithms visible to this client.
Algorithms are returned in lexicographical order.
:rtype: :class:`.Algorithm` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/algorithms'.format(self._instance),
params=params,
response_class=mdb_pb2.ListAlgorithmsResponse,
items_key='algorithm',
item_mapper=Algorithm,
) | python | def list_algorithms(self, page_size=None):
"""
Lists the algorithms visible to this client.
Algorithms are returned in lexicographical order.
:rtype: :class:`.Algorithm` iterator
"""
params = {}
if page_size is not None:
params['limit'] = page_size
return pagination.Iterator(
client=self._client,
path='/mdb/{}/algorithms'.format(self._instance),
params=params,
response_class=mdb_pb2.ListAlgorithmsResponse,
items_key='algorithm',
item_mapper=Algorithm,
) | [
"def",
"list_algorithms",
"(",
"self",
",",
"page_size",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"page_size",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"page_size",
"return",
"pagination",
".",
"Iterator",
"(",
"client",
"=",
"self",
".",
"_client",
",",
"path",
"=",
"'/mdb/{}/algorithms'",
".",
"format",
"(",
"self",
".",
"_instance",
")",
",",
"params",
"=",
"params",
",",
"response_class",
"=",
"mdb_pb2",
".",
"ListAlgorithmsResponse",
",",
"items_key",
"=",
"'algorithm'",
",",
"item_mapper",
"=",
"Algorithm",
",",
")"
]
| Lists the algorithms visible to this client.
Algorithms are returned in lexicographical order.
:rtype: :class:`.Algorithm` iterator | [
"Lists",
"the",
"algorithms",
"visible",
"to",
"this",
"client",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L163-L183 | train |
yamcs/yamcs-python | yamcs-client/yamcs/mdb/client.py | MDBClient.get_algorithm | def get_algorithm(self, name):
"""
Gets a single algorithm by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Algorithm
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/algorithms{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.AlgorithmInfo()
message.ParseFromString(response.content)
return Algorithm(message) | python | def get_algorithm(self, name):
"""
Gets a single algorithm by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Algorithm
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/algorithms{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.AlgorithmInfo()
message.ParseFromString(response.content)
return Algorithm(message) | [
"def",
"get_algorithm",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"adapt_name_for_rest",
"(",
"name",
")",
"url",
"=",
"'/mdb/{}/algorithms{}'",
".",
"format",
"(",
"self",
".",
"_instance",
",",
"name",
")",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"url",
")",
"message",
"=",
"mdb_pb2",
".",
"AlgorithmInfo",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"Algorithm",
"(",
"message",
")"
]
| Gets a single algorithm by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Algorithm | [
"Gets",
"a",
"single",
"algorithm",
"by",
"its",
"unique",
"name",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/client.py#L185-L198 | train |
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.list_buckets | def list_buckets(self, instance):
"""
List the buckets for an instance.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Bucket]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
response = self._client.get_proto(path='/buckets/' + instance)
message = rest_pb2.ListBucketsResponse()
message.ParseFromString(response.content)
buckets = getattr(message, 'bucket')
return iter([
Bucket(bucket, instance, self) for bucket in buckets]) | python | def list_buckets(self, instance):
"""
List the buckets for an instance.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Bucket]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
response = self._client.get_proto(path='/buckets/' + instance)
message = rest_pb2.ListBucketsResponse()
message.ParseFromString(response.content)
buckets = getattr(message, 'bucket')
return iter([
Bucket(bucket, instance, self) for bucket in buckets]) | [
"def",
"list_buckets",
"(",
"self",
",",
"instance",
")",
":",
"# Server does not do pagination on listings of this resource.",
"# Return an iterator anyway for similarity with other API methods",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"path",
"=",
"'/buckets/'",
"+",
"instance",
")",
"message",
"=",
"rest_pb2",
".",
"ListBucketsResponse",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"buckets",
"=",
"getattr",
"(",
"message",
",",
"'bucket'",
")",
"return",
"iter",
"(",
"[",
"Bucket",
"(",
"bucket",
",",
"instance",
",",
"self",
")",
"for",
"bucket",
"in",
"buckets",
"]",
")"
]
| List the buckets for an instance.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Bucket] | [
"List",
"the",
"buckets",
"for",
"an",
"instance",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L21-L35 | train |
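A minimal sketch for the bucket listing above. Assumption: `YamcsClient` exposes a `get_storage_client()` accessor returning the storage `Client` defined in this file, as current yamcs-client releases document; server and instance names are made up.

```python
# Hypothetical bucket listing (assumes get_storage_client() is available).
from yamcs.client import YamcsClient

storage = YamcsClient('localhost:8090').get_storage_client()
for bucket in storage.list_buckets('simulator'):
    print(bucket)
```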
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.list_objects | def list_objects(self, instance, bucket_name, prefix=None, delimiter=None):
"""
List the objects for a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str prefix: If specified, only objects that start with this
prefix are listed.
:param str delimiter: If specified, return only objects whose name
do not contain the delimiter after the prefix.
For the other objects, the response contains
(in the prefix response parameter) the name
truncated after the delimiter. Duplicates are
omitted.
"""
url = '/buckets/{}/{}'.format(instance, bucket_name)
params = {}
if prefix is not None:
params['prefix'] = prefix
if delimiter is not None:
params['delimiter'] = delimiter
response = self._client.get_proto(path=url, params=params)
message = rest_pb2.ListObjectsResponse()
message.ParseFromString(response.content)
return ObjectListing(message, instance, bucket_name, self) | python | def list_objects(self, instance, bucket_name, prefix=None, delimiter=None):
"""
List the objects for a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str prefix: If specified, only objects that start with this
prefix are listed.
:param str delimiter: If specified, return only objects whose name
do not contain the delimiter after the prefix.
For the other objects, the response contains
(in the prefix response parameter) the name
truncated after the delimiter. Duplicates are
omitted.
"""
url = '/buckets/{}/{}'.format(instance, bucket_name)
params = {}
if prefix is not None:
params['prefix'] = prefix
if delimiter is not None:
params['delimiter'] = delimiter
response = self._client.get_proto(path=url, params=params)
message = rest_pb2.ListObjectsResponse()
message.ParseFromString(response.content)
return ObjectListing(message, instance, bucket_name, self) | [
"def",
"list_objects",
"(",
"self",
",",
"instance",
",",
"bucket_name",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"url",
"=",
"'/buckets/{}/{}'",
".",
"format",
"(",
"instance",
",",
"bucket_name",
")",
"params",
"=",
"{",
"}",
"if",
"prefix",
"is",
"not",
"None",
":",
"params",
"[",
"'prefix'",
"]",
"=",
"prefix",
"if",
"delimiter",
"is",
"not",
"None",
":",
"params",
"[",
"'delimiter'",
"]",
"=",
"delimiter",
"response",
"=",
"self",
".",
"_client",
".",
"get_proto",
"(",
"path",
"=",
"url",
",",
"params",
"=",
"params",
")",
"message",
"=",
"rest_pb2",
".",
"ListObjectsResponse",
"(",
")",
"message",
".",
"ParseFromString",
"(",
"response",
".",
"content",
")",
"return",
"ObjectListing",
"(",
"message",
",",
"instance",
",",
"bucket_name",
",",
"self",
")"
]
| List the objects for a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str prefix: If specified, only objects that start with this
prefix are listed.
:param str delimiter: If specified, return only objects whose name
do not contain the delimiter after the prefix.
For the other objects, the response contains
(in the prefix response parameter) the name
truncated after the delimiter. Duplicates are
omitted. | [
"List",
"the",
"objects",
"for",
"a",
"bucket",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L37-L61 | train |
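With `delimiter='/'`, the docstring above implies a one-level, directory-like view under the prefix. The sketch reuses the hypothetical setup from the previous one; bucket and prefix names are invented.

```python
# Hypothetical prefix/delimiter listing per the docstring above.
from yamcs.client import YamcsClient

storage = YamcsClient('localhost:8090').get_storage_client()
listing = storage.list_objects('simulator', 'flight_data',
                               prefix='2019/', delimiter='/')
print(listing)
```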
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.create_bucket | def create_bucket(self, instance, bucket_name):
"""
Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
req = rest_pb2.CreateBucketRequest()
req.name = bucket_name
url = '/buckets/{}'.format(instance)
self._client.post_proto(url, data=req.SerializeToString()) | python | def create_bucket(self, instance, bucket_name):
"""
Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
req = rest_pb2.CreateBucketRequest()
req.name = bucket_name
url = '/buckets/{}'.format(instance)
self._client.post_proto(url, data=req.SerializeToString()) | [
"def",
"create_bucket",
"(",
"self",
",",
"instance",
",",
"bucket_name",
")",
":",
"req",
"=",
"rest_pb2",
".",
"CreateBucketRequest",
"(",
")",
"req",
".",
"name",
"=",
"bucket_name",
"url",
"=",
"'/buckets/{}'",
".",
"format",
"(",
"instance",
")",
"self",
".",
"_client",
".",
"post_proto",
"(",
"url",
",",
"data",
"=",
"req",
".",
"SerializeToString",
"(",
")",
")"
]
| Create a new bucket in the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket. | [
"Create",
"a",
"new",
"bucket",
"in",
"the",
"specified",
"instance",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L63-L73 | train |
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.remove_bucket | def remove_bucket(self, instance, bucket_name):
"""
Remove a bucket from the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
url = '/buckets/{}/{}'.format(instance, bucket_name)
self._client.delete_proto(url) | python | def remove_bucket(self, instance, bucket_name):
"""
Remove a bucket from the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
"""
url = '/buckets/{}/{}'.format(instance, bucket_name)
self._client.delete_proto(url) | [
"def",
"remove_bucket",
"(",
"self",
",",
"instance",
",",
"bucket_name",
")",
":",
"url",
"=",
"'/buckets/{}/{}'",
".",
"format",
"(",
"instance",
",",
"bucket_name",
")",
"self",
".",
"_client",
".",
"delete_proto",
"(",
"url",
")"
]
| Remove a bucket from the specified instance.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket. | [
"Remove",
"a",
"bucket",
"from",
"the",
"specified",
"instance",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L75-L83 | train |
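The create/remove pair above composes into a simple lifecycle round-trip; same assumptions as the earlier storage sketches, with a made-up bucket name.

```python
# Hypothetical bucket lifecycle built from the two records above.
from yamcs.client import YamcsClient

storage = YamcsClient('localhost:8090').get_storage_client()
storage.create_bucket('simulator', 'scratch')
storage.remove_bucket('simulator', 'scratch')
```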
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.upload_object | def upload_object(self, instance, bucket_name, object_name, file_obj,
content_type=None):
"""
Upload an object to a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The target name of the object.
:param file file_obj: The file (or file-like object) to upload.
:param str content_type: The content type associated to this object.
This is mainly useful when accessing an object
directly via a web browser. If unspecified, a
content type *may* be automatically derived
from the specified ``file_obj``.
"""
url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)
with open(file_obj, 'rb') as f:
if content_type:
files = {object_name: (object_name, f, content_type)}
else:
files = {object_name: (object_name, f)}
self._client.request(path=url, method='post', files=files) | python | def upload_object(self, instance, bucket_name, object_name, file_obj,
content_type=None):
"""
Upload an object to a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The target name of the object.
:param file file_obj: The file (or file-like object) to upload.
:param str content_type: The content type associated to this object.
This is mainly useful when accessing an object
directly via a web browser. If unspecified, a
content type *may* be automatically derived
from the specified ``file_obj``.
"""
url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)
with open(file_obj, 'rb') as f:
if content_type:
files = {object_name: (object_name, f, content_type)}
else:
files = {object_name: (object_name, f)}
self._client.request(path=url, method='post', files=files) | [
"def",
"upload_object",
"(",
"self",
",",
"instance",
",",
"bucket_name",
",",
"object_name",
",",
"file_obj",
",",
"content_type",
"=",
"None",
")",
":",
"url",
"=",
"'/buckets/{}/{}/{}'",
".",
"format",
"(",
"instance",
",",
"bucket_name",
",",
"object_name",
")",
"with",
"open",
"(",
"file_obj",
",",
"'rb'",
")",
"as",
"f",
":",
"if",
"content_type",
":",
"files",
"=",
"{",
"object_name",
":",
"(",
"object_name",
",",
"f",
",",
"content_type",
")",
"}",
"else",
":",
"files",
"=",
"{",
"object_name",
":",
"(",
"object_name",
",",
"f",
")",
"}",
"self",
".",
"_client",
".",
"request",
"(",
"path",
"=",
"url",
",",
"method",
"=",
"'post'",
",",
"files",
"=",
"files",
")"
]
| Upload an object to a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The target name of the object.
:param file file_obj: The file (or file-like object) to upload.
:param str content_type: The content type associated to this object.
This is mainly useful when accessing an object
directly via a web browser. If unspecified, a
content type *may* be automatically derived
from the specified ``file_obj``. | [
"Upload",
"an",
"object",
"to",
"a",
"bucket",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L97-L118 | train |
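One detail worth flagging in the record above: the docstring advertises "file (or file-like object)", but the body calls `open(file_obj, 'rb')`, so at this revision the argument must be a filesystem path. The hedged sketch below passes a path accordingly; all names are hypothetical.

```python
# upload_object() at this revision opens file_obj itself, so pass a path,
# not an already-open file handle (despite the docstring).
from yamcs.client import YamcsClient

storage = YamcsClient('localhost:8090').get_storage_client()
storage.upload_object('simulator', 'flight_data', 'telemetry.csv',
                      'local/telemetry.csv', content_type='text/csv')
```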
yamcs/yamcs-python | yamcs-client/yamcs/storage/client.py | Client.remove_object | def remove_object(self, instance, bucket_name, object_name):
"""
Remove an object from a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The object to remove.
"""
url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)
self._client.delete_proto(url) | python | def remove_object(self, instance, bucket_name, object_name):
"""
Remove an object from a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The object to remove.
"""
url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)
self._client.delete_proto(url) | [
"def",
"remove_object",
"(",
"self",
",",
"instance",
",",
"bucket_name",
",",
"object_name",
")",
":",
"url",
"=",
"'/buckets/{}/{}/{}'",
".",
"format",
"(",
"instance",
",",
"bucket_name",
",",
"object_name",
")",
"self",
".",
"_client",
".",
"delete_proto",
"(",
"url",
")"
]
| Remove an object from a bucket.
:param str instance: A Yamcs instance name.
:param str bucket_name: The name of the bucket.
:param str object_name: The object to remove. | [
"Remove",
"an",
"object",
"from",
"a",
"bucket",
"."
]
| 1082fee8a299010cc44416bbb7518fac0ef08b48 | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L120-L129 | train |
IS-ENES-Data/esgf-pid | esgfpid/utils/timeutils.py | get_now_utc | def get_now_utc():
''' date in UTC, ISO format'''
# Helper class for UTC time
# Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
#now = datetime.datetime.now(timezone.utc) # Python 3.2
now = datetime.datetime.now(UTC())
return now | python | def get_now_utc():
''' date in UTC, ISO format'''
# Helper class for UTC time
# Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
#now = datetime.datetime.now(timezone.utc) # Python 3.2
now = datetime.datetime.now(UTC())
return now | [
"def",
"get_now_utc",
"(",
")",
":",
"# Helper class for UTC time",
"# Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo",
"ZERO",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
")",
"class",
"UTC",
"(",
"datetime",
".",
"tzinfo",
")",
":",
"\"\"\"UTC\"\"\"",
"def",
"utcoffset",
"(",
"self",
",",
"dt",
")",
":",
"return",
"ZERO",
"def",
"tzname",
"(",
"self",
",",
"dt",
")",
":",
"return",
"\"UTC\"",
"def",
"dst",
"(",
"self",
",",
"dt",
")",
":",
"return",
"ZERO",
"#now = datetime.datetime.now(timezone.utc) # Python 3.2",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"UTC",
"(",
")",
")",
"return",
"now"
]
| date in UTC, ISO format | [
"date",
"in",
"UTC",
"ISO",
"format"
]
| 2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41 | https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/timeutils.py#L10-L27 | train |
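The commented-out line in the record above already hints at this: on Python 3.2+ the stdlib `timezone` class makes the hand-rolled UTC tzinfo unnecessary. The equivalent is:

```python
# Stdlib equivalent of get_now_utc() on Python >= 3.2.
from datetime import datetime, timezone

now = datetime.now(timezone.utc)
print(now.isoformat())
```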
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.get | def get(self, position):
"""Gets value at index
:param position: index
:return: value at position
"""
counter = 0
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
return current_node.val
current_node = current_node.next_node
counter += 1
return None | python | def get(self, position):
"""Gets value at index
:param position: index
:return: value at position
"""
counter = 0
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
return current_node.val
current_node = current_node.next_node
counter += 1
return None | [
"def",
"get",
"(",
"self",
",",
"position",
")",
":",
"counter",
"=",
"0",
"current_node",
"=",
"self",
".",
"head",
"while",
"current_node",
"is",
"not",
"None",
"and",
"counter",
"<=",
"position",
":",
"if",
"counter",
"==",
"position",
":",
"return",
"current_node",
".",
"val",
"current_node",
"=",
"current_node",
".",
"next_node",
"counter",
"+=",
"1",
"return",
"None"
]
| Gets value at index
:param position: index
:return: value at position | [
"Gets",
"value",
"at",
"index"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L36-L53 | train |
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.insert_first | def insert_first(self, val):
"""Insert in head
:param val: Object to insert
:return: True iff insertion completed successfully
"""
self.head = Node(val, next_node=self.head)
return True | python | def insert_first(self, val):
"""Insert in head
:param val: Object to insert
:return: True iff insertion completed successfully
"""
self.head = Node(val, next_node=self.head)
return True | [
"def",
"insert_first",
"(",
"self",
",",
"val",
")",
":",
"self",
".",
"head",
"=",
"Node",
"(",
"val",
",",
"next_node",
"=",
"self",
".",
"head",
")",
"return",
"True"
]
| Insert in head
:param val: Object to insert
:return: True iff insertion completed successfully | [
"Insert",
"in",
"head"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L91-L99 | train |
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.insert | def insert(self, val, position=0):
"""Insert in position
:param val: Object to insert
:param position: Index of insertion
:return: bool: True iff insertion completed successfully
"""
if position <= 0: # at beginning
return self.insert_first(val)
counter = 0
last_node = self.head
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
last_node.next_node = Node(val, current_node)
return True
last_node = current_node
current_node = current_node.next_node
counter += 1
if current_node is None: # append to last element
last_node.next_node = Node(val, None)
return True | python | def insert(self, val, position=0):
"""Insert in position
:param val: Object to insert
:param position: Index of insertion
:return: bool: True iff insertion completed successfully
"""
if position <= 0: # at beginning
return self.insert_first(val)
counter = 0
last_node = self.head
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
last_node.next_node = Node(val, current_node)
return True
last_node = current_node
current_node = current_node.next_node
counter += 1
if current_node is None: # append to last element
last_node.next_node = Node(val, None)
return True | [
"def",
"insert",
"(",
"self",
",",
"val",
",",
"position",
"=",
"0",
")",
":",
"if",
"position",
"<=",
"0",
":",
"# at beginning",
"return",
"self",
".",
"insert_first",
"(",
"val",
")",
"counter",
"=",
"0",
"last_node",
"=",
"self",
".",
"head",
"current_node",
"=",
"self",
".",
"head",
"while",
"current_node",
"is",
"not",
"None",
"and",
"counter",
"<=",
"position",
":",
"if",
"counter",
"==",
"position",
":",
"last_node",
".",
"next_node",
"=",
"Node",
"(",
"val",
",",
"current_node",
")",
"return",
"True",
"last_node",
"=",
"current_node",
"current_node",
"=",
"current_node",
".",
"next_node",
"counter",
"+=",
"1",
"if",
"current_node",
"is",
"None",
":",
"# append to last element",
"last_node",
".",
"next_node",
"=",
"Node",
"(",
"val",
",",
"None",
")",
"return",
"True"
]
| Insert in position
:param val: Object to insert
:param position: Index of insertion
:return: bool: True iff insertion completed successfully | [
"Insert",
"in",
"position"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L101-L127 | train |
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.remove | def remove(self, position):
"""Removes at index
:param position: Index of removal
:return: bool: True iff removal completed successfully
"""
if position <= 0: # at beginning
return self.remove_first()
if position >= self.length() - 1: # at end
return self.remove_last()
counter = 0
last_node = self.head
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
last_node.next_node = current_node.next_node # remove current
return True
last_node = current_node
current_node = current_node.next_node
counter += 1
return False | python | def remove(self, position):
"""Removes at index
:param position: Index of removal
:return: bool: True iff removal completed successfully
"""
if position <= 0: # at beginning
return self.remove_first()
if position >= self.length() - 1: # at end
return self.remove_last()
counter = 0
last_node = self.head
current_node = self.head
while current_node is not None and counter <= position:
if counter == position:
last_node.next_node = current_node.next_node # remove current
return True
last_node = current_node
current_node = current_node.next_node
counter += 1
return False | [
"def",
"remove",
"(",
"self",
",",
"position",
")",
":",
"if",
"position",
"<=",
"0",
":",
"# at beginning",
"return",
"self",
".",
"remove_first",
"(",
")",
"if",
"position",
">=",
"self",
".",
"length",
"(",
")",
"-",
"1",
":",
"# at end",
"return",
"self",
".",
"remove_last",
"(",
")",
"counter",
"=",
"0",
"last_node",
"=",
"self",
".",
"head",
"current_node",
"=",
"self",
".",
"head",
"while",
"current_node",
"is",
"not",
"None",
"and",
"counter",
"<=",
"position",
":",
"if",
"counter",
"==",
"position",
":",
"last_node",
".",
"next_node",
"=",
"current_node",
".",
"next_node",
"# remove current",
"return",
"True",
"last_node",
"=",
"current_node",
"current_node",
"=",
"current_node",
".",
"next_node",
"counter",
"+=",
"1",
"return",
"False"
]
| Removes at index
:param position: Index of removal
:return: bool: True iff removal completed successfully | [
"Removes",
"at",
"index"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L165-L190 | train |
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.to_lst | def to_lst(self):
"""Cycle all items and puts them in a list
:return: list representation
"""
out = []
node = self.head
while node is not None:
out.append(node.val)
node = node.next_node
return out | python | def to_lst(self):
"""Cycle all items and puts them in a list
:return: list representation
"""
out = []
node = self.head
while node is not None:
out.append(node.val)
node = node.next_node
return out | [
"def",
"to_lst",
"(",
"self",
")",
":",
"out",
"=",
"[",
"]",
"node",
"=",
"self",
".",
"head",
"while",
"node",
"is",
"not",
"None",
":",
"out",
".",
"append",
"(",
"node",
".",
"val",
")",
"node",
"=",
"node",
".",
"next_node",
"return",
"out"
]
| Cycle all items and puts them in a list
:return: list representation | [
"Cycle",
"all",
"items",
"and",
"puts",
"them",
"in",
"a",
"list"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L192-L204 | train |
sirfoga/pyhal | hal/data/linked_list.py | LinkedList.execute | def execute(self, func, *args, **kwargs):
"""Executes function on each item
:param func: Function to execute on each item
:param args: args of function
:param kwargs: extra args of function
:return: list: Results of calling the function on each item
"""
return [
func(item, *args, **kwargs) for item in self.to_lst()
] | python | def execute(self, func, *args, **kwargs):
"""Executes function on each item
:param func: Function to execute on each item
:param args: args of function
:param kwargs: extra args of function
:return: list: Results of calling the function on each item
"""
return [
func(item, *args, **kwargs) for item in self.to_lst()
] | [
"def",
"execute",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"func",
"(",
"item",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"item",
"in",
"self",
".",
"to_lst",
"(",
")",
"]"
]
| Executes function on each item
:param func: Function to execute on each item
:param args: args of function
:param kwargs: extra args of function
:return: list: Results of calling the function on each item | [
"Executes",
"function",
"on",
"each",
"item"
]
| 4394d8a1f7e45bea28a255ec390f4962ee64d33a | https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L206-L216 | train |
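The six LinkedList records above cover the class's public surface; a short exercise ties them together. Assumption: the constructor (not included in these records) builds the list from a plain Python list, as the pyhal source suggests.

```python
# Exercising the LinkedList API from the records above.
from hal.data.linked_list import LinkedList

lst = LinkedList([1, 2, 3])          # assumption: ctor accepts a Python list
lst.insert_first(0)                  # [0, 1, 2, 3]
lst.insert(99, position=2)           # [0, 1, 99, 2, 3]
lst.remove(2)                        # [0, 1, 2, 3]
print(lst.get(1))                    # 1
print(lst.to_lst())                  # [0, 1, 2, 3]
print(lst.execute(lambda x: x * 2))  # [0, 2, 4, 6]
```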
loganasherjones/yapconf | yapconf/items.py | from_specification | def from_specification(specification, env_prefix=None, separator='.',
parent_names=None):
"""Used to create YapconfItems from a specification dictionary.
Args:
specification (dict): The specification used to
initialize ``YapconfSpec``
env_prefix (str): Prefix to add to environment names
separator (str): Separator for nested items
parent_names (list): Parents names of any given item
Returns:
A dictionary of names to YapconfItems
"""
items = {}
for item_name, item_info in six.iteritems(specification):
names = copy.copy(parent_names) if parent_names else []
items[item_name] = _generate_item(item_name,
item_info,
env_prefix,
separator,
names)
return items | python | def from_specification(specification, env_prefix=None, separator='.',
parent_names=None):
"""Used to create YapconfItems from a specification dictionary.
Args:
specification (dict): The specification used to
initialize ``YapconfSpec``
env_prefix (str): Prefix to add to environment names
separator (str): Separator for nested items
parent_names (list): Parents names of any given item
Returns:
A dictionary of names to YapconfItems
"""
items = {}
for item_name, item_info in six.iteritems(specification):
names = copy.copy(parent_names) if parent_names else []
items[item_name] = _generate_item(item_name,
item_info,
env_prefix,
separator,
names)
return items | [
"def",
"from_specification",
"(",
"specification",
",",
"env_prefix",
"=",
"None",
",",
"separator",
"=",
"'.'",
",",
"parent_names",
"=",
"None",
")",
":",
"items",
"=",
"{",
"}",
"for",
"item_name",
",",
"item_info",
"in",
"six",
".",
"iteritems",
"(",
"specification",
")",
":",
"names",
"=",
"copy",
".",
"copy",
"(",
"parent_names",
")",
"if",
"parent_names",
"else",
"[",
"]",
"items",
"[",
"item_name",
"]",
"=",
"_generate_item",
"(",
"item_name",
",",
"item_info",
",",
"env_prefix",
",",
"separator",
",",
"names",
")",
"return",
"items"
]
| Used to create YapconfItems from a specification dictionary.
Args:
specification (dict): The specification used to
initialize ``YapconfSpec``
env_prefix (str): Prefix to add to environment names
separator (str): Separator for nested items
parent_names (list): Parents names of any given item
Returns:
A dictionary of names to YapconfItems | [
"Used",
"to",
"create",
"YapconfItems",
"from",
"a",
"specification",
"dictionary",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L23-L46 | train |
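A sketch of the specification dict `from_specification()` consumes, matching the signature in the record above. The item-option keys ('type', 'default') follow yapconf's documented conventions; the env_prefix value is made up.

```python
# Hypothetical specification passed to from_specification().
from yapconf.items import from_specification

items = from_specification(
    {
        'db_name': {'type': 'str', 'default': 'test'},
        'verbose': {'type': 'bool', 'default': False},
    },
    env_prefix='MY_APP_',
)
print(sorted(items))  # ['db_name', 'verbose']
```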
loganasherjones/yapconf | yapconf/items.py | YapconfItem.update_default | def update_default(self, new_default, respect_none=False):
"""Update our current default with the new_default.
Args:
new_default: New default to set.
respect_none: Flag to determine if ``None`` is a valid value.
"""
if new_default is not None:
self.default = new_default
elif new_default is None and respect_none:
self.default = None | python | def update_default(self, new_default, respect_none=False):
"""Update our current default with the new_default.
Args:
new_default: New default to set.
respect_none: Flag to determine if ``None`` is a valid value.
"""
if new_default is not None:
self.default = new_default
elif new_default is None and respect_none:
self.default = None | [
"def",
"update_default",
"(",
"self",
",",
"new_default",
",",
"respect_none",
"=",
"False",
")",
":",
"if",
"new_default",
"is",
"not",
"None",
":",
"self",
".",
"default",
"=",
"new_default",
"elif",
"new_default",
"is",
"None",
"and",
"respect_none",
":",
"self",
".",
"default",
"=",
"None"
]
| Update our current default with the new_default.
Args:
new_default: New default to set.
respect_none: Flag to determine if ``None`` is a valid value. | [
"Update",
"our",
"current",
"default",
"with",
"the",
"new_default",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L243-L254 | train |
loganasherjones/yapconf | yapconf/items.py | YapconfItem.migrate_config | def migrate_config(self, current_config, config_to_migrate,
always_update, update_defaults):
"""Migrate config value in current_config, updating config_to_migrate.
Given the current_config object, it will attempt to find a value
based on all the names given. If no name could be found, then it
will simply set the value to the default.
If a value is found and is in the list of previous_defaults, it will
either update or keep the old value based on if update_defaults is
set.
If a non-default value is set it will either keep this value or update
it based on if ``always_update`` is true.
Args:
current_config (dict): Current configuration.
config_to_migrate (dict): Config to update.
always_update (bool): Always update value.
update_defaults (bool): Update values found in previous_defaults
"""
value = self._search_config_for_possible_names(current_config)
self._update_config(config_to_migrate, value,
always_update, update_defaults) | python | def migrate_config(self, current_config, config_to_migrate,
always_update, update_defaults):
"""Migrate config value in current_config, updating config_to_migrate.
Given the current_config object, it will attempt to find a value
based on all the names given. If no name could be found, then it
will simply set the value to the default.
If a value is found and is in the list of previous_defaults, it will
either update or keep the old value based on if update_defaults is
set.
If a non-default value is set it will either keep this value or update
it based on if ``always_update`` is true.
Args:
current_config (dict): Current configuration.
config_to_migrate (dict): Config to update.
always_update (bool): Always update value.
update_defaults (bool): Update values found in previous_defaults
"""
value = self._search_config_for_possible_names(current_config)
self._update_config(config_to_migrate, value,
always_update, update_defaults) | [
"def",
"migrate_config",
"(",
"self",
",",
"current_config",
",",
"config_to_migrate",
",",
"always_update",
",",
"update_defaults",
")",
":",
"value",
"=",
"self",
".",
"_search_config_for_possible_names",
"(",
"current_config",
")",
"self",
".",
"_update_config",
"(",
"config_to_migrate",
",",
"value",
",",
"always_update",
",",
"update_defaults",
")"
]
| Migrate config value in current_config, updating config_to_migrate.
Given the current_config object, it will attempt to find a value
based on all the names given. If no name could be found, then it
will simply set the value to the default.
If a value is found and is in the list of previous_defaults, it will
either update or keep the old value based on if update_defaults is
set.
If a non-default value is set it will either keep this value or update
it based on if ``always_update`` is true.
Args:
current_config (dict): Current configuration.
config_to_migrate (dict): Config to update.
always_update (bool): Always update value.
update_defaults (bool): Update values found in previous_defaults | [
"Migrate",
"config",
"value",
"in",
"current_config",
"updating",
"config_to_migrate",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L256-L279 | train |
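A single migration step, sketched with hypothetical config dicts: a non-default value found under one of the item's names in `current_config` should be carried over into `config_to_migrate`, per the docstring above.

```python
# Hypothetical migration step for one item built via from_specification().
from yapconf.items import from_specification

item = from_specification({'db_name': {'type': 'str', 'default': 'test'}})['db_name']
config_to_migrate = {}
item.migrate_config(
    current_config={'db_name': 'legacy_value'},
    config_to_migrate=config_to_migrate,
    always_update=False,
    update_defaults=True,
)
print(config_to_migrate)  # expected: {'db_name': 'legacy_value'}
```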
loganasherjones/yapconf | yapconf/items.py | YapconfItem.add_argument | def add_argument(self, parser, bootstrap=False):
"""Add this item as an argument to the given parser.
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap: Flag to indicate whether you only want to mark this
item as required or not
"""
if self.cli_expose:
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs) | python | def add_argument(self, parser, bootstrap=False):
"""Add this item as an argument to the given parser.
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap: Flag to indicate whether you only want to mark this
item as required or not
"""
if self.cli_expose:
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
parser.add_argument(*args, **kwargs) | [
"def",
"add_argument",
"(",
"self",
",",
"parser",
",",
"bootstrap",
"=",
"False",
")",
":",
"if",
"self",
".",
"cli_expose",
":",
"args",
"=",
"self",
".",
"_get_argparse_names",
"(",
"parser",
".",
"prefix_chars",
")",
"kwargs",
"=",
"self",
".",
"_get_argparse_kwargs",
"(",
"bootstrap",
")",
"parser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Add this item as an argument to the given parser.
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap: Flag to indicate whether you only want to mark this
item as required or not | [
"Add",
"this",
"item",
"as",
"an",
"argument",
"to",
"the",
"given",
"parser",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L281-L292 | train |
loganasherjones/yapconf | yapconf/items.py | YapconfItem.get_config_value | def get_config_value(self, overrides, skip_environment=False):
"""Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
"""
label, override, key = self._search_overrides(
overrides, skip_environment
)
if override is None and self.default is None and self.required:
raise YapconfItemNotFound(
'Could not find config value for {0}'.format(self.fq_name),
self
)
if override is None:
self.logger.debug(
'Config value not found for {0}, falling back to default.'
.format(self.name)
)
value = self.default
else:
value = override[key]
if value is None:
return value
converted_value = self.convert_config_value(value, label)
self._validate_value(converted_value)
return converted_value | python | def get_config_value(self, overrides, skip_environment=False):
"""Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
"""
label, override, key = self._search_overrides(
overrides, skip_environment
)
if override is None and self.default is None and self.required:
raise YapconfItemNotFound(
'Could not find config value for {0}'.format(self.fq_name),
self
)
if override is None:
self.logger.debug(
'Config value not found for {0}, falling back to default.'
.format(self.name)
)
value = self.default
else:
value = override[key]
if value is None:
return value
converted_value = self.convert_config_value(value, label)
self._validate_value(converted_value)
return converted_value | [
"def",
"get_config_value",
"(",
"self",
",",
"overrides",
",",
"skip_environment",
"=",
"False",
")",
":",
"label",
",",
"override",
",",
"key",
"=",
"self",
".",
"_search_overrides",
"(",
"overrides",
",",
"skip_environment",
")",
"if",
"override",
"is",
"None",
"and",
"self",
".",
"default",
"is",
"None",
"and",
"self",
".",
"required",
":",
"raise",
"YapconfItemNotFound",
"(",
"'Could not find config value for {0}'",
".",
"format",
"(",
"self",
".",
"fq_name",
")",
",",
"self",
")",
"if",
"override",
"is",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Config value not found for {0}, falling back to default.'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"value",
"=",
"self",
".",
"default",
"else",
":",
"value",
"=",
"override",
"[",
"key",
"]",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"converted_value",
"=",
"self",
".",
"convert_config_value",
"(",
"value",
",",
"label",
")",
"self",
".",
"_validate_value",
"(",
"converted_value",
")",
"return",
"converted_value"
]
| Get the configuration value from all overrides.
Iterates over all overrides given to see if a value can be pulled
out from them. It will convert each of these values to ensure they
are the correct type.
Args:
overrides: A list of tuples where each tuple is a label and a
dictionary representing a configuration.
skip_environment: Skip looking through the environment.
Returns:
The converted configuration value.
Raises:
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised. | [
"Get",
"the",
"configuration",
"value",
"from",
"all",
"overrides",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L294-L342 | train |
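Override resolution, sketched with hypothetical labels and dicts. The overrides argument is the list of (label, dict) tuples the docstring describes; assuming, as yapconf documents for load_config, that sources earlier in the list take precedence, the command-line value wins here.

```python
# Hypothetical override resolution for a single item.
from yapconf.items import from_specification

item = from_specification({'db_name': {'type': 'str', 'default': 'test'}})['db_name']
value = item.get_config_value([
    ('command line', {'db_name': 'from_cli'}),
    ('config file', {'db_name': 'from_file'}),
])
print(value)  # expected: 'from_cli'
```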
loganasherjones/yapconf | yapconf/items.py | YapconfBoolItem.add_argument | def add_argument(self, parser, bootstrap=False):
"""Add boolean item as an argument to the given parser.
An exclusive group is created on the parser, which will add
a boolean-style command line argument to the parser.
Examples:
A non-nested boolean value with the name 'debug' will result
in a command-line argument like the following:
'--debug/--no-debug'
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
tmp_default = self.default
exclusive_grp = parser.add_mutually_exclusive_group()
self.default = True
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = False
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = tmp_default | python | def add_argument(self, parser, bootstrap=False):
"""Add boolean item as an argument to the given parser.
An exclusive group is created on the parser, which will add
a boolean-style command line argument to the parser.
Examples:
A non-nested boolean value with the name 'debug' will result
in a command-line argument like the following:
'--debug/--no-debug'
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not.
"""
tmp_default = self.default
exclusive_grp = parser.add_mutually_exclusive_group()
self.default = True
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = False
args = self._get_argparse_names(parser.prefix_chars)
kwargs = self._get_argparse_kwargs(bootstrap)
exclusive_grp.add_argument(*args, **kwargs)
self.default = tmp_default | [
"def",
"add_argument",
"(",
"self",
",",
"parser",
",",
"bootstrap",
"=",
"False",
")",
":",
"tmp_default",
"=",
"self",
".",
"default",
"exclusive_grp",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"self",
".",
"default",
"=",
"True",
"args",
"=",
"self",
".",
"_get_argparse_names",
"(",
"parser",
".",
"prefix_chars",
")",
"kwargs",
"=",
"self",
".",
"_get_argparse_kwargs",
"(",
"bootstrap",
")",
"exclusive_grp",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"default",
"=",
"False",
"args",
"=",
"self",
".",
"_get_argparse_names",
"(",
"parser",
".",
"prefix_chars",
")",
"kwargs",
"=",
"self",
".",
"_get_argparse_kwargs",
"(",
"bootstrap",
")",
"exclusive_grp",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"default",
"=",
"tmp_default"
]
| Add boolean item as an argument to the given parser.
An exclusive group is created on the parser, which will add
a boolean-style command line argument to the parser.
Examples:
A non-nested boolean value with the name 'debug' will result
in a command-line argument like the following:
'--debug/--no-debug'
Args:
parser (argparse.ArgumentParser): The parser to add this item to.
bootstrap (bool): Flag to indicate whether you only want to mark
this item as required or not. | [
"Add",
"boolean",
"item",
"as",
"an",
"argument",
"to",
"the",
"given",
"parser",
"."
]
| d2970e6e7e3334615d4d978d8b0ca33006d79d16 | https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L591-L620 | train |
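The mutually exclusive group above is what produces the '--debug/--no-debug' pair the docstring describes. A hedged end-to-end sketch (hypothetical spec; assumes `YapconfSpec.add_arguments`, which yapconf's README documents, wires items onto an argparse parser):

```python
# Hypothetical demonstration of the generated boolean flag pair.
import argparse

from yapconf import YapconfSpec

spec = YapconfSpec({'debug': {'type': 'bool', 'default': False}})
parser = argparse.ArgumentParser()
spec.add_arguments(parser)
args = parser.parse_args(['--no-debug'])
print(args)
```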