repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
Kortemme-Lab/klab | klab/biblio/doi.py | DOI.get_info | def get_info(self):
'Retrieve the data from CrossRef.'
escaped_doi = urllib2.quote(self.doi, '')
html = get_resource("www.crossref.org", '/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search' % escaped_doi)
xml_matches = []
for m in re.finditer('(<doi_records>.*?</doi_records>)', html, re.DOTALL):
xml_matches.append(m.group(0))
if len(xml_matches) == 0:
raise DOIRetrievalException('No matches found for the DOI "%s".' % self.doi)
elif len(xml_matches) == 1:
return xml_matches[0]
else:
raise DOIRetrievalException('Multiple (%d) matches found for the DOI "%s".' % (len(xml_matches), self.doi)) | python | def get_info(self):
'Retrieve the data from CrossRef.'
escaped_doi = urllib2.quote(self.doi, '')
html = get_resource("www.crossref.org", '/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search' % escaped_doi)
xml_matches = []
for m in re.finditer('(<doi_records>.*?</doi_records>)', html, re.DOTALL):
xml_matches.append(m.group(0))
if len(xml_matches) == 0:
raise DOIRetrievalException('No matches found for the DOI "%s".' % self.doi)
elif len(xml_matches) == 1:
return xml_matches[0]
else:
raise DOIRetrievalException('Multiple (%d) matches found for the DOI "%s".' % (len(xml_matches), self.doi)) | [
"def",
"get_info",
"(",
"self",
")",
":",
"escaped_doi",
"=",
"urllib2",
".",
"quote",
"(",
"self",
".",
"doi",
",",
"''",
")",
"html",
"=",
"get_resource",
"(",
"\"www.crossref.org\"",
",",
"'/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search'",
"%",
"escaped_doi",
")",
"xml_matches",
"=",
"[",
"]",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"'(<doi_records>.*?</doi_records>)'",
",",
"html",
",",
"re",
".",
"DOTALL",
")",
":",
"xml_matches",
".",
"append",
"(",
"m",
".",
"group",
"(",
"0",
")",
")",
"if",
"len",
"(",
"xml_matches",
")",
"==",
"0",
":",
"raise",
"DOIRetrievalException",
"(",
"'No matches found for the DOI \"%s\".'",
"%",
"self",
".",
"doi",
")",
"elif",
"len",
"(",
"xml_matches",
")",
"==",
"1",
":",
"return",
"xml_matches",
"[",
"0",
"]",
"else",
":",
"raise",
"DOIRetrievalException",
"(",
"'Multiple (%d) matches found for the DOI \"%s\".'",
"%",
"(",
"len",
"(",
"xml_matches",
")",
",",
"self",
".",
"doi",
")",
")"
]
| Retrieve the data from CrossRef. | [
"Retrieve",
"the",
"data",
"from",
"CrossRef",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/biblio/doi.py#L103-L117 | train |
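
A note on the extraction step in `DOI.get_info` above: it isolates the `<doi_records>…</doi_records>` block from the CrossRef guest-query response with a non-greedy pattern compiled with `re.DOTALL`, so the match may span line breaks. Below is a minimal standalone sketch of just that step; it is not part of klab, and `sample_html` is an invented stand-in for a real CrossRef response.

```python
# Sketch only: the <doi_records> extraction used by DOI.get_info, applied to a
# made-up response string. re.DOTALL lets ".*?" run across newlines.
import re

sample_html = """<html><body>
<doi_records>
  <doi_record>...payload...</doi_record>
</doi_records>
</body></html>"""

xml_matches = [m.group(0)
               for m in re.finditer('(<doi_records>.*?</doi_records>)', sample_html, re.DOTALL)]
print(len(xml_matches))  # 1 -> the single-match case in which get_info() returns the XML
```
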
Kortemme-Lab/klab | klab/bio/spackle.py | Spackler.add_backbone_atoms_linearly_from_loop_filepaths | def add_backbone_atoms_linearly_from_loop_filepaths(self, loop_json_filepath, fasta_filepath, residue_ids):
'''A utility wrapper around add_backbone_atoms_linearly. Adds backbone atoms in a straight line from the first to
the last residue of residue_ids.
loop_json_filepath is a path to a JSON file using the JSON format for Rosetta loops files. This file identifies
the insertion points of the sequence.
fasta_filepath is a path to a FASTA file with one sequence. This sequence will be used as the sequence for
the inserted residues (between the start and stop residues defined in loop_json_filepath).
residue_ids is a list of PDB chain residues (columns 22-27 of ATOM lines in the PDB format). It is assumed that
they are sequential although the logic does not depend on that. This list should have the same length as the
sequence identified in the FASTA file.
'''
# Parse the loop file
loop_def = json.loads(read_file(loop_json_filepath))
assert(len(loop_def['LoopSet']) == 1)
start_res = loop_def['LoopSet'][0]['start']
end_res = loop_def['LoopSet'][0]['stop']
start_res = PDB.ChainResidueID2String(start_res['chainID'], (str(start_res['resSeq']) + start_res['iCode']).strip())
end_res = PDB.ChainResidueID2String(end_res['chainID'], (str(end_res['resSeq']) + end_res['iCode']).strip())
assert(start_res in residue_ids)
assert(end_res in residue_ids)
# Parse the FASTA file and extract the sequence
f = FASTA(read_file(fasta_filepath), strict = False)
assert(len(f.get_sequences()) == 1)
insertion_sequence = f.sequences[0][2]
if not len(residue_ids) == len(insertion_sequence):
raise Exception('The sequence in the FASTA file must have the same length as the list of residues.')
# Create the insertion sequence (a sub-sequence of the FASTA sequence)
# The post-condition is that the start and end residues are the first and last elements of kept_residues respectively
kept_residues = []
insertion_residue_map = {}
in_section = False
found_end = False
for x in range(len(residue_ids)):
residue_id = residue_ids[x]
if residue_id == start_res:
in_section = True
if in_section:
kept_residues.append(residue_id)
insertion_residue_map[residue_id] = insertion_sequence[x]
if residue_id == end_res:
found_end = True
break
if not kept_residues:
raise Exception('The insertion sequence is empty (check the start and end residue ids).')
if not found_end:
raise Exception('The end residue was not encountered when iterating over the insertion sequence (check the start and end residue ids).')
# Identify the start and end Residue objects
try:
start_res = self.residues[start_res[0]][start_res[1:]]
end_res = self.residues[end_res[0]][end_res[1:]]
except Exception, e:
raise Exception('The start or end residue could not be found in the PDB file.')
return self.add_backbone_atoms_linearly(start_res, end_res, kept_residues, insertion_residue_map) | python | def add_backbone_atoms_linearly_from_loop_filepaths(self, loop_json_filepath, fasta_filepath, residue_ids):
'''A utility wrapper around add_backbone_atoms_linearly. Adds backbone atoms in a straight line from the first to
the last residue of residue_ids.
loop_json_filepath is a path to a JSON file using the JSON format for Rosetta loops files. This file identifies
the insertion points of the sequence.
fasta_filepath is a path to a FASTA file with one sequence. This sequence will be used as the sequence for
the inserted residues (between the start and stop residues defined in loop_json_filepath).
residue_ids is a list of PDB chain residues (columns 22-27 of ATOM lines in the PDB format). It is assumed that
they are sequential although the logic does not depend on that. This list should have the same length as the
sequence identified in the FASTA file.
'''
# Parse the loop file
loop_def = json.loads(read_file(loop_json_filepath))
assert(len(loop_def['LoopSet']) == 1)
start_res = loop_def['LoopSet'][0]['start']
end_res = loop_def['LoopSet'][0]['stop']
start_res = PDB.ChainResidueID2String(start_res['chainID'], (str(start_res['resSeq']) + start_res['iCode']).strip())
end_res = PDB.ChainResidueID2String(end_res['chainID'], (str(end_res['resSeq']) + end_res['iCode']).strip())
assert(start_res in residue_ids)
assert(end_res in residue_ids)
# Parse the FASTA file and extract the sequence
f = FASTA(read_file(fasta_filepath), strict = False)
assert(len(f.get_sequences()) == 1)
insertion_sequence = f.sequences[0][2]
if not len(residue_ids) == len(insertion_sequence):
raise Exception('The sequence in the FASTA file must have the same length as the list of residues.')
# Create the insertion sequence (a sub-sequence of the FASTA sequence)
# The post-condition is that the start and end residues are the first and last elements of kept_residues respectively
kept_residues = []
insertion_residue_map = {}
in_section = False
found_end = False
for x in range(len(residue_ids)):
residue_id = residue_ids[x]
if residue_id == start_res:
in_section = True
if in_section:
kept_residues.append(residue_id)
insertion_residue_map[residue_id] = insertion_sequence[x]
if residue_id == end_res:
found_end = True
break
if not kept_residues:
raise Exception('The insertion sequence is empty (check the start and end residue ids).')
if not found_end:
raise Exception('The end residue was not encountered when iterating over the insertion sequence (check the start and end residue ids).')
# Identify the start and end Residue objects
try:
start_res = self.residues[start_res[0]][start_res[1:]]
end_res = self.residues[end_res[0]][end_res[1:]]
except Exception, e:
raise Exception('The start or end residue could not be found in the PDB file.')
return self.add_backbone_atoms_linearly(start_res, end_res, kept_residues, insertion_residue_map) | [
"def",
"add_backbone_atoms_linearly_from_loop_filepaths",
"(",
"self",
",",
"loop_json_filepath",
",",
"fasta_filepath",
",",
"residue_ids",
")",
":",
"# Parse the loop file",
"loop_def",
"=",
"json",
".",
"loads",
"(",
"read_file",
"(",
"loop_json_filepath",
")",
")",
"assert",
"(",
"len",
"(",
"loop_def",
"[",
"'LoopSet'",
"]",
")",
"==",
"1",
")",
"start_res",
"=",
"loop_def",
"[",
"'LoopSet'",
"]",
"[",
"0",
"]",
"[",
"'start'",
"]",
"end_res",
"=",
"loop_def",
"[",
"'LoopSet'",
"]",
"[",
"0",
"]",
"[",
"'stop'",
"]",
"start_res",
"=",
"PDB",
".",
"ChainResidueID2String",
"(",
"start_res",
"[",
"'chainID'",
"]",
",",
"(",
"str",
"(",
"start_res",
"[",
"'resSeq'",
"]",
")",
"+",
"start_res",
"[",
"'iCode'",
"]",
")",
".",
"strip",
"(",
")",
")",
"end_res",
"=",
"PDB",
".",
"ChainResidueID2String",
"(",
"end_res",
"[",
"'chainID'",
"]",
",",
"(",
"str",
"(",
"end_res",
"[",
"'resSeq'",
"]",
")",
"+",
"end_res",
"[",
"'iCode'",
"]",
")",
".",
"strip",
"(",
")",
")",
"assert",
"(",
"start_res",
"in",
"residue_ids",
")",
"assert",
"(",
"end_res",
"in",
"residue_ids",
")",
"# Parse the FASTA file and extract the sequence",
"f",
"=",
"FASTA",
"(",
"read_file",
"(",
"fasta_filepath",
")",
",",
"strict",
"=",
"False",
")",
"assert",
"(",
"len",
"(",
"f",
".",
"get_sequences",
"(",
")",
")",
"==",
"1",
")",
"insertion_sequence",
"=",
"f",
".",
"sequences",
"[",
"0",
"]",
"[",
"2",
"]",
"if",
"not",
"len",
"(",
"residue_ids",
")",
"==",
"len",
"(",
"insertion_sequence",
")",
":",
"raise",
"Exception",
"(",
"'The sequence in the FASTA file must have the same length as the list of residues.'",
")",
"# Create the insertion sequence (a sub-sequence of the FASTA sequence)",
"# The post-condition is that the start and end residues are the first and last elements of kept_residues respectively",
"kept_residues",
"=",
"[",
"]",
"insertion_residue_map",
"=",
"{",
"}",
"in_section",
"=",
"False",
"found_end",
"=",
"False",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"residue_ids",
")",
")",
":",
"residue_id",
"=",
"residue_ids",
"[",
"x",
"]",
"if",
"residue_id",
"==",
"start_res",
":",
"in_section",
"=",
"True",
"if",
"in_section",
":",
"kept_residues",
".",
"append",
"(",
"residue_id",
")",
"insertion_residue_map",
"[",
"residue_id",
"]",
"=",
"insertion_sequence",
"[",
"x",
"]",
"if",
"residue_id",
"==",
"end_res",
":",
"found_end",
"=",
"True",
"break",
"if",
"not",
"kept_residues",
":",
"raise",
"Exception",
"(",
"'The insertion sequence is empty (check the start and end residue ids).'",
")",
"if",
"not",
"found_end",
":",
"raise",
"Exception",
"(",
"'The end residue was not encountered when iterating over the insertion sequence (check the start and end residue ids).'",
")",
"# Identify the start and end Residue objects",
"try",
":",
"start_res",
"=",
"self",
".",
"residues",
"[",
"start_res",
"[",
"0",
"]",
"]",
"[",
"start_res",
"[",
"1",
":",
"]",
"]",
"end_res",
"=",
"self",
".",
"residues",
"[",
"end_res",
"[",
"0",
"]",
"]",
"[",
"end_res",
"[",
"1",
":",
"]",
"]",
"except",
"Exception",
",",
"e",
":",
"raise",
"Exception",
"(",
"'The start or end residue could not be found in the PDB file.'",
")",
"return",
"self",
".",
"add_backbone_atoms_linearly",
"(",
"start_res",
",",
"end_res",
",",
"kept_residues",
",",
"insertion_residue_map",
")"
]
| A utility wrapper around add_backbone_atoms_linearly. Adds backbone atoms in a straight line from the first to
the last residue of residue_ids.
loop_json_filepath is a path to a JSON file using the JSON format for Rosetta loops files. This file identifies
the insertion points of the sequence.
fasta_filepath is a path to a FASTA file with one sequence. This sequence will be used as the sequence for
the inserted residues (between the start and stop residues defined in loop_json_filepath).
residue_ids is a list of PDB chain residues (columns 22-27 of ATOM lines in the PDB format). It is assumed that
they are sequential although the logic does not depend on that. This list should have the same length as the
sequence identified in the FASTA file. | [
"A",
"utility",
"wrapper",
"around",
"add_backbone_atoms_linearly",
".",
"Adds",
"backbone",
"atoms",
"in",
"a",
"straight",
"line",
"from",
"the",
"first",
"to",
"the",
"last",
"residue",
"of",
"residue_ids",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/spackle.py#L46-L106 | train |
Kortemme-Lab/klab | klab/bio/spackle.py | Spackler.add_atoms_linearly | def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
'''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined).
'''
atom_name_map = {
'CA' : ' CA ',
'C' : ' C ',
'N' : ' N ',
'O' : ' O ',
}
assert(start_atom.residue.chain == end_atom.residue.chain)
chain_id = start_atom.residue.chain
# Initialize steps
num_new_atoms = float(len(new_atoms))
X, Y, Z = start_atom.x, start_atom.y, start_atom.z
x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
jitter = 0
if jitterbug:
jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D
new_lines = []
next_serial_number = max(sorted(self.atoms.keys())) + 1
round = 0
for new_atom in new_atoms:
X, Y, Z = X + x_step, Y + y_step, Z + z_step
if jitter:
if round % 3 == 0:
X, Y = X + jitter, Y - jitter
elif round % 3 == 1:
Y, Z = Y + jitter, Z - jitter
elif round % 3 == 2:
Z, X = Z + jitter, X - jitter
round += 1
residue_id, residue_type, atom_name = new_atom
assert(len(residue_type) == 3)
assert(len(residue_id) == 6)
new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
next_serial_number += 1
new_pdb = []
in_start_residue = False
for l in self.indexed_lines:
if l[0] and l[3].serial_number == start_atom.serial_number:
in_start_residue = True
if in_start_residue and l[3].serial_number != start_atom.serial_number:
new_pdb.extend(new_lines)
#colortext.warning('\n'.join(new_lines))
in_start_residue = False
if l[0]:
#print(l[2])
new_pdb.append(l[2])
else:
#print(l[1])
new_pdb.append(l[1])
return '\n'.join(new_pdb) | python | def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
'''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined).
'''
atom_name_map = {
'CA' : ' CA ',
'C' : ' C ',
'N' : ' N ',
'O' : ' O ',
}
assert(start_atom.residue.chain == end_atom.residue.chain)
chain_id = start_atom.residue.chain
# Initialize steps
num_new_atoms = float(len(new_atoms))
X, Y, Z = start_atom.x, start_atom.y, start_atom.z
x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
jitter = 0
if jitterbug:
jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D
new_lines = []
next_serial_number = max(sorted(self.atoms.keys())) + 1
round = 0
for new_atom in new_atoms:
X, Y, Z = X + x_step, Y + y_step, Z + z_step
if jitter:
if round % 3 == 0:
X, Y = X + jitter, Y - jitter
elif round % 3 == 1:
Y, Z = Y + jitter, Z - jitter
elif round % 3 == 2:
Z, X = Z + jitter, X - jitter
round += 1
residue_id, residue_type, atom_name = new_atom
assert(len(residue_type) == 3)
assert(len(residue_id) == 6)
new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
next_serial_number += 1
new_pdb = []
in_start_residue = False
for l in self.indexed_lines:
if l[0] and l[3].serial_number == start_atom.serial_number:
in_start_residue = True
if in_start_residue and l[3].serial_number != start_atom.serial_number:
new_pdb.extend(new_lines)
#colortext.warning('\n'.join(new_lines))
in_start_residue = False
if l[0]:
#print(l[2])
new_pdb.append(l[2])
else:
#print(l[1])
new_pdb.append(l[1])
return '\n'.join(new_pdb) | [
"def",
"add_atoms_linearly",
"(",
"self",
",",
"start_atom",
",",
"end_atom",
",",
"new_atoms",
",",
"jitterbug",
"=",
"0.2",
")",
":",
"atom_name_map",
"=",
"{",
"'CA'",
":",
"' CA '",
",",
"'C'",
":",
"' C '",
",",
"'N'",
":",
"' N '",
",",
"'O'",
":",
"' O '",
",",
"}",
"assert",
"(",
"start_atom",
".",
"residue",
".",
"chain",
"==",
"end_atom",
".",
"residue",
".",
"chain",
")",
"chain_id",
"=",
"start_atom",
".",
"residue",
".",
"chain",
"# Initialize steps",
"num_new_atoms",
"=",
"float",
"(",
"len",
"(",
"new_atoms",
")",
")",
"X",
",",
"Y",
",",
"Z",
"=",
"start_atom",
".",
"x",
",",
"start_atom",
".",
"y",
",",
"start_atom",
".",
"z",
"x_step",
"=",
"(",
"end_atom",
".",
"x",
"-",
"X",
")",
"/",
"(",
"num_new_atoms",
"+",
"1.0",
")",
"y_step",
"=",
"(",
"end_atom",
".",
"y",
"-",
"Y",
")",
"/",
"(",
"num_new_atoms",
"+",
"1.0",
")",
"z_step",
"=",
"(",
"end_atom",
".",
"z",
"-",
"Z",
")",
"/",
"(",
"num_new_atoms",
"+",
"1.0",
")",
"D",
"=",
"math",
".",
"sqrt",
"(",
"x_step",
"*",
"x_step",
"+",
"y_step",
"*",
"y_step",
"+",
"z_step",
"*",
"z_step",
")",
"jitter",
"=",
"0",
"if",
"jitterbug",
":",
"jitter",
"=",
"(",
"(",
"(",
"x_step",
"+",
"y_step",
"+",
"z_step",
")",
"/",
"3.0",
")",
"*",
"jitterbug",
")",
"/",
"D",
"new_lines",
"=",
"[",
"]",
"next_serial_number",
"=",
"max",
"(",
"sorted",
"(",
"self",
".",
"atoms",
".",
"keys",
"(",
")",
")",
")",
"+",
"1",
"round",
"=",
"0",
"for",
"new_atom",
"in",
"new_atoms",
":",
"X",
",",
"Y",
",",
"Z",
"=",
"X",
"+",
"x_step",
",",
"Y",
"+",
"y_step",
",",
"Z",
"+",
"z_step",
"if",
"jitter",
":",
"if",
"round",
"%",
"3",
"==",
"0",
":",
"X",
",",
"Y",
"=",
"X",
"+",
"jitter",
",",
"Y",
"-",
"jitter",
"elif",
"round",
"%",
"3",
"==",
"1",
":",
"Y",
",",
"Z",
"=",
"Y",
"+",
"jitter",
",",
"Z",
"-",
"jitter",
"elif",
"round",
"%",
"3",
"==",
"2",
":",
"Z",
",",
"X",
"=",
"Z",
"+",
"jitter",
",",
"X",
"-",
"jitter",
"round",
"+=",
"1",
"residue_id",
",",
"residue_type",
",",
"atom_name",
"=",
"new_atom",
"assert",
"(",
"len",
"(",
"residue_type",
")",
"==",
"3",
")",
"assert",
"(",
"len",
"(",
"residue_id",
")",
"==",
"6",
")",
"new_lines",
".",
"append",
"(",
"'ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '",
".",
"format",
"(",
"str",
"(",
"next_serial_number",
")",
".",
"rjust",
"(",
"5",
")",
",",
"atom_name_map",
"[",
"atom_name",
"]",
",",
"residue_type",
",",
"residue_id",
",",
"X",
",",
"Y",
",",
"Z",
")",
")",
"next_serial_number",
"+=",
"1",
"new_pdb",
"=",
"[",
"]",
"in_start_residue",
"=",
"False",
"for",
"l",
"in",
"self",
".",
"indexed_lines",
":",
"if",
"l",
"[",
"0",
"]",
"and",
"l",
"[",
"3",
"]",
".",
"serial_number",
"==",
"start_atom",
".",
"serial_number",
":",
"in_start_residue",
"=",
"True",
"if",
"in_start_residue",
"and",
"l",
"[",
"3",
"]",
".",
"serial_number",
"!=",
"start_atom",
".",
"serial_number",
":",
"new_pdb",
".",
"extend",
"(",
"new_lines",
")",
"#colortext.warning('\\n'.join(new_lines))",
"in_start_residue",
"=",
"False",
"if",
"l",
"[",
"0",
"]",
":",
"#print(l[2])",
"new_pdb",
".",
"append",
"(",
"l",
"[",
"2",
"]",
")",
"else",
":",
"#print(l[1])",
"new_pdb",
".",
"append",
"(",
"l",
"[",
"1",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"new_pdb",
")"
]
| A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined). | [
"A",
"low",
"-",
"level",
"function",
"which",
"adds",
"new_atoms",
"between",
"start_atom",
"and",
"end_atom",
".",
"This",
"function",
"does",
"not",
"validate",
"the",
"input",
"i",
".",
"e",
".",
"the",
"calling",
"functions",
"are",
"responsible",
"for",
"ensuring",
"that",
"the",
"insertion",
"makes",
"sense",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/spackle.py#L222-L291 | train |
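
The linear placement described in the `add_atoms_linearly` docstring reduces to per-axis interpolation: with N new atoms between the start and end anchors, each coordinate advances by `(end - start) / (N + 1)` per atom. The sketch below shows only that arithmetic with invented coordinates and does not use the `Spackler` class.

```python
# Sketch only (independent of Spackler.add_atoms_linearly): placing N new atoms
# evenly between two anchor atoms. Coordinates are invented for illustration.
start = (0.0, 0.0, 0.0)
end = (4.0, 0.0, 0.0)
n_new = 3

steps = [(e - s) / (n_new + 1.0) for s, e in zip(start, end)]  # [1.0, 0.0, 0.0]

points = []
x, y, z = start
for _ in range(n_new):
    x, y, z = x + steps[0], y + steps[1], z + steps[2]
    points.append((x, y, z))

print(points)  # [(1.0, 0.0, 0.0), (2.0, 0.0, 0.0), (3.0, 0.0, 0.0)]
```

Without the small jitter that the real method adds on top of these positions, the generated atoms are exactly collinear, which is what makes the dihedral angle over four of them undefined.
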
PBR/MQ2 | MQ2/qtl.py | QTL.to_string | def to_string(self):
""" Return the string as it should be presented in a MapChart
input file.
"""
return '%s %s %s %s %s' % (
self.trait, self.start_position, self.peak_start_position,
self.peak_stop_position, self.stop_position) | python | def to_string(self):
""" Return the string as it should be presented in a MapChart
input file.
"""
return '%s %s %s %s %s' % (
self.trait, self.start_position, self.peak_start_position,
self.peak_stop_position, self.stop_position) | [
"def",
"to_string",
"(",
"self",
")",
":",
"return",
"'%s %s %s %s %s'",
"%",
"(",
"self",
".",
"trait",
",",
"self",
".",
"start_position",
",",
"self",
".",
"peak_start_position",
",",
"self",
".",
"peak_stop_position",
",",
"self",
".",
"stop_position",
")"
]
| Return the string as it should be presented in a MapChart
input file. | [
"Return",
"the",
"string",
"as",
"it",
"should",
"be",
"presented",
"in",
"a",
"MapChart",
"input",
"file",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/qtl.py#L50-L57 | train |
aacanakin/glim | glim/cli.py | main | def main():
"""
The single entry point to glim command line interface. Main method is called
from pypi console_scripts key or by glim.py on root. This function
initializes a new app given the glim commands and app commands if app
exists.
Usage
-----
$ python glim/cli.py start
$ python glim.py start (on root folder)
"""
# register the global parser
preparser = argparse.ArgumentParser(description=description,
add_help=False)
preparser.add_argument('--env', '-e', dest='env',
default='development',
help='choose application environment')
# parse existing options
namespace, extra = preparser.parse_known_args()
env = namespace.env
# register the subparsers
parser = argparse.ArgumentParser(parents=[preparser],
description=description,
add_help=True)
subparsers = parser.add_subparsers(title='commands', help='commands')
# initialize a command adapter with subparsers
commandadapter = CommandAdapter(subparsers)
# register glim commands
commandadapter.register(glim.commands)
# register app commands
appcommands = import_module('app.commands', pass_errors=True)
commandadapter.register(appcommands)
app = None
if paths.app_exists() is False:
# check if a new app is being created
new = True if 'new' in extra else False
if ('help' in extra) or ('--help' in extra) or ('-h' in extra):
help = True
else:
help = False
if help:
parser.print_help()
exit()
else:
app = make_app(env, commandadapter)
args = parser.parse_args()
command = commandadapter.match(args)
commandadapter.dispatch(command, app) | python | def main():
"""
The single entry point to glim command line interface. Main method is called
from pypi console_scripts key or by glim.py on root. This function
initializes a new app given the glim commands and app commands if app
exists.
Usage
-----
$ python glim/cli.py start
$ python glim.py start (on root folder)
"""
# register the global parser
preparser = argparse.ArgumentParser(description=description,
add_help=False)
preparser.add_argument('--env', '-e', dest='env',
default='development',
help='choose application environment')
# parse existing options
namespace, extra = preparser.parse_known_args()
env = namespace.env
# register the subparsers
parser = argparse.ArgumentParser(parents=[preparser],
description=description,
add_help=True)
subparsers = parser.add_subparsers(title='commands', help='commands')
# initialize a command adapter with subparsers
commandadapter = CommandAdapter(subparsers)
# register glim commands
commandadapter.register(glim.commands)
# register app commands
appcommands = import_module('app.commands', pass_errors=True)
commandadapter.register(appcommands)
app = None
if paths.app_exists() is False:
# check if a new app is being created
new = True if 'new' in extra else False
if ('help' in extra) or ('--help' in extra) or ('-h' in extra):
help = True
else:
help = False
if help:
parser.print_help()
exit()
else:
app = make_app(env, commandadapter)
args = parser.parse_args()
command = commandadapter.match(args)
commandadapter.dispatch(command, app) | [
"def",
"main",
"(",
")",
":",
"# register the global parser",
"preparser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
",",
"add_help",
"=",
"False",
")",
"preparser",
".",
"add_argument",
"(",
"'--env'",
",",
"'-e'",
",",
"dest",
"=",
"'env'",
",",
"default",
"=",
"'development'",
",",
"help",
"=",
"'choose application environment'",
")",
"# parse existing options",
"namespace",
",",
"extra",
"=",
"preparser",
".",
"parse_known_args",
"(",
")",
"env",
"=",
"namespace",
".",
"env",
"# register the subparsers",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"parents",
"=",
"[",
"preparser",
"]",
",",
"description",
"=",
"description",
",",
"add_help",
"=",
"True",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'commands'",
",",
"help",
"=",
"'commands'",
")",
"# initialize a command adapter with subparsers",
"commandadapter",
"=",
"CommandAdapter",
"(",
"subparsers",
")",
"# register glim commands",
"commandadapter",
".",
"register",
"(",
"glim",
".",
"commands",
")",
"# register app commands",
"appcommands",
"=",
"import_module",
"(",
"'app.commands'",
",",
"pass_errors",
"=",
"True",
")",
"commandadapter",
".",
"register",
"(",
"appcommands",
")",
"app",
"=",
"None",
"if",
"paths",
".",
"app_exists",
"(",
")",
"is",
"False",
":",
"# check if a new app is being created",
"new",
"=",
"True",
"if",
"'new'",
"in",
"extra",
"else",
"False",
"if",
"(",
"'help'",
"in",
"extra",
")",
"or",
"(",
"'--help'",
"in",
"extra",
")",
"or",
"(",
"'-h'",
"in",
"extra",
")",
":",
"help",
"=",
"True",
"else",
":",
"help",
"=",
"False",
"if",
"help",
":",
"parser",
".",
"print_help",
"(",
")",
"exit",
"(",
")",
"else",
":",
"app",
"=",
"make_app",
"(",
"env",
",",
"commandadapter",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"command",
"=",
"commandadapter",
".",
"match",
"(",
"args",
")",
"commandadapter",
".",
"dispatch",
"(",
"command",
",",
"app",
")"
]
| The single entry point to glim command line interface. Main method is called
from pypi console_scripts key or by glim.py on root. This function
initializes a new app given the glim commands and app commands if app
exists.
Usage
-----
$ python glim/cli.py start
$ python glim.py start (on root folder) | [
"The",
"single",
"entry",
"point",
"to",
"glim",
"command",
"line",
"interface",
".",
"Main",
"method",
"is",
"called",
"from",
"pypi",
"console_scripts",
"key",
"or",
"by",
"glim",
".",
"py",
"on",
"root",
".",
"This",
"function",
"initializes",
"a",
"new",
"app",
"given",
"the",
"glim",
"commands",
"and",
"app",
"commands",
"if",
"app",
"exists",
"."
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/cli.py#L35-L96 | train |
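
The option handling in `glim.cli.main` above uses a common two-stage argparse pattern: a bare "preparser" created with `add_help=False` extracts `--env` early via `parse_known_args()`, and the full parser then inherits that option through `parents=[preparser]`. The standalone sketch below reproduces only the pattern; the `start` subcommand and the explicit argv lists are placeholders, not glim's actual command set.

```python
# Two-stage argparse sketch: an early preparser for global options, then a full
# parser (with subcommands) that inherits them via parents=[...].
import argparse

preparser = argparse.ArgumentParser(add_help=False)
preparser.add_argument('--env', '-e', dest='env', default='development')

namespace, extra = preparser.parse_known_args(['--env', 'production', 'start'])
print(namespace.env)  # 'production'
print(extra)          # ['start'] -> left over for the subcommand parser

parser = argparse.ArgumentParser(parents=[preparser], add_help=True)
subparsers = parser.add_subparsers(title='commands', dest='command')
subparsers.add_parser('start')  # placeholder command, not glim's real command list

args = parser.parse_args(['--env', 'production', 'start'])
print(args.command, args.env)   # start production
```
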
aacanakin/glim | glim/cli.py | make_app | def make_app(env, commandadapter=None):
"""
Function creates an app given environment
"""
mconfig = import_module('app.config.%s' % env, pass_errors=True)
if mconfig is None and paths.app_exists():
print(colored('Configuration for "%s" environment is not found' % env, 'red'))
return None
mstart = import_module('app.start')
mroutes = import_module('app.routes')
mcontrollers = import_module('app.controllers')
before = mstart.before
return Glim(commandadapter, mconfig, mroutes, mcontrollers, env, before) | python | def make_app(env, commandadapter=None):
"""
Function creates an app given environment
"""
mconfig = import_module('app.config.%s' % env, pass_errors=True)
if mconfig is None and paths.app_exists():
print(colored('Configuration for "%s" environment is not found' % env, 'red'))
return None
mstart = import_module('app.start')
mroutes = import_module('app.routes')
mcontrollers = import_module('app.controllers')
before = mstart.before
return Glim(commandadapter, mconfig, mroutes, mcontrollers, env, before) | [
"def",
"make_app",
"(",
"env",
",",
"commandadapter",
"=",
"None",
")",
":",
"mconfig",
"=",
"import_module",
"(",
"'app.config.%s'",
"%",
"env",
",",
"pass_errors",
"=",
"True",
")",
"if",
"mconfig",
"is",
"None",
"and",
"paths",
".",
"app_exists",
"(",
")",
":",
"print",
"(",
"colored",
"(",
"'Configuration for \"%s\" environment is not found'",
"%",
"env",
",",
"'red'",
")",
")",
"return",
"None",
"mstart",
"=",
"import_module",
"(",
"'app.start'",
")",
"mroutes",
"=",
"import_module",
"(",
"'app.routes'",
")",
"mcontrollers",
"=",
"import_module",
"(",
"'app.controllers'",
")",
"before",
"=",
"mstart",
".",
"before",
"return",
"Glim",
"(",
"commandadapter",
",",
"mconfig",
",",
"mroutes",
",",
"mcontrollers",
",",
"env",
",",
"before",
")"
]
| Function creates an app given environment | [
"Function",
"creates",
"an",
"app",
"given",
"environment"
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/cli.py#L98-L111 | train |
projectshift/shift-boiler | boiler/user/views_social.py | BaseSocial.callback | def callback(self):
""" Generate callback url for provider """
next = request.args.get('next') or None
endpoint = 'social.{}.handle'.format(self.provider)
return url_for(endpoint, _external=True, next=next) | python | def callback(self):
""" Generate callback url for provider """
next = request.args.get('next') or None
endpoint = 'social.{}.handle'.format(self.provider)
return url_for(endpoint, _external=True, next=next) | [
"def",
"callback",
"(",
"self",
")",
":",
"next",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'next'",
")",
"or",
"None",
"endpoint",
"=",
"'social.{}.handle'",
".",
"format",
"(",
"self",
".",
"provider",
")",
"return",
"url_for",
"(",
"endpoint",
",",
"_external",
"=",
"True",
",",
"next",
"=",
"next",
")"
]
| Generate callback url for provider | [
"Generate",
"callback",
"url",
"for",
"provider"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_social.py#L48-L52 | train |
projectshift/shift-boiler | boiler/user/views_social.py | BaseSocial.next | def next(self):
""" Where to redirect after authorization """
next = request.args.get('next')
if next is None:
params = self.default_redirect_params
next = url_for(self.default_redirect_endpoint, **params)
return next | python | def next(self):
""" Where to redirect after authorization """
next = request.args.get('next')
if next is None:
params = self.default_redirect_params
next = url_for(self.default_redirect_endpoint, **params)
return next | [
"def",
"next",
"(",
"self",
")",
":",
"next",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'next'",
")",
"if",
"next",
"is",
"None",
":",
"params",
"=",
"self",
".",
"default_redirect_params",
"next",
"=",
"url_for",
"(",
"self",
".",
"default_redirect_endpoint",
",",
"*",
"*",
"params",
")",
"return",
"next"
]
| Where to redirect after authorization | [
"Where",
"to",
"redirect",
"after",
"authorization"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_social.py#L60-L66 | train |
projectshift/shift-boiler | boiler/user/views_social.py | BaseHandle.dispatch_request | def dispatch_request(self):
""" Handle redirect back from provider """
if current_user.is_authenticated:
return redirect(self.next)
# clear previous!
if 'social_data' in session:
del session['social_data']
res = self.app.authorized_response()
if res is None:
if self.flash: flash(self.auth_failed_msg, 'danger')
return redirect(self.next)
# retrieve profile
data = self.get_profile_data(res)
if data is None:
if self.flash: flash(self.data_failed_msg, 'danger')
return redirect(self.next)
# attempt login
try:
ok = user_service.attempt_social_login(self.provider, data['id'])
if ok:
if self.flash:
flash(self.logged_in_msg.format(self.provider), 'success')
return redirect(self.logged_in)
except x.AccountLocked as locked:
msg = self.lock_msg.format(locked.locked_until)
if self.flash: flash(msg, 'danger')
url = url_for(self.lock_redirect, **self.lock_redirect_params)
return redirect(url)
except x.EmailNotConfirmed:
return redirect(url_for(self.unconfirmed_email_endpoint))
# get data
email = data.get('email')
provider = data.get('provider')
id = data.get('id')
id_column = '{}_id'.format(provider)
# user exists: add social id to profile
user = user_service.first(email=email)
if user:
setattr(user, id_column, id)
user_service.save(user)
# no user: register
if not user:
cfg = current_app.config
send_welcome = cfg.get('USER_SEND_WELCOME_MESSAGE')
base_confirm_url = cfg.get('USER_BASE_EMAIL_CONFIRM_URL')
if not base_confirm_url:
endpoint = 'user.confirm.email.request'
base_confirm_url = url_for(endpoint, _external=True)
data = dict(email=email)
data[id_column] = id
user = user_service.register(
user_data=data,
send_welcome=send_welcome,
base_confirm_url=base_confirm_url
)
# email confirmed?
if user_service.require_confirmation and not user.email_confirmed:
return redirect(url_for(self.ok_endpoint, **self.ok_params))
# otherwise just login
user_service.force_login(user)
return redirect(self.force_login_redirect) | python | def dispatch_request(self):
""" Handle redirect back from provider """
if current_user.is_authenticated:
return redirect(self.next)
# clear previous!
if 'social_data' in session:
del session['social_data']
res = self.app.authorized_response()
if res is None:
if self.flash: flash(self.auth_failed_msg, 'danger')
return redirect(self.next)
# retrieve profile
data = self.get_profile_data(res)
if data is None:
if self.flash: flash(self.data_failed_msg, 'danger')
return redirect(self.next)
# attempt login
try:
ok = user_service.attempt_social_login(self.provider, data['id'])
if ok:
if self.flash:
flash(self.logged_in_msg.format(self.provider), 'success')
return redirect(self.logged_in)
except x.AccountLocked as locked:
msg = self.lock_msg.format(locked.locked_until)
if self.flash: flash(msg, 'danger')
url = url_for(self.lock_redirect, **self.lock_redirect_params)
return redirect(url)
except x.EmailNotConfirmed:
return redirect(url_for(self.unconfirmed_email_endpoint))
# get data
email = data.get('email')
provider = data.get('provider')
id = data.get('id')
id_column = '{}_id'.format(provider)
# user exists: add social id to profile
user = user_service.first(email=email)
if user:
setattr(user, id_column, id)
user_service.save(user)
# no user: register
if not user:
cfg = current_app.config
send_welcome = cfg.get('USER_SEND_WELCOME_MESSAGE')
base_confirm_url = cfg.get('USER_BASE_EMAIL_CONFIRM_URL')
if not base_confirm_url:
endpoint = 'user.confirm.email.request'
base_confirm_url = url_for(endpoint, _external=True)
data = dict(email=email)
data[id_column] = id
user = user_service.register(
user_data=data,
send_welcome=send_welcome,
base_confirm_url=base_confirm_url
)
# email confirmed?
if user_service.require_confirmation and not user.email_confirmed:
return redirect(url_for(self.ok_endpoint, **self.ok_params))
# otherwise just login
user_service.force_login(user)
return redirect(self.force_login_redirect) | [
"def",
"dispatch_request",
"(",
"self",
")",
":",
"if",
"current_user",
".",
"is_authenticated",
":",
"return",
"redirect",
"(",
"self",
".",
"next",
")",
"# clear previous!",
"if",
"'social_data'",
"in",
"session",
":",
"del",
"session",
"[",
"'social_data'",
"]",
"res",
"=",
"self",
".",
"app",
".",
"authorized_response",
"(",
")",
"if",
"res",
"is",
"None",
":",
"if",
"self",
".",
"flash",
":",
"flash",
"(",
"self",
".",
"auth_failed_msg",
",",
"'danger'",
")",
"return",
"redirect",
"(",
"self",
".",
"next",
")",
"# retrieve profile",
"data",
"=",
"self",
".",
"get_profile_data",
"(",
"res",
")",
"if",
"data",
"is",
"None",
":",
"if",
"self",
".",
"flash",
":",
"flash",
"(",
"self",
".",
"data_failed_msg",
",",
"'danger'",
")",
"return",
"redirect",
"(",
"self",
".",
"next",
")",
"# attempt login",
"try",
":",
"ok",
"=",
"user_service",
".",
"attempt_social_login",
"(",
"self",
".",
"provider",
",",
"data",
"[",
"'id'",
"]",
")",
"if",
"ok",
":",
"if",
"self",
".",
"flash",
":",
"flash",
"(",
"self",
".",
"logged_in_msg",
".",
"format",
"(",
"self",
".",
"provider",
")",
",",
"'success'",
")",
"return",
"redirect",
"(",
"self",
".",
"logged_in",
")",
"except",
"x",
".",
"AccountLocked",
"as",
"locked",
":",
"msg",
"=",
"self",
".",
"lock_msg",
".",
"format",
"(",
"locked",
".",
"locked_until",
")",
"if",
"self",
".",
"flash",
":",
"flash",
"(",
"msg",
",",
"'danger'",
")",
"url",
"=",
"url_for",
"(",
"self",
".",
"lock_redirect",
",",
"*",
"*",
"self",
".",
"lock_redirect_params",
")",
"return",
"redirect",
"(",
"url",
")",
"except",
"x",
".",
"EmailNotConfirmed",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"self",
".",
"unconfirmed_email_endpoint",
")",
")",
"# get data",
"email",
"=",
"data",
".",
"get",
"(",
"'email'",
")",
"provider",
"=",
"data",
".",
"get",
"(",
"'provider'",
")",
"id",
"=",
"data",
".",
"get",
"(",
"'id'",
")",
"id_column",
"=",
"'{}_id'",
".",
"format",
"(",
"provider",
")",
"# user exists: add social id to profile",
"user",
"=",
"user_service",
".",
"first",
"(",
"email",
"=",
"email",
")",
"if",
"user",
":",
"setattr",
"(",
"user",
",",
"id_column",
",",
"id",
")",
"user_service",
".",
"save",
"(",
"user",
")",
"# no user: register",
"if",
"not",
"user",
":",
"cfg",
"=",
"current_app",
".",
"config",
"send_welcome",
"=",
"cfg",
".",
"get",
"(",
"'USER_SEND_WELCOME_MESSAGE'",
")",
"base_confirm_url",
"=",
"cfg",
".",
"get",
"(",
"'USER_BASE_EMAIL_CONFIRM_URL'",
")",
"if",
"not",
"base_confirm_url",
":",
"endpoint",
"=",
"'user.confirm.email.request'",
"base_confirm_url",
"=",
"url_for",
"(",
"endpoint",
",",
"_external",
"=",
"True",
")",
"data",
"=",
"dict",
"(",
"email",
"=",
"email",
")",
"data",
"[",
"id_column",
"]",
"=",
"id",
"user",
"=",
"user_service",
".",
"register",
"(",
"user_data",
"=",
"data",
",",
"send_welcome",
"=",
"send_welcome",
",",
"base_confirm_url",
"=",
"base_confirm_url",
")",
"# email confirmed?",
"if",
"user_service",
".",
"require_confirmation",
"and",
"not",
"user",
".",
"email_confirmed",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"self",
".",
"ok_endpoint",
",",
"*",
"*",
"self",
".",
"ok_params",
")",
")",
"# otherwise just login",
"user_service",
".",
"force_login",
"(",
"user",
")",
"return",
"redirect",
"(",
"self",
".",
"force_login_redirect",
")"
]
| Handle redirect back from provider | [
"Handle",
"redirect",
"back",
"from",
"provider"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/views_social.py#L106-L176 | train |
tech-pi/doufo | src/python/doufo/tensor/binary.py | project | def project(v, n):
"""
Project Vector v onto plane with normal vector n.
"""
return v - matmul(v, n) * n / (norm(n) ** 2.0) | python | def project(v, n):
"""
Project Vector v onto plane with normal vector n.
"""
return v - matmul(v, n) * n / (norm(n) ** 2.0) | [
"def",
"project",
"(",
"v",
",",
"n",
")",
":",
"return",
"v",
"-",
"matmul",
"(",
"v",
",",
"n",
")",
"*",
"n",
"/",
"(",
"norm",
"(",
"n",
")",
"**",
"2.0",
")"
]
| Project Vector v onto plane with normal vector n. | [
"Project",
"Vector",
"v",
"onto",
"plane",
"with",
"normal",
"vector",
"n",
"."
]
| 3d375fef30670597768a6eef809b75b4b1b5a3fd | https://github.com/tech-pi/doufo/blob/3d375fef30670597768a6eef809b75b4b1b5a3fd/src/python/doufo/tensor/binary.py#L36-L40 | train |
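
The `project` helper above is the standard formula for projecting a vector onto the plane with normal `n`: subtract from `v` its component along `n`, i.e. `v - (v·n) n / ‖n‖²`. The NumPy check below uses arbitrary example vectors and assumes that doufo's `matmul` and `norm` behave as a dot product and a Euclidean norm for plain vectors.

```python
# Numerical check of the plane-projection formula with plain NumPy.
import numpy as np

v = np.array([1.0, 2.0, 3.0])
n = np.array([0.0, 0.0, 1.0])  # normal of the plane z = 0

projected = v - np.dot(v, n) * n / (np.linalg.norm(n) ** 2.0)
print(projected)             # [1. 2. 0.]
print(np.dot(projected, n))  # 0.0 -> the result lies in the plane
```
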
46elks/elkme | elkme/main.py | main | def main():
"""Executed on run"""
args = parse_args()
if args.version:
from .__init__ import __version__, __release_date__
print('elkme %s (release date %s)' % (__version__, __release_date__))
print('(c) 2015-2017 46elks AB <[email protected]>')
print(small_elk)
exit(0)
conf, conf_status = config.init_config(args)
if not conf_status[0]:
errors.append(conf_status[1])
elif conf_status[1]:
print(conf_status[1])
message = parse_message(args)
if conf_status[1] and not message:
# No message but the configuration file was stored
sys.exit(0)
try:
elks_conn = Elks(auth = (conf['username'], conf['password']),
api_url = conf.get('api_url'))
except KeyError:
errors.append('API keys not properly set. Please refer to ' +
'`elkme --usage`, `elkme --help` or ' +
'https://46elks.github.io/elkme')
if not message:
print(USAGE, file=sys.stderr)
exit(-1)
for error in errors:
print('[ERROR] {}'.format(error))
exit(-1)
options = []
if args.flash:
options.append('flashsms')
try:
send_sms(elks_conn, conf, message, length=args.length, options=options)
except ElksException as e:
print(e, file=sys.stderr) | python | def main():
"""Executed on run"""
args = parse_args()
if args.version:
from .__init__ import __version__, __release_date__
print('elkme %s (release date %s)' % (__version__, __release_date__))
print('(c) 2015-2017 46elks AB <[email protected]>')
print(small_elk)
exit(0)
conf, conf_status = config.init_config(args)
if not conf_status[0]:
errors.append(conf_status[1])
elif conf_status[1]:
print(conf_status[1])
message = parse_message(args)
if conf_status[1] and not message:
# No message but the configuration file was stored
sys.exit(0)
try:
elks_conn = Elks(auth = (conf['username'], conf['password']),
api_url = conf.get('api_url'))
except KeyError:
errors.append('API keys not properly set. Please refer to ' +
'`elkme --usage`, `elkme --help` or ' +
'https://46elks.github.io/elkme')
if not message:
print(USAGE, file=sys.stderr)
exit(-1)
for error in errors:
print('[ERROR] {}'.format(error))
exit(-1)
options = []
if args.flash:
options.append('flashsms')
try:
send_sms(elks_conn, conf, message, length=args.length, options=options)
except ElksException as e:
print(e, file=sys.stderr) | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"args",
".",
"version",
":",
"from",
".",
"__init__",
"import",
"__version__",
",",
"__release_date__",
"print",
"(",
"'elkme %s (release date %s)'",
"%",
"(",
"__version__",
",",
"__release_date__",
")",
")",
"print",
"(",
"'(c) 2015-2017 46elks AB <[email protected]>'",
")",
"print",
"(",
"small_elk",
")",
"exit",
"(",
"0",
")",
"conf",
",",
"conf_status",
"=",
"config",
".",
"init_config",
"(",
"args",
")",
"if",
"not",
"conf_status",
"[",
"0",
"]",
":",
"errors",
".",
"append",
"(",
"conf_status",
"[",
"1",
"]",
")",
"elif",
"conf_status",
"[",
"1",
"]",
":",
"print",
"(",
"conf_status",
"[",
"1",
"]",
")",
"message",
"=",
"parse_message",
"(",
"args",
")",
"if",
"conf_status",
"[",
"1",
"]",
"and",
"not",
"message",
":",
"# No message but the configuration file was stored",
"sys",
".",
"exit",
"(",
"0",
")",
"try",
":",
"elks_conn",
"=",
"Elks",
"(",
"auth",
"=",
"(",
"conf",
"[",
"'username'",
"]",
",",
"conf",
"[",
"'password'",
"]",
")",
",",
"api_url",
"=",
"conf",
".",
"get",
"(",
"'api_url'",
")",
")",
"except",
"KeyError",
":",
"errors",
".",
"append",
"(",
"'API keys not properly set. Please refer to '",
"+",
"'`elkme --usage`, `elkme --help` or '",
"+",
"'https://46elks.github.io/elkme'",
")",
"if",
"not",
"message",
":",
"print",
"(",
"USAGE",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"-",
"1",
")",
"for",
"error",
"in",
"errors",
":",
"print",
"(",
"'[ERROR] {}'",
".",
"format",
"(",
"error",
")",
")",
"exit",
"(",
"-",
"1",
")",
"options",
"=",
"[",
"]",
"if",
"args",
".",
"flash",
":",
"options",
".",
"append",
"(",
"'flashsms'",
")",
"try",
":",
"send_sms",
"(",
"elks_conn",
",",
"conf",
",",
"message",
",",
"length",
"=",
"args",
".",
"length",
",",
"options",
"=",
"options",
")",
"except",
"ElksException",
"as",
"e",
":",
"print",
"(",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
]
| Executed on run | [
"Executed",
"on",
"run"
]
| 6ebdce6f8ac852fc6f714d1f1b836f2777fece4e | https://github.com/46elks/elkme/blob/6ebdce6f8ac852fc6f714d1f1b836f2777fece4e/elkme/main.py#L48-L93 | train |
clement-alexandre/TotemBionet | totembionet/src/ggea/ggea.py | Graph._build_graph | def _build_graph(self) -> nx.DiGraph:
""" Private method to build the graph from the model. """
digraph = nx.DiGraph()
for state in self.model.all_states():
self._number_of_states += 1
for next_state in self.model.available_state(state):
self._number_of_transitions += 1
digraph.add_edge(
self._transform_state_to_string(state),
self._transform_state_to_string(next_state)
)
return digraph | python | def _build_graph(self) -> nx.DiGraph:
""" Private method to build the graph from the model. """
digraph = nx.DiGraph()
for state in self.model.all_states():
self._number_of_states += 1
for next_state in self.model.available_state(state):
self._number_of_transitions += 1
digraph.add_edge(
self._transform_state_to_string(state),
self._transform_state_to_string(next_state)
)
return digraph | [
"def",
"_build_graph",
"(",
"self",
")",
"->",
"nx",
".",
"DiGraph",
":",
"digraph",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"for",
"state",
"in",
"self",
".",
"model",
".",
"all_states",
"(",
")",
":",
"self",
".",
"_number_of_states",
"+=",
"1",
"for",
"next_state",
"in",
"self",
".",
"model",
".",
"available_state",
"(",
"state",
")",
":",
"self",
".",
"_number_of_transitions",
"+=",
"1",
"digraph",
".",
"add_edge",
"(",
"self",
".",
"_transform_state_to_string",
"(",
"state",
")",
",",
"self",
".",
"_transform_state_to_string",
"(",
"next_state",
")",
")",
"return",
"digraph"
]
| Private method to build the graph from the model. | [
"Private",
"method",
"to",
"build",
"the",
"graph",
"from",
"the",
"model",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/ggea/ggea.py#L22-L33 | train |
clement-alexandre/TotemBionet | totembionet/src/ggea/ggea.py | Graph._transform_state_to_string | def _transform_state_to_string(self, state: State) -> str:
"""
Private method which transforms a state to a string.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_state_to_string({operon: 1, mucuB: 0})
"10"
>>> graph._transform_state_to_string({operon: 2, mucuB: 1})
"21"
"""
return ''.join(str(state[gene]) for gene in self.model.genes) | python | def _transform_state_to_string(self, state: State) -> str:
"""
Private method which transforms a state to a string.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_state_to_string({operon: 1, mucuB: 0})
"10"
>>> graph._transform_state_to_string({operon: 2, mucuB: 1})
"21"
"""
return ''.join(str(state[gene]) for gene in self.model.genes) | [
"def",
"_transform_state_to_string",
"(",
"self",
",",
"state",
":",
"State",
")",
"->",
"str",
":",
"return",
"''",
".",
"join",
"(",
"str",
"(",
"state",
"[",
"gene",
"]",
")",
"for",
"gene",
"in",
"self",
".",
"model",
".",
"genes",
")"
]
| Private method which transforms a state to a string.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_state_to_string({operon: 1, mucuB: 0})
"10"
>>> graph._transform_state_to_string({operon: 2, mucuB: 1})
"21" | [
"Private",
"method",
"which",
"transform",
"a",
"state",
"to",
"a",
"string",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/ggea/ggea.py#L35-L51 | train |
clement-alexandre/TotemBionet | totembionet/src/ggea/ggea.py | Graph.as_dot | def as_dot(self) -> str:
""" Return as a string the dot version of the graph. """
return nx.drawing.nx_pydot.to_pydot(self._graph).to_string() | python | def as_dot(self) -> str:
""" Return as a string the dot version of the graph. """
return nx.drawing.nx_pydot.to_pydot(self._graph).to_string() | [
"def",
"as_dot",
"(",
"self",
")",
"->",
"str",
":",
"return",
"nx",
".",
"drawing",
".",
"nx_pydot",
".",
"to_pydot",
"(",
"self",
".",
"_graph",
")",
".",
"to_string",
"(",
")"
]
| Return as a string the dot version of the graph. | [
"Return",
"as",
"a",
"string",
"the",
"dot",
"version",
"of",
"the",
"graph",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/ggea/ggea.py#L65-L67 | train |
clement-alexandre/TotemBionet | totembionet/src/ggea/ggea.py | Graph.export_to_dot | def export_to_dot(self, filename: str = 'output') -> None:
""" Export the graph to the dot file "filename.dot". """
with open(filename + '.dot', 'w') as output:
output.write(self.as_dot()) | python | def export_to_dot(self, filename: str = 'output') -> None:
""" Export the graph to the dot file "filename.dot". """
with open(filename + '.dot', 'w') as output:
output.write(self.as_dot()) | [
"def",
"export_to_dot",
"(",
"self",
",",
"filename",
":",
"str",
"=",
"'output'",
")",
"->",
"None",
":",
"with",
"open",
"(",
"filename",
"+",
"'.dot'",
",",
"'w'",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"self",
".",
"as_dot",
"(",
")",
")"
]
| Export the graph to the dot file "filename.dot". | [
"Export",
"the",
"graph",
"to",
"the",
"dot",
"file",
"filename",
".",
"dot",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/ggea/ggea.py#L69-L72 | train |
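
Both `Graph.as_dot` and `Graph.export_to_dot` above go through networkx's pydot bridge. The sketch below exercises the same conversion on a hand-built `nx.DiGraph` instead of a ggea model; the two-gene state labels are invented, and it assumes that `networkx` and `pydot` are installed.

```python
# Standalone sketch of the networkx -> DOT path used by as_dot/export_to_dot.
import networkx as nx

g = nx.DiGraph()
g.add_edge("00", "10")  # invented state labels
g.add_edge("10", "11")

dot_string = nx.drawing.nx_pydot.to_pydot(g).to_string()  # same call as as_dot()
print(dot_string)

with open('output.dot', 'w') as output:  # same write step as export_to_dot()
    output.write(dot_string)
```
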
speechinformaticslab/vfclust | vfclust/vfclust.py | get_duration_measures | def get_duration_measures(source_file_path,
output_path=None,
phonemic=False,
semantic=False,
quiet=False,
similarity_file = None,
threshold = None):
"""Parses input arguments and runs clustering algorithm.
:param source_file_path: Required. Location of the .csv or .TextGrid file to be
analyzed.
:param output_path: Path to which to write the resultant csv file. If left None,
path will be set to the source_file_path. If set to False, no file will be
written.
:param phonemic: The letter used for phonetic clustering. Note: should be False if
semantic clustering is being used.
:param semantic: The word category used for semantic clustering. Note: should be
False if phonetic clustering is being used.
:param quiet: Set to True if you want to suppress output to the screen during processing.
:param similarity_file (optional): When doing semantic processing, this is the path of
a file containing custom term similarity scores that will be used for clustering.
If a custom file is used, the default LSA-based clustering will not be performed.
:param threshold (optional): When doing semantic processing, this threshold is used
in conjunction with a custom similarity file. The value is used as a semantic
similarity cutoff in clustering. This argument is required if a custom similarity
file is specified. This argument can also be used to override the built-in
cluster/chain thresholds.
:return data: A dictionary of measures derived by clustering the input response.
"""
#validate arguments here rather than where they're first passed in, in case this is used as a package
args = Args()
args.output_path = output_path
args.phonemic = phonemic
args.semantic = semantic
args.source_file_path = source_file_path
args.quiet = quiet
args.similarity_file = similarity_file
args.threshold = threshold
args = validate_arguments(args)
if args.phonemic:
response_category = args.phonemic
output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_phonemic_" + args.phonemic
elif args.semantic:
response_category = args.semantic
output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_semantic_" + args.semantic
else:
response_category = ""
output_prefix = ""
if args.output_path:
#want to output csv file
target_file_path = os.path.join(args.output_path, output_prefix + '.csv')
else:
#no output to system
target_file_path = False
engine = VFClustEngine(response_category=response_category,
response_file_path=args.source_file_path,
target_file_path=target_file_path,
quiet = args.quiet,
similarity_file = args.similarity_file,
threshold = args.threshold
)
return dict(engine.measures) | python | def get_duration_measures(source_file_path,
output_path=None,
phonemic=False,
semantic=False,
quiet=False,
similarity_file = None,
threshold = None):
"""Parses input arguments and runs clustering algorithm.
:param source_file_path: Required. Location of the .csv or .TextGrid file to be
analyzed.
:param output_path: Path to which to write the resultant csv file. If left None,
path will be set to the source_file_path. If set to False, no file will be
written.
:param phonemic: The letter used for phonetic clustering. Note: should be False if
semantic clustering is being used.
:param semantic: The word category used for semantic clustering. Note: should be
False if phonetic clustering is being used.
:param quiet: Set to True if you want to suppress output to the screen during processing.
:param similarity_file (optional): When doing semantic processing, this is the path of
a file containing custom term similarity scores that will be used for clustering.
If a custom file is used, the default LSA-based clustering will not be performed.
:param threshold (optional): When doing semantic processing, this threshold is used
in conjunction with a custom similarity file. The value is used as a semantic
similarity cutoff in clustering. This argument is required if a custom similarity
file is specified. This argument can also be used to override the built-in
cluster/chain thresholds.
:return data: A dictionary of measures derived by clustering the input response.
"""
#validate arguments here rather than where they're first passed in, in case this is used as a package
args = Args()
args.output_path = output_path
args.phonemic = phonemic
args.semantic = semantic
args.source_file_path = source_file_path
args.quiet = quiet
args.similarity_file = similarity_file
args.threshold = threshold
args = validate_arguments(args)
if args.phonemic:
response_category = args.phonemic
output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_phonemic_" + args.phonemic
elif args.semantic:
response_category = args.semantic
output_prefix = os.path.basename(args.source_file_path).split('.')[0] + "_vfclust_semantic_" + args.semantic
else:
response_category = ""
output_prefix = ""
if args.output_path:
#want to output csv file
target_file_path = os.path.join(args.output_path, output_prefix + '.csv')
else:
#no output to system
target_file_path = False
engine = VFClustEngine(response_category=response_category,
response_file_path=args.source_file_path,
target_file_path=target_file_path,
quiet = args.quiet,
similarity_file = args.similarity_file,
threshold = args.threshold
)
return dict(engine.measures) | [
"def",
"get_duration_measures",
"(",
"source_file_path",
",",
"output_path",
"=",
"None",
",",
"phonemic",
"=",
"False",
",",
"semantic",
"=",
"False",
",",
"quiet",
"=",
"False",
",",
"similarity_file",
"=",
"None",
",",
"threshold",
"=",
"None",
")",
":",
"#validate arguments here rather than where they're first passed in, in case this is used as a package",
"args",
"=",
"Args",
"(",
")",
"args",
".",
"output_path",
"=",
"output_path",
"args",
".",
"phonemic",
"=",
"phonemic",
"args",
".",
"semantic",
"=",
"semantic",
"args",
".",
"source_file_path",
"=",
"source_file_path",
"args",
".",
"quiet",
"=",
"quiet",
"args",
".",
"similarity_file",
"=",
"similarity_file",
"args",
".",
"threshold",
"=",
"threshold",
"args",
"=",
"validate_arguments",
"(",
"args",
")",
"if",
"args",
".",
"phonemic",
":",
"response_category",
"=",
"args",
".",
"phonemic",
"output_prefix",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"source_file_path",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"+",
"\"_vfclust_phonemic_\"",
"+",
"args",
".",
"phonemic",
"elif",
"args",
".",
"semantic",
":",
"response_category",
"=",
"args",
".",
"semantic",
"output_prefix",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"args",
".",
"source_file_path",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"+",
"\"_vfclust_semantic_\"",
"+",
"args",
".",
"semantic",
"else",
":",
"response_category",
"=",
"\"\"",
"output_prefix",
"=",
"\"\"",
"if",
"args",
".",
"output_path",
":",
"#want to output csv file",
"target_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_path",
",",
"output_prefix",
"+",
"'.csv'",
")",
"else",
":",
"#no output to system",
"target_file_path",
"=",
"False",
"engine",
"=",
"VFClustEngine",
"(",
"response_category",
"=",
"response_category",
",",
"response_file_path",
"=",
"args",
".",
"source_file_path",
",",
"target_file_path",
"=",
"target_file_path",
",",
"quiet",
"=",
"args",
".",
"quiet",
",",
"similarity_file",
"=",
"args",
".",
"similarity_file",
",",
"threshold",
"=",
"args",
".",
"threshold",
")",
"return",
"dict",
"(",
"engine",
".",
"measures",
")"
]
| Parses input arguments and runs clustering algorithm.
:param source_file_path: Required. Location of the .csv or .TextGrid file to be
analyzed.
:param output_path: Path to which to write the resultant csv file. If left None,
path will be set to the source_file_path. If set to False, no file will be
written.
:param phonemic: The letter used for phonetic clustering. Note: should be False if
semantic clustering is being used.
:param semantic: The word category used for semantic clustering. Note: should be
False if phonetic clustering is being used.
:param quiet: Set to True if you want to suppress output to the screen during processing.
:param similarity_file (optional): When doing semantic processing, this is the path of
a file containing custom term similarity scores that will be used for clustering.
If a custom file is used, the default LSA-based clustering will not be performed.
:param threshold (optional): When doing semantic processing, this threshold is used
in conjunction with a custom similarity file. The value is used as a semantic
similarity cutoff in clustering. This argument is required if a custom similarity
file is specified. This argument can also be used to override the built-in
cluster/chain thresholds.
:return data: A dictionary of measures derived by clustering the input response. | [
"Parses",
"input",
"arguments",
"and",
"runs",
"clustering",
"algorithm",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1597-L1666 | train |
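A minimal calling sketch for get_duration_measures as documented in the row above; the import path, transcript filename, and clustering letter are assumptions rather than values taken from the dataset:

# Sketch only: assumes the vfclust package is installed and that
# get_duration_measures is importable from vfclust.vfclust.
from vfclust.vfclust import get_duration_measures

measures = get_duration_measures(
    source_file_path="subject01.TextGrid",  # hypothetical transcript file
    output_path=False,                      # False suppresses the CSV, per the docstring
    phonemic="f",                           # phonetic clustering on the letter "f"
    semantic=False,
    quiet=True)
for name in sorted(measures):
    if name.startswith("COUNT_"):
        print("%s = %s" % (name, measures[name]))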
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.create_from_textgrid | def create_from_textgrid(self,word_list):
""" Fills the ParsedResponse object with a list of TextGrid.Word objects originally from a .TextGrid file.
:param list word_list: List of TextGrid.Word objects corresponding to words/tokens in the subject response.
Modifies:
- self.timing_included: TextGrid files include timing information
- self.unit_list: fills it with Unit objects derived from the word_list argument.
If the type is 'SEMANTIC', the words in these units are automatically lemmatized and
made into compound words where appropriate.
"""
self.timing_included = True
for i, entry in enumerate(word_list):
self.unit_list.append(Unit(entry, format="TextGrid",
type=self.type,
index_in_timed_response=i))
# combine compound words, remove pluralizations, etc
if self.type == "SEMANTIC":
self.lemmatize()
self.tokenize() | python | def create_from_textgrid(self,word_list):
""" Fills the ParsedResponse object with a list of TextGrid.Word objects originally from a .TextGrid file.
:param list word_list: List of TextGrid.Word objects corresponding to words/tokens in the subject response.
Modifies:
- self.timing_included: TextGrid files include timing information
- self.unit_list: fills it with Unit objects derived from the word_list argument.
If the type is 'SEMANTIC', the words in these units are automatically lemmatized and
made into compound words where appropriate.
"""
self.timing_included = True
for i, entry in enumerate(word_list):
self.unit_list.append(Unit(entry, format="TextGrid",
type=self.type,
index_in_timed_response=i))
# combine compound words, remove pluralizations, etc
if self.type == "SEMANTIC":
self.lemmatize()
self.tokenize() | [
"def",
"create_from_textgrid",
"(",
"self",
",",
"word_list",
")",
":",
"self",
".",
"timing_included",
"=",
"True",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"word_list",
")",
":",
"self",
".",
"unit_list",
".",
"append",
"(",
"Unit",
"(",
"entry",
",",
"format",
"=",
"\"TextGrid\"",
",",
"type",
"=",
"self",
".",
"type",
",",
"index_in_timed_response",
"=",
"i",
")",
")",
"# combine compound words, remove pluralizations, etc",
"if",
"self",
".",
"type",
"==",
"\"SEMANTIC\"",
":",
"self",
".",
"lemmatize",
"(",
")",
"self",
".",
"tokenize",
"(",
")"
]
| Fills the ParsedResponse object with a list of TextGrid.Word objects originally from a .TextGrid file.
:param list word_list: List of TextGrid.Word objects corresponding to words/tokens in the subject response.
Modifies:
- self.timing_included: TextGrid files include timing information
- self.unit_list: fills it with Unit objects derived from the word_list argument.
If the type is 'SEMANTIC', the words in these units are automatically lemmatized and
made into compound words where appropriate. | [
"Fills",
"the",
"ParsedResponse",
"object",
"with",
"a",
"list",
"of",
"TextGrid",
".",
"Word",
"objects",
"originally",
"from",
"a",
".",
"TextGrid",
"file",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L191-L211 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.lemmatize | def lemmatize(self):
"""Lemmatize all Units in self.unit_list.
Modifies:
- self.unit_list: converts the .text property into its lemmatized form.
This method lemmatizes all inflected variants of permissible words to
those words' respective canonical forms. This is done to ensure that
each instance of a permissible word will correspond to a term vector with
which semantic relatedness to other words' term vectors can be computed.
(Term vectors were derived from a corpus in which inflected words were
similarly lemmatized, meaning that, e.g., 'dogs' will not have a term
vector to use for semantic relatedness computation.)
"""
for unit in self.unit_list:
if lemmatizer.lemmatize(unit.text) in self.lemmas:
unit.text = lemmatizer.lemmatize(unit.text) | python | def lemmatize(self):
"""Lemmatize all Units in self.unit_list.
Modifies:
- self.unit_list: converts the .text property into its lemmatized form.
This method lemmatizes all inflected variants of permissible words to
those words' respective canonical forms. This is done to ensure that
each instance of a permissible word will correspond to a term vector with
which semantic relatedness to other words' term vectors can be computed.
(Term vectors were derived from a corpus in which inflected words were
similarly lemmatized, meaning that, e.g., 'dogs' will not have a term
vector to use for semantic relatedness computation.)
"""
for unit in self.unit_list:
if lemmatizer.lemmatize(unit.text) in self.lemmas:
unit.text = lemmatizer.lemmatize(unit.text) | [
"def",
"lemmatize",
"(",
"self",
")",
":",
"for",
"unit",
"in",
"self",
".",
"unit_list",
":",
"if",
"lemmatizer",
".",
"lemmatize",
"(",
"unit",
".",
"text",
")",
"in",
"self",
".",
"lemmas",
":",
"unit",
".",
"text",
"=",
"lemmatizer",
".",
"lemmatize",
"(",
"unit",
".",
"text",
")"
]
| Lemmatize all Units in self.unit_list.
Modifies:
- self.unit_list: converts the .text property into its lemmatized form.
This method lemmatizes all inflected variants of permissible words to
those words' respective canonical forms. This is done to ensure that
each instance of a permissible word will correspond to a term vector with
which semantic relatedness to other words' term vectors can be computed.
(Term vectors were derived from a corpus in which inflected words were
similarly lemmatized, meaning that, e.g., 'dogs' will not have a term
vector to use for semantic relatedness computation.) | [
"Lemmatize",
"all",
"Units",
"in",
"self",
".",
"unit_list",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L213-L229 | train |
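The lemmatization step above can be illustrated in isolation with NLTK's WordNetLemmatizer; the module-level lemmatizer used by vfclust is assumed to behave similarly, and the permissible-lemma set below is a made-up stand-in for self.lemmas:

# Requires the NLTK WordNet corpus (nltk.download('wordnet')).
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
permissible_lemmas = {"dog", "cat", "mouse"}   # hypothetical stand-in for self.lemmas

words = ["dogs", "cats", "mice"]
lemmatized = [lemmatizer.lemmatize(w) if lemmatizer.lemmatize(w) in permissible_lemmas else w
              for w in words]
print(lemmatized)   # ['dog', 'cat', 'mouse']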
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.tokenize | def tokenize(self):
"""Tokenizes all multiword names in the list of Units.
Modifies:
- (indirectly) self.unit_list, by combining words into compound words.
This is done because many names may be composed of multiple words, e.g.,
'grizzly bear'. In order to count the number of permissible words
generated, and also to compute semantic relatedness between these
multiword names and other names, multiword names must each be reduced to
a respective single token.
"""
if not self.quiet:
print
print "Finding compound words..."
# lists of animal names containing 2-5 separate words
compound_word_dict = {}
for compound_length in range(5,1,-1):
compound_word_dict[compound_length] = [name for name in self.names if len(name.split()) == compound_length]
current_index = 0
finished = False
while not finished:
for compound_length in range(5,1,-1): #[5, 4, 3, 2]
if current_index + compound_length - 1 < len(self.unit_list): #don't want to overstep bounds of the list
compound_word = ""
#create compound word
for word in self.unit_list[current_index:current_index + compound_length]:
compound_word += " " + word.text
compound_word = compound_word.strip() # remove initial white space
#check if compound word is in list
if compound_word in compound_word_dict[compound_length]:
#if so, create the compound word
self.make_compound_word(start_index = current_index, how_many = compound_length)
current_index += 1
break
else: #if no breaks for any number of words
current_index += 1
if current_index >= len(self.unit_list): # check here instead of at the top in case
# changing the unit list length introduces a bug
finished = True | python | def tokenize(self):
"""Tokenizes all multiword names in the list of Units.
Modifies:
- (indirectly) self.unit_list, by combining words into compound words.
This is done because many names may be composed of multiple words, e.g.,
'grizzly bear'. In order to count the number of permissible words
generated, and also to compute semantic relatedness between these
multiword names and other names, multiword names must each be reduced to
a respective single token.
"""
if not self.quiet:
print
print "Finding compound words..."
# lists of animal names containing 2-5 separate words
compound_word_dict = {}
for compound_length in range(5,1,-1):
compound_word_dict[compound_length] = [name for name in self.names if len(name.split()) == compound_length]
current_index = 0
finished = False
while not finished:
for compound_length in range(5,1,-1): #[5, 4, 3, 2]
if current_index + compound_length - 1 < len(self.unit_list): #don't want to overstep bounds of the list
compound_word = ""
#create compound word
for word in self.unit_list[current_index:current_index + compound_length]:
compound_word += " " + word.text
compound_word = compound_word.strip() # remove initial white space
#check if compound word is in list
if compound_word in compound_word_dict[compound_length]:
#if so, create the compound word
self.make_compound_word(start_index = current_index, how_many = compound_length)
current_index += 1
break
else: #if no breaks for any number of words
current_index += 1
if current_index >= len(self.unit_list): # check here instead of at the top in case
# changing the unit list length introduces a bug
finished = True | [
"def",
"tokenize",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Finding compound words...\"",
"# lists of animal names containing 2-5 separate words",
"compound_word_dict",
"=",
"{",
"}",
"for",
"compound_length",
"in",
"range",
"(",
"5",
",",
"1",
",",
"-",
"1",
")",
":",
"compound_word_dict",
"[",
"compound_length",
"]",
"=",
"[",
"name",
"for",
"name",
"in",
"self",
".",
"names",
"if",
"len",
"(",
"name",
".",
"split",
"(",
")",
")",
"==",
"compound_length",
"]",
"current_index",
"=",
"0",
"finished",
"=",
"False",
"while",
"not",
"finished",
":",
"for",
"compound_length",
"in",
"range",
"(",
"5",
",",
"1",
",",
"-",
"1",
")",
":",
"#[5, 4, 3, 2]",
"if",
"current_index",
"+",
"compound_length",
"-",
"1",
"<",
"len",
"(",
"self",
".",
"unit_list",
")",
":",
"#don't want to overstep bounds of the list",
"compound_word",
"=",
"\"\"",
"#create compound word",
"for",
"word",
"in",
"self",
".",
"unit_list",
"[",
"current_index",
":",
"current_index",
"+",
"compound_length",
"]",
":",
"compound_word",
"+=",
"\" \"",
"+",
"word",
".",
"text",
"compound_word",
"=",
"compound_word",
".",
"strip",
"(",
")",
"# remove initial white space",
"#check if compound word is in list",
"if",
"compound_word",
"in",
"compound_word_dict",
"[",
"compound_length",
"]",
":",
"#if so, create the compound word",
"self",
".",
"make_compound_word",
"(",
"start_index",
"=",
"current_index",
",",
"how_many",
"=",
"compound_length",
")",
"current_index",
"+=",
"1",
"break",
"else",
":",
"#if no breaks for any number of words",
"current_index",
"+=",
"1",
"if",
"current_index",
">=",
"len",
"(",
"self",
".",
"unit_list",
")",
":",
"# check here instead of at the top in case",
"# changing the unit list length introduces a bug",
"finished",
"=",
"True"
]
| Tokenizes all multiword names in the list of Units.
Modifies:
- (indirectly) self.unit_list, by combining words into compound words.
This is done because many names may be composed of multiple words, e.g.,
'grizzly bear'. In order to count the number of permissible words
generated, and also to compute semantic relatedness between these
multiword names and other names, multiword names must each be reduced to
a respective single token. | [
"Tokenizes",
"all",
"multiword",
"names",
"in",
"the",
"list",
"of",
"Units",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L232-L273 | train |
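The longest-match compound scan described in the docstring above, sketched on plain strings instead of vfclust Unit objects:

def find_compounds(tokens, names):
    # names: permissible multi-word names, e.g. {"grizzly bear", "polar bear"}
    by_length = {n: [x for x in names if len(x.split()) == n] for n in range(5, 1, -1)}
    out, i = [], 0
    while i < len(tokens):
        for n in range(5, 1, -1):                             # try the longest span first
            if i + n <= len(tokens):
                candidate = " ".join(tokens[i:i + n])
                if candidate in by_length[n]:
                    out.append("_".join(candidate.split()))   # e.g. "grizzly_bear"
                    i += n
                    break
        else:                                                 # no compound starts at position i
            out.append(tokens[i])
            i += 1
    return out

print(find_compounds(["grizzly", "bear", "cat"], {"grizzly bear"}))   # ['grizzly_bear', 'cat']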
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.make_compound_word | def make_compound_word(self, start_index, how_many):
"""Combines two Units in self.unit_list to make a compound word token.
:param int start_index: Index of first Unit in self.unit_list to be combined
:param int how_many: Index of how many Units in self.unit_list to be combined.
Modifies:
- self.unit_list: Modifies the Unit corresponding to the first word
in the compound word. Changes the .text property to include .text
properties from subsequent Units, separated by underscores. Modifies
the .original_text property to record each component word separately.
Modifies the .end_time property to be the .end_time of the final unit
in the compound word. Finally, after extracting the text and timing
information, it removes all units in the compound word except for the
first.
.. note: This method is only used with semantic processing, so we don't need to worry
about the phonetic representation of Units.
"""
if not self.quiet:
compound_word = ""
for word in self.unit_list[start_index:start_index + how_many]:
compound_word += " " + word.text
print compound_word.strip(), "-->","_".join(compound_word.split())
#combine text
for other_unit in range(1, how_many):
self.unit_list[start_index].original_text.append(self.unit_list[start_index + other_unit].text)
self.unit_list[start_index].text += "_" + self.unit_list[start_index + other_unit].text
#start time is the same. End time is the end time of the LAST word
self.unit_list[start_index].end_time = self.unit_list[start_index + how_many - 1].end_time
#shorten unit_list
self.unit_list = self.unit_list[:start_index + 1] + self.unit_list[start_index + how_many:] | python | def make_compound_word(self, start_index, how_many):
"""Combines two Units in self.unit_list to make a compound word token.
:param int start_index: Index of first Unit in self.unit_list to be combined
:param int how_many: Index of how many Units in self.unit_list to be combined.
Modifies:
- self.unit_list: Modifies the Unit corresponding to the first word
in the compound word. Changes the .text property to include .text
properties from subsequent Units, separated by underscores. Modifies
the .original_text property to record each component word separately.
Modifies the .end_time property to be the .end_time of the final unit
in the compound word. Finally, after extracting the text and timing
information, it removes all units in the compound word except for the
first.
.. note: This method is only used with semantic processing, so we don't need to worry
about the phonetic representation of Units.
"""
if not self.quiet:
compound_word = ""
for word in self.unit_list[start_index:start_index + how_many]:
compound_word += " " + word.text
print compound_word.strip(), "-->","_".join(compound_word.split())
#combine text
for other_unit in range(1, how_many):
self.unit_list[start_index].original_text.append(self.unit_list[start_index + other_unit].text)
self.unit_list[start_index].text += "_" + self.unit_list[start_index + other_unit].text
#start time is the same. End time is the end time of the LAST word
self.unit_list[start_index].end_time = self.unit_list[start_index + how_many - 1].end_time
#shorten unit_list
self.unit_list = self.unit_list[:start_index + 1] + self.unit_list[start_index + how_many:] | [
"def",
"make_compound_word",
"(",
"self",
",",
"start_index",
",",
"how_many",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"compound_word",
"=",
"\"\"",
"for",
"word",
"in",
"self",
".",
"unit_list",
"[",
"start_index",
":",
"start_index",
"+",
"how_many",
"]",
":",
"compound_word",
"+=",
"\" \"",
"+",
"word",
".",
"text",
"print",
"compound_word",
".",
"strip",
"(",
")",
",",
"\"-->\"",
",",
"\"_\"",
".",
"join",
"(",
"compound_word",
".",
"split",
"(",
")",
")",
"#combine text",
"for",
"other_unit",
"in",
"range",
"(",
"1",
",",
"how_many",
")",
":",
"self",
".",
"unit_list",
"[",
"start_index",
"]",
".",
"original_text",
".",
"append",
"(",
"self",
".",
"unit_list",
"[",
"start_index",
"+",
"other_unit",
"]",
".",
"text",
")",
"self",
".",
"unit_list",
"[",
"start_index",
"]",
".",
"text",
"+=",
"\"_\"",
"+",
"self",
".",
"unit_list",
"[",
"start_index",
"+",
"other_unit",
"]",
".",
"text",
"#start time is the same. End time is the end time of the LAST word",
"self",
".",
"unit_list",
"[",
"start_index",
"]",
".",
"end_time",
"=",
"self",
".",
"unit_list",
"[",
"start_index",
"+",
"how_many",
"-",
"1",
"]",
".",
"end_time",
"#shorten unit_list",
"self",
".",
"unit_list",
"=",
"self",
".",
"unit_list",
"[",
":",
"start_index",
"+",
"1",
"]",
"+",
"self",
".",
"unit_list",
"[",
"start_index",
"+",
"how_many",
":",
"]"
]
| Combines two Units in self.unit_list to make a compound word token.
:param int start_index: Index of first Unit in self.unit_list to be combined
:param int how_many: Index of how many Units in self.unit_list to be combined.
Modifies:
- self.unit_list: Modifies the Unit corresponding to the first word
in the compound word. Changes the .text property to include .text
properties from subsequent Units, separated by underscores. Modifies
the .original_text property to record each component word separately.
Modifies the .end_time property to be the .end_time of the final unit
in the compound word. Finally, after extracting the text and timing
information, it removes all units in the compound word except for the
first.
.. note: This method is only used with semantic processing, so we don't need to worry
about the phonetic representation of Units. | [
"Combines",
"two",
"Units",
"in",
"self",
".",
"unit_list",
"to",
"make",
"a",
"compound",
"word",
"token",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L275-L310 | train |
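The merge performed by make_compound_word, sketched with plain dicts standing in for Unit objects:

def merge_span(units, start, how_many):
    head = units[start]
    for other in units[start + 1:start + how_many]:
        head["original_text"].append(other["text"])
        head["text"] += "_" + other["text"]
    head["end_time"] = units[start + how_many - 1]["end_time"]   # keep the last word's end time
    return units[:start + 1] + units[start + how_many:]

units = [{"text": "polar", "original_text": ["polar"], "end_time": 1.2},
         {"text": "bear",  "original_text": ["bear"],  "end_time": 1.9}]
print(merge_span(units, 0, 2)[0]["text"])   # polar_bear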
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.remove_unit | def remove_unit(self, index):
'''Removes the unit at the given index in self.unit_list. Does not modify any other units.'''
if not self.quiet:
print "Removing", self.unit_list[index].text
self.unit_list.pop(index) | python | def remove_unit(self, index):
'''Removes the unit at the given index in self.unit_list. Does not modify any other units.'''
if not self.quiet:
print "Removing", self.unit_list[index].text
self.unit_list.pop(index) | [
"def",
"remove_unit",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"\"Removing\"",
",",
"self",
".",
"unit_list",
"[",
"index",
"]",
".",
"text",
"self",
".",
"unit_list",
".",
"pop",
"(",
"index",
")"
]
| Removes the unit at the given index in self.unit_list. Does not modify any other units. | [
"Removes",
"the",
"unit",
"at",
"the",
"given",
"index",
"in",
"self",
".",
"unit_list",
".",
"Does",
"not",
"modify",
"any",
"other",
"units",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L312-L316 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.combine_same_stem_units | def combine_same_stem_units(self, index):
"""Combines adjacent words with the same stem into a single unit.
:param int index: Index of Unit in self.unit_list to be combined with the
subsequent Unit.
Modifies:
- self.unit_list: Modifies the .original_text property of the Unit
corresponding to the index. Changes the .end_time property to be the
.end_time of the next Unit, as Units with the same stem are considered
as a single Unit in clustering. Finally, after extracting the text and timing
information, it removes the unit at index+1.
"""
if not self.quiet:
combined_word = ""
for word in self.unit_list[index:index + 2]:
for original_word in word.original_text:
combined_word += " " + original_word
print combined_word.strip(), "-->","/".join(combined_word.split())
# edit word list to reflect what words are represented by this unit
self.unit_list[index].original_text.append(self.unit_list[index + 1].text)
#start time is the same. End time is the end time of the LAST word
self.unit_list[index].end_time = self.unit_list[index + 1].end_time
# remove word with duplicate stem
self.unit_list.pop(index + 1) | python | def combine_same_stem_units(self, index):
"""Combines adjacent words with the same stem into a single unit.
:param int index: Index of Unit in self.unit_list to be combined with the
subsequent Unit.
Modifies:
- self.unit_list: Modifies the .original_text property of the Unit
corresponding to the index. Changes the .end_time property to be the
.end_time of the next Unit, as Units with the same stem are considered
as a single Unit in clustering. Finally, after extracting the text and timing
information, it removes the unit at index+1.
"""
if not self.quiet:
combined_word = ""
for word in self.unit_list[index:index + 2]:
for original_word in word.original_text:
combined_word += " " + original_word
print combined_word.strip(), "-->","/".join(combined_word.split())
# edit word list to reflect what words are represented by this unit
self.unit_list[index].original_text.append(self.unit_list[index + 1].text)
#start time is the same. End time is the end time of the LAST word
self.unit_list[index].end_time = self.unit_list[index + 1].end_time
# remove word with duplicate stem
self.unit_list.pop(index + 1) | [
"def",
"combine_same_stem_units",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"combined_word",
"=",
"\"\"",
"for",
"word",
"in",
"self",
".",
"unit_list",
"[",
"index",
":",
"index",
"+",
"2",
"]",
":",
"for",
"original_word",
"in",
"word",
".",
"original_text",
":",
"combined_word",
"+=",
"\" \"",
"+",
"original_word",
"print",
"combined_word",
".",
"strip",
"(",
")",
",",
"\"-->\"",
",",
"\"/\"",
".",
"join",
"(",
"combined_word",
".",
"split",
"(",
")",
")",
"# edit word list to reflect what words are represented by this unit",
"self",
".",
"unit_list",
"[",
"index",
"]",
".",
"original_text",
".",
"append",
"(",
"self",
".",
"unit_list",
"[",
"index",
"+",
"1",
"]",
".",
"text",
")",
"#start time is the same. End time is the end time of the LAST word",
"self",
".",
"unit_list",
"[",
"index",
"]",
".",
"end_time",
"=",
"self",
".",
"unit_list",
"[",
"index",
"+",
"1",
"]",
".",
"end_time",
"# remove word with duplicate stem",
"self",
".",
"unit_list",
".",
"pop",
"(",
"index",
"+",
"1",
")"
]
| Combines adjacent words with the same stem into a single unit.
:param int index: Index of Unit in self.unit_list to be combined with the
subsequent Unit.
Modifies:
- self.unit_list: Modifies the .original_text property of the Unit
corresponding to the index. Changes the .end_time property to be the
.end_time of the next Unit, as Units with the same stem are considered
as a single Unit in clustering. Finally, after extracting the text and timing
information, it removes the unit at index+1. | [
"Combines",
"adjacent",
"words",
"with",
"the",
"same",
"stem",
"into",
"a",
"single",
"unit",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L318-L347 | train |
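The same-stem test used above relies on the NLTK Porter stemmer; a standalone check of the comparison:

from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
for a, b in [("sled", "sledding"), ("dog", "dogs"), ("dog", "cat")]:
    print("%s / %s -> same stem: %s" % (a, b, stemmer.stem(a) == stemmer.stem(b)))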
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.display | def display(self):
"""Pretty-prints the ParsedResponse to the screen."""
table_list = []
table_list.append(("Text","Orig. Text","Start time","End time", "Phonetic"))
for unit in self.unit_list:
table_list.append((unit.text,
"/".join(unit.original_text),
unit.start_time,
unit.end_time,
unit.phonetic_representation))
print_table(table_list) | python | def display(self):
"""Pretty-prints the ParsedResponse to the screen."""
table_list = []
table_list.append(("Text","Orig. Text","Start time","End time", "Phonetic"))
for unit in self.unit_list:
table_list.append((unit.text,
"/".join(unit.original_text),
unit.start_time,
unit.end_time,
unit.phonetic_representation))
print_table(table_list) | [
"def",
"display",
"(",
"self",
")",
":",
"table_list",
"=",
"[",
"]",
"table_list",
".",
"append",
"(",
"(",
"\"Text\"",
",",
"\"Orig. Text\"",
",",
"\"Start time\"",
",",
"\"End time\"",
",",
"\"Phonetic\"",
")",
")",
"for",
"unit",
"in",
"self",
".",
"unit_list",
":",
"table_list",
".",
"append",
"(",
"(",
"unit",
".",
"text",
",",
"\"/\"",
".",
"join",
"(",
"unit",
".",
"original_text",
")",
",",
"unit",
".",
"start_time",
",",
"unit",
".",
"end_time",
",",
"unit",
".",
"phonetic_representation",
")",
")",
"print_table",
"(",
"table_list",
")"
]
| Pretty-prints the ParsedResponse to the screen. | [
"Pretty",
"-",
"prints",
"the",
"ParsedResponse",
"to",
"the",
"screen",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L350-L361 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.generate_phonetic_representation | def generate_phonetic_representation(self, word):
"""
Returns a generated phonetic representation for a word.
:param str word: a word to be phoneticized.
:return: A list of phonemes representing the phoneticized word.
This method is used for words for which there is no pronunciation
entry in the CMU dictionary. The function generates a
pronunciation for the word in the standard CMU format. This can then
be converted to a compact phonetic representation using
modify_phonetic_representation().
"""
with NamedTemporaryFile() as temp_file:
# Write the word to a temp file
temp_file.write(word)
#todo - clean up this messy t2p path
t2pargs = [os.path.abspath(os.path.join(os.path.dirname(__file__),'t2p/t2p')),
'-transcribe', os.path.join(data_path, 'cmudict.0.7a.tree'),
temp_file.name]
temp_file.seek(0)
output, error = subprocess.Popen(
t2pargs, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
output = output.split()
phonetic_representation = output[1:]
return phonetic_representation | python | def generate_phonetic_representation(self, word):
"""
Returns a generated phonetic representation for a word.
:param str word: a word to be phoneticized.
:return: A list of phonemes representing the phoneticized word.
This method is used for words for which there is no pronunciation
entry in the CMU dictionary. The function generates a
pronunciation for the word in the standard CMU format. This can then
be converted to a compact phonetic representation using
modify_phonetic_representation().
"""
with NamedTemporaryFile() as temp_file:
# Write the word to a temp file
temp_file.write(word)
#todo - clean up this messy t2p path
t2pargs = [os.path.abspath(os.path.join(os.path.dirname(__file__),'t2p/t2p')),
'-transcribe', os.path.join(data_path, 'cmudict.0.7a.tree'),
temp_file.name]
temp_file.seek(0)
output, error = subprocess.Popen(
t2pargs, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
output = output.split()
phonetic_representation = output[1:]
return phonetic_representation | [
"def",
"generate_phonetic_representation",
"(",
"self",
",",
"word",
")",
":",
"with",
"NamedTemporaryFile",
"(",
")",
"as",
"temp_file",
":",
"# Write the word to a temp file",
"temp_file",
".",
"write",
"(",
"word",
")",
"#todo - clean up this messy t2p path",
"t2pargs",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'t2p/t2p'",
")",
")",
",",
"'-transcribe'",
",",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'cmudict.0.7a.tree'",
")",
",",
"temp_file",
".",
"name",
"]",
"temp_file",
".",
"seek",
"(",
"0",
")",
"output",
",",
"error",
"=",
"subprocess",
".",
"Popen",
"(",
"t2pargs",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"output",
"=",
"output",
".",
"split",
"(",
")",
"phonetic_representation",
"=",
"output",
"[",
"1",
":",
"]",
"return",
"phonetic_representation"
]
| Returns a generated phonetic representation for a word.
:param str word: a word to be phoneticized.
:return: A list of phonemes representing the phoneticized word.
This method is used for words for which there is no pronunciation
entry in the CMU dictionary. The function generates a
pronunciation for the word in the standard CMU format. This can then
be converted to a compact phonetic representation using
modify_phonetic_representation(). | [
"Returns",
"a",
"generated",
"phonetic",
"representation",
"for",
"a",
"word",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L363-L392 | train |
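This method is the fallback for words missing from CMUdict; a sketch of the overall lookup-then-fallback pattern using NLTK's copy of CMUdict (the bundled t2p grapheme-to-phoneme binary itself is not reproduced here):

# Requires the NLTK cmudict corpus (nltk.download('cmudict')).
from nltk.corpus import cmudict

pron = cmudict.dict()            # word -> list of pronunciations, each a list of ARPAbet phones
word = "hello"
if word in pron:
    print(pron[word][0])         # e.g. ['HH', 'AH0', 'L', 'OW1']
else:
    # out-of-dictionary words are where a grapheme-to-phoneme tool such as t2p is needed
    print("%s not in CMUdict" % word)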
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.modify_phonetic_representation | def modify_phonetic_representation(self, phonetic_representation):
""" Returns a compact phonetic representation given a CMUdict-formatted representation.
:param list phonetic_representation: a phonetic representation in standard
CMUdict formatting, i.e. a list of phonemes like ['HH', 'EH0', 'L', 'OW1']
:returns: A string representing a custom phonetic representation, where each phoneme is
mapped to a single ascii character.
Changing the phonetic representation from a list to a string is useful for calculating phonetic
similarity scores.
"""
for i in range(len(phonetic_representation)):
# Remove numerical stress indicators
phonetic_representation[i] = re.sub('\d+', '', phonetic_representation[i])
multis = ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'CH', 'DH', 'EH', 'ER',
'EY', 'HH', 'IH', 'IY', 'JH', 'NG', 'OW', 'OY', 'SH',
'TH', 'UH', 'UW', 'ZH']
singles = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w']
for i in range(len(phonetic_representation)):
# Convert multicharacter phone symbols to arbitrary
# single-character symbols
if phonetic_representation[i] in multis:
phonetic_representation[i] = singles[multis.index(phonetic_representation[i])]
phonetic_representation = ''.join(phonetic_representation)
return phonetic_representation | python | def modify_phonetic_representation(self, phonetic_representation):
""" Returns a compact phonetic representation given a CMUdict-formatted representation.
:param list phonetic_representation: a phonetic representation in standard
CMUdict formatting, i.e. a list of phonemes like ['HH', 'EH0', 'L', 'OW1']
:returns: A string representing a custom phonetic representation, where each phoneme is
mapped to a single ascii character.
Changing the phonetic representation from a list to a string is useful for calculating phonetic
similarity scores.
"""
for i in range(len(phonetic_representation)):
# Remove numerical stress indicators
phonetic_representation[i] = re.sub('\d+', '', phonetic_representation[i])
multis = ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'CH', 'DH', 'EH', 'ER',
'EY', 'HH', 'IH', 'IY', 'JH', 'NG', 'OW', 'OY', 'SH',
'TH', 'UH', 'UW', 'ZH']
singles = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w']
for i in range(len(phonetic_representation)):
# Convert multicharacter phone symbols to arbitrary
# single-character symbols
if phonetic_representation[i] in multis:
phonetic_representation[i] = singles[multis.index(phonetic_representation[i])]
phonetic_representation = ''.join(phonetic_representation)
return phonetic_representation | [
"def",
"modify_phonetic_representation",
"(",
"self",
",",
"phonetic_representation",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"phonetic_representation",
")",
")",
":",
"# Remove numerical stress indicators",
"phonetic_representation",
"[",
"i",
"]",
"=",
"re",
".",
"sub",
"(",
"'\\d+'",
",",
"''",
",",
"phonetic_representation",
"[",
"i",
"]",
")",
"multis",
"=",
"[",
"'AA'",
",",
"'AE'",
",",
"'AH'",
",",
"'AO'",
",",
"'AW'",
",",
"'AY'",
",",
"'CH'",
",",
"'DH'",
",",
"'EH'",
",",
"'ER'",
",",
"'EY'",
",",
"'HH'",
",",
"'IH'",
",",
"'IY'",
",",
"'JH'",
",",
"'NG'",
",",
"'OW'",
",",
"'OY'",
",",
"'SH'",
",",
"'TH'",
",",
"'UH'",
",",
"'UW'",
",",
"'ZH'",
"]",
"singles",
"=",
"[",
"'a'",
",",
"'b'",
",",
"'c'",
",",
"'d'",
",",
"'e'",
",",
"'f'",
",",
"'g'",
",",
"'h'",
",",
"'i'",
",",
"'j'",
",",
"'k'",
",",
"'l'",
",",
"'m'",
",",
"'n'",
",",
"'o'",
",",
"'p'",
",",
"'q'",
",",
"'r'",
",",
"'s'",
",",
"'t'",
",",
"'u'",
",",
"'v'",
",",
"'w'",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"phonetic_representation",
")",
")",
":",
"# Convert multicharacter phone symbols to arbitrary",
"# single-character symbols",
"if",
"phonetic_representation",
"[",
"i",
"]",
"in",
"multis",
":",
"phonetic_representation",
"[",
"i",
"]",
"=",
"singles",
"[",
"multis",
".",
"index",
"(",
"phonetic_representation",
"[",
"i",
"]",
")",
"]",
"phonetic_representation",
"=",
"''",
".",
"join",
"(",
"phonetic_representation",
")",
"return",
"phonetic_representation"
]
| Returns a compact phonetic representation given a CMUdict-formatted representation.
:param list phonetic_representation: a phonetic representation in standard
CMUdict formatting, i.e. a list of phonemes like ['HH', 'EH0', 'L', 'OW1']
:returns: A string representing a custom phonetic representation, where each phoneme is
mapped to a single ascii character.
Changing the phonetic representation from a list to a string is useful for calculating phonetic
similarity scores.
"Returns",
"a",
"compact",
"phonetic",
"representation",
"given",
"a",
"CMUdict",
"-",
"formatted",
"representation",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L394-L424 | train |
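A standalone version of the compaction above: stress digits are stripped and each multi-character ARPAbet symbol is mapped to a single character so that string-based similarity scores can be computed later:

import re

MULTIS = ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'CH', 'DH', 'EH', 'ER', 'EY', 'HH',
          'IH', 'IY', 'JH', 'NG', 'OW', 'OY', 'SH', 'TH', 'UH', 'UW', 'ZH']
SINGLES = 'abcdefghijklmnopqrstuvw'

def compact(phones):
    out = []
    for p in phones:
        p = re.sub(r'\d+', '', p)                            # drop stress digits, e.g. OW1 -> OW
        out.append(SINGLES[MULTIS.index(p)] if p in MULTIS else p)
    return ''.join(out)

print(compact(['HH', 'EH0', 'L', 'OW1']))   # 'liLq'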
speechinformaticslab/vfclust | vfclust/vfclust.py | ParsedResponse.clean | def clean(self):
""" Removes any Units that are not applicable given the current semantic or phonetic category.
Modifies:
- self.unit_list: Removes Units from this list that do not fit into the clustering category.
It does this by either combining units to make compound words, combining units with the
same stem, or eliminating units altogether if they do not conform to the category.
If the type is phonetic, this method also generates phonetic clusters for all Unit
objects in self.unit_list.
This method performs three main tasks:
1. Removes words that do not conform to the clustering category (i.e. start with the
wrong letter, or are not an animal).
2. Combine adjacent words with the same stem into a single unit. The NLTK Porter Stemmer
is used for determining whether stems are the same.
http://www.nltk.org/_modules/nltk/stem/porter.html
3. In the case of PHONETIC clustering, compute the phonetic representation of each unit.
"""
if not self.quiet:
print
print "Preprocessing input..."
print "Raw response:"
print self.display()
if not self.quiet:
print
print "Cleaning words..."
#weed out words not starting with the right letter or in the right category
current_index = 0
while current_index < len(self.unit_list):
word = self.unit_list[current_index].text
if self.type == "PHONETIC":
test = (word.startswith(self.letter_or_category) and #starts with required letter
not word.endswith('-') and # Weed out word fragments
'_' not in word and # Weed out, e.g., 'filledpause_um'
word.lower() in self.english_words) #make sure the word is english
elif self.type == "SEMANTIC":
test = word in self.permissible_words
if not test: #if test fails remove word
self.remove_unit(index = current_index)
else: # otherwise just increment, but check to see if you're at the end of the list
current_index += 1
#combine words with the same stem
current_index = 0
finished = False
while current_index < len(self.unit_list) - 1:
#don't combine for lists of length 0, 1
if stemmer.stem(self.unit_list[current_index].text) == \
stemmer.stem(self.unit_list[current_index + 1].text):
#if same stem as next, merge next unit with current unit
self.combine_same_stem_units(index = current_index)
else: # if not same stem, increment index
current_index += 1
#get phonetic representations
if self.type == "PHONETIC":
for unit in self.unit_list:
word = unit.text
#get phonetic representation
if word in self.cmudict:
# If word in CMUdict, get its phonetic representation
phonetic_representation = self.cmudict[word]
if word not in self.cmudict:
# Else, generate a phonetic representation for it
phonetic_representation = self.generate_phonetic_representation(word)
phonetic_representation = self.modify_phonetic_representation(phonetic_representation)
unit.phonetic_representation = phonetic_representation
if not self.quiet:
print
print "Cleaned response:"
print self.display() | python | def clean(self):
""" Removes any Units that are not applicable given the current semantic or phonetic category.
Modifies:
- self.unit_list: Removes Units from this list that do not fit into the clustering category.
It does this by either combining units to make compound words, combining units with the
same stem, or eliminating units altogether if they do not conform to the category.
If the type is phonetic, this method also generates phonetic clusters for all Unit
objects in self.unit_list.
This method performs three main tasks:
1. Removes words that do not conform to the clustering category (i.e. start with the
wrong letter, or are not an animal).
2. Combine adjacent words with the same stem into a single unit. The NLTK Porter Stemmer
is used for determining whether stems are the same.
http://www.nltk.org/_modules/nltk/stem/porter.html
3. In the case of PHONETIC clustering, compute the phonetic representation of each unit.
"""
if not self.quiet:
print
print "Preprocessing input..."
print "Raw response:"
print self.display()
if not self.quiet:
print
print "Cleaning words..."
#weed out words not starting with the right letter or in the right category
current_index = 0
while current_index < len(self.unit_list):
word = self.unit_list[current_index].text
if self.type == "PHONETIC":
test = (word.startswith(self.letter_or_category) and #starts with required letter
not word.endswith('-') and # Weed out word fragments
'_' not in word and # Weed out, e.g., 'filledpause_um'
word.lower() in self.english_words) #make sure the word is english
elif self.type == "SEMANTIC":
test = word in self.permissible_words
if not test: #if test fails remove word
self.remove_unit(index = current_index)
else: # otherwise just increment, but check to see if you're at the end of the list
current_index += 1
#combine words with the same stem
current_index = 0
finished = False
while current_index < len(self.unit_list) - 1:
#don't combine for lists of length 0, 1
if stemmer.stem(self.unit_list[current_index].text) == \
stemmer.stem(self.unit_list[current_index + 1].text):
#if same stem as next, merge next unit with current unit
self.combine_same_stem_units(index = current_index)
else: # if not same stem, increment index
current_index += 1
#get phonetic representations
if self.type == "PHONETIC":
for unit in self.unit_list:
word = unit.text
#get phonetic representation
if word in self.cmudict:
# If word in CMUdict, get its phonetic representation
phonetic_representation = self.cmudict[word]
if word not in self.cmudict:
# Else, generate a phonetic representation for it
phonetic_representation = self.generate_phonetic_representation(word)
phonetic_representation = self.modify_phonetic_representation(phonetic_representation)
unit.phonetic_representation = phonetic_representation
if not self.quiet:
print
print "Cleaned response:"
print self.display() | [
"def",
"clean",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Preprocessing input...\"",
"print",
"\"Raw response:\"",
"print",
"self",
".",
"display",
"(",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Cleaning words...\"",
"#weed out words not starting with the right letter or in the right category",
"current_index",
"=",
"0",
"while",
"current_index",
"<",
"len",
"(",
"self",
".",
"unit_list",
")",
":",
"word",
"=",
"self",
".",
"unit_list",
"[",
"current_index",
"]",
".",
"text",
"if",
"self",
".",
"type",
"==",
"\"PHONETIC\"",
":",
"test",
"=",
"(",
"word",
".",
"startswith",
"(",
"self",
".",
"letter_or_category",
")",
"and",
"#starts with required letter",
"not",
"word",
".",
"endswith",
"(",
"'-'",
")",
"and",
"# Weed out word fragments",
"'_'",
"not",
"in",
"word",
"and",
"# Weed out, e.g., 'filledpause_um'",
"word",
".",
"lower",
"(",
")",
"in",
"self",
".",
"english_words",
")",
"#make sure the word is english",
"elif",
"self",
".",
"type",
"==",
"\"SEMANTIC\"",
":",
"test",
"=",
"word",
"in",
"self",
".",
"permissible_words",
"if",
"not",
"test",
":",
"#if test fails remove word",
"self",
".",
"remove_unit",
"(",
"index",
"=",
"current_index",
")",
"else",
":",
"# otherwise just increment, but check to see if you're at the end of the list",
"current_index",
"+=",
"1",
"#combine words with the same stem",
"current_index",
"=",
"0",
"finished",
"=",
"False",
"while",
"current_index",
"<",
"len",
"(",
"self",
".",
"unit_list",
")",
"-",
"1",
":",
"#don't combine for lists of length 0, 1",
"if",
"stemmer",
".",
"stem",
"(",
"self",
".",
"unit_list",
"[",
"current_index",
"]",
".",
"text",
")",
"==",
"stemmer",
".",
"stem",
"(",
"self",
".",
"unit_list",
"[",
"current_index",
"+",
"1",
"]",
".",
"text",
")",
":",
"#if same stem as next, merge next unit with current unit",
"self",
".",
"combine_same_stem_units",
"(",
"index",
"=",
"current_index",
")",
"else",
":",
"# if not same stem, increment index",
"current_index",
"+=",
"1",
"#get phonetic representations",
"if",
"self",
".",
"type",
"==",
"\"PHONETIC\"",
":",
"for",
"unit",
"in",
"self",
".",
"unit_list",
":",
"word",
"=",
"unit",
".",
"text",
"#get phonetic representation",
"if",
"word",
"in",
"self",
".",
"cmudict",
":",
"# If word in CMUdict, get its phonetic representation",
"phonetic_representation",
"=",
"self",
".",
"cmudict",
"[",
"word",
"]",
"if",
"word",
"not",
"in",
"self",
".",
"cmudict",
":",
"# Else, generate a phonetic representation for it",
"phonetic_representation",
"=",
"self",
".",
"generate_phonetic_representation",
"(",
"word",
")",
"phonetic_representation",
"=",
"self",
".",
"modify_phonetic_representation",
"(",
"phonetic_representation",
")",
"unit",
".",
"phonetic_representation",
"=",
"phonetic_representation",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Cleaned response:\"",
"print",
"self",
".",
"display",
"(",
")"
]
| Removes any Units that are not applicable given the current semantic or phonetic category.
Modifies:
- self.unit_list: Removes Units from this list that do not fit into the clustering category.
It does this by either combining units to make compound words, combining units with the
same stem, or eliminating units altogether if they do not conform to the category.
If the type is phonetic, this method also generates phonetic clusters for all Unit
objects in self.unit_list.
This method performs three main tasks:
1. Removes words that do not conform to the clustering category (i.e. start with the
wrong letter, or are not an animal).
2. Combine adjacent words with the same stem into a single unit. The NLTK Porter Stemmer
is used for determining whether stems are the same.
http://www.nltk.org/_modules/nltk/stem/porter.html
3. In the case of PHONETIC clustering, compute the phonetic representation of each unit. | [
"Removes",
"any",
"Units",
"that",
"are",
"not",
"applicable",
"given",
"the",
"current",
"semantic",
"or",
"phonetic",
"category",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L426-L504 | train |
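The phonetic-category filter from step 1 of clean(), sketched on plain strings; english_words here is a tiny hypothetical stand-in for the dictionary set vfclust loads:

english_words = {"fish", "fox", "frog"}
letter = "f"

def keep(word):
    return (word.startswith(letter)
            and not word.endswith('-')        # drop word fragments
            and '_' not in word               # drop e.g. 'filledpause_um'
            and word.lower() in english_words)

response = ["fish", "fo-", "filledpause_um", "dog", "frog"]
print([w for w in response if keep(w)])       # ['fish', 'frog']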
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.load_lsa_information | def load_lsa_information(self):
"""Loads a dictionary from disk that maps permissible words to their LSA term vectors."""
if not (49 < int(self.clustering_parameter) < 101):
raise Exception('Only LSA dimensionalities in the range 50-100' +
' are supported.')
if not self.quiet:
print "Loading LSA term vectors..."
#the protocol2 used the pickle highest protocol and this one is a smaller file
with open(os.path.join(data_path, self.category + '_' +
os.path.join('term_vector_dictionaries',
'term_vectors_dict' +
str(self.clustering_parameter) + '_cpickle.dat')),
'rb') as infile:
self.term_vectors = pickle.load(infile) | python | def load_lsa_information(self):
"""Loads a dictionary from disk that maps permissible words to their LSA term vectors."""
if not (49 < int(self.clustering_parameter) < 101):
raise Exception('Only LSA dimensionalities in the range 50-100' +
' are supported.')
if not self.quiet:
print "Loading LSA term vectors..."
#the protocol2 used the pickle highest protocol and this one is a smaller file
with open(os.path.join(data_path, self.category + '_' +
os.path.join('term_vector_dictionaries',
'term_vectors_dict' +
str(self.clustering_parameter) + '_cpickle.dat')),
'rb') as infile:
self.term_vectors = pickle.load(infile) | [
"def",
"load_lsa_information",
"(",
"self",
")",
":",
"if",
"not",
"(",
"49",
"<",
"int",
"(",
"self",
".",
"clustering_parameter",
")",
"<",
"101",
")",
":",
"raise",
"Exception",
"(",
"'Only LSA dimensionalities in the range 50-100'",
"+",
"' are supported.'",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"\"Loading LSA term vectors...\"",
"#the protocol2 used the pickle highest protocol and this one is a smaller file",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"self",
".",
"category",
"+",
"'_'",
"+",
"os",
".",
"path",
".",
"join",
"(",
"'term_vector_dictionaries'",
",",
"'term_vectors_dict'",
"+",
"str",
"(",
"self",
".",
"clustering_parameter",
")",
"+",
"'_cpickle.dat'",
")",
")",
",",
"'rb'",
")",
"as",
"infile",
":",
"self",
".",
"term_vectors",
"=",
"pickle",
".",
"load",
"(",
"infile",
")"
]
| Loads a dictionary from disk that maps permissible words to their LSA term vectors. | [
"Loads",
"a",
"dictionary",
"from",
"disk",
"that",
"maps",
"permissible",
"words",
"to",
"their",
"LSA",
"term",
"vectors",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L766-L780 | train |
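A sketch of loading a pickled term-vector dictionary like the one above and comparing two entries; the filename is a hypothetical placeholder, and cosine similarity is shown only as the usual way LSA vectors are compared, not necessarily the exact measure vfclust applies:

import math
import pickle

with open("animals_term_vectors_dict50_cpickle.dat", "rb") as infile:   # hypothetical filename
    term_vectors = pickle.load(infile)                                   # word -> LSA vector

def cosine(u, v):
    dot = sum(a * b for a, b in zip(u, v))
    norm = math.sqrt(sum(a * a for a in u)) * math.sqrt(sum(b * b for b in v))
    return dot / norm if norm else 0.0

print(cosine(term_vectors["dog"], term_vectors["cat"]))   # assumes both words have vectors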
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.get_similarity_measures | def get_similarity_measures(self):
"""Helper function for computing similarity measures."""
if not self.quiet:
print
print "Computing", self.current_similarity_measure, "similarity..."
self.compute_similarity_scores() | python | def get_similarity_measures(self):
"""Helper function for computing similarity measures."""
if not self.quiet:
print
print "Computing", self.current_similarity_measure, "similarity..."
self.compute_similarity_scores() | [
"def",
"get_similarity_measures",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Computing\"",
",",
"self",
".",
"current_similarity_measure",
",",
"\"similarity...\"",
"self",
".",
"compute_similarity_scores",
"(",
")"
]
| Helper function for computing similarity measures. | [
"Helper",
"function",
"for",
"computing",
"similarity",
"measures",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L783-L789 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.get_raw_counts | def get_raw_counts(self):
"""Determines counts for unique words, repetitions, etc using the raw text response.
Adds the following measures to the self.measures dictionary:
- COUNT_total_words: count of words (i.e. utterances with semantic content) spoken
by the subject. Filled pauses, silences, coughs, breaths, words by the interviewer,
etc. are all excluded from this count.
- COUNT_permissible_words: Number of words spoken by the subject that qualify as a
valid response according to the clustering criteria. Compound words are counted
as a single word in SEMANTIC clustering, but as two words in PHONETIC clustering.
This is implemented by tokenizing SEMANTIC clustering responses in the __init__
method before calling the current method.
- COUNT_exact_repetitions: Number of words which repeat words spoken earlier in the
response. Responses in SEMANTIC clustering are lemmatized before this function is
called, so slight variations (dog, dogs) may be counted as exact repetitions.
- COUNT_stem_repetitions: Number of words whose stems match those of words uttered earlier in
the response, according to the Porter Stemmer. For example, 'sled' and 'sledding'
have the same stem ('sled'), and 'sledding' would be counted as a stem repetition.
- COUNT_examiner_words: Number of words uttered by the examiner. These start
with "E_" in .TextGrid files.
- COUNT_filled_pauses: Number of filled pauses uttered by the subject. These begin
with "FILLEDPAUSE_" in the .TextGrid file.
- COUNT_word_fragments: Number of word fragments uttered by the subject. These
end with "-" in the .TextGrid file.
- COUNT_asides: Words spoken by the subject that do not adhere to the test criteria are
counted as asides, i.e. words that do not start with the appropriate letter or that
do not represent an animal.
- COUNT_unique_permissible_words: Number of words spoken by the subject, less asides,
stem repetitions and exact repetitions.
"""
#for making the table at the end
words = []
labels = []
words_said = set()
# Words like "polar_bear" as one semantically but two phonetically
# Uncategorizable words are counted as asides
for unit in self.parsed_response:
word = unit.text
test = False
if self.type == "PHONETIC":
test = (word.startswith(self.letter) and
"T_" not in word and "E_" not in word and "!" not in word and # Weed out tags
"FILLEDPAUSE_" not in word and # Weed out filled pauses
not word.endswith('-') and # Weed out false starts
word.lower() in self.english_words) #weed out non-words
elif self.type == "SEMANTIC":
#automatically weed out all non-semantically-appropriate responses
test = (word in self.permissible_words)
if test:
self.measures['COUNT_total_words'] += 1
self.measures['COUNT_permissible_words'] += 1
if any(word == w for w in words_said):
self.measures['COUNT_exact_repetitions'] += 1
labels.append('EXACT REPETITION')
elif any(stemmer.stem(word) == stemmer.stem(w) for w in words_said):
self.measures['COUNT_stem_repetitions'] += 1
labels.append('STEM REPETITION')
else:
labels.append('PERMISSIBLE WORD')
words_said.add(word)
words.append(word)
elif word.lower().startswith('e_'):
self.measures['COUNT_examiner_words'] += 1
words.append(word)
labels.append('EXAMINER WORD')
elif word.endswith('-'):
self.measures['COUNT_word_fragments'] += 1
words.append(word)
labels.append('WORD FRAGMENT')
elif word.lower().startswith('filledpause'):
self.measures['COUNT_filled_pauses'] += 1
words.append(word)
labels.append('FILLED PAUSE')
elif word.lower() not in ['!sil', 't_noise', 't_cough', 't_lipsmack', 't_breath']:
self.measures['COUNT_total_words'] += 1
self.measures['COUNT_asides'] += 1
words.append(word)
labels.append('ASIDE')
if not self.quiet:
print
print "Labels:"
print_table([(word,label) for word,label in zip(words,labels)])
self.measures['COUNT_unique_permissible_words'] = \
self.measures['COUNT_permissible_words'] - \
self.measures['COUNT_exact_repetitions'] - \
self.measures['COUNT_stem_repetitions']
if not self.quiet:
print
print "Counts:"
collection_measures = [x for x in self.measures if x.startswith("COUNT_")]
collection_measures.sort()
if not self.quiet:
print_table([(k, str(self.measures[k])) for k in collection_measures]) | python | def get_raw_counts(self):
"""Determines counts for unique words, repetitions, etc using the raw text response.
Adds the following measures to the self.measures dictionary:
- COUNT_total_words: count of words (i.e. utterances with semantic content) spoken
by the subject. Filled pauses, silences, coughs, breaths, words by the interviewer,
etc. are all excluded from this count.
- COUNT_permissible_words: Number of words spoken by the subject that qualify as a
valid response according to the clustering criteria. Compound words are counted
as a single word in SEMANTIC clustering, but as two words in PHONETIC clustering.
This is implemented by tokenizing SEMANTIC clustering responses in the __init__
method before calling the current method.
- COUNT_exact_repetitions: Number of words which repeat words spoken earlier in the
response. Responses in SEMANTIC clustering are lemmatized before this function is
called, so slight variations (dog, dogs) may be counted as exact repetitions.
- COUNT_stem_repetitions: Number of words whose stems match those of words uttered earlier in
the response, according to the Porter Stemmer. For example, 'sled' and 'sledding'
have the same stem ('sled'), and 'sledding' would be counted as a stem repetition.
- COUNT_examiner_words: Number of words uttered by the examiner. These start
with "E_" in .TextGrid files.
- COUNT_filled_pauses: Number of filled pauses uttered by the subject. These begin
with "FILLEDPAUSE_" in the .TextGrid file.
- COUNT_word_fragments: Number of word fragments uttered by the subject. These
end with "-" in the .TextGrid file.
- COUNT_asides: Words spoken by the subject that do not adhere to the test criteria are
counted as asides, i.e. words that do not start with the appropriate letter or that
do not represent an animal.
- COUNT_unique_permissible_words: Number of words spoken by the subject, less asides,
stem repetitions and exact repetitions.
"""
#for making the table at the end
words = []
labels = []
words_said = set()
# Words like "polar_bear" as one semantically but two phonetically
# Uncategorizable words are counted as asides
for unit in self.parsed_response:
word = unit.text
test = False
if self.type == "PHONETIC":
test = (word.startswith(self.letter) and
"T_" not in word and "E_" not in word and "!" not in word and # Weed out tags
"FILLEDPAUSE_" not in word and # Weed out filled pauses
not word.endswith('-') and # Weed out false starts
word.lower() in self.english_words) #weed out non-words
elif self.type == "SEMANTIC":
#automatically weed out all non-semantically-appropriate responses
test = (word in self.permissible_words)
if test:
self.measures['COUNT_total_words'] += 1
self.measures['COUNT_permissible_words'] += 1
if any(word == w for w in words_said):
self.measures['COUNT_exact_repetitions'] += 1
labels.append('EXACT REPETITION')
elif any(stemmer.stem(word) == stemmer.stem(w) for w in words_said):
self.measures['COUNT_stem_repetitions'] += 1
labels.append('STEM REPETITION')
else:
labels.append('PERMISSIBLE WORD')
words_said.add(word)
words.append(word)
elif word.lower().startswith('e_'):
self.measures['COUNT_examiner_words'] += 1
words.append(word)
labels.append('EXAMINER WORD')
elif word.endswith('-'):
self.measures['COUNT_word_fragments'] += 1
words.append(word)
labels.append('WORD FRAGMENT')
elif word.lower().startswith('filledpause'):
self.measures['COUNT_filled_pauses'] += 1
words.append(word)
labels.append('FILLED PAUSE')
elif word.lower() not in ['!sil', 't_noise', 't_cough', 't_lipsmack', 't_breath']:
self.measures['COUNT_total_words'] += 1
self.measures['COUNT_asides'] += 1
words.append(word)
labels.append('ASIDE')
if not self.quiet:
print
print "Labels:"
print_table([(word,label) for word,label in zip(words,labels)])
self.measures['COUNT_unique_permissible_words'] = \
self.measures['COUNT_permissible_words'] - \
self.measures['COUNT_exact_repetitions'] - \
self.measures['COUNT_stem_repetitions']
if not self.quiet:
print
print "Counts:"
collection_measures = [x for x in self.measures if x.startswith("COUNT_")]
collection_measures.sort()
if not self.quiet:
print_table([(k, str(self.measures[k])) for k in collection_measures]) | [
"def",
"get_raw_counts",
"(",
"self",
")",
":",
"#for making the table at the end",
"words",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"words_said",
"=",
"set",
"(",
")",
"# Words like \"polar_bear\" as one semantically but two phonetically",
"# Uncategorizable words are counted as asides",
"for",
"unit",
"in",
"self",
".",
"parsed_response",
":",
"word",
"=",
"unit",
".",
"text",
"test",
"=",
"False",
"if",
"self",
".",
"type",
"==",
"\"PHONETIC\"",
":",
"test",
"=",
"(",
"word",
".",
"startswith",
"(",
"self",
".",
"letter",
")",
"and",
"\"T_\"",
"not",
"in",
"word",
"and",
"\"E_\"",
"not",
"in",
"word",
"and",
"\"!\"",
"not",
"in",
"word",
"and",
"# Weed out tags",
"\"FILLEDPAUSE_\"",
"not",
"in",
"word",
"and",
"# Weed out filled pauses",
"not",
"word",
".",
"endswith",
"(",
"'-'",
")",
"and",
"# Weed out false starts",
"word",
".",
"lower",
"(",
")",
"in",
"self",
".",
"english_words",
")",
"#weed out non-words",
"elif",
"self",
".",
"type",
"==",
"\"SEMANTIC\"",
":",
"#automatically weed out all non-semantically-appropriate responses",
"test",
"=",
"(",
"word",
"in",
"self",
".",
"permissible_words",
")",
"if",
"test",
":",
"self",
".",
"measures",
"[",
"'COUNT_total_words'",
"]",
"+=",
"1",
"self",
".",
"measures",
"[",
"'COUNT_permissible_words'",
"]",
"+=",
"1",
"if",
"any",
"(",
"word",
"==",
"w",
"for",
"w",
"in",
"words_said",
")",
":",
"self",
".",
"measures",
"[",
"'COUNT_exact_repetitions'",
"]",
"+=",
"1",
"labels",
".",
"append",
"(",
"'EXACT REPETITION'",
")",
"elif",
"any",
"(",
"stemmer",
".",
"stem",
"(",
"word",
")",
"==",
"stemmer",
".",
"stem",
"(",
"w",
")",
"for",
"w",
"in",
"words_said",
")",
":",
"self",
".",
"measures",
"[",
"'COUNT_stem_repetitions'",
"]",
"+=",
"1",
"labels",
".",
"append",
"(",
"'STEM REPETITION'",
")",
"else",
":",
"labels",
".",
"append",
"(",
"'PERMISSIBLE WORD'",
")",
"words_said",
".",
"add",
"(",
"word",
")",
"words",
".",
"append",
"(",
"word",
")",
"elif",
"word",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'e_'",
")",
":",
"self",
".",
"measures",
"[",
"'COUNT_examiner_words'",
"]",
"+=",
"1",
"words",
".",
"append",
"(",
"word",
")",
"labels",
".",
"append",
"(",
"'EXAMINER WORD'",
")",
"elif",
"word",
".",
"endswith",
"(",
"'-'",
")",
":",
"self",
".",
"measures",
"[",
"'COUNT_word_fragments'",
"]",
"+=",
"1",
"words",
".",
"append",
"(",
"word",
")",
"labels",
".",
"append",
"(",
"'WORD FRAGMENT'",
")",
"elif",
"word",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'filledpause'",
")",
":",
"self",
".",
"measures",
"[",
"'COUNT_filled_pauses'",
"]",
"+=",
"1",
"words",
".",
"append",
"(",
"word",
")",
"labels",
".",
"append",
"(",
"'FILLED PAUSE'",
")",
"elif",
"word",
".",
"lower",
"(",
")",
"not",
"in",
"[",
"'!sil'",
",",
"'t_noise'",
",",
"'t_cough'",
",",
"'t_lipsmack'",
",",
"'t_breath'",
"]",
":",
"self",
".",
"measures",
"[",
"'COUNT_total_words'",
"]",
"+=",
"1",
"self",
".",
"measures",
"[",
"'COUNT_asides'",
"]",
"+=",
"1",
"words",
".",
"append",
"(",
"word",
")",
"labels",
".",
"append",
"(",
"'ASIDE'",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Labels:\"",
"print_table",
"(",
"[",
"(",
"word",
",",
"label",
")",
"for",
"word",
",",
"label",
"in",
"zip",
"(",
"words",
",",
"labels",
")",
"]",
")",
"self",
".",
"measures",
"[",
"'COUNT_unique_permissible_words'",
"]",
"=",
"self",
".",
"measures",
"[",
"'COUNT_permissible_words'",
"]",
"-",
"self",
".",
"measures",
"[",
"'COUNT_exact_repetitions'",
"]",
"-",
"self",
".",
"measures",
"[",
"'COUNT_stem_repetitions'",
"]",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"\"Counts:\"",
"collection_measures",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"measures",
"if",
"x",
".",
"startswith",
"(",
"\"COUNT_\"",
")",
"]",
"collection_measures",
".",
"sort",
"(",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print_table",
"(",
"[",
"(",
"k",
",",
"str",
"(",
"self",
".",
"measures",
"[",
"k",
"]",
")",
")",
"for",
"k",
"in",
"collection_measures",
"]",
")"
]
| Determines counts for unique words, repetitions, etc using the raw text response.
Adds the following measures to the self.measures dictionary:
- COUNT_total_words: count of words (i.e. utterances with semantic content) spoken
by the subject. Filled pauses, silences, coughs, breaths, words by the interviewer,
etc. are all excluded from this count.
- COUNT_permissible_words: Number of words spoken by the subject that qualify as a
valid response according to the clustering criteria. Compound words are counted
as a single word in SEMANTIC clustering, but as two words in PHONETIC clustering.
This is implemented by tokenizing SEMANTIC clustering responses in the __init__
method before calling the current method.
- COUNT_exact_repetitions: Number of words which repeat words spoken earlier in the
response. Responses in SEMANTIC clustering are lemmatized before this function is
called, so slight variations (dog, dogs) may be counted as exact responses.
- COUNT_stem_repetitions: Number of word stems identical to words uttered earlier in
the response, according to the Porter Stemmer. For example, 'sled' and 'sledding'
have the same stem ('sled'), and 'sledding' would be counted as a stem repetition.
- COUNT_examiner_words: Number of words uttered by the examiner. These start
with "E_" in .TextGrid files.
- COUNT_filled_pauses: Number of filled pauses uttered by the subject. These begin
with "FILLEDPAUSE_" in the .TextGrid file.
- COUNT_word_fragments: Number of word fragments uttered by the subject. These
end with "-" in the .TextGrid file.
- COUNT_asides: Words spoken by the subject that do not adhere to the test criteria are
counted as asides, i.e. words that do not start with the appropriate letter or that
do not represent an animal.
- COUNT_unique_permissible_words: Number of words spoken by the subject, less asides,
stem repetitions and exact repetitions. | [
"Determines",
"counts",
"for",
"unique",
"words",
"repetitions",
"etc",
"using",
"the",
"raw",
"text",
"response",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L836-L934 | train |
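As a quick illustration of the exact- versus stem-repetition distinction described in the get_raw_counts docstring above, here is a minimal, self-contained sketch. It assumes NLTK's PorterStemmer as a stand-in for the module-level stemmer object the source code references; the function name and sample inputs are illustrative and not part of vfclust.

from nltk.stem.porter import PorterStemmer

def repetition_label(word, words_said, stemmer=PorterStemmer()):
    # exact repetition: the identical string was already produced
    if word in words_said:
        return 'EXACT REPETITION'
    # stem repetition: a different surface form sharing the same Porter stem
    if any(stemmer.stem(word) == stemmer.stem(w) for w in words_said):
        return 'STEM REPETITION'
    return 'PERMISSIBLE WORD'

# e.g. repetition_label('sledding', {'sled'}) returns 'STEM REPETITION'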
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_similarity_score | def compute_similarity_score(self, unit1, unit2):
""" Returns the similarity score between two words.
The type of similarity scoring method used depends on the currently active
method and clustering type.
:param unit1: Unit object corresponding to the first word.
:type unit1: Unit
:param unit2: Unit object corresponding to the second word.
:type unit2: Unit
:return: Number indicating degree of similarity of the two input words.
The maximum value is 1, and a higher value indicates that the words
are more similar.
:rtype : Float
The similarity method used depends both on the type of test being performed
(SEMANTIC or PHONETIC) and the similarity method currently assigned to the
self.current_similarity_measure property of the VFClustEngine object. The
similarity measures used are the following:
- PHONETIC/"phone": the phonetic similarity score (PSS) is calculated
between the phonetic representations of the input units. It is equal
to 1 minus the Levenshtein distance between two strings, normalized
to the length of the longer string. The strings should be compact
phonetic representations of the two words.
(This method is a modification of a Levenshtein distance function
available at http://hetland.org/coding/python/levenshtein.py.)
- PHONETIC/"biphone": the binary common-biphone score (CBS) depends
on whether two words share their initial and/or final biphone
(i.e., set of two phonemes). A score of 1 indicates that two words
have the same initial and/or final biphone; a score of 0 indicates
that two words have neither the same initial nor final biphone.
This is also calculated using the phonetic representation of the
two words.
- SEMANTIC/"lsa": a semantic relatedness score (SRS) is calculated
as the COSINE of the respective term vectors for the first and
second word in an LSA space of the specified clustering_parameter.
Unlike the PHONETIC methods, this method uses the .text property
of the input Unit objects.
"""
if self.type == "PHONETIC":
word1 = unit1.phonetic_representation
word2 = unit2.phonetic_representation
if self.current_similarity_measure == "phone":
word1_length, word2_length = len(word1), len(word2)
if word1_length > word2_length:
# Make sure n <= m, to use O(min(n,m)) space
word1, word2 = word2, word1
word1_length, word2_length = word2_length, word1_length
current = range(word1_length + 1)
for i in range(1, word2_length + 1):
previous, current = current, [i] + [0] * word1_length
for j in range(1, word1_length + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if word1[j - 1] != word2[i - 1]:
change += 1
current[j] = min(add, delete, change)
phonetic_similarity_score = 1 - current[word1_length] / word2_length
return phonetic_similarity_score
elif self.current_similarity_measure == "biphone":
if word1[:2] == word2[:2] or word1[-2:] == word2[-2:]:
common_biphone_score = 1
else:
common_biphone_score = 0
return common_biphone_score
elif self.type == "SEMANTIC":
word1 = unit1.text
word2 = unit2.text
if self.current_similarity_measure == "lsa":
w1_vec = self.term_vectors[word1]
w2_vec = self.term_vectors[word2]
# semantic_relatedness_score = (numpy.dot(w1_vec, w2_vec) /
# numpy.linalg.norm(w1_vec) /
# numpy.linalg.norm(w2_vec))
dot = sum([w1*w2 for w1,w2 in zip(w1_vec, w2_vec)])
norm1 = sqrt(sum([w*w for w in w1_vec]))
norm2 = sqrt(sum([w*w for w in w2_vec]))
semantic_relatedness_score = dot/(norm1 * norm2)
return semantic_relatedness_score
elif self.current_similarity_measure == "custom":
#look it up in dict
try:
similarity = self.custom_similarity_scores[(word1,word2)]
except KeyError:
try:
similarity = self.custom_similarity_scores[(word2,word1)]
except KeyError:
if word1 == word2:
return self.same_word_similarity
#if they're the same word, they pass. This should only happen when checking with
# non-adjacent words in the same cluster
else:
return 0 #if words aren't found, they are defined as dissimilar
return similarity
return None | python | def compute_similarity_score(self, unit1, unit2):
""" Returns the similarity score between two words.
The type of similarity scoring method used depends on the currently active
method and clustering type.
:param unit1: Unit object corresponding to the first word.
:type unit1: Unit
:param unit2: Unit object corresponding to the second word.
:type unit2: Unit
:return: Number indicating degree of similarity of the two input words.
The maximum value is 1, and a higher value indicates that the words
are more similar.
:rtype : Float
The similarity method used depends both on the type of test being performed
(SEMANTIC or PHONETIC) and the similarity method currently assigned to the
self.current_similarity_measure property of the VFClustEngine object. The
similarity measures used are the following:
- PHONETIC/"phone": the phonetic similarity score (PSS) is calculated
between the phonetic representations of the input units. It is equal
to 1 minus the Levenshtein distance between two strings, normalized
to the length of the longer string. The strings should be compact
phonetic representations of the two words.
(This method is a modification of a Levenshtein distance function
available at http://hetland.org/coding/python/levenshtein.py.)
- PHONETIC/"biphone": the binary common-biphone score (CBS) depends
on whether two words share their initial and/or final biphone
(i.e., set of two phonemes). A score of 1 indicates that two words
have the same initial and/or final biphone; a score of 0 indicates
that two words have neither the same initial nor final biphone.
This is also calculated using the phonetic representation of the
two words.
- SEMANTIC/"lsa": a semantic relatedness score (SRS) is calculated
as the COSINE of the respective term vectors for the first and
second word in an LSA space of the specified clustering_parameter.
Unlike the PHONETIC methods, this method uses the .text property
of the input Unit objects.
"""
if self.type == "PHONETIC":
word1 = unit1.phonetic_representation
word2 = unit2.phonetic_representation
if self.current_similarity_measure == "phone":
word1_length, word2_length = len(word1), len(word2)
if word1_length > word2_length:
# Make sure n <= m, to use O(min(n,m)) space
word1, word2 = word2, word1
word1_length, word2_length = word2_length, word1_length
current = range(word1_length + 1)
for i in range(1, word2_length + 1):
previous, current = current, [i] + [0] * word1_length
for j in range(1, word1_length + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if word1[j - 1] != word2[i - 1]:
change += 1
current[j] = min(add, delete, change)
phonetic_similarity_score = 1 - current[word1_length] / word2_length
return phonetic_similarity_score
elif self.current_similarity_measure == "biphone":
if word1[:2] == word2[:2] or word1[-2:] == word2[-2:]:
common_biphone_score = 1
else:
common_biphone_score = 0
return common_biphone_score
elif self.type == "SEMANTIC":
word1 = unit1.text
word2 = unit2.text
if self.current_similarity_measure == "lsa":
w1_vec = self.term_vectors[word1]
w2_vec = self.term_vectors[word2]
# semantic_relatedness_score = (numpy.dot(w1_vec, w2_vec) /
# numpy.linalg.norm(w1_vec) /
# numpy.linalg.norm(w2_vec))
dot = sum([w1*w2 for w1,w2 in zip(w1_vec, w2_vec)])
norm1 = sqrt(sum([w*w for w in w1_vec]))
norm2 = sqrt(sum([w*w for w in w2_vec]))
semantic_relatedness_score = dot/(norm1 * norm2)
return semantic_relatedness_score
elif self.current_similarity_measure == "custom":
#look it up in dict
try:
similarity = self.custom_similarity_scores[(word1,word2)]
except KeyError:
try:
similarity = self.custom_similarity_scores[(word2,word1)]
except KeyError:
if word1 == word2:
return self.same_word_similarity
#if they're the same word, they pass. This should only happen when checking with
# non-adjacent words in the same cluster
else:
return 0 #if words aren't found, they are defined as dissimilar
return similarity
return None | [
"def",
"compute_similarity_score",
"(",
"self",
",",
"unit1",
",",
"unit2",
")",
":",
"if",
"self",
".",
"type",
"==",
"\"PHONETIC\"",
":",
"word1",
"=",
"unit1",
".",
"phonetic_representation",
"word2",
"=",
"unit2",
".",
"phonetic_representation",
"if",
"self",
".",
"current_similarity_measure",
"==",
"\"phone\"",
":",
"word1_length",
",",
"word2_length",
"=",
"len",
"(",
"word1",
")",
",",
"len",
"(",
"word2",
")",
"if",
"word1_length",
">",
"word2_length",
":",
"# Make sure n <= m, to use O(min(n,m)) space",
"word1",
",",
"word2",
"=",
"word2",
",",
"word1",
"word1_length",
",",
"word2_length",
"=",
"word2_length",
",",
"word1_length",
"current",
"=",
"range",
"(",
"word1_length",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"word2_length",
"+",
"1",
")",
":",
"previous",
",",
"current",
"=",
"current",
",",
"[",
"i",
"]",
"+",
"[",
"0",
"]",
"*",
"word1_length",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"word1_length",
"+",
"1",
")",
":",
"add",
",",
"delete",
"=",
"previous",
"[",
"j",
"]",
"+",
"1",
",",
"current",
"[",
"j",
"-",
"1",
"]",
"+",
"1",
"change",
"=",
"previous",
"[",
"j",
"-",
"1",
"]",
"if",
"word1",
"[",
"j",
"-",
"1",
"]",
"!=",
"word2",
"[",
"i",
"-",
"1",
"]",
":",
"change",
"+=",
"1",
"current",
"[",
"j",
"]",
"=",
"min",
"(",
"add",
",",
"delete",
",",
"change",
")",
"phonetic_similarity_score",
"=",
"1",
"-",
"current",
"[",
"word1_length",
"]",
"/",
"word2_length",
"return",
"phonetic_similarity_score",
"elif",
"self",
".",
"current_similarity_measure",
"==",
"\"biphone\"",
":",
"if",
"word1",
"[",
":",
"2",
"]",
"==",
"word2",
"[",
":",
"2",
"]",
"or",
"word1",
"[",
"-",
"2",
":",
"]",
"==",
"word2",
"[",
"-",
"2",
":",
"]",
":",
"common_biphone_score",
"=",
"1",
"else",
":",
"common_biphone_score",
"=",
"0",
"return",
"common_biphone_score",
"elif",
"self",
".",
"type",
"==",
"\"SEMANTIC\"",
":",
"word1",
"=",
"unit1",
".",
"text",
"word2",
"=",
"unit2",
".",
"text",
"if",
"self",
".",
"current_similarity_measure",
"==",
"\"lsa\"",
":",
"w1_vec",
"=",
"self",
".",
"term_vectors",
"[",
"word1",
"]",
"w2_vec",
"=",
"self",
".",
"term_vectors",
"[",
"word2",
"]",
"# semantic_relatedness_score = (numpy.dot(w1_vec, w2_vec) /",
"# numpy.linalg.norm(w1_vec) /",
"# numpy.linalg.norm(w2_vec))",
"dot",
"=",
"sum",
"(",
"[",
"w1",
"*",
"w2",
"for",
"w1",
",",
"w2",
"in",
"zip",
"(",
"w1_vec",
",",
"w2_vec",
")",
"]",
")",
"norm1",
"=",
"sqrt",
"(",
"sum",
"(",
"[",
"w",
"*",
"w",
"for",
"w",
"in",
"w1_vec",
"]",
")",
")",
"norm2",
"=",
"sqrt",
"(",
"sum",
"(",
"[",
"w",
"*",
"w",
"for",
"w",
"in",
"w2_vec",
"]",
")",
")",
"semantic_relatedness_score",
"=",
"dot",
"/",
"(",
"norm1",
"*",
"norm2",
")",
"return",
"semantic_relatedness_score",
"elif",
"self",
".",
"current_similarity_measure",
"==",
"\"custom\"",
":",
"#look it up in dict",
"try",
":",
"similarity",
"=",
"self",
".",
"custom_similarity_scores",
"[",
"(",
"word1",
",",
"word2",
")",
"]",
"except",
"KeyError",
":",
"try",
":",
"similarity",
"=",
"self",
".",
"custom_similarity_scores",
"[",
"(",
"word2",
",",
"word1",
")",
"]",
"except",
"KeyError",
":",
"if",
"word1",
"==",
"word2",
":",
"return",
"self",
".",
"same_word_similarity",
"#if they're the same word, they pass. This should only happen when checking with",
"# non-adjacent words in the same cluster",
"else",
":",
"return",
"0",
"#if words aren't found, they are defined as dissimilar",
"return",
"similarity",
"return",
"None"
]
| Returns the similarity score between two words.
The type of similarity scoring method used depends on the currently active
method and clustering type.
:param unit1: Unit object corresponding to the first word.
:type unit1: Unit
:param unit2: Unit object corresponding to the second word.
:type unit2: Unit
:return: Number indicating degree of similarity of the two input words.
The maximum value is 1, and a higher value indicates that the words
are more similar.
:rtype : Float
The similarity method used depends both on the type of test being performed
(SEMANTIC or PHONETIC) and the similarity method currently assigned to the
self.current_similarity_measure property of the VFClustEngine object. The
similarity measures used are the following:
- PHONETIC/"phone": the phonetic similarity score (PSS) is calculated
between the phonetic representations of the input units. It is equal
to 1 minus the Levenshtein distance between two strings, normalized
to the length of the longer string. The strings should be compact
phonetic representations of the two words.
(This method is a modification of a Levenshtein distance function
available at http://hetland.org/coding/python/levenshtein.py.)
- PHONETIC/"biphone": the binary common-biphone score (CBS) depends
on whether two words share their initial and/or final biphone
(i.e., set of two phonemes). A score of 1 indicates that two words
have the same initial and/or final biphone; a score of 0 indicates
that two words have neither the same initial nor final biphone.
This is also calculated using the phonetic representation of the
two words.
- SEMANTIC/"lsa": a semantic relatedness score (SRS) is calculated
as the COSINE of the respective term vectors for the first and
second word in an LSA space of the specified clustering_parameter.
Unlike the PHONETIC methods, this method uses the .text property
of the input Unit objects. | [
"Returns",
"the",
"similarity",
"score",
"between",
"two",
"words",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L943-L1042 | train |
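The docstring above defines three scoring rules; the following standalone sketch restates them as plain functions for readers who want to experiment outside vfclust. The function names and example strings are illustrative assumptions, not part of the vfclust API, and the LSA term vectors are taken as given.

from math import sqrt

def phonetic_similarity(word1, word2):
    # 1 minus the Levenshtein distance, normalized by the longer string's length
    if len(word1) > len(word2):
        word1, word2 = word2, word1
    n, m = len(word1), len(word2)
    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1] + (word1[j - 1] != word2[i - 1])
            current[j] = min(add, delete, change)
    return 1.0 - float(current[n]) / m

def common_biphone_score(word1, word2):
    # 1 if the words share their first two or last two phonemes, else 0
    return 1 if (word1[:2] == word2[:2] or word1[-2:] == word2[-2:]) else 0

def lsa_similarity(vec1, vec2):
    # cosine of the angle between two term vectors
    dot = sum(a * b for a, b in zip(vec1, vec2))
    return dot / (sqrt(sum(a * a for a in vec1)) * sqrt(sum(b * b for b in vec2)))

# e.g. phonetic_similarity('kat', 'karts') == 1 - 2/5 == 0.6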
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_similarity_scores | def compute_similarity_scores(self):
""" Produce a list of similarity scores for each contiguous pair in a response.
Calls compute_similarity_score method for every adjacent pair of words. The results
are not used in clustering; this is merely to provide a visual representation to
print to the screen.
Modifies:
- self.similarity_scores: Fills the list with similarity scores between adjacent
words. At this point this list is never used outside of this method.
"""
for i,unit in enumerate(self.parsed_response):
if i < len(self.parsed_response) - 1:
next_unit = self.parsed_response[i + 1]
self.similarity_scores.append(self.compute_similarity_score(unit, next_unit))
if not self.quiet:
print self.current_similarity_measure, "similarity scores (adjacent) -- higher is closer:"
table = [("Word 1", "Word 2", "Score")] + \
[(self.parsed_response[i].text, self.parsed_response[i + 1].text,
"{0:.3f}".format(round(self.similarity_scores[i], 2)))
for i in range(len(self.parsed_response)-1)]
print_table(table) | python | def compute_similarity_scores(self):
""" Produce a list of similarity scores for each contiguous pair in a response.
Calls compute_similarity_score method for every adjacent pair of words. The results
are not used in clustering; this is merely to provide a visual representation to
print to the screen.
Modifies:
- self.similarity_scores: Fills the list with similarity scores between adjacent
words. At this point this list is never used outside of this method.
"""
for i,unit in enumerate(self.parsed_response):
if i < len(self.parsed_response) - 1:
next_unit = self.parsed_response[i + 1]
self.similarity_scores.append(self.compute_similarity_score(unit, next_unit))
if not self.quiet:
print self.current_similarity_measure, "similarity scores (adjacent) -- higher is closer:"
table = [("Word 1", "Word 2", "Score")] + \
[(self.parsed_response[i].text, self.parsed_response[i + 1].text,
"{0:.3f}".format(round(self.similarity_scores[i], 2)))
for i in range(len(self.parsed_response)-1)]
print_table(table) | [
"def",
"compute_similarity_scores",
"(",
"self",
")",
":",
"for",
"i",
",",
"unit",
"in",
"enumerate",
"(",
"self",
".",
"parsed_response",
")",
":",
"if",
"i",
"<",
"len",
"(",
"self",
".",
"parsed_response",
")",
"-",
"1",
":",
"next_unit",
"=",
"self",
".",
"parsed_response",
"[",
"i",
"+",
"1",
"]",
"self",
".",
"similarity_scores",
".",
"append",
"(",
"self",
".",
"compute_similarity_score",
"(",
"unit",
",",
"next_unit",
")",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"self",
".",
"current_similarity_measure",
",",
"\"similarity scores (adjacent) -- higher is closer:\"",
"table",
"=",
"[",
"(",
"\"Word 1\"",
",",
"\"Word 2\"",
",",
"\"Score\"",
")",
"]",
"+",
"[",
"(",
"self",
".",
"parsed_response",
"[",
"i",
"]",
".",
"text",
",",
"self",
".",
"parsed_response",
"[",
"i",
"+",
"1",
"]",
".",
"text",
",",
"\"{0:.3f}\"",
".",
"format",
"(",
"round",
"(",
"self",
".",
"similarity_scores",
"[",
"i",
"]",
",",
"2",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"parsed_response",
")",
"-",
"1",
")",
"]",
"print_table",
"(",
"table",
")"
]
| Produce a list of similarity scores for each contiguous pair in a response.
Calls compute_similarity_score method for every adjacent pair of words. The results
are not used in clustering; this is merely to provide a visual representation to
print to the screen.
Modifies:
- self.similarity_scores: Fills the list with similarity scores between adjacent
words. At this point this list is never used outside of this method. | [
"Produce",
"a",
"list",
"of",
"similarity",
"scores",
"for",
"each",
"contiguous",
"pair",
"in",
"a",
"response",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1044-L1067 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_pairwise_similarity_score | def compute_pairwise_similarity_score(self):
"""Computes the average pairwise similarity score between all pairs of Units.
The pairwise similarity is calculated as the sum of similarity scores for all pairwise
word pairs in a response -- except any pair composed of a word and
itself -- divided by the total number of such pairs. I.e.,
the mean similarity for all pairwise word pairs.
Adds the following measures to the self.measures dictionary:
- COLLECTION_(similarity_measure)_pairwise_similarity_score_mean: mean of pairwise similarity scores
.. todo: divide by (count-1)?
"""
pairs = []
all_scores = []
for i, unit in enumerate(self.parsed_response):
for j, other_unit in enumerate(self.parsed_response):
if i != j:
pair = (i, j)
rev_pair = (j, i)
if pair not in pairs and rev_pair not in pairs:
score = self.compute_similarity_score(unit, other_unit)
pairs.append(pair)
pairs.append(rev_pair)
all_scores.append(score)
#remove any "same word" from the mean
all_scores = [i for i in all_scores if i != self.same_word_similarity]
self.measures["COLLECTION_" + self.current_similarity_measure + "_pairwise_similarity_score_mean"] = get_mean(
all_scores) \
if len(pairs) > 0 else 'NA' | python | def compute_pairwise_similarity_score(self):
"""Computes the average pairwise similarity score between all pairs of Units.
The pairwise similarity is calculated as the sum of similarity scores for all pairwise
word pairs in a response -- except any pair composed of a word and
itself -- divided by the total number of such pairs. I.e.,
the mean similarity for all pairwise word pairs.
Adds the following measures to the self.measures dictionary:
- COLLECTION_(similarity_measure)_pairwise_similarity_score_mean: mean of pairwise similarity scores
.. todo: divide by (count-1)?
"""
pairs = []
all_scores = []
for i, unit in enumerate(self.parsed_response):
for j, other_unit in enumerate(self.parsed_response):
if i != j:
pair = (i, j)
rev_pair = (j, i)
if pair not in pairs and rev_pair not in pairs:
score = self.compute_similarity_score(unit, other_unit)
pairs.append(pair)
pairs.append(rev_pair)
all_scores.append(score)
#remove any "same word" from the mean
all_scores = [i for i in all_scores if i != self.same_word_similarity]
self.measures["COLLECTION_" + self.current_similarity_measure + "_pairwise_similarity_score_mean"] = get_mean(
all_scores) \
if len(pairs) > 0 else 'NA' | [
"def",
"compute_pairwise_similarity_score",
"(",
"self",
")",
":",
"pairs",
"=",
"[",
"]",
"all_scores",
"=",
"[",
"]",
"for",
"i",
",",
"unit",
"in",
"enumerate",
"(",
"self",
".",
"parsed_response",
")",
":",
"for",
"j",
",",
"other_unit",
"in",
"enumerate",
"(",
"self",
".",
"parsed_response",
")",
":",
"if",
"i",
"!=",
"j",
":",
"pair",
"=",
"(",
"i",
",",
"j",
")",
"rev_pair",
"=",
"(",
"j",
",",
"i",
")",
"if",
"pair",
"not",
"in",
"pairs",
"and",
"rev_pair",
"not",
"in",
"pairs",
":",
"score",
"=",
"self",
".",
"compute_similarity_score",
"(",
"unit",
",",
"other_unit",
")",
"pairs",
".",
"append",
"(",
"pair",
")",
"pairs",
".",
"append",
"(",
"rev_pair",
")",
"all_scores",
".",
"append",
"(",
"score",
")",
"#remove any \"same word\" from the mean",
"all_scores",
"=",
"[",
"i",
"for",
"i",
"in",
"all_scores",
"if",
"i",
"!=",
"self",
".",
"same_word_similarity",
"]",
"self",
".",
"measures",
"[",
"\"COLLECTION_\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"_pairwise_similarity_score_mean\"",
"]",
"=",
"get_mean",
"(",
"all_scores",
")",
"if",
"len",
"(",
"pairs",
")",
">",
"0",
"else",
"'NA'"
]
| Computes the average pairwise similarity score between all pairs of Units.
The pairwise similarity is calculated as the sum of similarity scores for all pairwise
word pairs in a response -- except any pair composed of a word and
itself -- divided by the total number of words in an attempt. I.e.,
the mean similarity for all pairwise word pairs.
Adds the following measures to the self.measures dictionary:
- COLLECTION_collection_pairwise_similarity_score_mean: mean of pairwise similarity scores
.. todo: divide by (count-1)? | [
"Computes",
"the",
"average",
"pairwise",
"similarity",
"score",
"between",
"all",
"pairs",
"of",
"Units",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1231-L1261 | train |
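A minimal sketch of the mean-over-all-pairs computation described above, using itertools.combinations; similarity stands in for any of the pairwise scoring functions, and the helper name is illustrative rather than part of vfclust.

from itertools import combinations

def mean_pairwise_similarity(words, similarity):
    # average the score over every unordered pair of distinct positions
    scores = [similarity(w1, w2) for w1, w2 in combinations(words, 2)]
    return sum(scores) / len(scores) if scores else 'NA'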
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_collection_measures | def compute_collection_measures(self, no_singletons=False):
""" Computes summaries of measures using the discovered collections.
:param no_singletons: if True, omits collections of length 1 from all measures
and includes "no_singletons_" in the measure name.
Adds the following measures to the self.measures dictionary, prefaced by
COLLECTION_(similarity_measure)_(collection_type)_:
- count: number of collections
- size_mean: mean size of collections
- size_max: size of largest collection
- switch_count: number of changes between clusters
"""
prefix = "COLLECTION_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
if no_singletons:
prefix += "no_singletons_"
if no_singletons:
collection_sizes_temp = [x for x in self.collection_sizes if x != 1]
else: #include singletons
collection_sizes_temp = self.collection_sizes
self.measures[prefix + 'count'] = len(collection_sizes_temp)
self.measures[prefix + 'size_mean'] = get_mean(collection_sizes_temp) \
if self.measures[prefix + 'count'] > 0 else 0
self.measures[prefix + 'size_max'] = max(collection_sizes_temp) \
if len(collection_sizes_temp) > 0 else 0
self.measures[prefix + 'switch_count'] = self.measures[prefix + 'count'] - 1 | python | def compute_collection_measures(self, no_singletons=False):
""" Computes summaries of measures using the discovered collections.
:param no_singletons: if True, omits collections of length 1 from all measures
and includes "no_singletons_" in the measure name.
Adds the following measures to the self.measures dictionary, prefaced by
COLLECTION_(similarity_measure)_(collection_type)_:
- count: number of collections
- size_mean: mean size of collections
- size_max: size of largest collection
- switch_count: number of changes between clusters
"""
prefix = "COLLECTION_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
if no_singletons:
prefix += "no_singletons_"
if no_singletons:
collection_sizes_temp = [x for x in self.collection_sizes if x != 1]
else: #include singletons
collection_sizes_temp = self.collection_sizes
self.measures[prefix + 'count'] = len(collection_sizes_temp)
self.measures[prefix + 'size_mean'] = get_mean(collection_sizes_temp) \
if self.measures[prefix + 'count'] > 0 else 0
self.measures[prefix + 'size_max'] = max(collection_sizes_temp) \
if len(collection_sizes_temp) > 0 else 0
self.measures[prefix + 'switch_count'] = self.measures[prefix + 'count'] - 1 | [
"def",
"compute_collection_measures",
"(",
"self",
",",
"no_singletons",
"=",
"False",
")",
":",
"prefix",
"=",
"\"COLLECTION_\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"_\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\"_\"",
"if",
"no_singletons",
":",
"prefix",
"+=",
"\"no_singletons_\"",
"if",
"no_singletons",
":",
"collection_sizes_temp",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"collection_sizes",
"if",
"x",
"!=",
"1",
"]",
"else",
":",
"#include singletons",
"collection_sizes_temp",
"=",
"self",
".",
"collection_sizes",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'count'",
"]",
"=",
"len",
"(",
"collection_sizes_temp",
")",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'size_mean'",
"]",
"=",
"get_mean",
"(",
"collection_sizes_temp",
")",
"if",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'count'",
"]",
">",
"0",
"else",
"0",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'size_max'",
"]",
"=",
"max",
"(",
"collection_sizes_temp",
")",
"if",
"len",
"(",
"collection_sizes_temp",
")",
">",
"0",
"else",
"0",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'switch_count'",
"]",
"=",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'count'",
"]",
"-",
"1"
]
| Computes summaries of measures using the discovered collections.
:param no_singletons: if True, omits collections of length 1 from all measures
and includes "no_singletons_" in the measure name.
Adds the following measures to the self.measures dictionary, prefaced by
COLLECTION_(similarity_measure)_(collection_type)_:
- count: number of collections
- size_mean: mean size of collections
- size_max: size of largest collection
- switch_count: number of changes between clusters | [
"Computes",
"summaries",
"of",
"measures",
"using",
"the",
"discovered",
"collections",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1264-L1296 | train |
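For reference, the four summary statistics listed in the docstring above reduce to the following sketch over a list of collection sizes (a hypothetical helper, not vfclust code; it mirrors the source's convention of returning 0 for empty input).

def collection_summary(sizes, no_singletons=False):
    if no_singletons:
        sizes = [s for s in sizes if s != 1]
    count = len(sizes)
    return {
        'count': count,
        'size_mean': float(sum(sizes)) / count if count else 0,
        'size_max': max(sizes) if sizes else 0,
        'switch_count': count - 1,
    }

# e.g. collection_summary([3, 1, 2, 1]) -> {'count': 4, 'size_mean': 1.75, 'size_max': 3, 'switch_count': 3}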
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_duration_measures | def compute_duration_measures(self):
""" Helper function for computing measures derived from timing information.
These are only computed if the response is textgrid with timing information.
All times are in seconds.
"""
prefix = "TIMING_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
if self.response_format == 'TextGrid':
self.compute_response_vowel_duration("TIMING_") #prefixes don't need collection or measure type
self.compute_response_continuant_duration("TIMING_")
self.compute_between_collection_interval_duration(prefix)
self.compute_within_collection_interval_duration(prefix)
#these give different values depending on whether singleton clusters are counted or not
self.compute_within_collection_vowel_duration(prefix, no_singletons = True)
self.compute_within_collection_continuant_duration(prefix, no_singletons = True)
self.compute_within_collection_vowel_duration(prefix, no_singletons = False)
self.compute_within_collection_continuant_duration(prefix, no_singletons = False) | python | def compute_duration_measures(self):
""" Helper function for computing measures derived from timing information.
These are only computed if the response is textgrid with timing information.
All times are in seconds.
"""
prefix = "TIMING_" + self.current_similarity_measure + "_" + self.current_collection_type + "_"
if self.response_format == 'TextGrid':
self.compute_response_vowel_duration("TIMING_") #prefixes don't need collection or measure type
self.compute_response_continuant_duration("TIMING_")
self.compute_between_collection_interval_duration(prefix)
self.compute_within_collection_interval_duration(prefix)
#these give different values depending on whether singleton clusters are counted or not
self.compute_within_collection_vowel_duration(prefix, no_singletons = True)
self.compute_within_collection_continuant_duration(prefix, no_singletons = True)
self.compute_within_collection_vowel_duration(prefix, no_singletons = False)
self.compute_within_collection_continuant_duration(prefix, no_singletons = False) | [
"def",
"compute_duration_measures",
"(",
"self",
")",
":",
"prefix",
"=",
"\"TIMING_\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"_\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\"_\"",
"if",
"self",
".",
"response_format",
"==",
"'TextGrid'",
":",
"self",
".",
"compute_response_vowel_duration",
"(",
"\"TIMING_\"",
")",
"#prefixes don't need collection or measure type",
"self",
".",
"compute_response_continuant_duration",
"(",
"\"TIMING_\"",
")",
"self",
".",
"compute_between_collection_interval_duration",
"(",
"prefix",
")",
"self",
".",
"compute_within_collection_interval_duration",
"(",
"prefix",
")",
"#these give different values depending on whether singleton clusters are counted or not",
"self",
".",
"compute_within_collection_vowel_duration",
"(",
"prefix",
",",
"no_singletons",
"=",
"True",
")",
"self",
".",
"compute_within_collection_continuant_duration",
"(",
"prefix",
",",
"no_singletons",
"=",
"True",
")",
"self",
".",
"compute_within_collection_vowel_duration",
"(",
"prefix",
",",
"no_singletons",
"=",
"False",
")",
"self",
".",
"compute_within_collection_continuant_duration",
"(",
"prefix",
",",
"no_singletons",
"=",
"False",
")"
]
| Helper function for computing measures derived from timing information.
These are only computed if the response is textgrid with timing information.
All times are in seconds. | [
"Helper",
"function",
"for",
"computing",
"measures",
"derived",
"from",
"timing",
"information",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1305-L1326 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_response_vowel_duration | def compute_response_vowel_duration(self, prefix):
"""Computes mean vowel duration in entire response.
:param str prefix: Prefix for the key entry in self.measures.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_response_vowel_duration_mean: average
vowel duration of all vowels in the response.
"""
durations = []
for word in self.full_timed_response:
if word.phones:
for phone in word.phones:
if phone.string in self.vowels:
durations.append(phone.end - phone.start)
self.measures[prefix + 'response_vowel_duration_mean'] = get_mean(durations) \
if len(durations) > 0 else 'NA'
if not self.quiet:
print "Mean response vowel duration:", self.measures[prefix + 'response_vowel_duration_mean'] | python | def compute_response_vowel_duration(self, prefix):
"""Computes mean vowel duration in entire response.
:param str prefix: Prefix for the key entry in self.measures.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_response_vowel_duration_mean: average
vowel duration of all vowels in the response.
"""
durations = []
for word in self.full_timed_response:
if word.phones:
for phone in word.phones:
if phone.string in self.vowels:
durations.append(phone.end - phone.start)
self.measures[prefix + 'response_vowel_duration_mean'] = get_mean(durations) \
if len(durations) > 0 else 'NA'
if not self.quiet:
print "Mean response vowel duration:", self.measures[prefix + 'response_vowel_duration_mean'] | [
"def",
"compute_response_vowel_duration",
"(",
"self",
",",
"prefix",
")",
":",
"durations",
"=",
"[",
"]",
"for",
"word",
"in",
"self",
".",
"full_timed_response",
":",
"if",
"word",
".",
"phones",
":",
"for",
"phone",
"in",
"word",
".",
"phones",
":",
"if",
"phone",
".",
"string",
"in",
"self",
".",
"vowels",
":",
"durations",
".",
"append",
"(",
"phone",
".",
"end",
"-",
"phone",
".",
"start",
")",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'response_vowel_duration_mean'",
"]",
"=",
"get_mean",
"(",
"durations",
")",
"if",
"len",
"(",
"durations",
")",
">",
"0",
"else",
"'NA'",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"\"Mean response vowel duration:\"",
",",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'response_vowel_duration_mean'",
"]"
]
| Computes mean vowel duration in entire response.
:param str prefix: Prefix for the key entry in self.measures.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_response_vowel_duration_mean: average
vowel duration of all vowels in the response. | [
"Computes",
"mean",
"vowel",
"duration",
"in",
"entire",
"response",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1329-L1349 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_between_collection_interval_duration | def compute_between_collection_interval_duration(self, prefix):
"""Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters
"""
durations = [] # duration of each collection
for collection in self.collection_list:
# Entry, with timing, in timed_response for first word in collection
start = collection[0].start_time
# Entry, with timing, in timed_response for last word in collection
end = collection[-1].end_time
durations.append((start, end))
# calculate between-collection intervals
interstices = [durations[i + 1][0] - durations[i][1] for i, d in enumerate(durations[:-1])]
# Replace negative interstices (for overlapping clusters) with
# interstices of duration 0
for i, entry in enumerate(interstices):
if interstices[i] < 0:
interstices[i] = 0
self.measures[prefix + 'between_collection_interval_duration_mean'] = get_mean(interstices) \
if len(interstices) > 0 else 'NA'
if not self.quiet:
print
print self.current_similarity_measure + " between-" + self.current_collection_type + " durations"
table = [(self.current_collection_type + " 1 (start,end)", "Interval",
self.current_collection_type + " 2 (start,end)")] + \
[(str(d1), str(i1), str(d2)) for d1, i1, d2 in zip(durations[:-1], interstices, durations[1:])]
print_table(table)
print
print "Mean " + self.current_similarity_measure + " between-" + self.current_collection_type + " duration", \
self.measures[prefix + 'between_collection_interval_duration_mean'] | python | def compute_between_collection_interval_duration(self, prefix):
"""Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters
"""
durations = [] # duration of each collection
for collection in self.collection_list:
# Entry, with timing, in timed_response for first word in collection
start = collection[0].start_time
# Entry, with timing, in timed_response for last word in collection
end = collection[-1].end_time
durations.append((start, end))
# calculate between-collection intervals
interstices = [durations[i + 1][0] - durations[i][1] for i, d in enumerate(durations[:-1])]
# Replace negative interstices (for overlapping clusters) with
# interstices of duration 0
for i, entry in enumerate(interstices):
if interstices[i] < 0:
interstices[i] = 0
self.measures[prefix + 'between_collection_interval_duration_mean'] = get_mean(interstices) \
if len(interstices) > 0 else 'NA'
if not self.quiet:
print
print self.current_similarity_measure + " between-" + self.current_collection_type + " durations"
table = [(self.current_collection_type + " 1 (start,end)", "Interval",
self.current_collection_type + " 2 (start,end)")] + \
[(str(d1), str(i1), str(d2)) for d1, i1, d2 in zip(durations[:-1], interstices, durations[1:])]
print_table(table)
print
print "Mean " + self.current_similarity_measure + " between-" + self.current_collection_type + " duration", \
self.measures[prefix + 'between_collection_interval_duration_mean'] | [
"def",
"compute_between_collection_interval_duration",
"(",
"self",
",",
"prefix",
")",
":",
"durations",
"=",
"[",
"]",
"# duration of each collection",
"for",
"collection",
"in",
"self",
".",
"collection_list",
":",
"# Entry, with timing, in timed_response for first word in collection",
"start",
"=",
"collection",
"[",
"0",
"]",
".",
"start_time",
"# Entry, with timing, in timed_response for last word in collection",
"end",
"=",
"collection",
"[",
"-",
"1",
"]",
".",
"end_time",
"durations",
".",
"append",
"(",
"(",
"start",
",",
"end",
")",
")",
"# calculation between-duration intervals",
"interstices",
"=",
"[",
"durations",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"-",
"durations",
"[",
"i",
"]",
"[",
"1",
"]",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"durations",
"[",
":",
"-",
"1",
"]",
")",
"]",
"# Replace negative interstices (for overlapping clusters) with",
"# interstices of duration 0",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"interstices",
")",
":",
"if",
"interstices",
"[",
"i",
"]",
"<",
"0",
":",
"interstices",
"[",
"i",
"]",
"=",
"0",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'between_collection_interval_duration_mean'",
"]",
"=",
"get_mean",
"(",
"interstices",
")",
"if",
"len",
"(",
"interstices",
")",
">",
"0",
"else",
"'NA'",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"self",
".",
"current_similarity_measure",
"+",
"\" between-\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\" durations\"",
"table",
"=",
"[",
"(",
"self",
".",
"current_collection_type",
"+",
"\" 1 (start,end)\"",
",",
"\"Interval\"",
",",
"self",
".",
"current_collection_type",
"+",
"\" 2 (start,end)\"",
")",
"]",
"+",
"[",
"(",
"str",
"(",
"d1",
")",
",",
"str",
"(",
"i1",
")",
",",
"str",
"(",
"d2",
")",
")",
"for",
"d1",
",",
"i1",
",",
"d2",
"in",
"zip",
"(",
"durations",
"[",
":",
"-",
"1",
"]",
",",
"interstices",
",",
"durations",
"[",
"1",
":",
"]",
")",
"]",
"print_table",
"(",
"table",
")",
"print",
"print",
"\"Mean \"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\" between-\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\" duration\"",
",",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'between_collection_interval_duration_mean'",
"]"
]
| Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters | [
"Calculates",
"BETWEEN",
"-",
"collection",
"intervals",
"for",
"the",
"current",
"collection",
"and",
"measure",
"type",
"and",
"takes",
"their",
"mean",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1377-L1425 | train |
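The interval rule described above (the gap between the end of one collection and the start of the next, with negative gaps from overlapping collections clamped to 0) can be sketched as follows; the function name and sample spans are illustrative only.

def between_collection_intervals(spans):
    # spans: ordered list of (start_time, end_time) tuples, one per collection
    return [max(0, nxt_start - prev_end)
            for (_, prev_end), (nxt_start, _) in zip(spans, spans[1:])]

# e.g. between_collection_intervals([(0.0, 1.0), (1.5, 2.0), (1.9, 3.0)]) -> [0.5, 0]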
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_within_collection_interval_duration | def compute_within_collection_interval_duration(self, prefix):
"""Calculates mean between-word duration WITHIN collections.
:param str prefix: Prefix for the key entry in self.measures.
Calculates the mean time between the end of each word in the collection
and the beginning of the next word. Note that these times do not necessarily
reflect pauses, as collection members could be separated by asides or other noises.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_interval_duration_mean
"""
interstices = []
for cluster in self.collection_list:
# Make sure cluster is not a singleton
if len(cluster) > 1:
for i in range(len(cluster)):
if i != len(cluster) - 1:
interstice = cluster[i+1].start_time - cluster[i].end_time
interstices.append(interstice)
self.measures[prefix + 'within_collection_interval_duration_mean'] = get_mean(interstices) \
if len(interstices) > 0 else 'NA'
if not self.quiet:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" between-word duration:", self.measures[prefix + 'within_collection_interval_duration_mean'] | python | def compute_within_collection_interval_duration(self, prefix):
"""Calculates mean between-word duration WITHIN collections.
:param str prefix: Prefix for the key entry in self.measures.
Calculates the mean time between the end of each word in the collection
and the beginning of the next word. Note that these times do not necessarily
reflect pauses, as collection members could be separated by asides or other noises.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_interval_duration_mean
"""
interstices = []
for cluster in self.collection_list:
# Make sure cluster is not a singleton
if len(cluster) > 1:
for i in range(len(cluster)):
if i != len(cluster) - 1:
interstice = cluster[i+1].start_time - cluster[i].end_time
interstices.append(interstice)
self.measures[prefix + 'within_collection_interval_duration_mean'] = get_mean(interstices) \
if len(interstices) > 0 else 'NA'
if not self.quiet:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" between-word duration:", self.measures[prefix + 'within_collection_interval_duration_mean'] | [
"def",
"compute_within_collection_interval_duration",
"(",
"self",
",",
"prefix",
")",
":",
"interstices",
"=",
"[",
"]",
"for",
"cluster",
"in",
"self",
".",
"collection_list",
":",
"# Make sure cluster is not a singleton",
"if",
"len",
"(",
"cluster",
")",
">",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"cluster",
")",
")",
":",
"if",
"i",
"!=",
"len",
"(",
"cluster",
")",
"-",
"1",
":",
"interstice",
"=",
"cluster",
"[",
"i",
"+",
"1",
"]",
".",
"start_time",
"-",
"cluster",
"[",
"i",
"]",
".",
"end_time",
"interstices",
".",
"append",
"(",
"interstice",
")",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'within_collection_interval_duration_mean'",
"]",
"=",
"get_mean",
"(",
"interstices",
")",
"if",
"len",
"(",
"interstices",
")",
">",
"0",
"else",
"'NA'",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"\"Mean within-\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"-\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\" between-word duration:\"",
",",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'within_collection_interval_duration_mean'",
"]"
]
| Calculates mean between-word duration WITHIN collections.
:param str prefix: Prefix for the key entry in self.measures.
Calculates the mean time between the end of each word in the collection
and the beginning of the next word. Note that these times do not necessarily
reflect pauses, as collection members could be separated by asides or other noises.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_interval_duration_mean | [
"Calculates",
"mean",
"between",
"-",
"word",
"duration",
"WITHIN",
"collections",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1430-L1457 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.compute_within_collection_vowel_duration | def compute_within_collection_vowel_duration(self, prefix, no_singletons=False):
""" Computes the mean duration of vowels from Units within clusters.
:param str prefix: Prefix for the key entry in self.measures
:param bool no_singletons: If True, excludes collections of length 1 from the calculation;
if False, they are included and "no_singletons_" is appended to the prefix
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean
"""
if no_singletons:
min_size = 2
else:
prefix += "no_singletons_"
min_size = 1
durations = []
for cluster in self.collection_list:
if len(cluster) >= min_size:
for word in cluster:
word = self.full_timed_response[word.index_in_timed_response]
for phone in word.phones:
if phone.string in self.vowels:
durations.append(phone.end - phone.start)
self.measures[prefix + 'within_collection_vowel_duration_mean'] = get_mean(durations) \
if len(durations) > 0 else 'NA'
if not self.quiet:
if no_singletons:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, excluding singletons:", \
self.measures[prefix + 'within_collection_vowel_duration_mean']
else:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, including singletons:", \
self.measures[prefix + 'within_collection_vowel_duration_mean'] | python | def compute_within_collection_vowel_duration(self, prefix, no_singletons=False):
""" Computes the mean duration of vowels from Units within clusters.
:param str prefix: Prefix for the key entry in self.measures
:param bool no_singletons: If True, excludes collections of length 1 from the calculation;
if False, they are included and "no_singletons_" is appended to the prefix
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean
"""
if no_singletons:
min_size = 2
else:
prefix += "no_singletons_"
min_size = 1
durations = []
for cluster in self.collection_list:
if len(cluster) >= min_size:
for word in cluster:
word = self.full_timed_response[word.index_in_timed_response]
for phone in word.phones:
if phone.string in self.vowels:
durations.append(phone.end - phone.start)
self.measures[prefix + 'within_collection_vowel_duration_mean'] = get_mean(durations) \
if len(durations) > 0 else 'NA'
if not self.quiet:
if no_singletons:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, excluding singletons:", \
self.measures[prefix + 'within_collection_vowel_duration_mean']
else:
print "Mean within-" + self.current_similarity_measure + "-" + self.current_collection_type + \
" vowel duration, including singletons:", \
self.measures[prefix + 'within_collection_vowel_duration_mean'] | [
"def",
"compute_within_collection_vowel_duration",
"(",
"self",
",",
"prefix",
",",
"no_singletons",
"=",
"False",
")",
":",
"if",
"no_singletons",
":",
"min_size",
"=",
"2",
"else",
":",
"prefix",
"+=",
"\"no_singletons_\"",
"min_size",
"=",
"1",
"durations",
"=",
"[",
"]",
"for",
"cluster",
"in",
"self",
".",
"collection_list",
":",
"if",
"len",
"(",
"cluster",
")",
">=",
"min_size",
":",
"for",
"word",
"in",
"cluster",
":",
"word",
"=",
"self",
".",
"full_timed_response",
"[",
"word",
".",
"index_in_timed_response",
"]",
"for",
"phone",
"in",
"word",
".",
"phones",
":",
"if",
"phone",
".",
"string",
"in",
"self",
".",
"vowels",
":",
"durations",
".",
"append",
"(",
"phone",
".",
"end",
"-",
"phone",
".",
"start",
")",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'within_collection_vowel_duration_mean'",
"]",
"=",
"get_mean",
"(",
"durations",
")",
"if",
"len",
"(",
"durations",
")",
">",
"0",
"else",
"'NA'",
"if",
"not",
"self",
".",
"quiet",
":",
"if",
"no_singletons",
":",
"print",
"\"Mean within-\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"-\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\" vowel duration, excluding singletons:\"",
",",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'within_collection_vowel_duration_mean'",
"]",
"else",
":",
"print",
"\"Mean within-\"",
"+",
"self",
".",
"current_similarity_measure",
"+",
"\"-\"",
"+",
"self",
".",
"current_collection_type",
"+",
"\" vowel duration, including singletons:\"",
",",
"self",
".",
"measures",
"[",
"prefix",
"+",
"'within_collection_vowel_duration_mean'",
"]"
]
| Computes the mean duration of vowels from Units within clusters.
:param str prefix: Prefix for the key entry in self.measures
:param bool no_singletons: If False, excludes collections of length 1 from calculations
and adds "no_singletons" to the prefix
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_within_collection_vowel_duration_mean | [
"Computes",
"the",
"mean",
"duration",
"of",
"vowels",
"from",
"Units",
"within",
"clusters",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1461-L1498 | train |
speechinformaticslab/vfclust | vfclust/vfclust.py | VFClustEngine.print_output | def print_output(self):
""" Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed done (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively.
"""
if self.response_format == "csv":
for key in self.measures:
if "TIMING_" in key:
self.measures[key] = "NA"
if not self.quiet:
print
print self.type.upper() + " RESULTS:"
keys = [e for e in self.measures if 'COUNT_' in e]
keys.sort()
print "Counts:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
keys = [e for e in self.measures if 'COLLECTION_' in e]
keys.sort()
print
print "Collection measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
if self.response_format == "TextGrid":
keys = [e for e in self.measures if 'TIMING_' in e]
keys.sort()
print
print "Time-based measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
#write to CSV file
if self.target_file:
with open(self.target_file, 'w') as outfile:
header = ['file_id'] + \
[self.type + "_" + e for e in self.measures if 'COUNT_' in e] + \
[self.type + "_" + e for e in self.measures if 'COLLECTION_' in e] + \
[self.type + "_" + e for e in self.measures if 'TIMING_' in e]
writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
#the split/join gets rid of the type appended just above
writer.writerow([self.measures["file_id"]] +
[self.measures["_".join(e.split('_')[1:])] for e in header[1:]]) | python | def print_output(self):
""" Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed done (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively.
"""
if self.response_format == "csv":
for key in self.measures:
if "TIMING_" in key:
self.measures[key] = "NA"
if not self.quiet:
print
print self.type.upper() + " RESULTS:"
keys = [e for e in self.measures if 'COUNT_' in e]
keys.sort()
print "Counts:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
keys = [e for e in self.measures if 'COLLECTION_' in e]
keys.sort()
print
print "Collection measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
if self.response_format == "TextGrid":
keys = [e for e in self.measures if 'TIMING_' in e]
keys.sort()
print
print "Time-based measures:"
print_table([(entry, str(self.measures[entry])) for entry in keys])
#write to CSV file
if self.target_file:
with open(self.target_file, 'w') as outfile:
header = ['file_id'] + \
[self.type + "_" + e for e in self.measures if 'COUNT_' in e] + \
[self.type + "_" + e for e in self.measures if 'COLLECTION_' in e] + \
[self.type + "_" + e for e in self.measures if 'TIMING_' in e]
writer = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
#the split/join gets rid of the type appended just above
writer.writerow([self.measures["file_id"]] +
[self.measures["_".join(e.split('_')[1:])] for e in header[1:]]) | [
"def",
"print_output",
"(",
"self",
")",
":",
"if",
"self",
".",
"response_format",
"==",
"\"csv\"",
":",
"for",
"key",
"in",
"self",
".",
"measures",
":",
"if",
"\"TIMING_\"",
"in",
"key",
":",
"self",
".",
"measures",
"[",
"key",
"]",
"=",
"\"NA\"",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"print",
"self",
".",
"type",
".",
"upper",
"(",
")",
"+",
"\" RESULTS:\"",
"keys",
"=",
"[",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'COUNT_'",
"in",
"e",
"]",
"keys",
".",
"sort",
"(",
")",
"print",
"\"Counts:\"",
"print_table",
"(",
"[",
"(",
"entry",
",",
"str",
"(",
"self",
".",
"measures",
"[",
"entry",
"]",
")",
")",
"for",
"entry",
"in",
"keys",
"]",
")",
"keys",
"=",
"[",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'COLLECTION_'",
"in",
"e",
"]",
"keys",
".",
"sort",
"(",
")",
"print",
"print",
"\"Collection measures:\"",
"print_table",
"(",
"[",
"(",
"entry",
",",
"str",
"(",
"self",
".",
"measures",
"[",
"entry",
"]",
")",
")",
"for",
"entry",
"in",
"keys",
"]",
")",
"if",
"self",
".",
"response_format",
"==",
"\"TextGrid\"",
":",
"keys",
"=",
"[",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'TIMING_'",
"in",
"e",
"]",
"keys",
".",
"sort",
"(",
")",
"print",
"print",
"\"Time-based measures:\"",
"print_table",
"(",
"[",
"(",
"entry",
",",
"str",
"(",
"self",
".",
"measures",
"[",
"entry",
"]",
")",
")",
"for",
"entry",
"in",
"keys",
"]",
")",
"#write to CSV file",
"if",
"self",
".",
"target_file",
":",
"with",
"open",
"(",
"self",
".",
"target_file",
",",
"'w'",
")",
"as",
"outfile",
":",
"header",
"=",
"[",
"'file_id'",
"]",
"+",
"[",
"self",
".",
"type",
"+",
"\"_\"",
"+",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'COUNT_'",
"in",
"e",
"]",
"+",
"[",
"self",
".",
"type",
"+",
"\"_\"",
"+",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'COLLECTION_'",
"in",
"e",
"]",
"+",
"[",
"self",
".",
"type",
"+",
"\"_\"",
"+",
"e",
"for",
"e",
"in",
"self",
".",
"measures",
"if",
"'TIMING_'",
"in",
"e",
"]",
"writer",
"=",
"csv",
".",
"writer",
"(",
"outfile",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"writer",
".",
"writerow",
"(",
"header",
")",
"#the split/join gets rid of the type appended just above",
"writer",
".",
"writerow",
"(",
"[",
"self",
".",
"measures",
"[",
"\"file_id\"",
"]",
"]",
"+",
"[",
"self",
".",
"measures",
"[",
"\"_\"",
".",
"join",
"(",
"e",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
":",
"]",
")",
"]",
"for",
"e",
"in",
"header",
"[",
"1",
":",
"]",
"]",
")"
]
| Outputs final list of measures to screen a csv file.
The .csv file created has the same name as the input file, with
"vfclust_TYPE_CATEGORY" appended to the filename, where TYPE indicates
the type of task performed done (SEMANTIC or PHONETIC) and CATEGORY
indicates the category requirement of the stimulus (i.e. 'f' or 'animals'
for phonetic and semantic fluency test, respectively. | [
"Outputs",
"final",
"list",
"of",
"measures",
"to",
"screen",
"a",
"csv",
"file",
"."
]
| 7ca733dea4782c828024765726cce65de095d33c | https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L1546-L1593 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | PIDNodeVersioning.insert_child | def insert_child(self, child_pid, index=-1):
    """Insert a Version child PID."""
    if child_pid.status != PIDStatus.REGISTERED:
        raise PIDRelationConsistencyError(
            "Version PIDs should have status 'REGISTERED'. Use "
            "insert_draft_child to insert 'RESERVED' draft PID.")
    with db.session.begin_nested():
        # if there is a draft and "child" is inserted as the last version,
        # it should be inserted before the draft.
        draft = self.draft_child
        if draft and index == -1:
            index = self.index(draft)
        super(PIDNodeVersioning, self).insert_child(child_pid, index=index)
        self.update_redirect() | python | def insert_child(self, child_pid, index=-1):
    """Insert a Version child PID."""
    if child_pid.status != PIDStatus.REGISTERED:
        raise PIDRelationConsistencyError(
            "Version PIDs should have status 'REGISTERED'. Use "
            "insert_draft_child to insert 'RESERVED' draft PID.")
    with db.session.begin_nested():
        # if there is a draft and "child" is inserted as the last version,
        # it should be inserted before the draft.
        draft = self.draft_child
        if draft and index == -1:
            index = self.index(draft)
        super(PIDNodeVersioning, self).insert_child(child_pid, index=index)
        self.update_redirect() | [
"def",
"insert_child",
"(",
"self",
",",
"child_pid",
",",
"index",
"=",
"-",
"1",
")",
":",
"if",
"child_pid",
".",
"status",
"!=",
"PIDStatus",
".",
"REGISTERED",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Version PIDs should have status 'REGISTERED'. Use \"",
"\"insert_draft_child to insert 'RESERVED' draft PID.\"",
")",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"# if there is a draft and \"child\" is inserted as the last version,",
"# it should be inserted before the draft.",
"draft",
"=",
"self",
".",
"draft_child",
"if",
"draft",
"and",
"index",
"==",
"-",
"1",
":",
"index",
"=",
"self",
".",
"index",
"(",
"draft",
")",
"super",
"(",
"PIDNodeVersioning",
",",
"self",
")",
".",
"insert_child",
"(",
"child_pid",
",",
"index",
"=",
"index",
")",
"self",
".",
"update_redirect",
"(",
")"
]
| Insert a Version child PID. | [
"Insert",
"a",
"Version",
"child",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L79-L92 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | PIDNodeVersioning.remove_child | def remove_child(self, child_pid):
    """Remove a Version child PID.
    Extends the base method call by redirecting from the parent to the
    last child.
    """
    if child_pid.status == PIDStatus.RESERVED:
        raise PIDRelationConsistencyError(
            "Version PIDs should not have status 'RESERVED'. Use "
            "remove_draft_child to remove a draft PID.")
    with db.session.begin_nested():
        super(PIDNodeVersioning, self).remove_child(child_pid,
                                                    reorder=True)
        self.update_redirect() | python | def remove_child(self, child_pid):
    """Remove a Version child PID.
    Extends the base method call by redirecting from the parent to the
    last child.
    """
    if child_pid.status == PIDStatus.RESERVED:
        raise PIDRelationConsistencyError(
            "Version PIDs should not have status 'RESERVED'. Use "
            "remove_draft_child to remove a draft PID.")
    with db.session.begin_nested():
        super(PIDNodeVersioning, self).remove_child(child_pid,
                                                    reorder=True)
        self.update_redirect() | [
"def",
"remove_child",
"(",
"self",
",",
"child_pid",
")",
":",
"if",
"child_pid",
".",
"status",
"==",
"PIDStatus",
".",
"RESERVED",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Version PIDs should not have status 'RESERVED'. Use \"",
"\"remove_draft_child to remove a draft PID.\"",
")",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"super",
"(",
"PIDNodeVersioning",
",",
"self",
")",
".",
"remove_child",
"(",
"child_pid",
",",
"reorder",
"=",
"True",
")",
"self",
".",
"update_redirect",
"(",
")"
]
| Remove a Version child PID.
Extends the base method call by redirecting from the parent to the
last child. | [
"Remove",
"a",
"Version",
"child",
"PID",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L94-L107 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | PIDNodeVersioning.insert_draft_child | def insert_draft_child(self, child_pid):
    """Insert a draft child to versioning."""
    if child_pid.status != PIDStatus.RESERVED:
        raise PIDRelationConsistencyError(
            "Draft child should have status 'RESERVED'")
    if not self.draft_child:
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).insert_child(child_pid,
                                                        index=-1)
    else:
        raise PIDRelationConsistencyError(
            "Draft child already exists for this relation: {0}".format(
                self.draft_child)) | python | def insert_draft_child(self, child_pid):
    """Insert a draft child to versioning."""
    if child_pid.status != PIDStatus.RESERVED:
        raise PIDRelationConsistencyError(
            "Draft child should have status 'RESERVED'")
    if not self.draft_child:
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).insert_child(child_pid,
                                                        index=-1)
    else:
        raise PIDRelationConsistencyError(
            "Draft child already exists for this relation: {0}".format(
                self.draft_child)) | [
"def",
"insert_draft_child",
"(",
"self",
",",
"child_pid",
")",
":",
"if",
"child_pid",
".",
"status",
"!=",
"PIDStatus",
".",
"RESERVED",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Draft child should have status 'RESERVED'\"",
")",
"if",
"not",
"self",
".",
"draft_child",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"super",
"(",
"PIDNodeVersioning",
",",
"self",
")",
".",
"insert_child",
"(",
"child_pid",
",",
"index",
"=",
"-",
"1",
")",
"else",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Draft child already exists for this relation: {0}\"",
".",
"format",
"(",
"self",
".",
"draft_child",
")",
")"
]
| Insert a draft child to versioning. | [
"Insert",
"a",
"draft",
"child",
"to",
"versioning",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L127-L140 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | PIDNodeVersioning.remove_draft_child | def remove_draft_child(self):
    """Remove the draft child from versioning."""
    if self.draft_child:
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).remove_child(self.draft_child,
                                                        reorder=True) | python | def remove_draft_child(self):
    """Remove the draft child from versioning."""
    if self.draft_child:
        with db.session.begin_nested():
            super(PIDNodeVersioning, self).remove_child(self.draft_child,
                                                        reorder=True) | [
"def",
"remove_draft_child",
"(",
"self",
")",
":",
"if",
"self",
".",
"draft_child",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"super",
"(",
"PIDNodeVersioning",
",",
"self",
")",
".",
"remove_child",
"(",
"self",
".",
"draft_child",
",",
"reorder",
"=",
"True",
")"
]
| Remove the draft child from versioning. | [
"Remove",
"the",
"draft",
"child",
"from",
"versioning",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L142-L147 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/contrib/versioning.py | PIDNodeVersioning.update_redirect | def update_redirect(self):
    """Update the parent redirect to the current last child.
    This method should be called on the parent PID node.
    Use this method when the status of a PID changed (ex: draft changed
    from RESERVED to REGISTERED)
    """
    if self.last_child:
        self._resolved_pid.redirect(self.last_child)
    elif any(map(lambda pid: pid.status not in [PIDStatus.DELETED,
                                                PIDStatus.REGISTERED,
                                                PIDStatus.RESERVED],
                 super(PIDNodeVersioning, self).children.all())):
        raise PIDRelationConsistencyError(
            "Invalid relation state. Only REGISTERED, RESERVED "
            "and DELETED PIDs are supported."
        ) | python | def update_redirect(self):
    """Update the parent redirect to the current last child.
    This method should be called on the parent PID node.
    Use this method when the status of a PID changed (ex: draft changed
    from RESERVED to REGISTERED)
    """
    if self.last_child:
        self._resolved_pid.redirect(self.last_child)
    elif any(map(lambda pid: pid.status not in [PIDStatus.DELETED,
                                                PIDStatus.REGISTERED,
                                                PIDStatus.RESERVED],
                 super(PIDNodeVersioning, self).children.all())):
        raise PIDRelationConsistencyError(
            "Invalid relation state. Only REGISTERED, RESERVED "
            "and DELETED PIDs are supported."
        ) | [
"def",
"update_redirect",
"(",
"self",
")",
":",
"if",
"self",
".",
"last_child",
":",
"self",
".",
"_resolved_pid",
".",
"redirect",
"(",
"self",
".",
"last_child",
")",
"elif",
"any",
"(",
"map",
"(",
"lambda",
"pid",
":",
"pid",
".",
"status",
"not",
"in",
"[",
"PIDStatus",
".",
"DELETED",
",",
"PIDStatus",
".",
"REGISTERED",
",",
"PIDStatus",
".",
"RESERVED",
"]",
",",
"super",
"(",
"PIDNodeVersioning",
",",
"self",
")",
".",
"children",
".",
"all",
"(",
")",
")",
")",
":",
"raise",
"PIDRelationConsistencyError",
"(",
"\"Invalid relation state. Only REGISTERED, RESERVED \"",
"\"and DELETED PIDs are supported.\"",
")"
]
| Update the parent redirect to the current last child.
This method should be called on the parent PID node.
Use this method when the status of a PID changed (ex: draft changed
from RESERVED to REGISTERED) | [
"Update",
"the",
"parent",
"redirect",
"to",
"the",
"current",
"last",
"child",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/contrib/versioning.py#L149-L166 | train |
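The five PIDNodeVersioning records above describe one lifecycle: reserve a draft version, register it, and keep the parent PID redirected at the newest child. The sketch below is illustrative only and is not taken from the package: it assumes `parent_pid` and `draft_pid` are invenio-pidstore PersistentIdentifier objects, and it assumes the node is built as `PIDNodeVersioning(pid=parent_pid)`, since the constructor is not part of these records.

    # Hedged sketch of the draft -> published flow using only the methods shown above.
    versioning = PIDNodeVersioning(pid=parent_pid)  # constructor signature assumed

    # 1. Attach a RESERVED draft PID as the single draft child.
    versioning.insert_draft_child(draft_pid)

    # 2. The draft PID is registered elsewhere (e.g. when the record is published).

    # 3. Refresh the parent redirect so it points at the newest REGISTERED version.
    versioning.update_redirect()

    # An abandoned draft can instead be dropped with versioning.remove_draft_child().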
adaptive-learning/proso-apps | proso_feedback/views.py | feedback | def feedback(request):
"""
Send feedback to the authors of the system.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
text:
the main feedback content
email (optional):
user's e-mail
username (optional):
user's name
"""
if request.method == 'GET':
return render(request, 'feedback_feedback.html', {}, help_text=feedback.__doc__)
if request.method == 'POST':
feedback_data = json_body(request.body.decode("utf-8"))
feedback_data['user_agent'] = Session.objects.get_current_session().http_user_agent.content
if not feedback_data.get('username'):
feedback_data['username'] = request.user.username
if not feedback_data.get('email'):
feedback_data['email'] = request.user.email
comment = Comment.objects.create(
username=feedback_data['username'],
email=feedback_data['email'],
text=feedback_data['text'])
if get_config('proso_feedback', 'send_emails', default=True):
feedback_domain = get_config('proso_feedback', 'domain', required=True)
feedback_to = get_config('proso_feedback', 'to', required=True)
if is_likely_worthless(feedback_data):
mail_from = 'spam@' + feedback_domain
else:
mail_from = 'feedback@' + feedback_domain
text_content = render_to_string("emails/feedback.plain.txt", {
"feedback": feedback_data,
"user": request.user,
})
html_content = render_to_string("emails/feedback.html", {
"feedback": feedback_data,
"user": request.user,
})
subject = feedback_domain + ' feedback ' + str(comment.id)
mail = EmailMultiAlternatives(
subject,
text_content,
mail_from,
feedback_to,
)
mail.attach_alternative(html_content, "text/html")
mail.send()
LOGGER.debug("email sent %s\n", text_content)
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | python | def feedback(request):
"""
Send feedback to the authors of the system.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
text:
the main feedback content
email (optional):
user's e-mail
username (optional):
user's name
"""
if request.method == 'GET':
return render(request, 'feedback_feedback.html', {}, help_text=feedback.__doc__)
if request.method == 'POST':
feedback_data = json_body(request.body.decode("utf-8"))
feedback_data['user_agent'] = Session.objects.get_current_session().http_user_agent.content
if not feedback_data.get('username'):
feedback_data['username'] = request.user.username
if not feedback_data.get('email'):
feedback_data['email'] = request.user.email
comment = Comment.objects.create(
username=feedback_data['username'],
email=feedback_data['email'],
text=feedback_data['text'])
if get_config('proso_feedback', 'send_emails', default=True):
feedback_domain = get_config('proso_feedback', 'domain', required=True)
feedback_to = get_config('proso_feedback', 'to', required=True)
if is_likely_worthless(feedback_data):
mail_from = 'spam@' + feedback_domain
else:
mail_from = 'feedback@' + feedback_domain
text_content = render_to_string("emails/feedback.plain.txt", {
"feedback": feedback_data,
"user": request.user,
})
html_content = render_to_string("emails/feedback.html", {
"feedback": feedback_data,
"user": request.user,
})
subject = feedback_domain + ' feedback ' + str(comment.id)
mail = EmailMultiAlternatives(
subject,
text_content,
mail_from,
feedback_to,
)
mail.attach_alternative(html_content, "text/html")
mail.send()
LOGGER.debug("email sent %s\n", text_content)
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | [
"def",
"feedback",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"render",
"(",
"request",
",",
"'feedback_feedback.html'",
",",
"{",
"}",
",",
"help_text",
"=",
"feedback",
".",
"__doc__",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"feedback_data",
"=",
"json_body",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"feedback_data",
"[",
"'user_agent'",
"]",
"=",
"Session",
".",
"objects",
".",
"get_current_session",
"(",
")",
".",
"http_user_agent",
".",
"content",
"if",
"not",
"feedback_data",
".",
"get",
"(",
"'username'",
")",
":",
"feedback_data",
"[",
"'username'",
"]",
"=",
"request",
".",
"user",
".",
"username",
"if",
"not",
"feedback_data",
".",
"get",
"(",
"'email'",
")",
":",
"feedback_data",
"[",
"'email'",
"]",
"=",
"request",
".",
"user",
".",
"email",
"comment",
"=",
"Comment",
".",
"objects",
".",
"create",
"(",
"username",
"=",
"feedback_data",
"[",
"'username'",
"]",
",",
"email",
"=",
"feedback_data",
"[",
"'email'",
"]",
",",
"text",
"=",
"feedback_data",
"[",
"'text'",
"]",
")",
"if",
"get_config",
"(",
"'proso_feedback'",
",",
"'send_emails'",
",",
"default",
"=",
"True",
")",
":",
"feedback_domain",
"=",
"get_config",
"(",
"'proso_feedback'",
",",
"'domain'",
",",
"required",
"=",
"True",
")",
"feedback_to",
"=",
"get_config",
"(",
"'proso_feedback'",
",",
"'to'",
",",
"required",
"=",
"True",
")",
"if",
"is_likely_worthless",
"(",
"feedback_data",
")",
":",
"mail_from",
"=",
"'spam@'",
"+",
"feedback_domain",
"else",
":",
"mail_from",
"=",
"'feedback@'",
"+",
"feedback_domain",
"text_content",
"=",
"render_to_string",
"(",
"\"emails/feedback.plain.txt\"",
",",
"{",
"\"feedback\"",
":",
"feedback_data",
",",
"\"user\"",
":",
"request",
".",
"user",
",",
"}",
")",
"html_content",
"=",
"render_to_string",
"(",
"\"emails/feedback.html\"",
",",
"{",
"\"feedback\"",
":",
"feedback_data",
",",
"\"user\"",
":",
"request",
".",
"user",
",",
"}",
")",
"subject",
"=",
"feedback_domain",
"+",
"' feedback '",
"+",
"str",
"(",
"comment",
".",
"id",
")",
"mail",
"=",
"EmailMultiAlternatives",
"(",
"subject",
",",
"text_content",
",",
"mail_from",
",",
"feedback_to",
",",
")",
"mail",
".",
"attach_alternative",
"(",
"html_content",
",",
"\"text/html\"",
")",
"mail",
".",
"send",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"\"email sent %s\\n\"",
",",
"text_content",
")",
"return",
"HttpResponse",
"(",
"'ok'",
",",
"status",
"=",
"201",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Send feedback to the authors of the system.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
text:
the main feedback content
email (optional):
user's e-mail
username (optional):
user's name | [
"Send",
"feedback",
"to",
"the",
"authors",
"of",
"the",
"system",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_feedback/views.py#L23-L79 | train |
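For orientation, the view in the record above expects a JSON body with a mandatory `text` field and optional `email`/`username`; on success it returns HTTP 201. A hedged client-side sketch follows; the `/feedback/` URL is a placeholder, since the route is not part of the record.

    import json
    import requests  # any HTTP client would do; requests is only assumed here

    payload = {
        "text": "The practice session froze after question 10.",
        "email": "user@example.com",   # optional
        "username": "student42",       # optional
    }
    # The view parses request.body as JSON, so the payload is sent as a raw JSON body.
    requests.post("https://example.com/feedback/", data=json.dumps(payload))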
adaptive-learning/proso-apps | proso_feedback/views.py | rating | def rating(request):
"""
Rate the current practice.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
value:
one of the following numbers (how difficult questions are?):
(1) too easy,
(2) appropriate,
(3) too difficult
or one of the following numbers (how difficult questions should be?):
(4) much easier
(5) bit easier
(6) the same
(7) bit harder
(8) much harder
"""
if request.method == 'GET':
return render(request, 'feedback_rating.html', {}, help_text=rating.__doc__)
if request.method == 'POST':
data = json_body(request.body.decode("utf-8"))
if data['value'] not in list(range(1, 9)):
return render_json(
request,
{'error': _('The given value is not valid.'), 'error_type': 'invalid_value'},
template='feedback_json.html', status=400
)
rating_object = Rating(
user=request.user,
value=data['value'],
)
rating_object.save()
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | python | def rating(request):
"""
Rate the current practice.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
value:
one of the following numbers (how difficult questions are?):
(1) too easy,
(2) appropriate,
(3) too difficult
or one of the following numbers (how difficult questions should be?):
(4) much easier
(5) bit easier
(6) the same
(7) bit harder
(8) much harder
"""
if request.method == 'GET':
return render(request, 'feedback_rating.html', {}, help_text=rating.__doc__)
if request.method == 'POST':
data = json_body(request.body.decode("utf-8"))
if data['value'] not in list(range(1, 9)):
return render_json(
request,
{'error': _('The given value is not valid.'), 'error_type': 'invalid_value'},
template='feedback_json.html', status=400
)
rating_object = Rating(
user=request.user,
value=data['value'],
)
rating_object.save()
return HttpResponse('ok', status=201)
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | [
"def",
"rating",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"render",
"(",
"request",
",",
"'feedback_rating.html'",
",",
"{",
"}",
",",
"help_text",
"=",
"rating",
".",
"__doc__",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"data",
"=",
"json_body",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"if",
"data",
"[",
"'value'",
"]",
"not",
"in",
"list",
"(",
"range",
"(",
"1",
",",
"9",
")",
")",
":",
"return",
"render_json",
"(",
"request",
",",
"{",
"'error'",
":",
"_",
"(",
"'The given value is not valid.'",
")",
",",
"'error_type'",
":",
"'invalid_value'",
"}",
",",
"template",
"=",
"'feedback_json.html'",
",",
"status",
"=",
"400",
")",
"rating_object",
"=",
"Rating",
"(",
"user",
"=",
"request",
".",
"user",
",",
"value",
"=",
"data",
"[",
"'value'",
"]",
",",
")",
"rating_object",
".",
"save",
"(",
")",
"return",
"HttpResponse",
"(",
"'ok'",
",",
"status",
"=",
"201",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Rate the current practice.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
value:
one of the following numbers (how difficult questions are?):
(1) too easy,
(2) appropriate,
(3) too difficult
or one of the following numbers (how difficult questions should be?):
(4) much easier
(5) bit easier
(6) the same
(7) bit harder
(8) much harder | [
"Rate",
"the",
"current",
"practice",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_feedback/views.py#L83-L121 | train |
projectshift/shift-boiler | boiler/cli/colors.py | colour | def colour(colour, message, bold=False):
    """ Color a message """
    return style(fg=colour, text=message, bold=bold) | python | def colour(colour, message, bold=False):
    """ Color a message """
    return style(fg=colour, text=message, bold=bold) | [
"def",
"colour",
"(",
"colour",
",",
"message",
",",
"bold",
"=",
"False",
")",
":",
"return",
"style",
"(",
"fg",
"=",
"colour",
",",
"text",
"=",
"message",
",",
"bold",
"=",
"bold",
")"
]
| Color a message | [
"Color",
"a",
"message"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/colors.py#L4-L6 | train |
kata198/QueryableList | QueryableList/Base.py | QueryableListBase.customFilter | def customFilter(self, filterFunc):
    '''
        customFilter - Apply a custom filter to elements and return a QueryableList of matches
        @param filterFunc <lambda/function< - A lambda/function that is passed an item, and
            returns True if the item matches (will be returned), otherwise False.
        @return - A QueryableList object of the same type, with only the matching objects returned.
    '''
    ret = self.__class__()
    for item in self:
        if filterFunc(item):
            ret.append(item)
    return ret | python | def customFilter(self, filterFunc):
    '''
        customFilter - Apply a custom filter to elements and return a QueryableList of matches
        @param filterFunc <lambda/function< - A lambda/function that is passed an item, and
            returns True if the item matches (will be returned), otherwise False.
        @return - A QueryableList object of the same type, with only the matching objects returned.
    '''
    ret = self.__class__()
    for item in self:
        if filterFunc(item):
            ret.append(item)
    return ret | [
"def",
"customFilter",
"(",
"self",
",",
"filterFunc",
")",
":",
"ret",
"=",
"self",
".",
"__class__",
"(",
")",
"for",
"item",
"in",
"self",
":",
"if",
"filterFunc",
"(",
"item",
")",
":",
"ret",
".",
"append",
"(",
"item",
")",
"return",
"ret"
]
| customFilter - Apply a custom filter to elements and return a QueryableList of matches
@param filterFunc <lambda/function< - A lambda/function that is passed an item, and
returns True if the item matches (will be returned), otherwise False.
@return - A QueryableList object of the same type, with only the matching objects returned. | [
"customFilter",
"-",
"Apply",
"a",
"custom",
"filter",
"to",
"elements",
"and",
"return",
"a",
"QueryableList",
"of",
"matches"
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Base.py#L152-L166 | train |
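A short usage sketch for the customFilter method above. QueryableListDicts is assumed to be one of the concrete collection classes shipped with QueryableList; only the base class appears in this record.

    from QueryableList import QueryableListDicts  # concrete class assumed

    people = QueryableListDicts([
        {'name': 'Ann', 'age': 34},
        {'name': 'Bob', 'age': 19},
    ])

    # Keep only the items the callable accepts; the result is again a
    # QueryableListDicts, so further filter calls can be chained on it.
    adults = people.customFilter(lambda item: item['age'] >= 21)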
kata198/QueryableList | QueryableList/Base.py | QueryableListBase.sort_by | def sort_by(self, fieldName, reverse=False):
    '''
        sort_by - Return a copy of this collection, sorted by the given fieldName.
          The fieldName is accessed the same way as other filtering, so it supports custom properties, etc.
        @param fieldName <str> - The name of the field on which to sort by
        @param reverse <bool> Default False - If True, list will be in reverse order.
        @return <QueryableList> - A QueryableList of the same type with the elements sorted based on arguments.
    '''
    return self.__class__(
        sorted(self, key = lambda item : self._get_item_value(item, fieldName), reverse=reverse)
    ) | python | def sort_by(self, fieldName, reverse=False):
    '''
        sort_by - Return a copy of this collection, sorted by the given fieldName.
          The fieldName is accessed the same way as other filtering, so it supports custom properties, etc.
        @param fieldName <str> - The name of the field on which to sort by
        @param reverse <bool> Default False - If True, list will be in reverse order.
        @return <QueryableList> - A QueryableList of the same type with the elements sorted based on arguments.
    '''
    return self.__class__(
        sorted(self, key = lambda item : self._get_item_value(item, fieldName), reverse=reverse)
    ) | [
"def",
"sort_by",
"(",
"self",
",",
"fieldName",
",",
"reverse",
"=",
"False",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"sorted",
"(",
"self",
",",
"key",
"=",
"lambda",
"item",
":",
"self",
".",
"_get_item_value",
"(",
"item",
",",
"fieldName",
")",
",",
"reverse",
"=",
"reverse",
")",
")"
]
| sort_by - Return a copy of this collection, sorted by the given fieldName.
The fieldName is accessed the same way as other filtering, so it supports custom properties, etc.
@param fieldName <str> - The name of the field on which to sort by
@param reverse <bool> Default False - If True, list will be in reverse order.
@return <QueryableList> - A QueryableList of the same type with the elements sorted based on arguments. | [
"sort_by",
"-",
"Return",
"a",
"copy",
"of",
"this",
"collection",
"sorted",
"by",
"the",
"given",
"fieldName",
"."
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Base.py#L180-L194 | train |
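Continuing the hypothetical `people` collection from the sketch above, sort_by returns a sorted copy of the same collection type:

    by_age = people.sort_by('age')                   # ascending by the 'age' field
    by_age_desc = people.sort_by('age', reverse=True)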
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | Credential.uri | def uri(self):
    """
    Return sqlalchemy connect string URI.
    """
    return self.uri_template.format(
        host=self.host,
        port="" if self.port is None else self.port,
        database=self.database,
        username=self.username,
        password="" if self.password is None else self.password,
        has_password="" if self.password is None else ":",
        has_port="" if self.port is None else ":",
    ) | python | def uri(self):
    """
    Return sqlalchemy connect string URI.
    """
    return self.uri_template.format(
        host=self.host,
        port="" if self.port is None else self.port,
        database=self.database,
        username=self.username,
        password="" if self.password is None else self.password,
        has_password="" if self.password is None else ":",
        has_port="" if self.port is None else ":",
    ) | [
"def",
"uri",
"(",
"self",
")",
":",
"return",
"self",
".",
"uri_template",
".",
"format",
"(",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"\"\"",
"if",
"self",
".",
"port",
"is",
"None",
"else",
"self",
".",
"port",
",",
"database",
"=",
"self",
".",
"database",
",",
"username",
"=",
"self",
".",
"username",
",",
"password",
"=",
"\"\"",
"if",
"self",
".",
"password",
"is",
"None",
"else",
"self",
".",
"password",
",",
"has_password",
"=",
"\"\"",
"if",
"self",
".",
"password",
"is",
"None",
"else",
"\":\"",
",",
"has_port",
"=",
"\"\"",
"if",
"self",
".",
"port",
"is",
"None",
"else",
"\":\"",
",",
")"
]
| Return sqlalchemy connect string URI. | [
"Return",
"sqlalchemy",
"connect",
"string",
"URI",
"."
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L46-L58 | train |
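A hedged sketch of how the uri method fills uri_template. The real template strings live on Credential subclasses and are not shown in this record; the PostgreSQL-style template and the subclass name below are illustrative assumptions.

    class PostgresCredential(Credential):  # illustrative subclass, not from the package
        uri_template = ("postgresql://{username}{has_password}{password}"
                        "@{host}{has_port}{port}/{database}")

    cred = PostgresCredential(host="db.example.com", port=5432, database="app",
                              username="admin", password="secret")
    # Assuming uri is exposed as a property; otherwise call cred.uri().
    print(cred.uri)  # postgresql://admin:secret@db.example.com:5432/app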
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | Credential.from_json | def from_json(cls, json_file, json_path=None, key_mapping=None):
"""
Load connection credential from json file.
:param json_file: str, path to json file
:param json_path: str, dot notation of the path to the credential dict.
:param key_mapping: dict, map 'host', 'port', 'database', 'username', 'password'
to custom alias, for example ``{'host': 'h', 'port': 'p', 'database': 'db', 'username': 'user', 'password': 'pwd'}``. This params are used to adapt any json data.
:rtype:
:return:
Example:
Your json file::
{
"credentials": {
"db1": {
"h": "example.com",
"p": 1234,
"db": "test",
"user": "admin",
"pwd": "admin",
},
"db2": {
...
}
}
}
Usage::
cred = Credential.from_json(
"path-to-json-file", "credentials.db1",
dict(host="h", port="p", database="db", username="user", password="pwd")
)
"""
cls._validate_key_mapping(key_mapping)
with open(json_file, "rb") as f:
data = json.loads(f.read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping) | python | def from_json(cls, json_file, json_path=None, key_mapping=None):
"""
Load connection credential from json file.
:param json_file: str, path to json file
:param json_path: str, dot notation of the path to the credential dict.
:param key_mapping: dict, map 'host', 'port', 'database', 'username', 'password'
to custom alias, for example ``{'host': 'h', 'port': 'p', 'database': 'db', 'username': 'user', 'password': 'pwd'}``. This params are used to adapt any json data.
:rtype:
:return:
Example:
Your json file::
{
"credentials": {
"db1": {
"h": "example.com",
"p": 1234,
"db": "test",
"user": "admin",
"pwd": "admin",
},
"db2": {
...
}
}
}
Usage::
cred = Credential.from_json(
"path-to-json-file", "credentials.db1",
dict(host="h", port="p", database="db", username="user", password="pwd")
)
"""
cls._validate_key_mapping(key_mapping)
with open(json_file, "rb") as f:
data = json.loads(f.read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping) | [
"def",
"from_json",
"(",
"cls",
",",
"json_file",
",",
"json_path",
"=",
"None",
",",
"key_mapping",
"=",
"None",
")",
":",
"cls",
".",
"_validate_key_mapping",
"(",
"key_mapping",
")",
"with",
"open",
"(",
"json_file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"return",
"cls",
".",
"_from_json_data",
"(",
"data",
",",
"json_path",
",",
"key_mapping",
")"
]
| Load connection credential from json file.
:param json_file: str, path to json file
:param json_path: str, dot notation of the path to the credential dict.
:param key_mapping: dict, map 'host', 'port', 'database', 'username', 'password'
to custom alias, for example ``{'host': 'h', 'port': 'p', 'database': 'db', 'username': 'user', 'password': 'pwd'}``. This params are used to adapt any json data.
:rtype:
:return:
Example:
Your json file::
{
"credentials": {
"db1": {
"h": "example.com",
"p": 1234,
"db": "test",
"user": "admin",
"pwd": "admin",
},
"db2": {
...
}
}
}
Usage::
cred = Credential.from_json(
"path-to-json-file", "credentials.db1",
dict(host="h", port="p", database="db", username="user", password="pwd")
) | [
"Load",
"connection",
"credential",
"from",
"json",
"file",
"."
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L86-L127 | train |
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | Credential.from_s3_json | def from_s3_json(cls, bucket_name, key,
json_path=None, key_mapping=None,
aws_profile=None,
aws_access_key_id=None,
aws_secret_access_key=None,
region_name=None): # pragma: no cover
"""
Load database credential from json on s3.
:param bucket_name: str
:param key: str
:param aws_profile: if None, assume that you are using this from
AWS cloud. (service on the same cloud doesn't need profile name)
:param aws_access_key_id: str, not recommend to use
:param aws_secret_access_key: str, not recommend to use
:param region_name: str
"""
import boto3
ses = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name,
profile_name=aws_profile,
)
s3 = ses.resource("s3")
bucket = s3.Bucket(bucket_name)
object = bucket.Object(key)
data = json.loads(object.get()["Body"].read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping) | python | def from_s3_json(cls, bucket_name, key,
json_path=None, key_mapping=None,
aws_profile=None,
aws_access_key_id=None,
aws_secret_access_key=None,
region_name=None): # pragma: no cover
"""
Load database credential from json on s3.
:param bucket_name: str
:param key: str
:param aws_profile: if None, assume that you are using this from
AWS cloud. (service on the same cloud doesn't need profile name)
:param aws_access_key_id: str, not recommend to use
:param aws_secret_access_key: str, not recommend to use
:param region_name: str
"""
import boto3
ses = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name,
profile_name=aws_profile,
)
s3 = ses.resource("s3")
bucket = s3.Bucket(bucket_name)
object = bucket.Object(key)
data = json.loads(object.get()["Body"].read().decode("utf-8"))
return cls._from_json_data(data, json_path, key_mapping) | [
"def",
"from_s3_json",
"(",
"cls",
",",
"bucket_name",
",",
"key",
",",
"json_path",
"=",
"None",
",",
"key_mapping",
"=",
"None",
",",
"aws_profile",
"=",
"None",
",",
"aws_access_key_id",
"=",
"None",
",",
"aws_secret_access_key",
"=",
"None",
",",
"region_name",
"=",
"None",
")",
":",
"# pragma: no cover",
"import",
"boto3",
"ses",
"=",
"boto3",
".",
"Session",
"(",
"aws_access_key_id",
"=",
"aws_access_key_id",
",",
"aws_secret_access_key",
"=",
"aws_secret_access_key",
",",
"region_name",
"=",
"region_name",
",",
"profile_name",
"=",
"aws_profile",
",",
")",
"s3",
"=",
"ses",
".",
"resource",
"(",
"\"s3\"",
")",
"bucket",
"=",
"s3",
".",
"Bucket",
"(",
"bucket_name",
")",
"object",
"=",
"bucket",
".",
"Object",
"(",
"key",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"object",
".",
"get",
"(",
")",
"[",
"\"Body\"",
"]",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"return",
"cls",
".",
"_from_json_data",
"(",
"data",
",",
"json_path",
",",
"key_mapping",
")"
]
| Load database credential from json on s3.
:param bucket_name: str
:param key: str
:param aws_profile: if None, assume that you are using this from
AWS cloud. (service on the same cloud doesn't need profile name)
:param aws_access_key_id: str, not recommend to use
:param aws_secret_access_key: str, not recommend to use
:param region_name: str | [
"Load",
"database",
"credential",
"from",
"json",
"on",
"s3",
"."
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L159-L188 | train |
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | Credential.from_env | def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
"""
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
"""
if len(prefix) < 1:
raise ValueError("prefix can't be empty")
if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
raise ValueError("prefix can only use [A-Z] and '_'!")
if not prefix.endswith("_"):
prefix = prefix + "_"
data = dict(
host=os.getenv(prefix + "HOST"),
port=os.getenv(prefix + "PORT"),
database=os.getenv(prefix + "DATABASE"),
username=os.getenv(prefix + "USERNAME"),
password=os.getenv(prefix + "PASSWORD"),
)
if kms_decrypt is True: # pragma: no cover
import boto3
from base64 import b64decode
if aws_profile is not None:
kms = boto3.client("kms")
else:
ses = boto3.Session(profile_name=aws_profile)
kms = ses.client("kms")
def decrypt(kms, text):
return kms.decrypt(
CiphertextBlob=b64decode(text.encode("utf-8"))
)["Plaintext"].decode("utf-8")
data = {
key: value if value is None else decrypt(kms, str(value))
for key, value in data.items()
}
return cls(**data) | python | def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
"""
Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str
"""
if len(prefix) < 1:
raise ValueError("prefix can't be empty")
if len(set(prefix).difference(set(string.ascii_uppercase + "_"))):
raise ValueError("prefix can only use [A-Z] and '_'!")
if not prefix.endswith("_"):
prefix = prefix + "_"
data = dict(
host=os.getenv(prefix + "HOST"),
port=os.getenv(prefix + "PORT"),
database=os.getenv(prefix + "DATABASE"),
username=os.getenv(prefix + "USERNAME"),
password=os.getenv(prefix + "PASSWORD"),
)
if kms_decrypt is True: # pragma: no cover
import boto3
from base64 import b64decode
if aws_profile is not None:
kms = boto3.client("kms")
else:
ses = boto3.Session(profile_name=aws_profile)
kms = ses.client("kms")
def decrypt(kms, text):
return kms.decrypt(
CiphertextBlob=b64decode(text.encode("utf-8"))
)["Plaintext"].decode("utf-8")
data = {
key: value if value is None else decrypt(kms, str(value))
for key, value in data.items()
}
return cls(**data) | [
"def",
"from_env",
"(",
"cls",
",",
"prefix",
",",
"kms_decrypt",
"=",
"False",
",",
"aws_profile",
"=",
"None",
")",
":",
"if",
"len",
"(",
"prefix",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"prefix can't be empty\"",
")",
"if",
"len",
"(",
"set",
"(",
"prefix",
")",
".",
"difference",
"(",
"set",
"(",
"string",
".",
"ascii_uppercase",
"+",
"\"_\"",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"prefix can only use [A-Z] and '_'!\"",
")",
"if",
"not",
"prefix",
".",
"endswith",
"(",
"\"_\"",
")",
":",
"prefix",
"=",
"prefix",
"+",
"\"_\"",
"data",
"=",
"dict",
"(",
"host",
"=",
"os",
".",
"getenv",
"(",
"prefix",
"+",
"\"HOST\"",
")",
",",
"port",
"=",
"os",
".",
"getenv",
"(",
"prefix",
"+",
"\"PORT\"",
")",
",",
"database",
"=",
"os",
".",
"getenv",
"(",
"prefix",
"+",
"\"DATABASE\"",
")",
",",
"username",
"=",
"os",
".",
"getenv",
"(",
"prefix",
"+",
"\"USERNAME\"",
")",
",",
"password",
"=",
"os",
".",
"getenv",
"(",
"prefix",
"+",
"\"PASSWORD\"",
")",
",",
")",
"if",
"kms_decrypt",
"is",
"True",
":",
"# pragma: no cover",
"import",
"boto3",
"from",
"base64",
"import",
"b64decode",
"if",
"aws_profile",
"is",
"not",
"None",
":",
"kms",
"=",
"boto3",
".",
"client",
"(",
"\"kms\"",
")",
"else",
":",
"ses",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"aws_profile",
")",
"kms",
"=",
"ses",
".",
"client",
"(",
"\"kms\"",
")",
"def",
"decrypt",
"(",
"kms",
",",
"text",
")",
":",
"return",
"kms",
".",
"decrypt",
"(",
"CiphertextBlob",
"=",
"b64decode",
"(",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
")",
"[",
"\"Plaintext\"",
"]",
".",
"decode",
"(",
"\"utf-8\"",
")",
"data",
"=",
"{",
"key",
":",
"value",
"if",
"value",
"is",
"None",
"else",
"decrypt",
"(",
"kms",
",",
"str",
"(",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
"}",
"return",
"cls",
"(",
"*",
"*",
"data",
")"
]
| Load database credential from env variable.
- host: ENV.{PREFIX}_HOST
- port: ENV.{PREFIX}_PORT
- database: ENV.{PREFIX}_DATABASE
- username: ENV.{PREFIX}_USERNAME
- password: ENV.{PREFIX}_PASSWORD
:param prefix: str
:param kms_decrypt: bool
:param aws_profile: str | [
"Load",
"database",
"credential",
"from",
"env",
"variable",
"."
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L191-L241 | train |
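The environment-variable contract of from_env in the record above, shown end to end (without KMS decryption):

    # Shell side -- the five variables that Credential.from_env("MYDB") looks up:
    #   export MYDB_HOST=db.example.com
    #   export MYDB_PORT=5432
    #   export MYDB_DATABASE=app
    #   export MYDB_USERNAME=admin
    #   export MYDB_PASSWORD=secret

    cred = Credential.from_env("MYDB")  # the trailing underscore is appended automatically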
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | Credential.to_dict | def to_dict(self):
    """
    Convert credentials into a dict.
    """
    return dict(
        host=self.host,
        port=self.port,
        database=self.database,
        username=self.username,
        password=self.password,
    ) | python | def to_dict(self):
    """
    Convert credentials into a dict.
    """
    return dict(
        host=self.host,
        port=self.port,
        database=self.database,
        username=self.username,
        password=self.password,
    ) | [
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"database",
"=",
"self",
".",
"database",
",",
"username",
"=",
"self",
".",
"username",
",",
"password",
"=",
"self",
".",
"password",
",",
")"
]
| Convert credentials into a dict. | [
"Convert",
"credentials",
"into",
"a",
"dict",
"."
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L243-L253 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | start_debugging | def start_debugging():
    """Start a debugging session after catching an exception.
    This prints the traceback and start ipdb session in the frame of the error.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    # If the exception has been annotated to be re-raised, raise the exception
    if hasattr(exc_value, '_ipdbugger_let_raise'):
        raise_(*sys.exc_info())
    print()
    for line in traceback.format_exception(exc_type, exc_value, exc_tb):
        print(colored(line, 'red'), end=' ')
    # Get the frame with the error.
    test_frame = sys._getframe(-1).f_back
    from ipdb.__main__ import wrap_sys_excepthook
    wrap_sys_excepthook()
    IPDBugger(exc_info=sys.exc_info()).set_trace(test_frame) | python | def start_debugging():
    """Start a debugging session after catching an exception.
    This prints the traceback and start ipdb session in the frame of the error.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    # If the exception has been annotated to be re-raised, raise the exception
    if hasattr(exc_value, '_ipdbugger_let_raise'):
        raise_(*sys.exc_info())
    print()
    for line in traceback.format_exception(exc_type, exc_value, exc_tb):
        print(colored(line, 'red'), end=' ')
    # Get the frame with the error.
    test_frame = sys._getframe(-1).f_back
    from ipdb.__main__ import wrap_sys_excepthook
    wrap_sys_excepthook()
    IPDBugger(exc_info=sys.exc_info()).set_trace(test_frame) | [
"def",
"start_debugging",
"(",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"# If the exception has been annotated to be re-raised, raise the exception",
"if",
"hasattr",
"(",
"exc_value",
",",
"'_ipdbugger_let_raise'",
")",
":",
"raise_",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
"print",
"(",
")",
"for",
"line",
"in",
"traceback",
".",
"format_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_tb",
")",
":",
"print",
"(",
"colored",
"(",
"line",
",",
"'red'",
")",
",",
"end",
"=",
"' '",
")",
"# Get the frame with the error.",
"test_frame",
"=",
"sys",
".",
"_getframe",
"(",
"-",
"1",
")",
".",
"f_back",
"from",
"ipdb",
".",
"__main__",
"import",
"wrap_sys_excepthook",
"wrap_sys_excepthook",
"(",
")",
"IPDBugger",
"(",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
")",
".",
"set_trace",
"(",
"test_frame",
")"
]
| Start a debugging session after catching an exception.
This prints the traceback and start ipdb session in the frame of the error. | [
"Start",
"a",
"debugging",
"session",
"after",
"catching",
"an",
"exception",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L85-L105 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | get_last_lineno | def get_last_lineno(node):
    """Recursively find the last line number of the ast node."""
    max_lineno = 0
    if hasattr(node, "lineno"):
        max_lineno = node.lineno
    for _, field in ast.iter_fields(node):
        if isinstance(field, list):
            for value in field:
                if isinstance(value, ast.AST):
                    max_lineno = max(max_lineno, get_last_lineno(value))
        elif isinstance(field, ast.AST):
            max_lineno = max(max_lineno, get_last_lineno(field))
    return max_lineno | python | def get_last_lineno(node):
    """Recursively find the last line number of the ast node."""
    max_lineno = 0
    if hasattr(node, "lineno"):
        max_lineno = node.lineno
    for _, field in ast.iter_fields(node):
        if isinstance(field, list):
            for value in field:
                if isinstance(value, ast.AST):
                    max_lineno = max(max_lineno, get_last_lineno(value))
        elif isinstance(field, ast.AST):
            max_lineno = max(max_lineno, get_last_lineno(field))
    return max_lineno | [
"def",
"get_last_lineno",
"(",
"node",
")",
":",
"max_lineno",
"=",
"0",
"if",
"hasattr",
"(",
"node",
",",
"\"lineno\"",
")",
":",
"max_lineno",
"=",
"node",
".",
"lineno",
"for",
"_",
",",
"field",
"in",
"ast",
".",
"iter_fields",
"(",
"node",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"list",
")",
":",
"for",
"value",
"in",
"field",
":",
"if",
"isinstance",
"(",
"value",
",",
"ast",
".",
"AST",
")",
":",
"max_lineno",
"=",
"max",
"(",
"max_lineno",
",",
"get_last_lineno",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"field",
",",
"ast",
".",
"AST",
")",
":",
"max_lineno",
"=",
"max",
"(",
"max_lineno",
",",
"get_last_lineno",
"(",
"field",
")",
")",
"return",
"max_lineno"
]
| Recursively find the last line number of the ast node. | [
"Recursively",
"find",
"the",
"last",
"line",
"number",
"of",
"the",
"ast",
"node",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L271-L287 | train |
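A small self-contained check of get_last_lineno on a freshly parsed snippet:

    import ast

    tree = ast.parse("x = 1\nif x:\n    x += 1\n")
    print(get_last_lineno(tree))  # -> 3, the AugAssign on the last line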
gregoil/ipdbugger | ipdbugger/__init__.py | IPDBugger.do_raise | def do_raise(self, arg):
    """Raise the last exception caught."""
    self.do_continue(arg)
    # Annotating the exception for a continual re-raise
    _, exc_value, _ = self.exc_info
    exc_value._ipdbugger_let_raise = True
    raise_(*self.exc_info) | python | def do_raise(self, arg):
    """Raise the last exception caught."""
    self.do_continue(arg)
    # Annotating the exception for a continual re-raise
    _, exc_value, _ = self.exc_info
    exc_value._ipdbugger_let_raise = True
    raise_(*self.exc_info) | [
"def",
"do_raise",
"(",
"self",
",",
"arg",
")",
":",
"self",
".",
"do_continue",
"(",
"arg",
")",
"# Annotating the exception for a continual re-raise",
"_",
",",
"exc_value",
",",
"_",
"=",
"self",
".",
"exc_info",
"exc_value",
".",
"_ipdbugger_let_raise",
"=",
"True",
"raise_",
"(",
"*",
"self",
".",
"exc_info",
")"
]
| Raise the last exception caught. | [
"Raise",
"the",
"last",
"exception",
"caught",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L48-L56 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | IPDBugger.do_retry | def do_retry(self, arg):
    """Rerun the previous command."""
    prev_line = self.curframe.f_lineno - 1
    # Make sure not to jump to the middle of the previous statement
    while True:
        try:
            self.curframe.f_lineno = prev_line
            break
        except ValueError:
            prev_line -= 1
    self.do_jump(prev_line)
    self.do_continue(arg)
    return 1 | python | def do_retry(self, arg):
    """Rerun the previous command."""
    prev_line = self.curframe.f_lineno - 1
    # Make sure not to jump to the middle of the previous statement
    while True:
        try:
            self.curframe.f_lineno = prev_line
            break
        except ValueError:
            prev_line -= 1
    self.do_jump(prev_line)
    self.do_continue(arg)
    return 1 | [
"def",
"do_retry",
"(",
"self",
",",
"arg",
")",
":",
"prev_line",
"=",
"self",
".",
"curframe",
".",
"f_lineno",
"-",
"1",
"# Make sure not to jump to the middle of the previous statement",
"while",
"True",
":",
"try",
":",
"self",
".",
"curframe",
".",
"f_lineno",
"=",
"prev_line",
"break",
"except",
"ValueError",
":",
"prev_line",
"-=",
"1",
"self",
".",
"do_jump",
"(",
"prev_line",
")",
"self",
".",
"do_continue",
"(",
"arg",
")",
"return",
"1"
]
| Rerun the previous command. | [
"Rerun",
"the",
"previous",
"command",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L58-L72 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | IPDBugger.dispatch_line | def dispatch_line(self, frame):
    """Handle line action and return the next line callback."""
    callback = TerminalPdb.dispatch_line(self, frame)
    # If the ipdb session ended, don't return a callback for the next line
    if self.stoplineno == -1:
        return None
    return callback | python | def dispatch_line(self, frame):
    """Handle line action and return the next line callback."""
    callback = TerminalPdb.dispatch_line(self, frame)
    # If the ipdb session ended, don't return a callback for the next line
    if self.stoplineno == -1:
        return None
    return callback | [
"def",
"dispatch_line",
"(",
"self",
",",
"frame",
")",
":",
"callback",
"=",
"TerminalPdb",
".",
"dispatch_line",
"(",
"self",
",",
"frame",
")",
"# If the ipdb session ended, don't return a callback for the next line",
"if",
"self",
".",
"stoplineno",
"==",
"-",
"1",
":",
"return",
"None",
"return",
"callback"
]
| Handle line action and return the next line callback. | [
"Handle",
"line",
"action",
"and",
"return",
"the",
"next",
"line",
"callback",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L74-L82 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | ErrorsCatchTransformer.wrap_with_try | def wrap_with_try(self, node):
"""Wrap an ast node in a 'try' node to enter debug on exception."""
handlers = []
if self.ignore_exceptions is None:
handlers.append(ast.ExceptHandler(type=None,
name=None,
body=[ast.Raise()]))
else:
ignores_nodes = self.ignore_exceptions
handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes,
ast.Load()),
name=None,
body=[ast.Raise()]))
if self.catch_exception is None or \
get_node_value(self.catch_exception) not in \
(get_node_value(ast_node)
for ast_node in self.ignore_exceptions):
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
start_debug_cmd = ast.Expr(
value=ast.Call(ast.Name("start_debugging", ast.Load()),
[], [], *call_extra_parameters))
catch_exception_type = None
if self.catch_exception is not None:
catch_exception_type = self.catch_exception
handlers.append(ast.ExceptHandler(type=catch_exception_type,
name=None,
body=[start_debug_cmd]))
try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {}
new_node = self.ast_try_except(orelse=[], body=[node],
handlers=handlers,
**try_except_extra_params)
return ast.copy_location(new_node, node) | python | def wrap_with_try(self, node):
"""Wrap an ast node in a 'try' node to enter debug on exception."""
handlers = []
if self.ignore_exceptions is None:
handlers.append(ast.ExceptHandler(type=None,
name=None,
body=[ast.Raise()]))
else:
ignores_nodes = self.ignore_exceptions
handlers.append(ast.ExceptHandler(type=ast.Tuple(ignores_nodes,
ast.Load()),
name=None,
body=[ast.Raise()]))
if self.catch_exception is None or \
get_node_value(self.catch_exception) not in \
(get_node_value(ast_node)
for ast_node in self.ignore_exceptions):
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
start_debug_cmd = ast.Expr(
value=ast.Call(ast.Name("start_debugging", ast.Load()),
[], [], *call_extra_parameters))
catch_exception_type = None
if self.catch_exception is not None:
catch_exception_type = self.catch_exception
handlers.append(ast.ExceptHandler(type=catch_exception_type,
name=None,
body=[start_debug_cmd]))
try_except_extra_params = {"finalbody": []} if IS_PYTHON_3 else {}
new_node = self.ast_try_except(orelse=[], body=[node],
handlers=handlers,
**try_except_extra_params)
return ast.copy_location(new_node, node) | [
"def",
"wrap_with_try",
"(",
"self",
",",
"node",
")",
":",
"handlers",
"=",
"[",
"]",
"if",
"self",
".",
"ignore_exceptions",
"is",
"None",
":",
"handlers",
".",
"append",
"(",
"ast",
".",
"ExceptHandler",
"(",
"type",
"=",
"None",
",",
"name",
"=",
"None",
",",
"body",
"=",
"[",
"ast",
".",
"Raise",
"(",
")",
"]",
")",
")",
"else",
":",
"ignores_nodes",
"=",
"self",
".",
"ignore_exceptions",
"handlers",
".",
"append",
"(",
"ast",
".",
"ExceptHandler",
"(",
"type",
"=",
"ast",
".",
"Tuple",
"(",
"ignores_nodes",
",",
"ast",
".",
"Load",
"(",
")",
")",
",",
"name",
"=",
"None",
",",
"body",
"=",
"[",
"ast",
".",
"Raise",
"(",
")",
"]",
")",
")",
"if",
"self",
".",
"catch_exception",
"is",
"None",
"or",
"get_node_value",
"(",
"self",
".",
"catch_exception",
")",
"not",
"in",
"(",
"get_node_value",
"(",
"ast_node",
")",
"for",
"ast_node",
"in",
"self",
".",
"ignore_exceptions",
")",
":",
"call_extra_parameters",
"=",
"[",
"]",
"if",
"IS_PYTHON_3",
"else",
"[",
"None",
",",
"None",
"]",
"start_debug_cmd",
"=",
"ast",
".",
"Expr",
"(",
"value",
"=",
"ast",
".",
"Call",
"(",
"ast",
".",
"Name",
"(",
"\"start_debugging\"",
",",
"ast",
".",
"Load",
"(",
")",
")",
",",
"[",
"]",
",",
"[",
"]",
",",
"*",
"call_extra_parameters",
")",
")",
"catch_exception_type",
"=",
"None",
"if",
"self",
".",
"catch_exception",
"is",
"not",
"None",
":",
"catch_exception_type",
"=",
"self",
".",
"catch_exception",
"handlers",
".",
"append",
"(",
"ast",
".",
"ExceptHandler",
"(",
"type",
"=",
"catch_exception_type",
",",
"name",
"=",
"None",
",",
"body",
"=",
"[",
"start_debug_cmd",
"]",
")",
")",
"try_except_extra_params",
"=",
"{",
"\"finalbody\"",
":",
"[",
"]",
"}",
"if",
"IS_PYTHON_3",
"else",
"{",
"}",
"new_node",
"=",
"self",
".",
"ast_try_except",
"(",
"orelse",
"=",
"[",
"]",
",",
"body",
"=",
"[",
"node",
"]",
",",
"handlers",
"=",
"handlers",
",",
"*",
"*",
"try_except_extra_params",
")",
"return",
"ast",
".",
"copy_location",
"(",
"new_node",
",",
"node",
")"
]
| Wrap an ast node in a 'try' node to enter debug on exception. | [
"Wrap",
"an",
"ast",
"node",
"in",
"a",
"try",
"node",
"to",
"enter",
"debug",
"on",
"exception",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L134-L175 | train |
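A minimal standalone sketch of the same wrapping idea, using only the standard-library ast module. The sample source, the Exception handler type and the stand-in start_debugging hook are illustrative assumptions, not the package's exact generated code:

import ast

source = "print(1 / 0)"
tree = ast.parse(source)
stmt = tree.body[0]

# Build: try: <stmt>  /  except Exception: start_debugging()
handler = ast.ExceptHandler(
    type=ast.Name("Exception", ast.Load()),
    name=None,
    body=[ast.Expr(ast.Call(ast.Name("start_debugging", ast.Load()), [], []))],
)
wrapped = ast.Try(body=[stmt], handlers=[handler], orelse=[], finalbody=[])
tree.body[0] = ast.copy_location(wrapped, stmt)
ast.fix_missing_locations(tree)

# Execute with a stand-in hook instead of a real ipdb session
namespace = {"start_debugging": lambda: print("would enter the debugger here")}
exec(compile(tree, "<demo>", "exec"), namespace)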
gregoil/ipdbugger | ipdbugger/__init__.py | ErrorsCatchTransformer.try_except_handler | def try_except_handler(self, node):
"""Handler for try except statement to ignore excepted exceptions."""
# List all excepted exception's names
excepted_types = []
for handler in node.handlers:
if handler.type is None:
excepted_types = None
break
if isinstance(handler.type, ast.Tuple):
excepted_types.extend([exception_type for exception_type
in handler.type.elts])
else:
excepted_types.append(handler.type)
new_exception_list = self.ignore_exceptions
if self.ignore_exceptions is not None:
if excepted_types is None:
new_exception_list = None
else:
new_exception_list = list(set(excepted_types +
self.ignore_exceptions))
# Set the new ignore list, and save the old one
old_exception_handlers, self.ignore_exceptions = \
self.ignore_exceptions, new_exception_list
# Run recursively on all sub nodes with the new ignore list
node.body = [self.visit(node_item) for node_item in node.body]
# Revert changes from ignore list
self.ignore_exceptions = old_exception_handlers | python | def try_except_handler(self, node):
"""Handler for try except statement to ignore excepted exceptions."""
# List all excepted exception's names
excepted_types = []
for handler in node.handlers:
if handler.type is None:
excepted_types = None
break
if isinstance(handler.type, ast.Tuple):
excepted_types.extend([exception_type for exception_type
in handler.type.elts])
else:
excepted_types.append(handler.type)
new_exception_list = self.ignore_exceptions
if self.ignore_exceptions is not None:
if excepted_types is None:
new_exception_list = None
else:
new_exception_list = list(set(excepted_types +
self.ignore_exceptions))
# Set the new ignore list, and save the old one
old_exception_handlers, self.ignore_exceptions = \
self.ignore_exceptions, new_exception_list
# Run recursively on all sub nodes with the new ignore list
node.body = [self.visit(node_item) for node_item in node.body]
# Revert changes from ignore list
self.ignore_exceptions = old_exception_handlers | [
"def",
"try_except_handler",
"(",
"self",
",",
"node",
")",
":",
"# List all excepted exception's names",
"excepted_types",
"=",
"[",
"]",
"for",
"handler",
"in",
"node",
".",
"handlers",
":",
"if",
"handler",
".",
"type",
"is",
"None",
":",
"excepted_types",
"=",
"None",
"break",
"if",
"isinstance",
"(",
"handler",
".",
"type",
",",
"ast",
".",
"Tuple",
")",
":",
"excepted_types",
".",
"extend",
"(",
"[",
"exception_type",
"for",
"exception_type",
"in",
"handler",
".",
"type",
".",
"elts",
"]",
")",
"else",
":",
"excepted_types",
".",
"append",
"(",
"handler",
".",
"type",
")",
"new_exception_list",
"=",
"self",
".",
"ignore_exceptions",
"if",
"self",
".",
"ignore_exceptions",
"is",
"not",
"None",
":",
"if",
"excepted_types",
"is",
"None",
":",
"new_exception_list",
"=",
"None",
"else",
":",
"new_exception_list",
"=",
"list",
"(",
"set",
"(",
"excepted_types",
"+",
"self",
".",
"ignore_exceptions",
")",
")",
"# Set the new ignore list, and save the old one",
"old_exception_handlers",
",",
"self",
".",
"ignore_exceptions",
"=",
"self",
".",
"ignore_exceptions",
",",
"new_exception_list",
"# Run recursively on all sub nodes with the new ignore list",
"node",
".",
"body",
"=",
"[",
"self",
".",
"visit",
"(",
"node_item",
")",
"for",
"node_item",
"in",
"node",
".",
"body",
"]",
"# Revert changes from ignore list",
"self",
".",
"ignore_exceptions",
"=",
"old_exception_handlers"
]
| Handler for try except statement to ignore excepted exceptions. | [
"Handler",
"for",
"try",
"except",
"statement",
"to",
"ignore",
"excepted",
"exceptions",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L177-L210 | train |
gregoil/ipdbugger | ipdbugger/__init__.py | ErrorsCatchTransformer.visit_Call | def visit_Call(self, node):
"""Propagate 'debug' wrapper into inner function calls if needed.
Args:
node (ast.AST): node statement to surround.
"""
if self.depth == 0:
return node
if self.ignore_exceptions is None:
ignore_exceptions = ast.Name("None", ast.Load())
else:
ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())
catch_exception_type = self.catch_exception \
if self.catch_exception else "None"
catch_exception = ast.Name(catch_exception_type, ast.Load())
depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)
debug_node_name = ast.Name("debug", ast.Load())
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
node.func = ast.Call(debug_node_name,
[node.func, ignore_exceptions,
catch_exception, depth],
[], *call_extra_parameters)
return node | python | def visit_Call(self, node):
"""Propagate 'debug' wrapper into inner function calls if needed.
Args:
node (ast.AST): node statement to surround.
"""
if self.depth == 0:
return node
if self.ignore_exceptions is None:
ignore_exceptions = ast.Name("None", ast.Load())
else:
ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())
catch_exception_type = self.catch_exception \
if self.catch_exception else "None"
catch_exception = ast.Name(catch_exception_type, ast.Load())
depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)
debug_node_name = ast.Name("debug", ast.Load())
call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
node.func = ast.Call(debug_node_name,
[node.func, ignore_exceptions,
catch_exception, depth],
[], *call_extra_parameters)
return node | [
"def",
"visit_Call",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"depth",
"==",
"0",
":",
"return",
"node",
"if",
"self",
".",
"ignore_exceptions",
"is",
"None",
":",
"ignore_exceptions",
"=",
"ast",
".",
"Name",
"(",
"\"None\"",
",",
"ast",
".",
"Load",
"(",
")",
")",
"else",
":",
"ignore_exceptions",
"=",
"ast",
".",
"List",
"(",
"self",
".",
"ignore_exceptions",
",",
"ast",
".",
"Load",
"(",
")",
")",
"catch_exception_type",
"=",
"self",
".",
"catch_exception",
"if",
"self",
".",
"catch_exception",
"else",
"\"None\"",
"catch_exception",
"=",
"ast",
".",
"Name",
"(",
"catch_exception_type",
",",
"ast",
".",
"Load",
"(",
")",
")",
"depth",
"=",
"ast",
".",
"Num",
"(",
"self",
".",
"depth",
"-",
"1",
"if",
"self",
".",
"depth",
">",
"0",
"else",
"-",
"1",
")",
"debug_node_name",
"=",
"ast",
".",
"Name",
"(",
"\"debug\"",
",",
"ast",
".",
"Load",
"(",
")",
")",
"call_extra_parameters",
"=",
"[",
"]",
"if",
"IS_PYTHON_3",
"else",
"[",
"None",
",",
"None",
"]",
"node",
".",
"func",
"=",
"ast",
".",
"Call",
"(",
"debug_node_name",
",",
"[",
"node",
".",
"func",
",",
"ignore_exceptions",
",",
"catch_exception",
",",
"depth",
"]",
",",
"[",
"]",
",",
"*",
"call_extra_parameters",
")",
"return",
"node"
]
| Propagate 'debug' wrapper into inner function calls if needed.
Args:
node (ast.AST): node statement to surround. | [
"Propagate",
"debug",
"wrapper",
"into",
"inner",
"function",
"calls",
"if",
"needed",
"."
]
| 9575734ec26f6be86ae263496d50eb60bb988b21 | https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L213-L241 | train |
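At rewritten call sites the transformer injects debug(func, ignore_exceptions, catch_exception, depth). A hedged usage sketch of the decorator form this rewrite relies on; the keyword names are taken from the transformer's parameters above and are assumptions about the public API, and fragile and its data are invented:

from ipdbugger import debug

@debug(ignore_exceptions=[KeyError], depth=1)
def fragile(data):
    # A ZeroDivisionError here should open the debugger shell; a KeyError is
    # re-raised untouched because it is in the ignore list.
    return data["count"] / data["total"]

fragile({"count": 1, "total": 0})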
PBR/MQ2 | MQ2/plugins/csv_plugin.py | get_qtls_from_rqtl_data | def get_qtls_from_rqtl_data(matrix, lod_threshold):
""" Retrieve the list of significants QTLs for the given input
matrix and using the specified LOD threshold.
This assumes one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective the presence of a QTL.
"""
t_matrix = list(zip(*matrix))
qtls = [['Trait', 'Linkage Group', 'Position', 'Exact marker', 'LOD']]
# row 0: markers
# row 1: chr
# row 2: pos
for row in t_matrix[3:]:
lgroup = None
max_lod = None
peak = None
cnt = 1
while cnt < len(row):
if lgroup is None:
lgroup = t_matrix[1][cnt]
if lgroup == t_matrix[1][cnt]:
if max_lod is None:
max_lod = float(row[cnt])
if float(row[cnt]) > float(max_lod):
max_lod = float(row[cnt])
peak = cnt
else:
if max_lod \
and float(max_lod) > float(lod_threshold) \
and peak:
qtl = [row[0], # trait
t_matrix[1][peak], # LG
t_matrix[2][peak], # pos
t_matrix[0][peak], # marker
max_lod, # LOD value
]
qtls.append(qtl)
lgroup = None
max_lod = None
peak = cnt
cnt = cnt + 1
return qtls | python | def get_qtls_from_rqtl_data(matrix, lod_threshold):
""" Retrieve the list of significants QTLs for the given input
matrix and using the specified LOD threshold.
This assumes one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective the presence of a QTL.
"""
t_matrix = list(zip(*matrix))
qtls = [['Trait', 'Linkage Group', 'Position', 'Exact marker', 'LOD']]
# row 0: markers
# row 1: chr
# row 2: pos
for row in t_matrix[3:]:
lgroup = None
max_lod = None
peak = None
cnt = 1
while cnt < len(row):
if lgroup is None:
lgroup = t_matrix[1][cnt]
if lgroup == t_matrix[1][cnt]:
if max_lod is None:
max_lod = float(row[cnt])
if float(row[cnt]) > float(max_lod):
max_lod = float(row[cnt])
peak = cnt
else:
if max_lod \
and float(max_lod) > float(lod_threshold) \
and peak:
qtl = [row[0], # trait
t_matrix[1][peak], # LG
t_matrix[2][peak], # pos
t_matrix[0][peak], # marker
max_lod, # LOD value
]
qtls.append(qtl)
lgroup = None
max_lod = None
peak = cnt
cnt = cnt + 1
return qtls | [
"def",
"get_qtls_from_rqtl_data",
"(",
"matrix",
",",
"lod_threshold",
")",
":",
"t_matrix",
"=",
"list",
"(",
"zip",
"(",
"*",
"matrix",
")",
")",
"qtls",
"=",
"[",
"[",
"'Trait'",
",",
"'Linkage Group'",
",",
"'Position'",
",",
"'Exact marker'",
",",
"'LOD'",
"]",
"]",
"# row 0: markers",
"# row 1: chr",
"# row 2: pos",
"for",
"row",
"in",
"t_matrix",
"[",
"3",
":",
"]",
":",
"lgroup",
"=",
"None",
"max_lod",
"=",
"None",
"peak",
"=",
"None",
"cnt",
"=",
"1",
"while",
"cnt",
"<",
"len",
"(",
"row",
")",
":",
"if",
"lgroup",
"is",
"None",
":",
"lgroup",
"=",
"t_matrix",
"[",
"1",
"]",
"[",
"cnt",
"]",
"if",
"lgroup",
"==",
"t_matrix",
"[",
"1",
"]",
"[",
"cnt",
"]",
":",
"if",
"max_lod",
"is",
"None",
":",
"max_lod",
"=",
"float",
"(",
"row",
"[",
"cnt",
"]",
")",
"if",
"float",
"(",
"row",
"[",
"cnt",
"]",
")",
">",
"float",
"(",
"max_lod",
")",
":",
"max_lod",
"=",
"float",
"(",
"row",
"[",
"cnt",
"]",
")",
"peak",
"=",
"cnt",
"else",
":",
"if",
"max_lod",
"and",
"float",
"(",
"max_lod",
")",
">",
"float",
"(",
"lod_threshold",
")",
"and",
"peak",
":",
"qtl",
"=",
"[",
"row",
"[",
"0",
"]",
",",
"# trait",
"t_matrix",
"[",
"1",
"]",
"[",
"peak",
"]",
",",
"# LG",
"t_matrix",
"[",
"2",
"]",
"[",
"peak",
"]",
",",
"# pos",
"t_matrix",
"[",
"0",
"]",
"[",
"peak",
"]",
",",
"# marker",
"max_lod",
",",
"# LOD value",
"]",
"qtls",
".",
"append",
"(",
"qtl",
")",
"lgroup",
"=",
"None",
"max_lod",
"=",
"None",
"peak",
"=",
"cnt",
"cnt",
"=",
"cnt",
"+",
"1",
"return",
"qtls"
]
| Retrieve the list of significant QTLs for the given input
matrix and using the specified LOD threshold.
This assumes one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective of the presence of a QTL. | [
"Retrieve",
"the",
"list",
"of",
"significants",
"QTLs",
"for",
"the",
"given",
"input",
"matrix",
"and",
"using",
"the",
"specified",
"LOD",
"threshold",
".",
"This",
"assumes",
"one",
"QTL",
"per",
"linkage",
"group",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/csv_plugin.py#L55-L100 | train |
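The matrix argument is expected row-wise as [marker, linkage group, position, one LOD column per trait] with a header row, since the function transposes it and reads the first three columns as markers/chr/pos. A small invented example of the layout and call:

matrix = [
    ["marker", "chr", "pos", "trait1"],
    ["mk1", "1", "0.0",  "0.8"],
    ["mk2", "1", "12.3", "3.4"],
    ["mk3", "1", "25.0", "1.1"],
    ["mk4", "2", "0.0",  "0.5"],
    ["mk5", "2", "10.0", "2.9"],
]
qtls = get_qtls_from_rqtl_data(matrix, lod_threshold=2.5)
# With these numbers the linkage-group-1 peak at mk2 (LOD 3.4) passes the
# threshold. Note that, as written above, a peak on the last linkage group
# in the file is only flushed when another group follows it.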
projectshift/shift-boiler | boiler/user/session_interface.py | BoilerSessionInterface.save_session | def save_session(self, *args, **kwargs):
"""
Save session
Skip setting session cookie if requested via g.stateless_sessions
"""
# do not send session cookie
if g.get('stateless_sessions'):
return
# send cookie
return super(BoilerSessionInterface, self).save_session(
*args,
**kwargs
) | python | def save_session(self, *args, **kwargs):
"""
Save session
Skip setting session cookie if requested via g.stateless_sessions
"""
# do not send session cookie
if g.get('stateless_sessions'):
return
# send cookie
return super(BoilerSessionInterface, self).save_session(
*args,
**kwargs
) | [
"def",
"save_session",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# do not send session cookie",
"if",
"g",
".",
"get",
"(",
"'stateless_sessions'",
")",
":",
"return",
"# send cookie",
"return",
"super",
"(",
"BoilerSessionInterface",
",",
"self",
")",
".",
"save_session",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Save session
Skip setting session cookie if requested via g.stateless_sessions | [
"Save",
"session",
"Skip",
"setting",
"session",
"cookie",
"if",
"requested",
"via",
"g",
".",
"stateless_sessions"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/session_interface.py#L45-L59 | train |
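The cookie suppression is driven entirely by a flask.g flag, so any view or before-request hook can opt a request out. A hedged sketch; the route, secret and return value are invented, and only the g.stateless_sessions key comes from the code above:

from flask import Flask, g, session

app = Flask(__name__)
app.secret_key = "change-me"
# app.session_interface = BoilerSessionInterface()  # wired up by the host app

@app.route("/api/ping")
def ping():
    g.stateless_sessions = True   # this response will carry no Set-Cookie header
    session["seen"] = True        # the write is silently dropped rather than persisted
    return "pong"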
brunato/lograptor | lograptor/channels.py | FileChannel.prune_old | def prune_old(self):
"""
Removes the directories that are older than a certain date.
"""
path = self.pubdir
dirmask = self.dirmask
expire = self.expire
expire_limit = int(time.time()) - (86400 * expire)
logger.info('Pruning directories older than %d days', expire)
if not os.path.isdir(path):
logger.warning('Dir %r not found -- skipping pruning', path)
return
for entry in os.listdir(path):
logger.debug('Found: %r', entry)
if os.path.isdir(os.path.join(path, entry)):
try:
stamp = time.mktime(time.strptime(entry, dirmask))
except ValueError as e:
logger.info('Dir %r did not match dirmask %r: %r', entry, dirmask, e)
logger.info('Skipping %r', entry)
continue
if stamp < expire_limit:
shutil.rmtree(os.path.join(path, entry))
logger.info('File Publisher: Pruned old dir: %r', entry)
else:
logger.info('%r is still active', entry)
else:
logger.info('%r is not a directory. Skipping.', entry)
logger.info('Finished with pruning') | python | def prune_old(self):
"""
Removes the directories that are older than a certain date.
"""
path = self.pubdir
dirmask = self.dirmask
expire = self.expire
expire_limit = int(time.time()) - (86400 * expire)
logger.info('Pruning directories older than %d days', expire)
if not os.path.isdir(path):
logger.warning('Dir %r not found -- skipping pruning', path)
return
for entry in os.listdir(path):
logger.debug('Found: %r', entry)
if os.path.isdir(os.path.join(path, entry)):
try:
stamp = time.mktime(time.strptime(entry, dirmask))
except ValueError as e:
logger.info('Dir %r did not match dirmask %r: %r', entry, dirmask, e)
logger.info('Skipping %r', entry)
continue
if stamp < expire_limit:
shutil.rmtree(os.path.join(path, entry))
logger.info('File Publisher: Pruned old dir: %r', entry)
else:
logger.info('%r is still active', entry)
else:
logger.info('%r is not a directory. Skipping.', entry)
logger.info('Finished with pruning') | [
"def",
"prune_old",
"(",
"self",
")",
":",
"path",
"=",
"self",
".",
"pubdir",
"dirmask",
"=",
"self",
".",
"dirmask",
"expire",
"=",
"self",
".",
"expire",
"expire_limit",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"-",
"(",
"86400",
"*",
"expire",
")",
"logger",
".",
"info",
"(",
"'Pruning directories older than %d days'",
",",
"expire",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"logger",
".",
"warning",
"(",
"'Dir %r not found -- skipping pruning'",
",",
"path",
")",
"return",
"for",
"entry",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"logger",
".",
"debug",
"(",
"'Found: %r'",
",",
"entry",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
")",
":",
"try",
":",
"stamp",
"=",
"time",
".",
"mktime",
"(",
"time",
".",
"strptime",
"(",
"entry",
",",
"dirmask",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"logger",
".",
"info",
"(",
"'Dir %r did not match dirmask %r: %r'",
",",
"entry",
",",
"dirmask",
",",
"e",
")",
"logger",
".",
"info",
"(",
"'Skipping %r'",
",",
"entry",
")",
"continue",
"if",
"stamp",
"<",
"expire_limit",
":",
"shutil",
".",
"rmtree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"entry",
")",
")",
"logger",
".",
"info",
"(",
"'File Publisher: Pruned old dir: %r'",
",",
"entry",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'%r is still active'",
",",
"entry",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'%r is not a directory. Skipping.'",
",",
"entry",
")",
"logger",
".",
"info",
"(",
"'Finished with pruning'",
")"
]
| Removes the directories that are older than a certain date. | [
"Removes",
"the",
"directories",
"that",
"are",
"older",
"than",
"a",
"certain",
"date",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/channels.py#L512-L545 | train |
brunato/lograptor | lograptor/channels.py | FileChannel.send_report | def send_report(self, report_parts):
"""
Publish the report parts to local files. Each report part is a text
with a title and specific extension. For html and plaintext sending
the report part is unique, for csv send also the stats and unparsed
string are plain text and report items are csv texts.
"""
logger.info('Checking and creating the report directory')
report_parts = sorted(
filter(lambda x: x.fmt in self.formats, report_parts),
key=lambda x: self.formats.index(x.fmt)
)
workdir = os.path.join(self.pubdir, self.dirname)
if not os.path.isdir(workdir):
try:
os.makedirs(workdir)
except OSError as e:
logger.error('Error creating directory "{0}": {0}'.format(workdir, e))
return
fmtname = '{0}-{1}-{2}.{3}' if len(report_parts) > 1 else '{0}-{2}.{3}'
for i, text_part in enumerate(filter(lambda x: x.fmt in self.formats, report_parts)):
filename = fmtname.format(self.filename, i, socket.gethostname(), text_part.ext)
repfile = os.path.join(workdir, filename)
logger.info('Dumping the report part %d into %r', i, repfile)
fh = open(repfile, 'w')
fh.write(text_part.text)
fh.close()
print('Report part saved in: %r' % repfile)
if self.notify:
logger.info('Creating an email message')
email_address = self.config.get('main', 'email_address')
smtp_server = self.config.get('main', 'smtp_server')
publoc = os.path.join(self.pubroot, self.dirname)
eml = MIMEText('New lograptor report is available at:\r\n{0}'.format(publoc))
eml['Subject'] = '{0} system events: {1} (report notification)'.format(
socket.gethostname(), time.strftime('%c', time.localtime())
)
eml['Date'] = formatdate()
eml['From'] = email_address
eml['To'] = ', '.join(self.notify)
eml['X-Mailer'] = u'{0}-{1}'.format(package_name, __version__)
mail_message(smtp_server, eml.as_string(), email_address, self.notify)
print('Notification mailed to: {0}'.format(','.join(self.notify)))
if self.rawlogs:
logfilename = '{0}.log'.format(self.filename)
logfile = os.path.join(workdir, '{0}.gz'.format(logfilename))
logger.info('Gzipping logs and writing them to %r', logfilename)
outfh = open(logfile, 'w+b')
do_chunked_gzip(self.rawfh, outfh, logfilename)
outfh.close()
print('Gzipped logs saved in: {0}'.format(logfile))
# Purge old reports
self.prune_old() | python | def send_report(self, report_parts):
"""
Publish the report parts to local files. Each report part is a text
with a title and specific extension. For html and plaintext sending
the report part is unique, for csv send also the stats and unparsed
string are plain text and report items are csv texts.
"""
logger.info('Checking and creating the report directory')
report_parts = sorted(
filter(lambda x: x.fmt in self.formats, report_parts),
key=lambda x: self.formats.index(x.fmt)
)
workdir = os.path.join(self.pubdir, self.dirname)
if not os.path.isdir(workdir):
try:
os.makedirs(workdir)
except OSError as e:
logger.error('Error creating directory "{0}": {0}'.format(workdir, e))
return
fmtname = '{0}-{1}-{2}.{3}' if len(report_parts) > 1 else '{0}-{2}.{3}'
for i, text_part in enumerate(filter(lambda x: x.fmt in self.formats, report_parts)):
filename = fmtname.format(self.filename, i, socket.gethostname(), text_part.ext)
repfile = os.path.join(workdir, filename)
logger.info('Dumping the report part %d into %r', i, repfile)
fh = open(repfile, 'w')
fh.write(text_part.text)
fh.close()
print('Report part saved in: %r' % repfile)
if self.notify:
logger.info('Creating an email message')
email_address = self.config.get('main', 'email_address')
smtp_server = self.config.get('main', 'smtp_server')
publoc = os.path.join(self.pubroot, self.dirname)
eml = MIMEText('New lograptor report is available at:\r\n{0}'.format(publoc))
eml['Subject'] = '{0} system events: {1} (report notification)'.format(
socket.gethostname(), time.strftime('%c', time.localtime())
)
eml['Date'] = formatdate()
eml['From'] = email_address
eml['To'] = ', '.join(self.notify)
eml['X-Mailer'] = u'{0}-{1}'.format(package_name, __version__)
mail_message(smtp_server, eml.as_string(), email_address, self.notify)
print('Notification mailed to: {0}'.format(','.join(self.notify)))
if self.rawlogs:
logfilename = '{0}.log'.format(self.filename)
logfile = os.path.join(workdir, '{0}.gz'.format(logfilename))
logger.info('Gzipping logs and writing them to %r', logfilename)
outfh = open(logfile, 'w+b')
do_chunked_gzip(self.rawfh, outfh, logfilename)
outfh.close()
print('Gzipped logs saved in: {0}'.format(logfile))
# Purge old reports
self.prune_old() | [
"def",
"send_report",
"(",
"self",
",",
"report_parts",
")",
":",
"logger",
".",
"info",
"(",
"'Checking and creating the report directory'",
")",
"report_parts",
"=",
"sorted",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"fmt",
"in",
"self",
".",
"formats",
",",
"report_parts",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"self",
".",
"formats",
".",
"index",
"(",
"x",
".",
"fmt",
")",
")",
"workdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"pubdir",
",",
"self",
".",
"dirname",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"workdir",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"workdir",
")",
"except",
"OSError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Error creating directory \"{0}\": {0}'",
".",
"format",
"(",
"workdir",
",",
"e",
")",
")",
"return",
"fmtname",
"=",
"'{0}-{1}-{2}.{3}'",
"if",
"len",
"(",
"report_parts",
")",
">",
"1",
"else",
"'{0}-{2}.{3}'",
"for",
"i",
",",
"text_part",
"in",
"enumerate",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"fmt",
"in",
"self",
".",
"formats",
",",
"report_parts",
")",
")",
":",
"filename",
"=",
"fmtname",
".",
"format",
"(",
"self",
".",
"filename",
",",
"i",
",",
"socket",
".",
"gethostname",
"(",
")",
",",
"text_part",
".",
"ext",
")",
"repfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"workdir",
",",
"filename",
")",
"logger",
".",
"info",
"(",
"'Dumping the report part %d into %r'",
",",
"i",
",",
"repfile",
")",
"fh",
"=",
"open",
"(",
"repfile",
",",
"'w'",
")",
"fh",
".",
"write",
"(",
"text_part",
".",
"text",
")",
"fh",
".",
"close",
"(",
")",
"print",
"(",
"'Report part saved in: %r'",
"%",
"repfile",
")",
"if",
"self",
".",
"notify",
":",
"logger",
".",
"info",
"(",
"'Creating an email message'",
")",
"email_address",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'main'",
",",
"'email_address'",
")",
"smtp_server",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'main'",
",",
"'smtp_server'",
")",
"publoc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"pubroot",
",",
"self",
".",
"dirname",
")",
"eml",
"=",
"MIMEText",
"(",
"'New lograptor report is available at:\\r\\n{0}'",
".",
"format",
"(",
"publoc",
")",
")",
"eml",
"[",
"'Subject'",
"]",
"=",
"'{0} system events: {1} (report notification)'",
".",
"format",
"(",
"socket",
".",
"gethostname",
"(",
")",
",",
"time",
".",
"strftime",
"(",
"'%c'",
",",
"time",
".",
"localtime",
"(",
")",
")",
")",
"eml",
"[",
"'Date'",
"]",
"=",
"formatdate",
"(",
")",
"eml",
"[",
"'From'",
"]",
"=",
"email_address",
"eml",
"[",
"'To'",
"]",
"=",
"', '",
".",
"join",
"(",
"self",
".",
"notify",
")",
"eml",
"[",
"'X-Mailer'",
"]",
"=",
"u'{0}-{1}'",
".",
"format",
"(",
"package_name",
",",
"__version__",
")",
"mail_message",
"(",
"smtp_server",
",",
"eml",
".",
"as_string",
"(",
")",
",",
"email_address",
",",
"self",
".",
"notify",
")",
"print",
"(",
"'Notification mailed to: {0}'",
".",
"format",
"(",
"','",
".",
"join",
"(",
"self",
".",
"notify",
")",
")",
")",
"if",
"self",
".",
"rawlogs",
":",
"logfilename",
"=",
"'{0}.log'",
".",
"format",
"(",
"self",
".",
"filename",
")",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"workdir",
",",
"'{0}.gz'",
".",
"format",
"(",
"logfilename",
")",
")",
"logger",
".",
"info",
"(",
"'Gzipping logs and writing them to %r'",
",",
"logfilename",
")",
"outfh",
"=",
"open",
"(",
"logfile",
",",
"'w+b'",
")",
"do_chunked_gzip",
"(",
"self",
".",
"rawfh",
",",
"outfh",
",",
"logfilename",
")",
"outfh",
".",
"close",
"(",
")",
"print",
"(",
"'Gzipped logs saved in: {0}'",
".",
"format",
"(",
"logfile",
")",
")",
"# Purge old reports",
"self",
".",
"prune_old",
"(",
")"
]
| Publish the report parts to local files. Each report part is a text
with a title and a specific extension. For html and plain text output the
report is a single part; for csv output the stats and unparsed strings are
plain text and the report items are csv texts. | [
"Publish",
"the",
"report",
"parts",
"to",
"local",
"files",
".",
"Each",
"report",
"part",
"is",
"a",
"text",
"with",
"a",
"title",
"and",
"specific",
"extension",
".",
"For",
"html",
"and",
"plaintext",
"sending",
"the",
"report",
"part",
"is",
"unique",
"for",
"csv",
"send",
"also",
"the",
"stats",
"and",
"unparsed",
"string",
"are",
"plain",
"text",
"and",
"report",
"items",
"are",
"csv",
"texts",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/channels.py#L562-L623 | train |
mardix/Mocha | mocha/utils.py | _InspectMethodsDecorators.parse | def parse(self):
"""
Return the list of string of all the decorators found
"""
self._parse(self.method)
return list(set([deco for deco in self.decos if deco])) | python | def parse(self):
"""
Return the list of string of all the decorators found
"""
self._parse(self.method)
return list(set([deco for deco in self.decos if deco])) | [
"def",
"parse",
"(",
"self",
")",
":",
"self",
".",
"_parse",
"(",
"self",
".",
"method",
")",
"return",
"list",
"(",
"set",
"(",
"[",
"deco",
"for",
"deco",
"in",
"self",
".",
"decos",
"if",
"deco",
"]",
")",
")"
]
| Return the list of strings of all the decorators found | [
"Return",
"the",
"list",
"of",
"string",
"of",
"all",
"the",
"decorators",
"found"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/utils.py#L365-L370 | train |
trendels/rhino | rhino/response.py | filter_304_headers | def filter_304_headers(headers):
"""Filter a list of headers to include in a "304 Not Modified" response."""
return [(k, v) for k, v in headers if k.lower() not in _filter_from_304] | python | def filter_304_headers(headers):
"""Filter a list of headers to include in a "304 Not Modified" response."""
return [(k, v) for k, v in headers if k.lower() not in _filter_from_304] | [
"def",
"filter_304_headers",
"(",
"headers",
")",
":",
"return",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"headers",
"if",
"k",
".",
"lower",
"(",
")",
"not",
"in",
"_filter_from_304",
"]"
]
| Filter a list of headers to include in a "304 Not Modified" response. | [
"Filter",
"a",
"list",
"of",
"headers",
"to",
"include",
"in",
"a",
"304",
"Not",
"Modified",
"response",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L41-L43 | train |
trendels/rhino | rhino/response.py | response | def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag
kw['etag'] = etag
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified)
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires)
else:
kw['expires'] = timedelta_to_httpdate(expires)
headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
return Response(code, headers, body) | python | def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag
kw['etag'] = etag
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified)
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires)
else:
kw['expires'] = timedelta_to_httpdate(expires)
headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
return Response(code, headers, body) | [
"def",
"response",
"(",
"code",
",",
"body",
"=",
"''",
",",
"etag",
"=",
"None",
",",
"last_modified",
"=",
"None",
",",
"expires",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"etag",
"is",
"not",
"None",
":",
"if",
"not",
"(",
"etag",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"etag",
"[",
"-",
"1",
"]",
"==",
"'\"'",
")",
":",
"etag",
"=",
"'\"%s\"'",
"%",
"etag",
"kw",
"[",
"'etag'",
"]",
"=",
"etag",
"if",
"last_modified",
"is",
"not",
"None",
":",
"kw",
"[",
"'last_modified'",
"]",
"=",
"datetime_to_httpdate",
"(",
"last_modified",
")",
"if",
"expires",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"expires",
",",
"datetime",
")",
":",
"kw",
"[",
"'expires'",
"]",
"=",
"datetime_to_httpdate",
"(",
"expires",
")",
"else",
":",
"kw",
"[",
"'expires'",
"]",
"=",
"timedelta_to_httpdate",
"(",
"expires",
")",
"headers",
"=",
"[",
"(",
"k",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
".",
"title",
"(",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"kw",
".",
"items",
"(",
")",
")",
"]",
"return",
"Response",
"(",
"code",
",",
"headers",
",",
"body",
")"
]
| Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`). | [
"Helper",
"to",
"build",
"an",
"HTTP",
"response",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L362-L404 | train |
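A usage sketch of the keyword-to-header mapping described in the docstring. The import path follows the repository layout rhino/response.py, and the header values are invented:

from datetime import timedelta
from rhino.response import response

res = response(
    200,
    body="hello",
    etag="v1",                    # quoted automatically -> ETag: "v1"
    last_modified=1514764800,     # Unix timestamp -> Last-Modified http-date
    expires=timedelta(hours=1),   # offset from now -> Expires http-date
    x_powered_by="rhino",         # keyword argument -> X-Powered-By header
)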
trendels/rhino | rhino/response.py | Response.body | def body(self):
"""Seralizes and returns the response body.
On subsequent access, returns the cached value."""
if self._body is None:
raw_body = self._raw_body
if self._body_writer is None:
self._body = raw_body() if callable(raw_body) else raw_body
else:
self._body = self._body_writer(raw_body)
return self._body | python | def body(self):
"""Seralizes and returns the response body.
On subsequent access, returns the cached value."""
if self._body is None:
raw_body = self._raw_body
if self._body_writer is None:
self._body = raw_body() if callable(raw_body) else raw_body
else:
self._body = self._body_writer(raw_body)
return self._body | [
"def",
"body",
"(",
"self",
")",
":",
"if",
"self",
".",
"_body",
"is",
"None",
":",
"raw_body",
"=",
"self",
".",
"_raw_body",
"if",
"self",
".",
"_body_writer",
"is",
"None",
":",
"self",
".",
"_body",
"=",
"raw_body",
"(",
")",
"if",
"callable",
"(",
"raw_body",
")",
"else",
"raw_body",
"else",
":",
"self",
".",
"_body",
"=",
"self",
".",
"_body_writer",
"(",
"raw_body",
")",
"return",
"self",
".",
"_body"
]
| Serializes and returns the response body.
On subsequent access, returns the cached value. | [
"Serializes",
"and",
"returns",
"the",
"response",
"body",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L189-L200 | train |
trendels/rhino | rhino/response.py | Response.set_cookie | def set_cookie(self, key, value='', max_age=None, path='/', domain=None,
secure=False, httponly=False, expires=None):
"""Set a response cookie.
Parameters:
key
: The cookie name.
value
: The cookie value.
max_age
: The maximum age of the cookie in seconds, or as a
datetime.timedelta object.
path
: Restrict the cookie to this path (default: '/').
domain
: Restrict the cookie to his domain.
secure
: When True, instruct the client to only sent the cookie over HTTPS.
httponly
: When True, instruct the client to disallow javascript access to
the cookie.
expires
: Another way of specifying the maximum age of the cookie. Accepts
the same values as max_age (number of seconds, datetime.timedelta).
Additionaly accepts a datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
"""
key, value = key.encode('utf-8'), value.encode('utf-8')
cookie = SimpleCookie({key: value})
m = cookie[key]
if max_age is not None:
if isinstance(max_age, timedelta):
m['max-age'] = int(total_seconds(max_age))
else:
m['max-age'] = int(max_age)
if path is not None: m['path'] = path.encode('utf-8')
if domain is not None: m['domain'] = domain.encode('utf-8')
if secure: m['secure'] = True
if httponly: m['httponly'] = True
if expires is not None:
# 'expires' expects an offset in seconds, like max-age
if isinstance(expires, datetime):
expires = total_seconds(expires - datetime.utcnow())
elif isinstance(expires, timedelta):
expires = total_seconds(expires)
m['expires'] = int(expires)
self.headers.add_header('Set-Cookie', m.OutputString()) | python | def set_cookie(self, key, value='', max_age=None, path='/', domain=None,
secure=False, httponly=False, expires=None):
"""Set a response cookie.
Parameters:
key
: The cookie name.
value
: The cookie value.
max_age
: The maximum age of the cookie in seconds, or as a
datetime.timedelta object.
path
: Restrict the cookie to this path (default: '/').
domain
: Restrict the cookie to his domain.
secure
: When True, instruct the client to only sent the cookie over HTTPS.
httponly
: When True, instruct the client to disallow javascript access to
the cookie.
expires
: Another way of specifying the maximum age of the cookie. Accepts
the same values as max_age (number of seconds, datetime.timedelta).
Additionaly accepts a datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
"""
key, value = key.encode('utf-8'), value.encode('utf-8')
cookie = SimpleCookie({key: value})
m = cookie[key]
if max_age is not None:
if isinstance(max_age, timedelta):
m['max-age'] = int(total_seconds(max_age))
else:
m['max-age'] = int(max_age)
if path is not None: m['path'] = path.encode('utf-8')
if domain is not None: m['domain'] = domain.encode('utf-8')
if secure: m['secure'] = True
if httponly: m['httponly'] = True
if expires is not None:
# 'expires' expects an offset in seconds, like max-age
if isinstance(expires, datetime):
expires = total_seconds(expires - datetime.utcnow())
elif isinstance(expires, timedelta):
expires = total_seconds(expires)
m['expires'] = int(expires)
self.headers.add_header('Set-Cookie', m.OutputString()) | [
"def",
"set_cookie",
"(",
"self",
",",
"key",
",",
"value",
"=",
"''",
",",
"max_age",
"=",
"None",
",",
"path",
"=",
"'/'",
",",
"domain",
"=",
"None",
",",
"secure",
"=",
"False",
",",
"httponly",
"=",
"False",
",",
"expires",
"=",
"None",
")",
":",
"key",
",",
"value",
"=",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
"cookie",
"=",
"SimpleCookie",
"(",
"{",
"key",
":",
"value",
"}",
")",
"m",
"=",
"cookie",
"[",
"key",
"]",
"if",
"max_age",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"max_age",
",",
"timedelta",
")",
":",
"m",
"[",
"'max-age'",
"]",
"=",
"int",
"(",
"total_seconds",
"(",
"max_age",
")",
")",
"else",
":",
"m",
"[",
"'max-age'",
"]",
"=",
"int",
"(",
"max_age",
")",
"if",
"path",
"is",
"not",
"None",
":",
"m",
"[",
"'path'",
"]",
"=",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"domain",
"is",
"not",
"None",
":",
"m",
"[",
"'domain'",
"]",
"=",
"domain",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"secure",
":",
"m",
"[",
"'secure'",
"]",
"=",
"True",
"if",
"httponly",
":",
"m",
"[",
"'httponly'",
"]",
"=",
"True",
"if",
"expires",
"is",
"not",
"None",
":",
"# 'expires' expects an offset in seconds, like max-age",
"if",
"isinstance",
"(",
"expires",
",",
"datetime",
")",
":",
"expires",
"=",
"total_seconds",
"(",
"expires",
"-",
"datetime",
".",
"utcnow",
"(",
")",
")",
"elif",
"isinstance",
"(",
"expires",
",",
"timedelta",
")",
":",
"expires",
"=",
"total_seconds",
"(",
"expires",
")",
"m",
"[",
"'expires'",
"]",
"=",
"int",
"(",
"expires",
")",
"self",
".",
"headers",
".",
"add_header",
"(",
"'Set-Cookie'",
",",
"m",
".",
"OutputString",
"(",
")",
")"
]
| Set a response cookie.
Parameters:
key
: The cookie name.
value
: The cookie value.
max_age
: The maximum age of the cookie in seconds, or as a
datetime.timedelta object.
path
: Restrict the cookie to this path (default: '/').
domain
: Restrict the cookie to this domain.
secure
: When True, instruct the client to only send the cookie over HTTPS.
httponly
: When True, instruct the client to disallow javascript access to
the cookie.
expires
: Another way of specifying the maximum age of the cookie. Accepts
the same values as max_age (number of seconds, datetime.timedelta).
Additionally accepts a datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp. | [
"Set",
"a",
"response",
"cookie",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L210-L259 | train |
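A short sketch of the cookie options; the Response instance (built with the response() helper above) and all values are illustrative:

from datetime import timedelta

res = response(200, body="logged in")
res.set_cookie(
    "session_id", "abc123",
    max_age=timedelta(days=7),   # serialized as max-age in whole seconds
    path="/",
    secure=True,                 # only sent over HTTPS
    httponly=True,               # hidden from client-side JavaScript
)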
trendels/rhino | rhino/response.py | Response.conditional_to | def conditional_to(self, request):
"""Return a response that is conditional to a given request.
Returns the Response object unchanged, or a new Response object
with a "304 Not Modified" status code.
"""
if not self.code == 200:
return self
request_headers = request.headers
response_headers = self.headers
if_none_match = request_headers.get('If-None-Match')
if_modified_since = request_headers.get('If-Modified-Since')
etag_ok, date_ok = False, False
if if_none_match:
etag = response_headers.get('ETag')
if etag and match_etag(etag, if_none_match, weak=True):
etag_ok = True
if if_modified_since:
last_modified = response_headers.get('Last-Modified')
if last_modified:
try:
modified_ts = httpdate_to_timestamp(last_modified)
last_valid_ts = httpdate_to_timestamp(if_modified_since)
if modified_ts <= last_valid_ts:
date_ok = True
except:
pass # Ignore invalid dates
if if_none_match and not etag_ok:
return self
elif if_modified_since and not date_ok:
return self
elif etag_ok or date_ok:
headers = filter_304_headers(self.headers.items())
if 'Date' not in self.headers:
headers.append(('Date', datetime_to_httpdate(time.time())))
return Response(status=304, headers=headers, body='')
return self | python | def conditional_to(self, request):
"""Return a response that is conditional to a given request.
Returns the Response object unchanged, or a new Response object
with a "304 Not Modified" status code.
"""
if not self.code == 200:
return self
request_headers = request.headers
response_headers = self.headers
if_none_match = request_headers.get('If-None-Match')
if_modified_since = request_headers.get('If-Modified-Since')
etag_ok, date_ok = False, False
if if_none_match:
etag = response_headers.get('ETag')
if etag and match_etag(etag, if_none_match, weak=True):
etag_ok = True
if if_modified_since:
last_modified = response_headers.get('Last-Modified')
if last_modified:
try:
modified_ts = httpdate_to_timestamp(last_modified)
last_valid_ts = httpdate_to_timestamp(if_modified_since)
if modified_ts <= last_valid_ts:
date_ok = True
except:
pass # Ignore invalid dates
if if_none_match and not etag_ok:
return self
elif if_modified_since and not date_ok:
return self
elif etag_ok or date_ok:
headers = filter_304_headers(self.headers.items())
if 'Date' not in self.headers:
headers.append(('Date', datetime_to_httpdate(time.time())))
return Response(status=304, headers=headers, body='')
return self | [
"def",
"conditional_to",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"self",
".",
"code",
"==",
"200",
":",
"return",
"self",
"request_headers",
"=",
"request",
".",
"headers",
"response_headers",
"=",
"self",
".",
"headers",
"if_none_match",
"=",
"request_headers",
".",
"get",
"(",
"'If-None-Match'",
")",
"if_modified_since",
"=",
"request_headers",
".",
"get",
"(",
"'If-Modified-Since'",
")",
"etag_ok",
",",
"date_ok",
"=",
"False",
",",
"False",
"if",
"if_none_match",
":",
"etag",
"=",
"response_headers",
".",
"get",
"(",
"'ETag'",
")",
"if",
"etag",
"and",
"match_etag",
"(",
"etag",
",",
"if_none_match",
",",
"weak",
"=",
"True",
")",
":",
"etag_ok",
"=",
"True",
"if",
"if_modified_since",
":",
"last_modified",
"=",
"response_headers",
".",
"get",
"(",
"'Last-Modified'",
")",
"if",
"last_modified",
":",
"try",
":",
"modified_ts",
"=",
"httpdate_to_timestamp",
"(",
"last_modified",
")",
"last_valid_ts",
"=",
"httpdate_to_timestamp",
"(",
"if_modified_since",
")",
"if",
"modified_ts",
"<=",
"last_valid_ts",
":",
"date_ok",
"=",
"True",
"except",
":",
"pass",
"# Ignore invalid dates",
"if",
"if_none_match",
"and",
"not",
"etag_ok",
":",
"return",
"self",
"elif",
"if_modified_since",
"and",
"not",
"date_ok",
":",
"return",
"self",
"elif",
"etag_ok",
"or",
"date_ok",
":",
"headers",
"=",
"filter_304_headers",
"(",
"self",
".",
"headers",
".",
"items",
"(",
")",
")",
"if",
"'Date'",
"not",
"in",
"self",
".",
"headers",
":",
"headers",
".",
"append",
"(",
"(",
"'Date'",
",",
"datetime_to_httpdate",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
")",
"return",
"Response",
"(",
"status",
"=",
"304",
",",
"headers",
"=",
"headers",
",",
"body",
"=",
"''",
")",
"return",
"self"
]
| Return a response that is conditional to a given request.
Returns the Response object unchanged, or a new Response object
with a "304 Not Modified" status code. | [
"Return",
"a",
"response",
"that",
"is",
"conditional",
"to",
"a",
"given",
"request",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/response.py#L269-L311 | train |
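A hedged sketch of the intended call pattern: build the full 200 response, then let conditional_to() downgrade it to a 304 when the client's validators still match. render_article is a made-up helper, and request is assumed to be Rhino's request object with a headers mapping:

def get_article(request):
    res = response(
        200,
        body=render_article(),     # hypothetical body producer
        etag="article-42-rev-7",
        last_modified=1514764800,
    )
    # Returned unchanged, or replaced by a 304 that keeps the allowed headers,
    # depending on If-None-Match / If-Modified-Since in the request.
    return res.conditional_to(request)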
assamite/creamas | creamas/examples/grid/ukko.py | _get_ukko_report | def _get_ukko_report():
'''Get Ukko's report from the fixed URL.
'''
with urllib.request.urlopen(URL_UKKO_REPORT) as response:
ret = str(response.read())
return ret | python | def _get_ukko_report():
'''Get Ukko's report from the fixed URL.
'''
with urllib.request.urlopen(URL_UKKO_REPORT) as response:
ret = str(response.read())
return ret | [
"def",
"_get_ukko_report",
"(",
")",
":",
"with",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"URL_UKKO_REPORT",
")",
"as",
"response",
":",
"ret",
"=",
"str",
"(",
"response",
".",
"read",
"(",
")",
")",
"return",
"ret"
]
| Get Ukko's report from the fixed URL. | [
"Get",
"Ukko",
"s",
"report",
"from",
"the",
"fixed",
"URL",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/grid/ukko.py#L41-L46 | train |
assamite/creamas | creamas/examples/grid/ukko.py | get_nodes | def get_nodes(n=8, exclude=[], loop=None):
'''Get Ukko nodes with the least amount of load.
May return less than *n* nodes if there are not as many nodes available,
the nodes are reserved or the nodes are on the exclude list.
:param int n: Number of Ukko nodes to return.
:param list exclude: Nodes to exclude from the returned list.
:param loop:
asyncio's event loop to test if each returned node is currently
loggable. The test is done by trying to connect to the node with
(async)ssh.
:rtype list:
:returns: Locations of Ukko nodes with the least amount of load
'''
report = _get_ukko_report()
nodes = _parse_ukko_report(report)
ret = []
while len(ret) < n and len(nodes) > 0:
node = nodes[0]
if node not in exclude:
reachable = True
if loop is not None:
reachable = loop.run_until_complete(_test_node(node))
if reachable:
ret.append(node)
nodes = nodes[1:]
return ret | python | def get_nodes(n=8, exclude=[], loop=None):
'''Get Ukko nodes with the least amount of load.
May return less than *n* nodes if there are not as many nodes available,
the nodes are reserved or the nodes are on the exclude list.
:param int n: Number of Ukko nodes to return.
:param list exclude: Nodes to exclude from the returned list.
:param loop:
asyncio's event loop to test if each returned node is currently
loggable. The test is done by trying to connect to the node with
(async)ssh.
:rtype list:
:returns: Locations of Ukko nodes with the least amount of load
'''
report = _get_ukko_report()
nodes = _parse_ukko_report(report)
ret = []
while len(ret) < n and len(nodes) > 0:
node = nodes[0]
if node not in exclude:
reachable = True
if loop is not None:
reachable = loop.run_until_complete(_test_node(node))
if reachable:
ret.append(node)
nodes = nodes[1:]
return ret | [
"def",
"get_nodes",
"(",
"n",
"=",
"8",
",",
"exclude",
"=",
"[",
"]",
",",
"loop",
"=",
"None",
")",
":",
"report",
"=",
"_get_ukko_report",
"(",
")",
"nodes",
"=",
"_parse_ukko_report",
"(",
"report",
")",
"ret",
"=",
"[",
"]",
"while",
"len",
"(",
"ret",
")",
"<",
"n",
"and",
"len",
"(",
"nodes",
")",
">",
"0",
":",
"node",
"=",
"nodes",
"[",
"0",
"]",
"if",
"node",
"not",
"in",
"exclude",
":",
"reachable",
"=",
"True",
"if",
"loop",
"is",
"not",
"None",
":",
"reachable",
"=",
"loop",
".",
"run_until_complete",
"(",
"_test_node",
"(",
"node",
")",
")",
"if",
"reachable",
":",
"ret",
".",
"append",
"(",
"node",
")",
"nodes",
"=",
"nodes",
"[",
"1",
":",
"]",
"return",
"ret"
]
| Get Ukko nodes with the least amount of load.
May return less than *n* nodes if there are not as many nodes available,
the nodes are reserved or the nodes are on the exclude list.
:param int n: Number of Ukko nodes to return.
:param list exclude: Nodes to exclude from the returned list.
:param loop:
asyncio's event loop to test if each returned node is currently
loggable. The test is done by trying to connect to the node with
(async)ssh.
:rtype list:
:returns: Locations of Ukko nodes with the least amount of load | [
"Get",
"Ukko",
"nodes",
"with",
"the",
"least",
"amount",
"of",
"load",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/grid/ukko.py#L60-L88 | train |
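A usage sketch; the excluded hostname is invented, and passing a loop enables the ssh reachability check described in the docstring:

import asyncio

loop = asyncio.get_event_loop()
nodes = get_nodes(n=4, exclude=['ukko084.hpc.cs.helsinki.fi'], loop=loop)
print(nodes)   # up to four of the least-loaded, reachable nodes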
brunato/lograptor | lograptor/tui.py | get_unix_ioctl_terminal_size | def get_unix_ioctl_terminal_size():
"""Get the terminal size of a UNIX terminal using the ioctl UNIX command."""
def ioctl_gwinsz(fd):
try:
import fcntl
import termios
import struct
return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
return None
cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not cr:
try:
f = open(os.ctermid())
cr = ioctl_gwinsz(f.fileno())
f.close()
except (IOError, OSError):
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except KeyError:
return None
return int(cr[1]), int(cr[0]) | python | def get_unix_ioctl_terminal_size():
"""Get the terminal size of a UNIX terminal using the ioctl UNIX command."""
def ioctl_gwinsz(fd):
try:
import fcntl
import termios
import struct
return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except (IOError, OSError):
return None
cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not cr:
try:
f = open(os.ctermid())
cr = ioctl_gwinsz(f.fileno())
f.close()
except (IOError, OSError):
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except KeyError:
return None
return int(cr[1]), int(cr[0]) | [
"def",
"get_unix_ioctl_terminal_size",
"(",
")",
":",
"def",
"ioctl_gwinsz",
"(",
"fd",
")",
":",
"try",
":",
"import",
"fcntl",
"import",
"termios",
"import",
"struct",
"return",
"struct",
".",
"unpack",
"(",
"'hh'",
",",
"fcntl",
".",
"ioctl",
"(",
"fd",
",",
"termios",
".",
"TIOCGWINSZ",
",",
"'1234'",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"return",
"None",
"cr",
"=",
"ioctl_gwinsz",
"(",
"0",
")",
"or",
"ioctl_gwinsz",
"(",
"1",
")",
"or",
"ioctl_gwinsz",
"(",
"2",
")",
"if",
"not",
"cr",
":",
"try",
":",
"f",
"=",
"open",
"(",
"os",
".",
"ctermid",
"(",
")",
")",
"cr",
"=",
"ioctl_gwinsz",
"(",
"f",
".",
"fileno",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"pass",
"if",
"not",
"cr",
":",
"try",
":",
"cr",
"=",
"(",
"os",
".",
"environ",
"[",
"'LINES'",
"]",
",",
"os",
".",
"environ",
"[",
"'COLUMNS'",
"]",
")",
"except",
"KeyError",
":",
"return",
"None",
"return",
"int",
"(",
"cr",
"[",
"1",
"]",
")",
",",
"int",
"(",
"cr",
"[",
"0",
"]",
")"
]
| Get the terminal size of a UNIX terminal using the ioctl UNIX command. | [
"Get",
"the",
"terminal",
"size",
"of",
"a",
"UNIX",
"terminal",
"using",
"the",
"ioctl",
"UNIX",
"command",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/tui.py#L94-L118 | train |
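The helper returns (columns, rows) or None when every probe fails, so callers usually fall back to a conventional size; a small sketch:

size = get_unix_ioctl_terminal_size()
columns, rows = size if size is not None else (80, 24)   # classic fallback
print('terminal: %d columns x %d rows' % (columns, rows))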
PBR/MQ2 | MQ2/add_marker_to_qtls.py | add_marker_to_qtl | def add_marker_to_qtl(qtl, map_list):
"""Add the closest marker to the given QTL.
:arg qtl: a row of the QTL list.
:arg map_list: the genetic map containing the list of markers.
"""
closest = ''
diff = None
for marker in map_list:
if qtl[1] == marker[1]:
tmp_diff = float(qtl[2]) - float(marker[2])
if diff is None or abs(diff) > abs(tmp_diff):
diff = tmp_diff
closest = marker
if closest != '':
closest = closest[0]
return closest | python | def add_marker_to_qtl(qtl, map_list):
"""Add the closest marker to the given QTL.
:arg qtl: a row of the QTL list.
:arg map_list: the genetic map containing the list of markers.
"""
closest = ''
diff = None
for marker in map_list:
if qtl[1] == marker[1]:
tmp_diff = float(qtl[2]) - float(marker[2])
if diff is None or abs(diff) > abs(tmp_diff):
diff = tmp_diff
closest = marker
if closest != '':
closest = closest[0]
return closest | [
"def",
"add_marker_to_qtl",
"(",
"qtl",
",",
"map_list",
")",
":",
"closest",
"=",
"''",
"diff",
"=",
"None",
"for",
"marker",
"in",
"map_list",
":",
"if",
"qtl",
"[",
"1",
"]",
"==",
"marker",
"[",
"1",
"]",
":",
"tmp_diff",
"=",
"float",
"(",
"qtl",
"[",
"2",
"]",
")",
"-",
"float",
"(",
"marker",
"[",
"2",
"]",
")",
"if",
"diff",
"is",
"None",
"or",
"abs",
"(",
"diff",
")",
">",
"abs",
"(",
"tmp_diff",
")",
":",
"diff",
"=",
"tmp_diff",
"closest",
"=",
"marker",
"if",
"closest",
"!=",
"''",
":",
"closest",
"=",
"closest",
"[",
"0",
"]",
"return",
"closest"
]
| Add the closest marker to the given QTL.
:arg qtl: a row of the QTL list.
:arg map_list: the genetic map containing the list of markers. | [
"Add",
"the",
"closest",
"marker",
"to",
"the",
"given",
"QTL",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_marker_to_qtls.py#L38-L55 | train |
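An illustrative call with in-memory rows shaped like the parsed CSV rows (name, linkage group, position); the data values below are invented for the example:

    from MQ2.add_marker_to_qtls import add_marker_to_qtl

    qtl = ['trait1', 'LG1', '12.3']
    map_list = [
        ['mk_a', 'LG1', '10.0'],
        ['mk_b', 'LG1', '12.0'],
        ['mk_c', 'LG2', '12.4'],
    ]
    # mk_b lies on the same linkage group and is only 0.3 away,
    # so it is returned as the closest marker.
    print(add_marker_to_qtl(qtl, map_list))   # -> 'mk_b'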
PBR/MQ2 | MQ2/add_marker_to_qtls.py | add_marker_to_qtls | def add_marker_to_qtls(qtlfile, mapfile, outputfile='qtls_with_mk.csv'):
"""This function adds to a list of QTLs, the closest marker to the
QTL peak.
:arg qtlfile: a CSV list of all the QTLs found.
The file should be structured as follows::
Trait, Linkage group, position, other columns
The other columns will not matter as long as the first three
columns are as such.
:arg mapfile: a CSV representation of the map used for the QTL
mapping analysis.
The file should be structured as follows::
Marker, Linkage group, position
:kwarg outputfile: the name of the output file in which the list of
QTLs with their closest marker will be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
if not qtl_list or not map_list: # pragma: no cover
return
qtl_list[0].append('Closest marker')
qtls = []
qtls.append(qtl_list[0])
for qtl in qtl_list[1:]:
qtl.append(add_marker_to_qtl(qtl, map_list))
qtls.append(qtl)
LOG.info('- %s QTLs processed in %s' % (len(qtls), qtlfile))
write_matrix(outputfile, qtls) | python | def add_marker_to_qtls(qtlfile, mapfile, outputfile='qtls_with_mk.csv'):
"""This function adds to a list of QTLs, the closest marker to the
QTL peak.
:arg qtlfile: a CSV list of all the QTLs found.
The file should be structured as follows::
Trait, Linkage group, position, other columns
The other columns will not matter as long as the first three
columns are as such.
:arg mapfile: a CSV representation of the map used for the QTL
mapping analysis.
The file should be structured as follows::
Marker, Linkage group, position
:kwarg outputfile: the name of the output file in which the list of
QTLs with their closest marker will be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
if not qtl_list or not map_list: # pragma: no cover
return
qtl_list[0].append('Closest marker')
qtls = []
qtls.append(qtl_list[0])
for qtl in qtl_list[1:]:
qtl.append(add_marker_to_qtl(qtl, map_list))
qtls.append(qtl)
LOG.info('- %s QTLs processed in %s' % (len(qtls), qtlfile))
write_matrix(outputfile, qtls) | [
"def",
"add_marker_to_qtls",
"(",
"qtlfile",
",",
"mapfile",
",",
"outputfile",
"=",
"'qtls_with_mk.csv'",
")",
":",
"qtl_list",
"=",
"read_input_file",
"(",
"qtlfile",
",",
"','",
")",
"map_list",
"=",
"read_input_file",
"(",
"mapfile",
",",
"','",
")",
"if",
"not",
"qtl_list",
"or",
"not",
"map_list",
":",
"# pragma: no cover",
"return",
"qtl_list",
"[",
"0",
"]",
".",
"append",
"(",
"'Closest marker'",
")",
"qtls",
"=",
"[",
"]",
"qtls",
".",
"append",
"(",
"qtl_list",
"[",
"0",
"]",
")",
"for",
"qtl",
"in",
"qtl_list",
"[",
"1",
":",
"]",
":",
"qtl",
".",
"append",
"(",
"add_marker_to_qtl",
"(",
"qtl",
",",
"map_list",
")",
")",
"qtls",
".",
"append",
"(",
"qtl",
")",
"LOG",
".",
"info",
"(",
"'- %s QTLs processed in %s'",
"%",
"(",
"len",
"(",
"qtls",
")",
",",
"qtlfile",
")",
")",
"write_matrix",
"(",
"outputfile",
",",
"qtls",
")"
]
| This function adds to a list of QTLs, the closest marker to the
QTL peak.
:arg qtlfile: a CSV list of all the QTLs found.
The file should be structured as follows::
Trait, Linkage group, position, other columns
The other columns will not matter as long as the first three
columns are as such.
:arg mapfile: a CSV representation of the map used for the QTL
mapping analysis.
The file should be structured as follows::
Marker, Linkage group, position
:kwarg outputfile: the name of the output file in which the list of
QTLs with their closest marker will be written. | [
"This",
"function",
"adds",
"to",
"a",
"list",
"of",
"QTLs",
"the",
"closest",
"marker",
"to",
"the",
"QTL",
"peak",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/add_marker_to_qtls.py#L58-L87 | train |
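A usage sketch with placeholder file names; both inputs must be comma-separated CSV files laid out as described in the docstring above:

    from MQ2.add_marker_to_qtls import add_marker_to_qtls

    # Writes a copy of qtls.csv with an extra 'Closest marker' column.
    add_marker_to_qtls('qtls.csv', 'map.csv', outputfile='qtls_with_mk.csv')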
kata198/QueryableList | QueryableList/Builder.py | QueryBuilder.addFilter | def addFilter(self, filterMethod=FILTER_METHOD_AND, **kwargs):
'''
addFilter - Add a filter to this query.
@param filterMethod <str> - The filter method to use (AND or OR), default: 'AND'
@param additional args - Filter arguments. @see QueryableListBase.filter
@raises ValueError if filterMethod is not one of known methods.
'''
filterMethod = filterMethod.upper()
if filterMethod not in FILTER_METHODS:
raise ValueError('Unknown filter method, %s. Must be one of: %s' %(str(filterMethod), repr(FILTER_METHODS)))
self.filters.append((filterMethod, kwargs)) | python | def addFilter(self, filterMethod=FILTER_METHOD_AND, **kwargs):
'''
addFilter - Add a filter to this query.
@param filterMethod <str> - The filter method to use (AND or OR), default: 'AND'
@param additional args - Filter arguments. @see QueryableListBase.filter
@raises ValueError if filterMethod is not one of known methods.
'''
filterMethod = filterMethod.upper()
if filterMethod not in FILTER_METHODS:
raise ValueError('Unknown filter method, %s. Must be one of: %s' %(str(filterMethod), repr(FILTER_METHODS)))
self.filters.append((filterMethod, kwargs)) | [
"def",
"addFilter",
"(",
"self",
",",
"filterMethod",
"=",
"FILTER_METHOD_AND",
",",
"*",
"*",
"kwargs",
")",
":",
"filterMethod",
"=",
"filterMethod",
".",
"upper",
"(",
")",
"if",
"filterMethod",
"not",
"in",
"FILTER_METHODS",
":",
"raise",
"ValueError",
"(",
"'Unknown filter method, %s. Must be one of: %s'",
"%",
"(",
"str",
"(",
"filterMethod",
")",
",",
"repr",
"(",
"FILTER_METHODS",
")",
")",
")",
"self",
".",
"filters",
".",
"append",
"(",
"(",
"filterMethod",
",",
"kwargs",
")",
")"
]
| addFilter - Add a filter to this query.
@param filterMethod <str> - The filter method to use (AND or OR), default: 'AND'
@param additional args - Filter arguments. @see QueryableListBase.filter
@raises ValueError if filterMethod is not one of known methods. | [
"addFilter",
"-",
"Add",
"a",
"filter",
"to",
"this",
"query",
"."
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Builder.py#L33-L47 | train |
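A short sketch of building up a query; the keyword-filter syntax is whatever QueryableListBase.filter accepts (assumed here to be simple field=value equality filters):

    from QueryableList.Builder import QueryBuilder

    qb = QueryBuilder()
    qb.addFilter(colour='purple')                         # implicit AND filter
    qb.addFilter(filterMethod='or', age=18, name='Tim')   # method name is case-insensitive

    try:
        qb.addFilter(filterMethod='NAND', x=1)
    except ValueError as exc:
        print(exc)   # unknown filter methods are rejected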
kata198/QueryableList | QueryableList/Builder.py | QueryBuilder.execute | def execute(self, lst):
'''
execute - Execute the series of filters, in order, on the provided list.
@param lst <list/ A QueryableList type> - The list to filter. If you already know the types of items within
the list, you can pick a QueryableList implementing class to get faster results. Otherwise, if a list type that does
not extend QueryableListBase is provided, QueryableListMixed will be used (Supports both object-like and dict-like items)
@return - QueryableList of results. If you provided #lst as a QueryableList type already, that same type will be returned.
Otherwise, a QueryableListMixed will be returned.
'''
from . import QueryableListMixed
if not issubclass(lst.__class__, QueryableListBase):
lst = QueryableListMixed(lst)
filters = copy.copy(self.filters)
nextFilter = filters.popleft()
while nextFilter:
(filterMethod, filterArgs) = nextFilter
lst = self._applyFilter(lst, filterMethod, filterArgs)
if len(lst) == 0:
return lst
try:
nextFilter = filters.popleft()
except:
break
return lst | python | def execute(self, lst):
'''
execute - Execute the series of filters, in order, on the provided list.
@param lst <list/ A QueryableList type> - The list to filter. If you already know the types of items within
the list, you can pick a QueryableList implementing class to get faster results. Otherwise, if a list type that does
not extend QueryableListBase is provided, QueryableListMixed will be used (Supports both object-like and dict-like items)
@return - QueryableList of results. If you provided #lst as a QueryableList type already, that same type will be returned.
Otherwise, a QueryableListMixed will be returned.
'''
from . import QueryableListMixed
if not issubclass(lst.__class__, QueryableListBase):
lst = QueryableListMixed(lst)
filters = copy.copy(self.filters)
nextFilter = filters.popleft()
while nextFilter:
(filterMethod, filterArgs) = nextFilter
lst = self._applyFilter(lst, filterMethod, filterArgs)
if len(lst) == 0:
return lst
try:
nextFilter = filters.popleft()
except:
break
return lst | [
"def",
"execute",
"(",
"self",
",",
"lst",
")",
":",
"from",
".",
"import",
"QueryableListMixed",
"if",
"not",
"issubclass",
"(",
"lst",
".",
"__class__",
",",
"QueryableListBase",
")",
":",
"lst",
"=",
"QueryableListMixed",
"(",
"lst",
")",
"filters",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"filters",
")",
"nextFilter",
"=",
"filters",
".",
"popleft",
"(",
")",
"while",
"nextFilter",
":",
"(",
"filterMethod",
",",
"filterArgs",
")",
"=",
"nextFilter",
"lst",
"=",
"self",
".",
"_applyFilter",
"(",
"lst",
",",
"filterMethod",
",",
"filterArgs",
")",
"if",
"len",
"(",
"lst",
")",
"==",
"0",
":",
"return",
"lst",
"try",
":",
"nextFilter",
"=",
"filters",
".",
"popleft",
"(",
")",
"except",
":",
"break",
"return",
"lst"
]
| execute - Execute the series of filters, in order, on the provided list.
@param lst <list/ A QueryableList type> - The list to filter. If you already know the types of items within
the list, you can pick a QueryableList implementing class to get faster results. Otherwise, if a list type that does
not extend QueryableListBase is provided, QueryableListMixed will be used (Supports both object-like and dict-like items)
@return - QueryableList of results. If you provided #lst as a QueryableList type already, that same type will be returned.
Otherwise, a QueryableListMixed will be returned. | [
"execute",
"-",
"Execute",
"the",
"series",
"of",
"filters",
"in",
"order",
"on",
"the",
"provided",
"list",
"."
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Builder.py#L65-L90 | train |
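An end-to-end sketch with plain dicts; because a plain list is passed, execute wraps it in QueryableListMixed before filtering (field=value equality is again assumed for the filter syntax):

    from QueryableList.Builder import QueryBuilder

    people = [
        {'name': 'Alice', 'age': 30},
        {'name': 'Bob', 'age': 25},
    ]

    qb = QueryBuilder()
    qb.addFilter(age=30)
    results = qb.execute(people)   # QueryableListMixed of matching items
    print(len(results))            # -> 1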
kata198/QueryableList | QueryableList/Builder.py | QueryBuilder.copy | def copy(self):
'''
copy - Create a copy of this query.
@return <QueryBuilder> - a copy of this query
'''
ret = QueryBuilder()
ret.filters = copy.copy(self.filters)
return ret | python | def copy(self):
'''
copy - Create a copy of this query.
@return <QueryBuilder> - a copy of this query
'''
ret = QueryBuilder()
ret.filters = copy.copy(self.filters)
return ret | [
"def",
"copy",
"(",
"self",
")",
":",
"ret",
"=",
"QueryBuilder",
"(",
")",
"ret",
".",
"filters",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"filters",
")",
"return",
"ret"
]
| copy - Create a copy of this query.
@return <QueryBuilder> - a copy of this query | [
"copy",
"-",
"Create",
"a",
"copy",
"of",
"this",
"query",
"."
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Builder.py#L92-L100 | train |
kata198/QueryableList | QueryableList/Builder.py | QueryBuilder._applyFilter | def _applyFilter(lst, filterMethod, filterArgs):
'''
_applyFilter - Applies the given filter method on a set of args
private method - used by execute
@return QueryableList - a QueryableList containing the elements of the resulting filter
'''
if filterMethod == FILTER_METHOD_AND:
return lst.filterAnd(**filterArgs)
else: # ALready validated in addFIlter that type is AND or OR
return lst.filterOr(**filterArgs) | python | def _applyFilter(lst, filterMethod, filterArgs):
'''
_applyFilter - Applies the given filter method on a set of args
private method - used by execute
@return QueryableList - a QueryableList containing the elements of the resulting filter
'''
if filterMethod == FILTER_METHOD_AND:
return lst.filterAnd(**filterArgs)
else: # ALready validated in addFIlter that type is AND or OR
return lst.filterOr(**filterArgs) | [
"def",
"_applyFilter",
"(",
"lst",
",",
"filterMethod",
",",
"filterArgs",
")",
":",
"if",
"filterMethod",
"==",
"FILTER_METHOD_AND",
":",
"return",
"lst",
".",
"filterAnd",
"(",
"*",
"*",
"filterArgs",
")",
"else",
":",
"# ALready validated in addFIlter that type is AND or OR",
"return",
"lst",
".",
"filterOr",
"(",
"*",
"*",
"filterArgs",
")"
]
| _applyFilter - Applies the given filter method on a set of args
private method - used by execute
@return QueryableList - a QueryableList containing the elements of the resulting filter | [
"_applyFilter",
"-",
"Applies",
"the",
"given",
"filter",
"method",
"on",
"a",
"set",
"of",
"args"
]
| 279286d46205ce8268af42e03b75820a7483fddb | https://github.com/kata198/QueryableList/blob/279286d46205ce8268af42e03b75820a7483fddb/QueryableList/Builder.py#L103-L114 | train |
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK._raise_corsair_error | def _raise_corsair_error(self, error=None, message=""):
"""
Raise error message based on the last reported error from the SDK
:param error: specify error type
:type error: int
:param message: specify error message
:type message: str
"""
if error is None:
error = self.last_error()
raise error(message) | python | def _raise_corsair_error(self, error=None, message=""):
"""
Raise error message based on the last reported error from the SDK
:param error: specify error type
:type error: int
:param message: specify error message
:type message: str
"""
if error is None:
error = self.last_error()
raise error(message) | [
"def",
"_raise_corsair_error",
"(",
"self",
",",
"error",
"=",
"None",
",",
"message",
"=",
"\"\"",
")",
":",
"if",
"error",
"is",
"None",
":",
"error",
"=",
"self",
".",
"last_error",
"(",
")",
"raise",
"error",
"(",
"message",
")"
]
| Raise error message based on the last reported error from the SDK
:param error: specify error type
:type error: int
:param message: specify error message
:type message: str | [
"Raise",
"error",
"message",
"based",
"on",
"the",
"last",
"reported",
"error",
"from",
"the",
"SDK"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L26-L37 | train |
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK.device_count | def device_count(self):
"""
Find amount of CUE devices
:returns: amount of CUE devices
:rtype: int
"""
device_count = get_device_count(self.corsair_sdk)
if device_count == -1:
self._raise_corsair_error()
return device_count | python | def device_count(self):
"""
Find amount of CUE devices
:returns: amount of CUE devices
:rtype: int
"""
device_count = get_device_count(self.corsair_sdk)
if device_count == -1:
self._raise_corsair_error()
return device_count | [
"def",
"device_count",
"(",
"self",
")",
":",
"device_count",
"=",
"get_device_count",
"(",
"self",
".",
"corsair_sdk",
")",
"if",
"device_count",
"==",
"-",
"1",
":",
"self",
".",
"_raise_corsair_error",
"(",
")",
"return",
"device_count"
]
| Find amount of CUE devices
:returns: amount of CUE devices
:rtype: int | [
"Find",
"amount",
"of",
"CUE",
"devices"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L48-L58 | train |
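A hypothetical call; constructing CorsairSDK presumably requires the CUE SDK library to be installed and findable, and any constructor arguments (such as a dll path) are omitted here. The top-level import is an assumption (otherwise cuepy.cuepy):

    from cuepy import CorsairSDK

    sdk = CorsairSDK()
    print('CUE devices found: %d' % sdk.device_count())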
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK.led_id_from_char | def led_id_from_char(self, char):
"""
Get id of a led by the letter
Only between A-Z
:param char: Character to find led_id from
:type char: str
:returns: id for led
:rtype: int
"""
led_id = get_led_id_for_key_name(self.corsair_sdk, bytes(char))
if led_id == 0:
self._raise_corsair_error()
return led_id | python | def led_id_from_char(self, char):
"""
Get id of a led by the letter
Only between A-Z
:param char: Character to find led_id from
:type char: str
:returns: id for led
:rtype: int
"""
led_id = get_led_id_for_key_name(self.corsair_sdk, bytes(char))
if led_id == 0:
self._raise_corsair_error()
return led_id | [
"def",
"led_id_from_char",
"(",
"self",
",",
"char",
")",
":",
"led_id",
"=",
"get_led_id_for_key_name",
"(",
"self",
".",
"corsair_sdk",
",",
"bytes",
"(",
"char",
")",
")",
"if",
"led_id",
"==",
"0",
":",
"self",
".",
"_raise_corsair_error",
"(",
")",
"return",
"led_id"
]
| Get id of a led by the letter
Only between A-Z
:param char: Character to find led_id from
:type char: str
:returns: id for led
:rtype: int | [
"Get",
"id",
"of",
"a",
"led",
"by",
"the",
"letter",
"Only",
"between",
"A",
"-",
"Z"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L60-L73 | train |
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK.set_led | def set_led(self, led_id, color):
"""
Set color of an led
:param led_id: id of led to set color
:type led_id: int
:param color: list of rgb values of new colors. eg. [255, 255, 255]
:type color: list
:returns: true if successful
:rtype: bool
"""
if not set_leds_color(self.corsair_sdk, LedColor(led_id, *color)):
self._raise_corsair_error()
return True | python | def set_led(self, led_id, color):
"""
Set color of an led
:param led_id: id of led to set color
:type led_id: int
:param color: list of rgb values of new colors. eg. [255, 255, 255]
:type color: list
:returns: true if successful
:rtype: bool
"""
if not set_leds_color(self.corsair_sdk, LedColor(led_id, *color)):
self._raise_corsair_error()
return True | [
"def",
"set_led",
"(",
"self",
",",
"led_id",
",",
"color",
")",
":",
"if",
"not",
"set_leds_color",
"(",
"self",
".",
"corsair_sdk",
",",
"LedColor",
"(",
"led_id",
",",
"*",
"color",
")",
")",
":",
"self",
".",
"_raise_corsair_error",
"(",
")",
"return",
"True"
]
| Set color of an led
:param led_id: id of led to set color
:type led_id: int
:param color: list of rgb values of new colors. eg. [255, 255, 255]
:type color: list
:returns: true if successful
:rtype: bool | [
"Set",
"color",
"of",
"an",
"led"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L75-L88 | train |
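A sketch chaining led_id_from_char and set_led from the two records above; the import location and the argument-free constructor are assumptions, and the key-name lookup only covers the letters A-Z:

    from cuepy import CorsairSDK

    sdk = CorsairSDK()
    led_id = sdk.led_id_from_char('A')   # led id of the 'A' key
    sdk.set_led(led_id, [255, 0, 0])     # colour it red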
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK.request_control | def request_control(self, device_id, access_mode=True):
"""
Request exclusive control of device
:param device_id: id of device
:type device_id: int
:param access_mode: True=exclusive, False=shared
:type access_mode: bool
:returns: true if successful
:rtype: bool
"""
if access_mode:
if not request_control(self.corsair_sdk, device_id):
self._raise_corsair_error()
return True
else:
self.reload() | python | def request_control(self, device_id, access_mode=True):
"""
Request exclusive control of device
:param device_id: id of device
:type device_id: int
:param access_mode: True=exclusive, False=shared
:type access_mode: bool
:returns: true if successful
:rtype: bool
"""
if access_mode:
if not request_control(self.corsair_sdk, device_id):
self._raise_corsair_error()
return True
else:
self.reload() | [
"def",
"request_control",
"(",
"self",
",",
"device_id",
",",
"access_mode",
"=",
"True",
")",
":",
"if",
"access_mode",
":",
"if",
"not",
"request_control",
"(",
"self",
".",
"corsair_sdk",
",",
"device_id",
")",
":",
"self",
".",
"_raise_corsair_error",
"(",
")",
"return",
"True",
"else",
":",
"self",
".",
"reload",
"(",
")"
]
| Request exclusive control of device
:param device_id: id of device
:type device_id: int
:param access_mode: True=exclusive, False=shared
:type access_mode: bool
:returns: true if successful
:rtype: bool | [
"Request",
"exclusive",
"control",
"of",
"device"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L101-L117 | train |
Fire-Proof/cuepy | cuepy/cuepy.py | CorsairSDK.device | def device(self, device_id, *args, **kwargs):
"""
Return a Device object based on id
:param device_id: id of device
:type device_id: int
:param args: extra parameters
:param kwargs: extra parameters
:returns: Device object
:rtype: Device
"""
return Device(device_id, self.corsair_sdk, self._corsair_sdk_path, *args, **kwargs) | python | def device(self, device_id, *args, **kwargs):
"""
Return a Device object based on id
:param device_id: id of device
:type device_id: int
:param args: extra parameters
:param kwargs: extra parameters
:returns: Device object
:rtype: Device
"""
return Device(device_id, self.corsair_sdk, self._corsair_sdk_path, *args, **kwargs) | [
"def",
"device",
"(",
"self",
",",
"device_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Device",
"(",
"device_id",
",",
"self",
".",
"corsair_sdk",
",",
"self",
".",
"_corsair_sdk_path",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| Return a Device object based on id
:param device_id: id of device
:type device_id: int
:param args: extra parameters
:param kwargs: extra parameters
:returns: Device object
:rtype: Device | [
"Return",
"a",
"Device",
"object",
"based",
"on",
"id"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L146-L157 | train |
Fire-Proof/cuepy | cuepy/cuepy.py | Device.device_info | def device_info(self, device_id=None):
"""
Return device information, if device_id is not specified, return for this device
:param device_id: id of device
:type device_id: int
:returns: dict containing information about device
:rtype: dict
"""
if device_id is None:
device_id = self.device_id
return get_device_info(self.corsair_sdk, device_id) | python | def device_info(self, device_id=None):
"""
Return device information, if device_id is not specified, return for this device
:param device_id: id of device
:type device_id: int
:returns: dict containing information about device
:rtype: dict
"""
if device_id is None:
device_id = self.device_id
return get_device_info(self.corsair_sdk, device_id) | [
"def",
"device_info",
"(",
"self",
",",
"device_id",
"=",
"None",
")",
":",
"if",
"device_id",
"is",
"None",
":",
"device_id",
"=",
"self",
".",
"device_id",
"return",
"get_device_info",
"(",
"self",
".",
"corsair_sdk",
",",
"device_id",
")"
]
| Return device information, if device_id is not specified, return for this device
:param device_id: id of device
:type device_id: int
:returns: dict containing information about device
:rtype: dict | [
"Return",
"device",
"information",
"if",
"device_id",
"is",
"not",
"specified",
"return",
"for",
"this",
"device"
]
| 5add7d62a31589bcdc7d2103c9c482bf718556ec | https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L187-L198 | train |
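A sketch combining the last two records: fetch a Device wrapper and print its information dict (same caveats as above about the constructor; zero-based device ids are assumed):

    from cuepy import CorsairSDK

    sdk = CorsairSDK()
    if sdk.device_count() > 0:
        dev = sdk.device(0)         # Device object for the first device
        print(dev.device_info())    # dict describing that device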
Kortemme-Lab/klab | klab/bio/uniprot.py | get_obsolete_acc_to_uniparc | def get_obsolete_acc_to_uniparc(acc):
''' Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map.
:param acc: The UniProt accession number.
:return: The corresponding UniParc ID.
Warning: This is a fragile function as the underlying website generation or URL could change.
'''
contents = http_get('www.uniprot.org/uniparc/?query={0}'.format(acc))
mtchs = re.findall(r'"UPI[A-Z0-9]+?"', contents, re.DOTALL)
uniparc_id = set([m[1:-1] for m in mtchs])
if len(uniparc_id) == 1:
return uniparc_id.pop()
elif len(uniparc_id) > 1:
raise Exception('Multiple UPI identifiers found.')
return None | python | def get_obsolete_acc_to_uniparc(acc):
''' Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map.
:param acc: The UniProt accession number.
:return: The corresponding UniParc ID.
Warning: This is a fragile function as the underlying website generation or URL could change.
'''
contents = http_get('www.uniprot.org/uniparc/?query={0}'.format(acc))
mtchs = re.findall(r'"UPI[A-Z0-9]+?"', contents, re.DOTALL)
uniparc_id = set([m[1:-1] for m in mtchs])
if len(uniparc_id) == 1:
return uniparc_id.pop()
elif len(uniparc_id) > 1:
raise Exception('Multiple UPI identifiers found.')
return None | [
"def",
"get_obsolete_acc_to_uniparc",
"(",
"acc",
")",
":",
"contents",
"=",
"http_get",
"(",
"'www.uniprot.org/uniparc/?query={0}'",
".",
"format",
"(",
"acc",
")",
")",
"mtchs",
"=",
"re",
".",
"findall",
"(",
"r'\"UPI[A-Z0-9]+?\"'",
",",
"contents",
",",
"re",
".",
"DOTALL",
")",
"uniparc_id",
"=",
"set",
"(",
"[",
"m",
"[",
"1",
":",
"-",
"1",
"]",
"for",
"m",
"in",
"mtchs",
"]",
")",
"if",
"len",
"(",
"uniparc_id",
")",
"==",
"1",
":",
"return",
"uniparc_id",
".",
"pop",
"(",
")",
"elif",
"len",
"(",
"uniparc_id",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Multiple UPI identifiers found.'",
")",
"return",
"None"
]
| Tries to determine the UniParc ID for obsolete ACCs which are not returned using uniprot_map.
:param acc: The UniProt accession number.
:return: The corresponding UniParc ID.
Warning: This is a fragile function as the underlying website generation or URL could change. | [
"Tries",
"to",
"determine",
"the",
"UniParc",
"ID",
"for",
"obsolete",
"ACCs",
"which",
"are",
"not",
"returned",
"using",
"uniprot_map",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/uniprot.py#L34-L49 | train |
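A usage sketch; it needs network access to www.uniprot.org, and the accession below is only a placeholder for a real obsolete ACC:

    from klab.bio.uniprot import get_obsolete_acc_to_uniparc

    uniparc_id = get_obsolete_acc_to_uniparc('Q9XXXX1')   # placeholder ACC
    print(uniparc_id)   # a 'UPI...' identifier, or None if nothing was found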
Kortemme-Lab/klab | klab/bio/uniprot.py | get_common_PDB_IDs | def get_common_PDB_IDs(pdb_id, cache_dir = None, exception_on_failure = True):
'''This function takes a PDB ID, maps it to UniProt ACCs, then returns the common set of PDB IDs related to those ACCs.
The purpose is to find any PDB files related to pdb_id, particularly for complexes, such that the other PDB files
contain identical sequences or mutant complexes.'''
m = pdb_to_uniparc([pdb_id], cache_dir = cache_dir)
UniProtACs = []
if pdb_id in m:
for entry in m[pdb_id]:
if entry.UniProtACs:
UniProtACs.extend(entry.UniProtACs)
elif exception_on_failure:
raise Exception('No UniProtAC for one entry.Lookup failed.')
elif exception_on_failure:
raise Exception('Lookup failed.')
if not UniProtACs:
if exception_on_failure:
raise Exception('Lookup failed.')
else:
return None
common_set = set(uniprot_map('ACC', 'PDB_ID', [UniProtACs[0]], cache_dir = cache_dir).get(UniProtACs[0], []))
for acc in UniProtACs[1:]:
common_set = common_set.intersection(set(uniprot_map('ACC', 'PDB_ID', [acc], cache_dir = cache_dir).get(acc, [])))
return sorted(common_set) | python | def get_common_PDB_IDs(pdb_id, cache_dir = None, exception_on_failure = True):
'''This function takes a PDB ID, maps it to UniProt ACCs, then returns the common set of PDB IDs related to those ACCs.
The purpose is to find any PDB files related to pdb_id, particularly for complexes, such that the other PDB files
contain identical sequences or mutant complexes.'''
m = pdb_to_uniparc([pdb_id], cache_dir = cache_dir)
UniProtACs = []
if pdb_id in m:
for entry in m[pdb_id]:
if entry.UniProtACs:
UniProtACs.extend(entry.UniProtACs)
elif exception_on_failure:
raise Exception('No UniProtAC for one entry.Lookup failed.')
elif exception_on_failure:
raise Exception('Lookup failed.')
if not UniProtACs:
if exception_on_failure:
raise Exception('Lookup failed.')
else:
return None
common_set = set(uniprot_map('ACC', 'PDB_ID', [UniProtACs[0]], cache_dir = cache_dir).get(UniProtACs[0], []))
for acc in UniProtACs[1:]:
common_set = common_set.intersection(set(uniprot_map('ACC', 'PDB_ID', [acc], cache_dir = cache_dir).get(acc, [])))
return sorted(common_set) | [
"def",
"get_common_PDB_IDs",
"(",
"pdb_id",
",",
"cache_dir",
"=",
"None",
",",
"exception_on_failure",
"=",
"True",
")",
":",
"m",
"=",
"pdb_to_uniparc",
"(",
"[",
"pdb_id",
"]",
",",
"cache_dir",
"=",
"cache_dir",
")",
"UniProtACs",
"=",
"[",
"]",
"if",
"pdb_id",
"in",
"m",
":",
"for",
"entry",
"in",
"m",
"[",
"pdb_id",
"]",
":",
"if",
"entry",
".",
"UniProtACs",
":",
"UniProtACs",
".",
"extend",
"(",
"entry",
".",
"UniProtACs",
")",
"elif",
"exception_on_failure",
":",
"raise",
"Exception",
"(",
"'No UniProtAC for one entry.Lookup failed.'",
")",
"elif",
"exception_on_failure",
":",
"raise",
"Exception",
"(",
"'Lookup failed.'",
")",
"if",
"not",
"UniProtACs",
":",
"if",
"exception_on_failure",
":",
"raise",
"Exception",
"(",
"'Lookup failed.'",
")",
"else",
":",
"return",
"None",
"common_set",
"=",
"set",
"(",
"uniprot_map",
"(",
"'ACC'",
",",
"'PDB_ID'",
",",
"[",
"UniProtACs",
"[",
"0",
"]",
"]",
",",
"cache_dir",
"=",
"cache_dir",
")",
".",
"get",
"(",
"UniProtACs",
"[",
"0",
"]",
",",
"[",
"]",
")",
")",
"for",
"acc",
"in",
"UniProtACs",
"[",
"1",
":",
"]",
":",
"common_set",
"=",
"common_set",
".",
"intersection",
"(",
"set",
"(",
"uniprot_map",
"(",
"'ACC'",
",",
"'PDB_ID'",
",",
"[",
"acc",
"]",
",",
"cache_dir",
"=",
"cache_dir",
")",
".",
"get",
"(",
"acc",
",",
"[",
"]",
")",
")",
")",
"return",
"sorted",
"(",
"common_set",
")"
]
| This function takes a PDB ID, maps it to UniProt ACCs, then returns the common set of PDB IDs related to those ACCs.
The purpose is to find any PDB files related to pdb_id, particularly for complexes, such that the other PDB files
contain identical sequences or mutant complexes. | [
"This",
"function",
"takes",
"a",
"PDB",
"ID",
"maps",
"it",
"to",
"UniProt",
"ACCs",
"then",
"returns",
"the",
"common",
"set",
"of",
"PDB",
"IDs",
"related",
"to",
"those",
"ACCs",
".",
"The",
"purpose",
"is",
"to",
"find",
"any",
"PDB",
"files",
"related",
"to",
"pdb_id",
"particularly",
"for",
"complexes",
"such",
"that",
"the",
"other",
"PDB",
"files",
"contain",
"identical",
"sequences",
"or",
"mutant",
"complexes",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/uniprot.py#L892-L915 | train |
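A sketch of the lookup described above; it performs several remote UniProt queries, so passing a cache_dir is sensible, and the PDB ID is just an example value:

    from klab.bio.uniprot import get_common_PDB_IDs

    related = get_common_PDB_IDs('1TM1', cache_dir='/tmp/uniprot_cache',
                                 exception_on_failure=False)
    print(related)   # sorted list of related PDB IDs, or None on failure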
Kortemme-Lab/klab | klab/bio/uniprot.py | UniProtACEntry._parse_sequence_tag | def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum") | python | def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum") | [
"def",
"_parse_sequence_tag",
"(",
"self",
")",
":",
"#main_tags = self._dom.getElementsByTagName(\"uniprot\")",
"#assert(len(main_tags) == 1)",
"#entry_tags = main_tags[0].getElementsByTagName(\"entry\")",
"#assert(len(entry_tags) == 1)",
"#entry_tags[0]",
"entry_tag",
"=",
"self",
".",
"entry_tag",
"# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)",
"sequence_tags",
"=",
"[",
"child",
"for",
"child",
"in",
"entry_tag",
".",
"childNodes",
"if",
"child",
".",
"nodeType",
"==",
"child",
".",
"ELEMENT_NODE",
"and",
"child",
".",
"tagName",
"==",
"'sequence'",
"]",
"assert",
"(",
"len",
"(",
"sequence_tags",
")",
"==",
"1",
")",
"sequence_tag",
"=",
"sequence_tags",
"[",
"0",
"]",
"# atomic mass, sequence, CRC64 digest",
"self",
".",
"atomic_mass",
"=",
"float",
"(",
"sequence_tag",
".",
"getAttribute",
"(",
"\"mass\"",
")",
")",
"self",
".",
"sequence",
"=",
"\"\"",
".",
"join",
"(",
"sequence_tag",
".",
"firstChild",
".",
"nodeValue",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
")",
"self",
".",
"sequence_length",
"=",
"int",
"(",
"sequence_tag",
".",
"getAttribute",
"(",
"\"length\"",
")",
")",
"self",
".",
"CRC64Digest",
"=",
"sequence_tag",
".",
"getAttribute",
"(",
"\"checksum\"",
")"
]
| Parses the sequence and atomic mass. | [
"Parses",
"the",
"sequence",
"and",
"atomic",
"mass",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/uniprot.py#L542-L560 | train |
joe513/django-cool-pagination | django_cool_paginator/templatetags/cool_paginate.py | cool_paginate | def cool_paginate(context, **kwargs) -> dict:
"""Main function for pagination process."""
names = (
'size',
'next_name',
'previous_name',
'elastic',
'page_obj',
)
return_dict = {name: value for name, value in zip(names, map(kwargs.get, names))}
if context.get('request'):
return_dict['request'] = context['request']
else:
raise RequestNotExists(
'Unable to find request in your template context,'
'please make sure that you have the request context processor enabled'
)
if not return_dict.get('page_obj'):
if context.get('page_obj'):
return_dict['page_obj'] = context['page_obj']
else:
raise PageNotSpecified(
'You customized paginator standard name, '
"but haven't specified it in {% cool_paginate %} tag."
)
if not return_dict.get('elastic'):
return_dict['elastic'] = getattr(settings, 'COOL_PAGINATOR_ELASTIC', 10)
return return_dict | python | def cool_paginate(context, **kwargs) -> dict:
"""Main function for pagination process."""
names = (
'size',
'next_name',
'previous_name',
'elastic',
'page_obj',
)
return_dict = {name: value for name, value in zip(names, map(kwargs.get, names))}
if context.get('request'):
return_dict['request'] = context['request']
else:
raise RequestNotExists(
'Unable to find request in your template context,'
'please make sure that you have the request context processor enabled'
)
if not return_dict.get('page_obj'):
if context.get('page_obj'):
return_dict['page_obj'] = context['page_obj']
else:
raise PageNotSpecified(
'You customized paginator standard name, '
"but haven't specified it in {% cool_paginate %} tag."
)
if not return_dict.get('elastic'):
return_dict['elastic'] = getattr(settings, 'COOL_PAGINATOR_ELASTIC', 10)
return return_dict | [
"def",
"cool_paginate",
"(",
"context",
",",
"*",
"*",
"kwargs",
")",
"->",
"dict",
":",
"names",
"=",
"(",
"'size'",
",",
"'next_name'",
",",
"'previous_name'",
",",
"'elastic'",
",",
"'page_obj'",
",",
")",
"return_dict",
"=",
"{",
"name",
":",
"value",
"for",
"name",
",",
"value",
"in",
"zip",
"(",
"names",
",",
"map",
"(",
"kwargs",
".",
"get",
",",
"names",
")",
")",
"}",
"if",
"context",
".",
"get",
"(",
"'request'",
")",
":",
"return_dict",
"[",
"'request'",
"]",
"=",
"context",
"[",
"'request'",
"]",
"else",
":",
"raise",
"RequestNotExists",
"(",
"'Unable to find request in your template context,'",
"'please make sure that you have the request context processor enabled'",
")",
"if",
"not",
"return_dict",
".",
"get",
"(",
"'page_obj'",
")",
":",
"if",
"context",
".",
"get",
"(",
"'page_obj'",
")",
":",
"return_dict",
"[",
"'page_obj'",
"]",
"=",
"context",
"[",
"'page_obj'",
"]",
"else",
":",
"raise",
"PageNotSpecified",
"(",
"'You customized paginator standard name, '",
"\"but haven't specified it in {% cool_paginate %} tag.\"",
")",
"if",
"not",
"return_dict",
".",
"get",
"(",
"'elastic'",
")",
":",
"return_dict",
"[",
"'elastic'",
"]",
"=",
"getattr",
"(",
"settings",
",",
"'COOL_PAGINATOR_ELASTIC'",
",",
"10",
")",
"return",
"return_dict"
]
| Main function for pagination process. | [
"Main",
"function",
"for",
"pagination",
"process",
"."
]
| ed75a151a016aef0f5216fdb1e3610597872a3ef | https://github.com/joe513/django-cool-pagination/blob/ed75a151a016aef0f5216fdb1e3610597872a3ef/django_cool_paginator/templatetags/cool_paginate.py#L32-L65 | train |
projectshift/shift-boiler | boiler/timer/restart_timer.py | time_restarts | def time_restarts(data_path):
""" When called will create a file and measure its mtime on restarts """
path = os.path.join(data_path, 'last_restarted')
if not os.path.isfile(path):
with open(path, 'a'):
os.utime(path, None)
last_modified = os.stat(path).st_mtime
with open(path, 'a'):
os.utime(path, None)
now = os.stat(path).st_mtime
dif = round(now - last_modified, 2)
last_restart = datetime.fromtimestamp(now).strftime('%H:%M:%S')
result = 'LAST RESTART WAS {} SECONDS AGO at {}'.format(dif, last_restart)
print(style(fg='green', bg='red', text=result)) | python | def time_restarts(data_path):
""" When called will create a file and measure its mtime on restarts """
path = os.path.join(data_path, 'last_restarted')
if not os.path.isfile(path):
with open(path, 'a'):
os.utime(path, None)
last_modified = os.stat(path).st_mtime
with open(path, 'a'):
os.utime(path, None)
now = os.stat(path).st_mtime
dif = round(now - last_modified, 2)
last_restart = datetime.fromtimestamp(now).strftime('%H:%M:%S')
result = 'LAST RESTART WAS {} SECONDS AGO at {}'.format(dif, last_restart)
print(style(fg='green', bg='red', text=result)) | [
"def",
"time_restarts",
"(",
"data_path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"'last_restarted'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'a'",
")",
":",
"os",
".",
"utime",
"(",
"path",
",",
"None",
")",
"last_modified",
"=",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mtime",
"with",
"open",
"(",
"path",
",",
"'a'",
")",
":",
"os",
".",
"utime",
"(",
"path",
",",
"None",
")",
"now",
"=",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mtime",
"dif",
"=",
"round",
"(",
"now",
"-",
"last_modified",
",",
"2",
")",
"last_restart",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"now",
")",
".",
"strftime",
"(",
"'%H:%M:%S'",
")",
"result",
"=",
"'LAST RESTART WAS {} SECONDS AGO at {}'",
".",
"format",
"(",
"dif",
",",
"last_restart",
")",
"print",
"(",
"style",
"(",
"fg",
"=",
"'green'",
",",
"bg",
"=",
"'red'",
",",
"text",
"=",
"result",
")",
")"
]
| When called will create a file and measure its mtime on restarts | [
"When",
"called",
"will",
"create",
"a",
"file",
"and",
"measure",
"its",
"mtime",
"on",
"restarts"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/timer/restart_timer.py#L6-L22 | train |
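A usage sketch: call it once at application start-up; data_path must be an existing writable directory, since the 'last_restarted' marker file is created inside it:

    import os
    from boiler.timer.restart_timer import time_restarts

    data_path = '/tmp/my_app_data'          # placeholder directory
    if not os.path.isdir(data_path):
        os.makedirs(data_path)
    time_restarts(data_path)                # prints how long ago the previous restart was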
MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/pt.py | from_stmt | def from_stmt(stmt, engine, **kwargs):
"""
Execute a query in form of texture clause, return the result in form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
"""
result_proxy = engine.execute(stmt, **kwargs)
return from_db_cursor(result_proxy.cursor) | python | def from_stmt(stmt, engine, **kwargs):
"""
Execute a query in form of texture clause, return the result in form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
"""
result_proxy = engine.execute(stmt, **kwargs)
return from_db_cursor(result_proxy.cursor) | [
"def",
"from_stmt",
"(",
"stmt",
",",
"engine",
",",
"*",
"*",
"kwargs",
")",
":",
"result_proxy",
"=",
"engine",
".",
"execute",
"(",
"stmt",
",",
"*",
"*",
"kwargs",
")",
"return",
"from_db_cursor",
"(",
"result_proxy",
".",
"cursor",
")"
]
| Execute a query in form of texture clause, return the result in form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable | [
"Execute",
"a",
"query",
"in",
"form",
"of",
"texture",
"clause",
"return",
"the",
"result",
"in",
"form",
"of"
]
| 946754744c8870f083fd7b4339fca15d1d6128b2 | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/pt.py#L34-L49 | train |
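A self-contained sketch with an in-memory SQLite database; it assumes a SQLAlchemy version where Engine.execute is still available (as the function itself relies on it), and the table and rows are made up:

    import sqlalchemy as sa
    from sqlalchemy_mate.pt import from_stmt

    engine = sa.create_engine('sqlite:///:memory:')
    engine.execute('CREATE TABLE users (id INTEGER, name TEXT)')
    engine.execute("INSERT INTO users VALUES (1, 'Alice'), (2, 'Bob')")

    stmt = sa.text('SELECT * FROM users WHERE id >= :min_id')
    print(from_stmt(stmt, engine, min_id=1))   # PrettyTable with both rows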
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.compute_stability_classification | def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for this case.'''
stability_classification, stability_classication_x_cutoff, stability_classication_y_cutoff = None, self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
if record['DDG'] != None:
stability_classification = fraction_correct([record['DDG']], [predicted_data[self.ddg_analysis_type]], x_cutoff = stability_classication_x_cutoff, y_cutoff = stability_classication_y_cutoff)
stability_classification = int(stability_classification)
assert(stability_classification == 0 or stability_classification == 1)
dataframe_record['StabilityClassification'] = stability_classification | python | def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for this case.'''
stability_classification, stability_classication_x_cutoff, stability_classication_y_cutoff = None, self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
if record['DDG'] != None:
stability_classification = fraction_correct([record['DDG']], [predicted_data[self.ddg_analysis_type]], x_cutoff = stability_classication_x_cutoff, y_cutoff = stability_classication_y_cutoff)
stability_classification = int(stability_classification)
assert(stability_classification == 0 or stability_classification == 1)
dataframe_record['StabilityClassification'] = stability_classification | [
"def",
"compute_stability_classification",
"(",
"self",
",",
"predicted_data",
",",
"record",
",",
"dataframe_record",
")",
":",
"stability_classification",
",",
"stability_classication_x_cutoff",
",",
"stability_classication_y_cutoff",
"=",
"None",
",",
"self",
".",
"stability_classication_x_cutoff",
",",
"self",
".",
"stability_classication_y_cutoff",
"if",
"record",
"[",
"'DDG'",
"]",
"!=",
"None",
":",
"stability_classification",
"=",
"fraction_correct",
"(",
"[",
"record",
"[",
"'DDG'",
"]",
"]",
",",
"[",
"predicted_data",
"[",
"self",
".",
"ddg_analysis_type",
"]",
"]",
",",
"x_cutoff",
"=",
"stability_classication_x_cutoff",
",",
"y_cutoff",
"=",
"stability_classication_y_cutoff",
")",
"stability_classification",
"=",
"int",
"(",
"stability_classification",
")",
"assert",
"(",
"stability_classification",
"==",
"0",
"or",
"stability_classification",
"==",
"1",
")",
"dataframe_record",
"[",
"'StabilityClassification'",
"]",
"=",
"stability_classification"
]
| Calculate the stability classification for this case. | [
"Calculate",
"the",
"stability",
"classification",
"for",
"this",
"case",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L595-L602 | train |
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.compute_absolute_error | def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for this case.'''
absolute_error = abs(record['DDG'] - predicted_data[self.ddg_analysis_type])
dataframe_record['AbsoluteError'] = absolute_error | python | def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for this case.'''
absolute_error = abs(record['DDG'] - predicted_data[self.ddg_analysis_type])
dataframe_record['AbsoluteError'] = absolute_error | [
"def",
"compute_absolute_error",
"(",
"self",
",",
"predicted_data",
",",
"record",
",",
"dataframe_record",
")",
":",
"absolute_error",
"=",
"abs",
"(",
"record",
"[",
"'DDG'",
"]",
"-",
"predicted_data",
"[",
"self",
".",
"ddg_analysis_type",
"]",
")",
"dataframe_record",
"[",
"'AbsoluteError'",
"]",
"=",
"absolute_error"
]
| Calculate the absolute error for this case. | [
"Calculate",
"the",
"absolute",
"error",
"for",
"this",
"case",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L605-L608 | train |
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.count_residues | def count_residues(self, record, pdb_record):
'''Count the number of residues in the chains for the case.'''
mutations = self.get_record_mutations(record)
pdb_chains = set([m['Chain'] for m in mutations])
assert(len(pdb_chains) == 1) # we expect monomeric cases
pdb_chain = pdb_chains.pop()
return len(pdb_record.get('Chains', {}).get(pdb_chain, {}).get('Sequence', '')) | python | def count_residues(self, record, pdb_record):
'''Count the number of residues in the chains for the case.'''
mutations = self.get_record_mutations(record)
pdb_chains = set([m['Chain'] for m in mutations])
assert(len(pdb_chains) == 1) # we expect monomeric cases
pdb_chain = pdb_chains.pop()
return len(pdb_record.get('Chains', {}).get(pdb_chain, {}).get('Sequence', '')) | [
"def",
"count_residues",
"(",
"self",
",",
"record",
",",
"pdb_record",
")",
":",
"mutations",
"=",
"self",
".",
"get_record_mutations",
"(",
"record",
")",
"pdb_chains",
"=",
"set",
"(",
"[",
"m",
"[",
"'Chain'",
"]",
"for",
"m",
"in",
"mutations",
"]",
")",
"assert",
"(",
"len",
"(",
"pdb_chains",
")",
"==",
"1",
")",
"# we expect monomeric cases",
"pdb_chain",
"=",
"pdb_chains",
".",
"pop",
"(",
")",
"return",
"len",
"(",
"pdb_record",
".",
"get",
"(",
"'Chains'",
",",
"{",
"}",
")",
".",
"get",
"(",
"pdb_chain",
",",
"{",
"}",
")",
".",
"get",
"(",
"'Sequence'",
",",
"''",
")",
")"
]
| Count the number of residues in the chains for the case. | [
"Count",
"the",
"number",
"of",
"residues",
"in",
"the",
"chains",
"for",
"the",
"case",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L615-L621 | train |
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.full_analysis | def full_analysis(self, analysis_set, output_directory, verbose = True, compile_pdf = True, quick_plots = False):
'''Combines calculate_metrics, write_dataframe_to_csv, and plot'''
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
self.analysis_directory = output_directory
self.calculate_metrics(analysis_set = analysis_set, analysis_directory = output_directory, verbose = verbose)
self.write_dataframe_to_csv( os.path.join(output_directory, 'data.csv') )
# Return latex_report
return self.plot(analysis_set = analysis_set, analysis_directory = output_directory, matplotlib_plots = True, verbose = verbose, compile_pdf = compile_pdf, quick_plots = quick_plots) | python | def full_analysis(self, analysis_set, output_directory, verbose = True, compile_pdf = True, quick_plots = False):
'''Combines calculate_metrics, write_dataframe_to_csv, and plot'''
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
self.analysis_directory = output_directory
self.calculate_metrics(analysis_set = analysis_set, analysis_directory = output_directory, verbose = verbose)
self.write_dataframe_to_csv( os.path.join(output_directory, 'data.csv') )
# Return latex_report
return self.plot(analysis_set = analysis_set, analysis_directory = output_directory, matplotlib_plots = True, verbose = verbose, compile_pdf = compile_pdf, quick_plots = quick_plots) | [
"def",
"full_analysis",
"(",
"self",
",",
"analysis_set",
",",
"output_directory",
",",
"verbose",
"=",
"True",
",",
"compile_pdf",
"=",
"True",
",",
"quick_plots",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_directory",
")",
":",
"os",
".",
"makedirs",
"(",
"output_directory",
")",
"self",
".",
"analysis_directory",
"=",
"output_directory",
"self",
".",
"calculate_metrics",
"(",
"analysis_set",
"=",
"analysis_set",
",",
"analysis_directory",
"=",
"output_directory",
",",
"verbose",
"=",
"verbose",
")",
"self",
".",
"write_dataframe_to_csv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_directory",
",",
"'data.csv'",
")",
")",
"# Return latex_report",
"return",
"self",
".",
"plot",
"(",
"analysis_set",
"=",
"analysis_set",
",",
"analysis_directory",
"=",
"output_directory",
",",
"matplotlib_plots",
"=",
"True",
",",
"verbose",
"=",
"verbose",
",",
"compile_pdf",
"=",
"compile_pdf",
",",
"quick_plots",
"=",
"quick_plots",
")"
]
| Combines calculate_metrics, write_dataframe_to_csv, and plot | [
"Combines",
"calculate_metrics",
"write_dataframe_to_csv",
"and",
"plot"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L792-L801 | train |
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.get_unique_ajps | def get_unique_ajps( benchmark_runs ):
"""
Determines which join parameters are unique
"""
br_ajps = {}
for br in benchmark_runs:
for ajp in br.additional_join_parameters:
if ajp not in br_ajps:
br_ajps[ajp] = set()
br_ajps[ajp].add( br.additional_join_parameters[ajp]['short_name'] )
unique_ajps = []
for ajp in br_ajps:
if len( br_ajps[ajp] ) > 1:
unique_ajps.append( ajp )
return unique_ajps | python | def get_unique_ajps( benchmark_runs ):
"""
Determines which join parameters are unique
"""
br_ajps = {}
for br in benchmark_runs:
for ajp in br.additional_join_parameters:
if ajp not in br_ajps:
br_ajps[ajp] = set()
br_ajps[ajp].add( br.additional_join_parameters[ajp]['short_name'] )
unique_ajps = []
for ajp in br_ajps:
if len( br_ajps[ajp] ) > 1:
unique_ajps.append( ajp )
return unique_ajps | [
"def",
"get_unique_ajps",
"(",
"benchmark_runs",
")",
":",
"br_ajps",
"=",
"{",
"}",
"for",
"br",
"in",
"benchmark_runs",
":",
"for",
"ajp",
"in",
"br",
".",
"additional_join_parameters",
":",
"if",
"ajp",
"not",
"in",
"br_ajps",
":",
"br_ajps",
"[",
"ajp",
"]",
"=",
"set",
"(",
")",
"br_ajps",
"[",
"ajp",
"]",
".",
"add",
"(",
"br",
".",
"additional_join_parameters",
"[",
"ajp",
"]",
"[",
"'short_name'",
"]",
")",
"unique_ajps",
"=",
"[",
"]",
"for",
"ajp",
"in",
"br_ajps",
":",
"if",
"len",
"(",
"br_ajps",
"[",
"ajp",
"]",
")",
">",
"1",
":",
"unique_ajps",
".",
"append",
"(",
"ajp",
")",
"return",
"unique_ajps"
]
| Determines which join parameters are unique | [
"Determines",
"which",
"join",
"parameters",
"are",
"unique"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L822-L836 | train |
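A behavioural sketch with stand-in objects; only the additional_join_parameters attribute matters here, and the call assumes get_unique_ajps can be used as a static/unbound method, as its self-less signature suggests:

    from klab.benchmarking.analysis.ddg_monomeric_stability_analysis import BenchmarkRun

    class FakeRun(object):
        def __init__(self, ajps):
            self.additional_join_parameters = ajps

    runs = [
        FakeRun({'score_fn': {'short_name': 'talaris'}, 'nstruct': {'short_name': '50'}}),
        FakeRun({'score_fn': {'short_name': 'ref2015'}, 'nstruct': {'short_name': '50'}}),
    ]
    # Only 'score_fn' takes more than one distinct short_name across the runs.
    print(BenchmarkRun.get_unique_ajps(runs))   # -> ['score_fn']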
Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | BenchmarkRun.plot_optimum_prediction_fraction_correct_cutoffs_over_range | def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot = False, analysis_file_prefix = None, verbose = True):
'''Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs.
Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.'''
# Filenames
analysis_set_prefix = ''
#if analysis_set:
# analysis_set_prefix = '_{0}'.format(analysis_set)
plot_filename = None
if not suppress_plot:
output_filename_prefix = '{0}{1}optimum_fraction_correct_at_varying_kcal_mol'.format(analysis_file_prefix, analysis_set_prefix)
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
# Create CSV input
lines = ['ExperimentalCutoff,BestPredictionCutoff']
x_cutoff = min_stability_classication_x_cutoff
x_values = []
y_values = []
avg_scale = 0
plot_graph = self.generate_plots and not(suppress_plot)
while x_cutoff < max_stability_classication_x_cutoff + 0.1:
max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, x_cutoff)
if plot_graph:
lines.append(','.join(map(str, (x_cutoff, max_value_cutoff))))
x_values.append(x_cutoff)
y_values.append(max_value_cutoff)
avg_scale += max_value_cutoff / x_cutoff
x_cutoff += 0.1
if plot_graph:
write_file(csv_filename, '\n'.join(lines))
# Determine the average scalar needed to fit the plot
avg_scale = avg_scale / len(x_values)
x_values = numpy.array(x_values)
y_values = numpy.array(y_values)
scalars = y_values / x_values
average_scalar = numpy.mean(scalars)
plot_label_1 = 'Scalar == %0.2f' % average_scalar
plot_label_2 = 'sigma == %0.2f' % numpy.std(scalars)
# Create plot
if plot_graph:
if not(os.path.exists(plot_filename) and not(self.recreate_graphs)):
if verbose:
self.log('Saving scatterplot to %s.' % plot_filename)
self.log('Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.' % plot_filename)
title = 'Optimum cutoff for fraction correct metric at varying experimental cutoffs'
if analysis_set:
title += ' for {0}'.format(analysis_set)
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
max_y = max(plot_data$BestPredictionCutoff)
p <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) +
xlab("Experimental cutoff (kcal/mol)") +
ylab("Optimal prediction cutoff (energy units)") +
ggtitle("%(title)s") +
geom_point() +
geom_line() +
geom_smooth() +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y, fontface="plain", family = "sans", label="%(plot_label_1)s"), parse = T) +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y - 0.5, fontface="plain", family = "sans", label="%(plot_label_2)s"), parse = T)
p
dev.off()'''
RInterface._runRScript(r_script % locals())
return average_scalar, plot_filename | python | def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot = False, analysis_file_prefix = None, verbose = True):
'''Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs.
Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.'''
# Filenames
analysis_set_prefix = ''
#if analysis_set:
# analysis_set_prefix = '_{0}'.format(analysis_set)
plot_filename = None
if not suppress_plot:
output_filename_prefix = '{0}{1}optimum_fraction_correct_at_varying_kcal_mol'.format(analysis_file_prefix, analysis_set_prefix)
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
# Create CSV input
lines = ['ExperimentalCutoff,BestPredictionCutoff']
x_cutoff = min_stability_classication_x_cutoff
x_values = []
y_values = []
avg_scale = 0
plot_graph = self.generate_plots and not(suppress_plot)
while x_cutoff < max_stability_classication_x_cutoff + 0.1:
max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, x_cutoff)
if plot_graph:
lines.append(','.join(map(str, (x_cutoff, max_value_cutoff))))
x_values.append(x_cutoff)
y_values.append(max_value_cutoff)
avg_scale += max_value_cutoff / x_cutoff
x_cutoff += 0.1
if plot_graph:
write_file(csv_filename, '\n'.join(lines))
# Determine the average scalar needed to fit the plot
avg_scale = avg_scale / len(x_values)
x_values = numpy.array(x_values)
y_values = numpy.array(y_values)
scalars = y_values / x_values
average_scalar = numpy.mean(scalars)
plot_label_1 = 'Scalar == %0.2f' % average_scalar
plot_label_2 = 'sigma == %0.2f' % numpy.std(scalars)
# Create plot
if plot_graph:
if not(os.path.exists(plot_filename) and not(self.recreate_graphs)):
if verbose:
self.log('Saving scatterplot to %s.' % plot_filename)
self.log('Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.' % plot_filename)
title = 'Optimum cutoff for fraction correct metric at varying experimental cutoffs'
if analysis_set:
title += ' for {0}'.format(analysis_set)
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
max_y = max(plot_data$BestPredictionCutoff)
p <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) +
xlab("Experimental cutoff (kcal/mol)") +
ylab("Optimal prediction cutoff (energy units)") +
ggtitle("%(title)s") +
geom_point() +
geom_line() +
geom_smooth() +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y, fontface="plain", family = "sans", label="%(plot_label_1)s"), parse = T) +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y - 0.5, fontface="plain", family = "sans", label="%(plot_label_2)s"), parse = T)
p
dev.off()'''
RInterface._runRScript(r_script % locals())
return average_scalar, plot_filename | [
"def",
"plot_optimum_prediction_fraction_correct_cutoffs_over_range",
"(",
"self",
",",
"analysis_set",
",",
"min_stability_classication_x_cutoff",
",",
"max_stability_classication_x_cutoff",
",",
"suppress_plot",
"=",
"False",
",",
"analysis_file_prefix",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"# Filenames",
"analysis_set_prefix",
"=",
"''",
"#if analysis_set:",
"# analysis_set_prefix = '_{0}'.format(analysis_set)",
"plot_filename",
"=",
"None",
"if",
"not",
"suppress_plot",
":",
"output_filename_prefix",
"=",
"'{0}{1}optimum_fraction_correct_at_varying_kcal_mol'",
".",
"format",
"(",
"analysis_file_prefix",
",",
"analysis_set_prefix",
")",
"plot_filename",
"=",
"output_filename_prefix",
"+",
"'.png'",
"csv_filename",
"=",
"output_filename_prefix",
"+",
"'.txt'",
"# Create CSV input",
"lines",
"=",
"[",
"'ExperimentalCutoff,BestPredictionCutoff'",
"]",
"x_cutoff",
"=",
"min_stability_classication_x_cutoff",
"x_values",
"=",
"[",
"]",
"y_values",
"=",
"[",
"]",
"avg_scale",
"=",
"0",
"plot_graph",
"=",
"self",
".",
"generate_plots",
"and",
"not",
"(",
"suppress_plot",
")",
"while",
"x_cutoff",
"<",
"max_stability_classication_x_cutoff",
"+",
"0.1",
":",
"max_value_cutoff",
",",
"max_value",
",",
"fraction_correct_range",
"=",
"self",
".",
"determine_optimum_fraction_correct_cutoffs",
"(",
"analysis_set",
",",
"self",
".",
"dataframe",
",",
"x_cutoff",
")",
"if",
"plot_graph",
":",
"lines",
".",
"append",
"(",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"(",
"x_cutoff",
",",
"max_value_cutoff",
")",
")",
")",
")",
"x_values",
".",
"append",
"(",
"x_cutoff",
")",
"y_values",
".",
"append",
"(",
"max_value_cutoff",
")",
"avg_scale",
"+=",
"max_value_cutoff",
"/",
"x_cutoff",
"x_cutoff",
"+=",
"0.1",
"if",
"plot_graph",
":",
"write_file",
"(",
"csv_filename",
",",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")",
"# Determine the average scalar needed to fit the plot",
"avg_scale",
"=",
"avg_scale",
"/",
"len",
"(",
"x_values",
")",
"x_values",
"=",
"numpy",
".",
"array",
"(",
"x_values",
")",
"y_values",
"=",
"numpy",
".",
"array",
"(",
"y_values",
")",
"scalars",
"=",
"y_values",
"/",
"x_values",
"average_scalar",
"=",
"numpy",
".",
"mean",
"(",
"scalars",
")",
"plot_label_1",
"=",
"'Scalar == %0.2f'",
"%",
"average_scalar",
"plot_label_2",
"=",
"'sigma == %0.2f'",
"%",
"numpy",
".",
"std",
"(",
"scalars",
")",
"# Create plot",
"if",
"plot_graph",
":",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"plot_filename",
")",
"and",
"not",
"(",
"self",
".",
"recreate_graphs",
")",
")",
":",
"if",
"verbose",
":",
"self",
".",
"log",
"(",
"'Saving scatterplot to %s.'",
"%",
"plot_filename",
")",
"self",
".",
"log",
"(",
"'Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.'",
"%",
"plot_filename",
")",
"title",
"=",
"'Optimum cutoff for fraction correct metric at varying experimental cutoffs'",
"if",
"analysis_set",
":",
"title",
"+=",
"' for {0}'",
".",
"format",
"(",
"analysis_set",
")",
"r_script",
"=",
"'''library(ggplot2)\nlibrary(gridExtra)\nlibrary(scales)\nlibrary(qualV)\n\npng('%(plot_filename)s', height=4096, width=4096, bg=\"white\", res=600)\nplot_data <- read.csv('%(csv_filename)s', header=T)\n\nmax_y = max(plot_data$BestPredictionCutoff)\np <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) +\n xlab(\"Experimental cutoff (kcal/mol)\") +\n ylab(\"Optimal prediction cutoff (energy units)\") +\n ggtitle(\"%(title)s\") +\n geom_point() +\n geom_line() +\n geom_smooth() +\n geom_text(hjust=0, size=4, color=\"black\", aes(0.5, max_y, fontface=\"plain\", family = \"sans\", label=\"%(plot_label_1)s\"), parse = T) +\n geom_text(hjust=0, size=4, color=\"black\", aes(0.5, max_y - 0.5, fontface=\"plain\", family = \"sans\", label=\"%(plot_label_2)s\"), parse = T)\np\ndev.off()'''",
"RInterface",
".",
"_runRScript",
"(",
"r_script",
"%",
"locals",
"(",
")",
")",
"return",
"average_scalar",
",",
"plot_filename"
]
| Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs.
Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs. | [
"Plots",
"the",
"optimum",
"cutoff",
"for",
"the",
"predictions",
"to",
"maximize",
"the",
"fraction",
"correct",
"metric",
"over",
"a",
"range",
"of",
"experimental",
"cutoffs",
".",
"Returns",
"the",
"average",
"scalar",
"corresponding",
"to",
"the",
"best",
"value",
"of",
"fraction",
"correct",
"over",
"a",
"range",
"of",
"cutoff",
"values",
"for",
"the",
"experimental",
"cutoffs",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L1759-L1833 | train |
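
For context only (not part of the dataset record above): the row documents a function that, for each experimental cutoff, finds the prediction cutoff maximizing fraction correct and returns the mean ratio of the two. The sketch below illustrates just that averaging step under stated assumptions; `best_prediction_cutoff` is a hypothetical stand-in for the row's call to `determine_optimum_fraction_correct_cutoffs`, and the cutoff range and 0.1 step are assumed to mirror the row's code.

# Illustrative sketch only -- not a definitive reimplementation of the dataset function.
# `best_prediction_cutoff(x)` is a hypothetical callable standing in for
# determine_optimum_fraction_correct_cutoffs(analysis_set, dataframe, x).
import numpy

def average_cutoff_scalar(best_prediction_cutoff, min_x=0.5, max_x=3.0, step=0.1):
    """Return the mean ratio of optimal prediction cutoff to experimental cutoff."""
    x_values, y_values = [], []
    x = min_x
    while x < max_x + step:
        x_values.append(x)
        y_values.append(best_prediction_cutoff(x))  # best cutoff at this experimental cutoff
        x += step
    scalars = numpy.array(y_values) / numpy.array(x_values)
    return float(numpy.mean(scalars))

# Example: if the optimal prediction cutoff is always ~1.2x the experimental cutoff,
# the returned scalar is ~1.2.
print(average_cutoff_scalar(lambda x: 1.2 * x))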