repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 value: "python") | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 value: "train")
---|---|---|---|---|---|---|---|---|---|---|---

mojaie/chorus | chorus/v2000reader.py | atoms | python

def atoms(lines):
"""Parse atom block into atom objects
Returns:
dict: networkx nodes
"""
# Convert sdf style charge to actual charge
conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: -1, 6: -2, 7: -3}
results = {}
for i, line in enumerate(lines):
symbol = line[31:34].rstrip()
try:
atom = Atom(symbol)
except KeyError:
raise ValueError(symbol)
xpos = float(line[0:10])
ypos = float(line[10:20])
zpos = float(line[20:30])
atom.coords = (xpos, ypos, zpos)
atom.mass_diff = int(line[34:37])
old_sdf_charge = int(line[37:40])
atom.charge = conv_charge_table[old_sdf_charge]
if old_sdf_charge == 4:
atom.radical = 1
# atom.stereo_flag = int(line[40:43]) # Not used
# valence = int(line[46:49])
# if valence:
# atom.valence = valence
results[i + 1] = {"atom": atom}
    return results
"""Parse atom block into atom objects
Returns:
dict: networkx nodes
"""
# Convert sdf style charge to actual charge
conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: -1, 6: -2, 7: -3}
results = {}
for i, line in enumerate(lines):
symbol = line[31:34].rstrip()
try:
atom = Atom(symbol)
except KeyError:
raise ValueError(symbol)
xpos = float(line[0:10])
ypos = float(line[10:20])
zpos = float(line[20:30])
atom.coords = (xpos, ypos, zpos)
atom.mass_diff = int(line[34:37])
old_sdf_charge = int(line[37:40])
atom.charge = conv_charge_table[old_sdf_charge]
if old_sdf_charge == 4:
atom.radical = 1
# atom.stereo_flag = int(line[40:43]) # Not used
# valence = int(line[46:49])
# if valence:
# atom.valence = valence
results[i + 1] = {"atom": atom}
return results | [
"def",
"atoms",
"(",
"lines",
")",
":",
"# Convert sdf style charge to actual charge",
"conv_charge_table",
"=",
"{",
"0",
":",
"0",
",",
"1",
":",
"3",
",",
"2",
":",
"2",
",",
"3",
":",
"1",
",",
"4",
":",
"0",
",",
"5",
":",
"-",
"1",
",",
"6",
":",
"-",
"2",
",",
"7",
":",
"-",
"3",
"}",
"results",
"=",
"{",
"}",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"symbol",
"=",
"line",
"[",
"31",
":",
"34",
"]",
".",
"rstrip",
"(",
")",
"try",
":",
"atom",
"=",
"Atom",
"(",
"symbol",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"symbol",
")",
"xpos",
"=",
"float",
"(",
"line",
"[",
"0",
":",
"10",
"]",
")",
"ypos",
"=",
"float",
"(",
"line",
"[",
"10",
":",
"20",
"]",
")",
"zpos",
"=",
"float",
"(",
"line",
"[",
"20",
":",
"30",
"]",
")",
"atom",
".",
"coords",
"=",
"(",
"xpos",
",",
"ypos",
",",
"zpos",
")",
"atom",
".",
"mass_diff",
"=",
"int",
"(",
"line",
"[",
"34",
":",
"37",
"]",
")",
"old_sdf_charge",
"=",
"int",
"(",
"line",
"[",
"37",
":",
"40",
"]",
")",
"atom",
".",
"charge",
"=",
"conv_charge_table",
"[",
"old_sdf_charge",
"]",
"if",
"old_sdf_charge",
"==",
"4",
":",
"atom",
".",
"radical",
"=",
"1",
"# atom.stereo_flag = int(line[40:43]) # Not used",
"# valence = int(line[46:49])",
"# if valence:",
"# atom.valence = valence",
"results",
"[",
"i",
"+",
"1",
"]",
"=",
"{",
"\"atom\"",
":",
"atom",
"}",
"return",
"results"
]
| Parse atom block into atom objects
Returns:
dict: networkx nodes | [
"Parse",
"atom",
"block",
"into",
"atom",
"objects"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L71-L100 | train |
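
A quick check of the fixed-width slicing above; the sample line is synthetic, built to the V2000 column widths the function assumes, so no chorus import is needed:

```python
# Build a V2000-style atom line: three 10-char coordinates, a space,
# a 3-char symbol, a 3-char mass diff, and a 3-char old-style charge code.
line = "{:>10.4f}{:>10.4f}{:>10.4f} {:<3}{:>3}{:>3}".format(
    1.2354, -0.5912, 0.0, "N", 0, 3)

conv_charge_table = {0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: -1, 6: -2, 7: -3}
symbol = line[31:34].rstrip()                      # 'N'
coords = tuple(float(line[c:c + 10]) for c in (0, 10, 20))
charge = conv_charge_table[int(line[37:40])]       # sdf code 3 -> charge +1
print(symbol, coords, charge)                      # N (1.2354, -0.5912, 0.0) 1
```
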
mojaie/chorus | chorus/v2000reader.py | bonds | python

def bonds(lines, atoms):
"""Parse bond block into bond objects
Returns:
dict: networkx adjacency dict
"""
# Convert sdf style stereobond (see chem.model.bond.Bond)
conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2}
results = {a: {} for a in atoms}
for line in lines:
bond = Bond()
first = int(line[0:3])
second = int(line[3:6])
if first > second:
bond.is_lower_first = 0
order = int(line[6:9])
if order < 4:
bond.order = order
bond.type = conv_stereo_table[int(line[9:12])]
results[first][second] = {"bond": bond}
results[second][first] = {"bond": bond}
    return results
"""Parse bond block into bond objects
Returns:
dict: networkx adjacency dict
"""
# Convert sdf style stereobond (see chem.model.bond.Bond)
conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2}
results = {a: {} for a in atoms}
for line in lines:
bond = Bond()
first = int(line[0:3])
second = int(line[3:6])
if first > second:
bond.is_lower_first = 0
order = int(line[6:9])
if order < 4:
bond.order = order
bond.type = conv_stereo_table[int(line[9:12])]
results[first][second] = {"bond": bond}
results[second][first] = {"bond": bond}
return results | [
"def",
"bonds",
"(",
"lines",
",",
"atoms",
")",
":",
"# Convert sdf style stereobond (see chem.model.bond.Bond)",
"conv_stereo_table",
"=",
"{",
"0",
":",
"0",
",",
"1",
":",
"1",
",",
"3",
":",
"3",
",",
"4",
":",
"3",
",",
"6",
":",
"2",
"}",
"results",
"=",
"{",
"a",
":",
"{",
"}",
"for",
"a",
"in",
"atoms",
"}",
"for",
"line",
"in",
"lines",
":",
"bond",
"=",
"Bond",
"(",
")",
"first",
"=",
"int",
"(",
"line",
"[",
"0",
":",
"3",
"]",
")",
"second",
"=",
"int",
"(",
"line",
"[",
"3",
":",
"6",
"]",
")",
"if",
"first",
">",
"second",
":",
"bond",
".",
"is_lower_first",
"=",
"0",
"order",
"=",
"int",
"(",
"line",
"[",
"6",
":",
"9",
"]",
")",
"if",
"order",
"<",
"4",
":",
"bond",
".",
"order",
"=",
"order",
"bond",
".",
"type",
"=",
"conv_stereo_table",
"[",
"int",
"(",
"line",
"[",
"9",
":",
"12",
"]",
")",
"]",
"results",
"[",
"first",
"]",
"[",
"second",
"]",
"=",
"{",
"\"bond\"",
":",
"bond",
"}",
"results",
"[",
"second",
"]",
"[",
"first",
"]",
"=",
"{",
"\"bond\"",
":",
"bond",
"}",
"return",
"results"
]
| Parse bond block into bond objects
Returns:
dict: networkx adjacency dict | [
"Parse",
"bond",
"block",
"into",
"bond",
"objects"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L103-L124 | train |
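
The same style of check for a bond line, again with a synthetic line built to the widths the slices expect:

```python
# A V2000 bond line: two 3-char atom indices, a 3-char order, a 3-char stereo flag.
line = "{:>3}{:>3}{:>3}{:>3}".format(1, 2, 2, 0)   # atoms 1-2, double bond
conv_stereo_table = {0: 0, 1: 1, 3: 3, 4: 3, 6: 2}
first, second = int(line[0:3]), int(line[3:6])
order = int(line[6:9])                             # 2 -> double bond
stereo = conv_stereo_table[int(line[9:12])]        # 0 -> no stereo
print(first, second, order, stereo)                # 1 2 2 0
```
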
mojaie/chorus | chorus/v2000reader.py | properties | python

def properties(lines):
"""Parse properties block
Returns:
dict: {property_type: (atom_index, value)}
"""
results = {}
    for line in lines:
type_ = line[3:6]
if type_ not in ["CHG", "RAD", "ISO"]:
continue # Other properties are not supported yet
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[10 + j * 8: 13 + j * 8])
val = int(line[14 + j * 8: 17 + j * 8])
results[type_].append((idx, val))
    return results
"""Parse properties block
Returns:
dict: {property_type: (atom_index, value)}
"""
results = {}
for i, line in enumerate(lines):
type_ = line[3:6]
if type_ not in ["CHG", "RAD", "ISO"]:
continue # Other properties are not supported yet
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[10 + j * 8: 13 + j * 8])
val = int(line[14 + j * 8: 17 + j * 8])
results[type_].append((idx, val))
return results | [
"def",
"properties",
"(",
"lines",
")",
":",
"results",
"=",
"{",
"}",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"type_",
"=",
"line",
"[",
"3",
":",
"6",
"]",
"if",
"type_",
"not",
"in",
"[",
"\"CHG\"",
",",
"\"RAD\"",
",",
"\"ISO\"",
"]",
":",
"continue",
"# Other properties are not supported yet",
"count",
"=",
"int",
"(",
"line",
"[",
"6",
":",
"9",
"]",
")",
"results",
"[",
"type_",
"]",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"count",
")",
":",
"idx",
"=",
"int",
"(",
"line",
"[",
"10",
"+",
"j",
"*",
"8",
":",
"13",
"+",
"j",
"*",
"8",
"]",
")",
"val",
"=",
"int",
"(",
"line",
"[",
"14",
"+",
"j",
"*",
"8",
":",
"17",
"+",
"j",
"*",
"8",
"]",
")",
"results",
"[",
"type_",
"]",
".",
"append",
"(",
"(",
"idx",
",",
"val",
")",
")",
"return",
"results"
]
| Parse properties block
Returns:
dict: {property_type: (atom_index, value)} | [
"Parse",
"properties",
"block"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L127-L144 | train |
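
The offset arithmetic (pairs 8 characters apart, starting after the 3-char count) can be exercised with a synthetic "M  CHG" line:

```python
# "M  CHG" + 3-char count + count pairs of " aaa vvv" (atom index / value).
pairs = [(1, 1), (4, -1)]
line = "M  CHG{:>3}".format(len(pairs)) + "".join(
    " {:>3} {:>3}".format(a, v) for a, v in pairs)
count = int(line[6:9])
parsed = [(int(line[10 + j * 8:13 + j * 8]), int(line[14 + j * 8:17 + j * 8]))
          for j in range(count)]
print(line)    # 'M  CHG  2   1   1   4  -1'
print(parsed)  # [(1, 1), (4, -1)]
```
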
mojaie/chorus | chorus/v2000reader.py | add_properties | python

def add_properties(props, mol):
"""apply properties to the molecule object
Returns:
None (alter molecule object directly)
"""
if not props:
return
    # The properties block supersedes all charge and radical values in the atom block
for _, atom in mol.atoms_iter():
atom.charge = 0
atom.multi = 1
atom.mass = None
for prop in props.get("CHG", []):
mol.atom(prop[0]).charge = prop[1]
for prop in props.get("RAD", []):
mol.atom(prop[0]).multi = prop[1]
for prop in props.get("ISO", []):
        mol.atom(prop[0]).mass = prop[1]
"""apply properties to the molecule object
Returns:
None (alter molecule object directly)
"""
if not props:
return
# The properties supersedes all charge and radical values in the atom block
for _, atom in mol.atoms_iter():
atom.charge = 0
atom.multi = 1
atom.mass = None
for prop in props.get("CHG", []):
mol.atom(prop[0]).charge = prop[1]
for prop in props.get("RAD", []):
mol.atom(prop[0]).multi = prop[1]
for prop in props.get("ISO", []):
mol.atom(prop[0]).mass = prop[1] | [
"def",
"add_properties",
"(",
"props",
",",
"mol",
")",
":",
"if",
"not",
"props",
":",
"return",
"# The properties supersedes all charge and radical values in the atom block",
"for",
"_",
",",
"atom",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
":",
"atom",
".",
"charge",
"=",
"0",
"atom",
".",
"multi",
"=",
"1",
"atom",
".",
"mass",
"=",
"None",
"for",
"prop",
"in",
"props",
".",
"get",
"(",
"\"CHG\"",
",",
"[",
"]",
")",
":",
"mol",
".",
"atom",
"(",
"prop",
"[",
"0",
"]",
")",
".",
"charge",
"=",
"prop",
"[",
"1",
"]",
"for",
"prop",
"in",
"props",
".",
"get",
"(",
"\"RAD\"",
",",
"[",
"]",
")",
":",
"mol",
".",
"atom",
"(",
"prop",
"[",
"0",
"]",
")",
".",
"multi",
"=",
"prop",
"[",
"1",
"]",
"for",
"prop",
"in",
"props",
".",
"get",
"(",
"\"ISO\"",
",",
"[",
"]",
")",
":",
"mol",
".",
"atom",
"(",
"prop",
"[",
"0",
"]",
")",
".",
"mass",
"=",
"prop",
"[",
"1",
"]"
]
| apply properties to the molecule object
Returns:
None (alter molecule object directly) | [
"apply",
"properties",
"to",
"the",
"molecule",
"object"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L147-L165 | train |
mojaie/chorus | chorus/v2000reader.py | molecule | python

def molecule(lines):
"""Parse molfile part into molecule object
Args:
lines (list): lines of molfile part
Raises:
ValueError: Symbol not defined in periodictable.yaml
(Polymer expression not supported yet)
"""
count_line = lines[3]
num_atoms = int(count_line[0:3])
num_bonds = int(count_line[3:6])
# chiral_flag = int(count_line[12:15]) # Not used
# num_prop = int(count_line[30:33]) # "No longer supported"
compound = Compound()
compound.graph._node = atoms(lines[4: num_atoms+4])
compound.graph._adj = bonds(lines[num_atoms+4: num_atoms+num_bonds+4],
compound.graph._node.keys())
props = properties(lines[num_atoms+num_bonds+4:])
add_properties(props, compound)
return compound | python | def molecule(lines):
"""Parse molfile part into molecule object
Args:
lines (list): lines of molfile part
Raises:
ValueError: Symbol not defined in periodictable.yaml
(Polymer expression not supported yet)
"""
count_line = lines[3]
num_atoms = int(count_line[0:3])
num_bonds = int(count_line[3:6])
# chiral_flag = int(count_line[12:15]) # Not used
# num_prop = int(count_line[30:33]) # "No longer supported"
compound = Compound()
compound.graph._node = atoms(lines[4: num_atoms+4])
compound.graph._adj = bonds(lines[num_atoms+4: num_atoms+num_bonds+4],
compound.graph._node.keys())
props = properties(lines[num_atoms+num_bonds+4:])
add_properties(props, compound)
return compound | [
"def",
"molecule",
"(",
"lines",
")",
":",
"count_line",
"=",
"lines",
"[",
"3",
"]",
"num_atoms",
"=",
"int",
"(",
"count_line",
"[",
"0",
":",
"3",
"]",
")",
"num_bonds",
"=",
"int",
"(",
"count_line",
"[",
"3",
":",
"6",
"]",
")",
"# chiral_flag = int(count_line[12:15]) # Not used",
"# num_prop = int(count_line[30:33]) # \"No longer supported\"",
"compound",
"=",
"Compound",
"(",
")",
"compound",
".",
"graph",
".",
"_node",
"=",
"atoms",
"(",
"lines",
"[",
"4",
":",
"num_atoms",
"+",
"4",
"]",
")",
"compound",
".",
"graph",
".",
"_adj",
"=",
"bonds",
"(",
"lines",
"[",
"num_atoms",
"+",
"4",
":",
"num_atoms",
"+",
"num_bonds",
"+",
"4",
"]",
",",
"compound",
".",
"graph",
".",
"_node",
".",
"keys",
"(",
")",
")",
"props",
"=",
"properties",
"(",
"lines",
"[",
"num_atoms",
"+",
"num_bonds",
"+",
"4",
":",
"]",
")",
"add_properties",
"(",
"props",
",",
"compound",
")",
"return",
"compound"
]
| Parse molfile part into molecule object
Args:
lines (list): lines of molfile part
Raises:
ValueError: Symbol not defined in periodictable.yaml
(Polymer expression not supported yet) | [
"Parse",
"molfile",
"part",
"into",
"molecule",
"object"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L168-L189 | train |
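
Assuming the chorus package these rows are drawn from is installed, a minimal synthetic CTAB can be pushed through the parser; `assign_descriptors=False` keeps the sketch independent of the descriptor machinery, and exact behavior may vary with the installed version:

```python
from chorus import v2000reader

# A minimal single-atom CTAB: three header lines, a counts line, one atom line.
ctab = "\n".join([
    "oxygen", "", "",
    "  1  0  0  0  0  0  0  0  0  0999 V2000",
    "    0.0000    0.0000    0.0000 O   0  0  0  0  0  0",
    "M  END",
    "",
])
mol = v2000reader.mol_from_text(ctab, assign_descriptors=False)
print(mol.atom(1).charge)  # 0
```
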
mojaie/chorus | chorus/v2000reader.py | mol_supplier | python

def mol_supplier(lines, no_halt, assign_descriptors):
"""Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
True: shows warning messages for invalid format and go on.
False: throws an exception for it and stop parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned.
"""
def sdf_block(lns):
mol = []
opt = []
is_mol = True
for line in lns:
if line.startswith("$$$$"):
yield mol[:], opt[:]
is_mol = True
mol.clear()
opt.clear()
elif line.startswith("M END"):
is_mol = False
elif is_mol:
mol.append(line.rstrip())
else:
opt.append(line.rstrip())
if mol:
yield mol, opt
for i, (mol, opt) in enumerate(sdf_block(lines)):
try:
c = molecule(mol)
if assign_descriptors:
molutil.assign_descriptors(c)
except ValueError as err:
if no_halt:
print("Unsupported symbol: {} (#{} in v2000reader)".format(
err, i + 1))
c = molutil.null_molecule(assign_descriptors)
else:
raise ValueError("Unsupported symbol: {}".format(err))
        except RuntimeError as err:
            if no_halt:
                print(
                    "Failed to minimize ring: {} (#{} in v2000reader)".format(
                        err, i + 1)
                )
                # c was already built by molecule(); yield it without ring minimization
            else:
                raise RuntimeError("Failed to minimize ring: {}".format(err))
        except Exception:  # last-resort catch; a bare except would also trap KeyboardInterrupt
if no_halt:
print("Unexpected error (#{} in v2000reader)".format(i + 1))
c = molutil.null_molecule(assign_descriptors)
c.data = optional_data(opt)
yield c
continue
else:
print(traceback.format_exc())
raise Exception("Unsupported Error")
c.data = optional_data(opt)
        yield c
"""Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
True: shows warning messages for invalid format and go on.
False: throws an exception for it and stop parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned.
"""
def sdf_block(lns):
mol = []
opt = []
is_mol = True
for line in lns:
if line.startswith("$$$$"):
yield mol[:], opt[:]
is_mol = True
mol.clear()
opt.clear()
elif line.startswith("M END"):
is_mol = False
elif is_mol:
mol.append(line.rstrip())
else:
opt.append(line.rstrip())
if mol:
yield mol, opt
for i, (mol, opt) in enumerate(sdf_block(lines)):
try:
c = molecule(mol)
if assign_descriptors:
molutil.assign_descriptors(c)
except ValueError as err:
if no_halt:
print("Unsupported symbol: {} (#{} in v2000reader)".format(
err, i + 1))
c = molutil.null_molecule(assign_descriptors)
else:
raise ValueError("Unsupported symbol: {}".format(err))
except RuntimeError as err:
if no_halt:
print(
"Failed to minimize ring: {} (#{} in v2000reader)".format(
err, i + 1)
)
else:
raise RuntimeError("Failed to minimize ring: {}".format(err))
except:
if no_halt:
print("Unexpected error (#{} in v2000reader)".format(i + 1))
c = molutil.null_molecule(assign_descriptors)
c.data = optional_data(opt)
yield c
continue
else:
print(traceback.format_exc())
raise Exception("Unsupported Error")
c.data = optional_data(opt)
yield c | [
"def",
"mol_supplier",
"(",
"lines",
",",
"no_halt",
",",
"assign_descriptors",
")",
":",
"def",
"sdf_block",
"(",
"lns",
")",
":",
"mol",
"=",
"[",
"]",
"opt",
"=",
"[",
"]",
"is_mol",
"=",
"True",
"for",
"line",
"in",
"lns",
":",
"if",
"line",
".",
"startswith",
"(",
"\"$$$$\"",
")",
":",
"yield",
"mol",
"[",
":",
"]",
",",
"opt",
"[",
":",
"]",
"is_mol",
"=",
"True",
"mol",
".",
"clear",
"(",
")",
"opt",
".",
"clear",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"\"M END\"",
")",
":",
"is_mol",
"=",
"False",
"elif",
"is_mol",
":",
"mol",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"opt",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"if",
"mol",
":",
"yield",
"mol",
",",
"opt",
"for",
"i",
",",
"(",
"mol",
",",
"opt",
")",
"in",
"enumerate",
"(",
"sdf_block",
"(",
"lines",
")",
")",
":",
"try",
":",
"c",
"=",
"molecule",
"(",
"mol",
")",
"if",
"assign_descriptors",
":",
"molutil",
".",
"assign_descriptors",
"(",
"c",
")",
"except",
"ValueError",
"as",
"err",
":",
"if",
"no_halt",
":",
"print",
"(",
"\"Unsupported symbol: {} (#{} in v2000reader)\"",
".",
"format",
"(",
"err",
",",
"i",
"+",
"1",
")",
")",
"c",
"=",
"molutil",
".",
"null_molecule",
"(",
"assign_descriptors",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported symbol: {}\"",
".",
"format",
"(",
"err",
")",
")",
"except",
"RuntimeError",
"as",
"err",
":",
"if",
"no_halt",
":",
"print",
"(",
"\"Failed to minimize ring: {} (#{} in v2000reader)\"",
".",
"format",
"(",
"err",
",",
"i",
"+",
"1",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Failed to minimize ring: {}\"",
".",
"format",
"(",
"err",
")",
")",
"except",
":",
"if",
"no_halt",
":",
"print",
"(",
"\"Unexpected error (#{} in v2000reader)\"",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
"c",
"=",
"molutil",
".",
"null_molecule",
"(",
"assign_descriptors",
")",
"c",
".",
"data",
"=",
"optional_data",
"(",
"opt",
")",
"yield",
"c",
"continue",
"else",
":",
"print",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"raise",
"Exception",
"(",
"\"Unsupported Error\"",
")",
"c",
".",
"data",
"=",
"optional_data",
"(",
"opt",
")",
"yield",
"c"
]
| Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
True: shows warning messages for invalid format and go on.
False: throws an exception for it and stop parsing.
assign_descriptors (boolean):
if True, default descriptors are automatically assigned. | [
"Yields",
"molecules",
"generated",
"from",
"CTAB",
"text"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L192-L253 | train |
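
The inner `sdf_block` generator carries the record-splitting logic; here is a self-contained copy with a toy trace (the input lines are synthetic, not from a real SDFile):

```python
def sdf_block(lns):
    # Same splitting logic as the inner generator above.
    mol, opt, is_mol = [], [], True
    for line in lns:
        if line.startswith("$$$$"):
            yield mol[:], opt[:]
            is_mol = True
            mol.clear()
            opt.clear()
        elif line.startswith("M  END"):
            is_mol = False
        elif is_mol:
            mol.append(line.rstrip())
        else:
            opt.append(line.rstrip())
    if mol:
        yield mol, opt

records = list(sdf_block(["m1", "M  END", "> <name>", "x", "$$$$", "m2"]))
print(records)  # [(['m1'], ['> <name>', 'x']), (['m2'], [])]
```
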
mojaie/chorus | chorus/v2000reader.py | mols_from_text | python

def mols_from_text(text, no_halt=True, assign_descriptors=True):
"""Returns molecules generated from sdfile text
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found
"""
if isinstance(text, bytes):
t = tx.decode(text)
else:
t = text
# Lazy line splitter. More efficient memory usage than str.split.
exp = re.compile(r"[^\n]*\n|.")
sp = (x.group(0) for x in re.finditer(exp, t))
for c in mol_supplier(sp, no_halt, assign_descriptors):
        yield c
"""Returns molecules generated from sdfile text
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found
"""
if isinstance(text, bytes):
t = tx.decode(text)
else:
t = text
# Lazy line splitter. More efficient memory usage than str.split.
exp = re.compile(r"[^\n]*\n|.")
sp = (x.group(0) for x in re.finditer(exp, t))
for c in mol_supplier(sp, no_halt, assign_descriptors):
yield c | [
"def",
"mols_from_text",
"(",
"text",
",",
"no_halt",
"=",
"True",
",",
"assign_descriptors",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"bytes",
")",
":",
"t",
"=",
"tx",
".",
"decode",
"(",
"text",
")",
"else",
":",
"t",
"=",
"text",
"# Lazy line splitter. More efficient memory usage than str.split.",
"exp",
"=",
"re",
".",
"compile",
"(",
"r\"[^\\n]*\\n|.\"",
")",
"sp",
"=",
"(",
"x",
".",
"group",
"(",
"0",
")",
"for",
"x",
"in",
"re",
".",
"finditer",
"(",
"exp",
",",
"t",
")",
")",
"for",
"c",
"in",
"mol_supplier",
"(",
"sp",
",",
"no_halt",
",",
"assign_descriptors",
")",
":",
"yield",
"c"
]
| Returns molecules generated from sdfile text
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found | [
"Returns",
"molecules",
"generated",
"from",
"sdfile",
"text"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L256-L271 | train |
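
The lazy splitter is worth seeing in isolation; note that a final line without a trailing newline falls back to the single-character alternative, which is harmless for SDF input that ends with "$$$$" plus a newline:

```python
import re

exp = re.compile(r"[^\n]*\n|.")
sample = "line1\nline2\nEOF"
print([m.group(0) for m in re.finditer(exp, sample)])
# ['line1\n', 'line2\n', 'E', 'O', 'F']
```
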
mojaie/chorus | chorus/v2000reader.py | mol_from_text | python

def mol_from_text(text, assign_descriptors=True):
"""Parse CTAB text and return first one as a Compound object.
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found
"""
cg = mols_from_text(text, False, assign_descriptors)
    return next(cg)
"""Parse CTAB text and return first one as a Compound object.
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found
"""
cg = mols_from_text(text, False, assign_descriptors)
return next(cg) | [
"def",
"mol_from_text",
"(",
"text",
",",
"assign_descriptors",
"=",
"True",
")",
":",
"cg",
"=",
"mols_from_text",
"(",
"text",
",",
"False",
",",
"assign_descriptors",
")",
"return",
"next",
"(",
"cg",
")"
]
| Parse CTAB text and return first one as a Compound object.
Throws:
StopIteration: if the text does not have molecule
ValueError: if Unsupported symbol is found | [
"Parse",
"CTAB",
"text",
"and",
"return",
"first",
"one",
"as",
"a",
"Compound",
"object",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L274-L282 | train |
mojaie/chorus | chorus/v2000reader.py | mol_from_file | python

def mol_from_file(path, assign_descriptors=True):
"""Parse CTAB file and return first one as a Compound object."""
cs = mols_from_file(path, False, assign_descriptors)
    return next(cs)
"""Parse CTAB file and return first one as a Compound object."""
cs = mols_from_file(path, False, assign_descriptors)
return next(cs) | [
"def",
"mol_from_file",
"(",
"path",
",",
"assign_descriptors",
"=",
"True",
")",
":",
"cs",
"=",
"mols_from_file",
"(",
"path",
",",
"False",
",",
"assign_descriptors",
")",
"return",
"next",
"(",
"cs",
")"
]
| Parse CTAB file and return first one as a Compound object. | [
"Parse",
"CTAB",
"file",
"and",
"return",
"first",
"one",
"as",
"a",
"Compound",
"object",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L293-L296 | train |
brutus/wdiffhtml | wdiffhtml/settings.py | load_from_resource | python

def load_from_resource(name):
"""
Returns the contents of a file resource.
If the resource exists in the users data directory, it is used instead
of the default resource.
"""
filepath = Path(USER_DIR) / name
if filepath.exists():
with filepath.open() as fh:
return fh.read()
else:
        return resource_string('wdiffhtml', 'data/' + name).decode('utf-8')
"""
Returns the contents of a file resource.
If the resource exists in the users data directory, it is used instead
of the default resource.
"""
filepath = Path(USER_DIR) / name
if filepath.exists():
with filepath.open() as fh:
return fh.read()
else:
return resource_string('wdiffhtml', 'data/' + name).decode('utf-8') | [
"def",
"load_from_resource",
"(",
"name",
")",
":",
"filepath",
"=",
"Path",
"(",
"USER_DIR",
")",
"/",
"name",
"if",
"filepath",
".",
"exists",
"(",
")",
":",
"with",
"filepath",
".",
"open",
"(",
")",
"as",
"fh",
":",
"return",
"fh",
".",
"read",
"(",
")",
"else",
":",
"return",
"resource_string",
"(",
"'wdiffhtml'",
",",
"'data/'",
"+",
"name",
")",
".",
"decode",
"(",
"'utf-8'",
")"
]
| Returns the contents of a file resource.
If the resource exists in the users data directory, it is used instead
of the default resource. | [
"Returns",
"the",
"contents",
"of",
"a",
"file",
"resource",
"."
]
| e97b524a7945f7a626e33ec141343120c524d9fa | https://github.com/brutus/wdiffhtml/blob/e97b524a7945f7a626e33ec141343120c524d9fa/wdiffhtml/settings.py#L40-L53 | train |
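
The same user-override-then-packaged-default lookup, sketched with `importlib.resources` as a modern stand-in for `pkg_resources.resource_string`; the `user_dir` default is an assumption standing in for the module's `USER_DIR`:

```python
from pathlib import Path
from importlib import resources

def load_from_resource_sketch(name, user_dir=Path.home() / ".wdiffhtml"):
    # A user-supplied copy takes priority over the file shipped in the package.
    override = user_dir / name
    if override.exists():
        return override.read_text()
    return (resources.files("wdiffhtml") / "data" / name).read_text()
```
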
micolous/python-slackrealtime | src/slackrealtime/__init__.py | connect | python

def connect(token, protocol=RtmProtocol, factory=WebSocketClientFactory, factory_kwargs=None, api_url=None, debug=False):
"""
Creates a new connection to the Slack Real-Time API.
Returns (connection) which represents this connection to the API server.
"""
if factory_kwargs is None:
factory_kwargs = dict()
metadata = request_session(token, api_url)
wsfactory = factory(metadata.url, **factory_kwargs)
if debug:
warnings.warn('debug=True has been deprecated in autobahn 0.14.0')
wsfactory.protocol = lambda *a,**k: protocol(*a,**k)._seedMetadata(metadata)
connection = connectWS(wsfactory)
    return connection
"""
Creates a new connection to the Slack Real-Time API.
Returns (connection) which represents this connection to the API server.
"""
if factory_kwargs is None:
factory_kwargs = dict()
metadata = request_session(token, api_url)
wsfactory = factory(metadata.url, **factory_kwargs)
if debug:
warnings.warn('debug=True has been deprecated in autobahn 0.14.0')
wsfactory.protocol = lambda *a,**k: protocol(*a,**k)._seedMetadata(metadata)
connection = connectWS(wsfactory)
return connection | [
"def",
"connect",
"(",
"token",
",",
"protocol",
"=",
"RtmProtocol",
",",
"factory",
"=",
"WebSocketClientFactory",
",",
"factory_kwargs",
"=",
"None",
",",
"api_url",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"if",
"factory_kwargs",
"is",
"None",
":",
"factory_kwargs",
"=",
"dict",
"(",
")",
"metadata",
"=",
"request_session",
"(",
"token",
",",
"api_url",
")",
"wsfactory",
"=",
"factory",
"(",
"metadata",
".",
"url",
",",
"*",
"*",
"factory_kwargs",
")",
"if",
"debug",
":",
"warnings",
".",
"warn",
"(",
"'debug=True has been deprecated in autobahn 0.14.0'",
")",
"wsfactory",
".",
"protocol",
"=",
"lambda",
"*",
"a",
",",
"*",
"*",
"k",
":",
"protocol",
"(",
"*",
"a",
",",
"*",
"*",
"k",
")",
".",
"_seedMetadata",
"(",
"metadata",
")",
"connection",
"=",
"connectWS",
"(",
"wsfactory",
")",
"return",
"connection"
]
| Creates a new connection to the Slack Real-Time API.
Returns (connection) which represents this connection to the API server. | [
"Creates",
"a",
"new",
"connection",
"to",
"the",
"Slack",
"Real",
"-",
"Time",
"API",
"."
]
| e9c94416f979a6582110ebba09c147de2bfe20a1 | https://github.com/micolous/python-slackrealtime/blob/e9c94416f979a6582110ebba09c147de2bfe20a1/src/slackrealtime/__init__.py#L26-L43 | train |
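
A minimal usage sketch, assuming the slackrealtime package is installed and a real API token is supplied; the connection runs on the Twisted reactor:

```python
from twisted.internet import reactor
from slackrealtime import connect

conn = connect("xoxb-placeholder-token")  # the token here is a placeholder
reactor.run()  # events arrive via RtmProtocol while the reactor runs
```
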
xlfe/reticul8 | python/reticul8/block_compressor.py | block_compressor.next_block | python

def next_block(self):
"""
This could probably be improved; at the moment it starts by trying to overshoot the
desired compressed block size, then it reduces the input bytes one by one until it
has met the required block size
"""
assert self.pos <= self.input_len
if self.pos == self.input_len:
return None
# Overshoot
i = self.START_OVERSHOOT
while True:
try_size = int(self.bs * i)
size = self.check_request_size(try_size)
c, d = self.compress_next_chunk(size)
if size != try_size:
break
if len(d) < self.bs:
i += self.OVERSHOOT_INCREASE
else:
break
# Reduce by one byte until we hit the target
while True:
if len(d) <= self.bs:
self.c = c
# self.c = self.factory()
crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
self.pos += size
self.compressed_bytes += len(d)
return crc32, size, d
size -= 1
if size == 0:
return None
        c, d = self.compress_next_chunk(size)
"""
This could probably be improved; at the moment it starts by trying to overshoot the
desired compressed block size, then it reduces the input bytes one by one until it
has met the required block size
"""
assert self.pos <= self.input_len
if self.pos == self.input_len:
return None
# Overshoot
i = self.START_OVERSHOOT
while True:
try_size = int(self.bs * i)
size = self.check_request_size(try_size)
c, d = self.compress_next_chunk(size)
if size != try_size:
break
if len(d) < self.bs:
i += self.OVERSHOOT_INCREASE
else:
break
# Reduce by one byte until we hit the target
while True:
if len(d) <= self.bs:
self.c = c
# self.c = self.factory()
crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
self.pos += size
self.compressed_bytes += len(d)
return crc32, size, d
size -= 1
if size == 0:
return None
c, d = self.compress_next_chunk(size) | [
"def",
"next_block",
"(",
"self",
")",
":",
"assert",
"self",
".",
"pos",
"<=",
"self",
".",
"input_len",
"if",
"self",
".",
"pos",
"==",
"self",
".",
"input_len",
":",
"return",
"None",
"# Overshoot",
"i",
"=",
"self",
".",
"START_OVERSHOOT",
"while",
"True",
":",
"try_size",
"=",
"int",
"(",
"self",
".",
"bs",
"*",
"i",
")",
"size",
"=",
"self",
".",
"check_request_size",
"(",
"try_size",
")",
"c",
",",
"d",
"=",
"self",
".",
"compress_next_chunk",
"(",
"size",
")",
"if",
"size",
"!=",
"try_size",
":",
"break",
"if",
"len",
"(",
"d",
")",
"<",
"self",
".",
"bs",
":",
"i",
"+=",
"self",
".",
"OVERSHOOT_INCREASE",
"else",
":",
"break",
"# Reduce by one byte until we hit the target",
"while",
"True",
":",
"if",
"len",
"(",
"d",
")",
"<=",
"self",
".",
"bs",
":",
"self",
".",
"c",
"=",
"c",
"# self.c = self.factory()",
"crc32",
"=",
"zlib",
".",
"crc32",
"(",
"self",
".",
"get_input",
"(",
"size",
")",
",",
"0xffffffff",
")",
"&",
"0xffffffff",
"self",
".",
"pos",
"+=",
"size",
"self",
".",
"compressed_bytes",
"+=",
"len",
"(",
"d",
")",
"return",
"crc32",
",",
"size",
",",
"d",
"size",
"-=",
"1",
"if",
"size",
"==",
"0",
":",
"return",
"None",
"c",
",",
"d",
"=",
"self",
".",
"compress_next_chunk",
"(",
"size",
")"
]
| This could probably be improved; at the moment it starts by trying to overshoot the
desired compressed block size, then it reduces the input bytes one by one until it
has met the required block size | [
"This",
"could",
"probably",
"be",
"improved",
";",
"at",
"the",
"moment",
"it",
"starts",
"by",
"trying",
"to",
"overshoot",
"the",
"desired",
"compressed",
"block",
"size",
"then",
"it",
"reduces",
"the",
"input",
"bytes",
"one",
"by",
"one",
"until",
"it",
"has",
"met",
"the",
"required",
"block",
"size"
]
| 0f9503f7a0731ae09adfe4c9af9b57327a7f9d84 | https://github.com/xlfe/reticul8/blob/0f9503f7a0731ae09adfe4c9af9b57327a7f9d84/python/reticul8/block_compressor.py#L57-L101 | train |
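
A simplified standalone sketch of the same search: it drops the overshoot heuristic and the shared compressor state, compressing each candidate prefix from scratch:

```python
import zlib

def first_block(data, bs):
    # Shrink the input one byte at a time until the compressed chunk fits bs.
    size = len(data)
    while size > 0:
        chunk = zlib.compress(data[:size])
        if len(chunk) <= bs:
            crc32 = zlib.crc32(data[:size], 0xffffffff) & 0xffffffff
            return crc32, size, chunk
        size -= 1
    return None

crc32, size, chunk = first_block(b"hello world " * 50, 64)
print(size, len(chunk))  # all 600 input bytes fit: the input is highly repetitive
```
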
mesbahamin/chronophore | chronophore/chronophore.py | set_up_logging | python

def set_up_logging(log_file, console_log_level):
"""Configure logging settings and return a logger object."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(str(log_file))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(console_log_level)
formatter = logging.Formatter(
"{asctime} {levelname} ({name}): {message}", style='{'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
    return logger
"""Configure logging settings and return a logger object."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(str(log_file))
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(console_log_level)
formatter = logging.Formatter(
"{asctime} {levelname} ({name}): {message}", style='{'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger | [
"def",
"set_up_logging",
"(",
"log_file",
",",
"console_log_level",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"str",
"(",
"log_file",
")",
")",
"fh",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"ch",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"ch",
".",
"setLevel",
"(",
"console_log_level",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"\"{asctime} {levelname} ({name}): {message}\"",
",",
"style",
"=",
"'{'",
")",
"fh",
".",
"setFormatter",
"(",
"formatter",
")",
"ch",
".",
"setFormatter",
"(",
"formatter",
")",
"logger",
".",
"addHandler",
"(",
"fh",
")",
"logger",
".",
"addHandler",
"(",
"ch",
")",
"return",
"logger"
]
| Configure logging settings and return a logger object. | [
"Configure",
"logging",
"settings",
"and",
"return",
"a",
"logger",
"object",
"."
]
| ee140c61b4dfada966f078de8304bac737cec6f7 | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/chronophore.py#L48-L64 | train |
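
A usage sketch, assuming `set_up_logging` above is in scope (for example, imported from the module):

```python
import logging
import pathlib

logger = set_up_logging(pathlib.Path("debug.log"), logging.INFO)
logger.debug("written to debug.log only")  # below the console threshold
logger.info("written to debug.log and the console")
```
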
mesbahamin/chronophore | chronophore/chronophore.py | main | python

def main():
"""Run Chronophore based on the command line arguments."""
args = get_args()
# Make Chronophore's directories and files in $HOME
DATA_DIR = pathlib.Path(appdirs.user_data_dir(__title__))
LOG_FILE = pathlib.Path(appdirs.user_log_dir(__title__), 'debug.log')
os.makedirs(str(DATA_DIR), exist_ok=True)
os.makedirs(str(LOG_FILE.parent), exist_ok=True)
if args.version:
print('{} {}'.format(__title__, __version__))
raise SystemExit
if args.debug:
CONSOLE_LOG_LEVEL = logging.DEBUG
elif args.verbose:
CONSOLE_LOG_LEVEL = logging.INFO
else:
CONSOLE_LOG_LEVEL = logging.WARNING
logger = set_up_logging(LOG_FILE, CONSOLE_LOG_LEVEL)
logger.debug('-'*80)
logger.info('{} {}'.format(__title__, __version__))
logger.debug('Log File: {}'.format(LOG_FILE))
logger.debug('Data Directory: {}'.format(DATA_DIR))
if args.testdb:
DATABASE_FILE = DATA_DIR.joinpath('test.sqlite')
logger.info('Using test database.')
else:
DATABASE_FILE = DATA_DIR.joinpath('chronophore.sqlite')
logger.debug('Database File: {}'.format(DATABASE_FILE))
engine = create_engine('sqlite:///{}'.format(str(DATABASE_FILE)))
Base.metadata.create_all(engine)
Session.configure(bind=engine)
if args.log_sql:
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
if args.testdb:
add_test_users(session=Session())
controller.flag_forgotten_entries(session=Session())
if args.tk:
from chronophore.tkview import TkChronophoreUI
TkChronophoreUI()
else:
try:
from PyQt5.QtWidgets import QApplication
except ImportError:
print(
'Error: PyQt5, which chronophore uses for its'
+ ' graphical interface, is not installed.'
+ "\nInstall it with 'pip install PyQt5'"
+ " or use the old Tk ui with 'chronophore --tk'."
)
raise SystemExit
else:
from chronophore.qtview import QtChronophoreUI
app = QApplication(sys.argv)
chrono_ui = QtChronophoreUI()
chrono_ui.show()
sys.exit(app.exec_())
    logger.debug('{} stopping'.format(__title__))
"""Run Chronophore based on the command line arguments."""
args = get_args()
# Make Chronophore's directories and files in $HOME
DATA_DIR = pathlib.Path(appdirs.user_data_dir(__title__))
LOG_FILE = pathlib.Path(appdirs.user_log_dir(__title__), 'debug.log')
os.makedirs(str(DATA_DIR), exist_ok=True)
os.makedirs(str(LOG_FILE.parent), exist_ok=True)
if args.version:
print('{} {}'.format(__title__, __version__))
raise SystemExit
if args.debug:
CONSOLE_LOG_LEVEL = logging.DEBUG
elif args.verbose:
CONSOLE_LOG_LEVEL = logging.INFO
else:
CONSOLE_LOG_LEVEL = logging.WARNING
logger = set_up_logging(LOG_FILE, CONSOLE_LOG_LEVEL)
logger.debug('-'*80)
logger.info('{} {}'.format(__title__, __version__))
logger.debug('Log File: {}'.format(LOG_FILE))
logger.debug('Data Directory: {}'.format(DATA_DIR))
if args.testdb:
DATABASE_FILE = DATA_DIR.joinpath('test.sqlite')
logger.info('Using test database.')
else:
DATABASE_FILE = DATA_DIR.joinpath('chronophore.sqlite')
logger.debug('Database File: {}'.format(DATABASE_FILE))
engine = create_engine('sqlite:///{}'.format(str(DATABASE_FILE)))
Base.metadata.create_all(engine)
Session.configure(bind=engine)
if args.log_sql:
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
if args.testdb:
add_test_users(session=Session())
controller.flag_forgotten_entries(session=Session())
if args.tk:
from chronophore.tkview import TkChronophoreUI
TkChronophoreUI()
else:
try:
from PyQt5.QtWidgets import QApplication
except ImportError:
print(
'Error: PyQt5, which chronophore uses for its'
+ ' graphical interface, is not installed.'
+ "\nInstall it with 'pip install PyQt5'"
+ " or use the old Tk ui with 'chronophore --tk'."
)
raise SystemExit
else:
from chronophore.qtview import QtChronophoreUI
app = QApplication(sys.argv)
chrono_ui = QtChronophoreUI()
chrono_ui.show()
sys.exit(app.exec_())
logger.debug('{} stopping'.format(__title__)) | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"get_args",
"(",
")",
"# Make Chronophore's directories and files in $HOME",
"DATA_DIR",
"=",
"pathlib",
".",
"Path",
"(",
"appdirs",
".",
"user_data_dir",
"(",
"__title__",
")",
")",
"LOG_FILE",
"=",
"pathlib",
".",
"Path",
"(",
"appdirs",
".",
"user_log_dir",
"(",
"__title__",
")",
",",
"'debug.log'",
")",
"os",
".",
"makedirs",
"(",
"str",
"(",
"DATA_DIR",
")",
",",
"exist_ok",
"=",
"True",
")",
"os",
".",
"makedirs",
"(",
"str",
"(",
"LOG_FILE",
".",
"parent",
")",
",",
"exist_ok",
"=",
"True",
")",
"if",
"args",
".",
"version",
":",
"print",
"(",
"'{} {}'",
".",
"format",
"(",
"__title__",
",",
"__version__",
")",
")",
"raise",
"SystemExit",
"if",
"args",
".",
"debug",
":",
"CONSOLE_LOG_LEVEL",
"=",
"logging",
".",
"DEBUG",
"elif",
"args",
".",
"verbose",
":",
"CONSOLE_LOG_LEVEL",
"=",
"logging",
".",
"INFO",
"else",
":",
"CONSOLE_LOG_LEVEL",
"=",
"logging",
".",
"WARNING",
"logger",
"=",
"set_up_logging",
"(",
"LOG_FILE",
",",
"CONSOLE_LOG_LEVEL",
")",
"logger",
".",
"debug",
"(",
"'-'",
"*",
"80",
")",
"logger",
".",
"info",
"(",
"'{} {}'",
".",
"format",
"(",
"__title__",
",",
"__version__",
")",
")",
"logger",
".",
"debug",
"(",
"'Log File: {}'",
".",
"format",
"(",
"LOG_FILE",
")",
")",
"logger",
".",
"debug",
"(",
"'Data Directory: {}'",
".",
"format",
"(",
"DATA_DIR",
")",
")",
"if",
"args",
".",
"testdb",
":",
"DATABASE_FILE",
"=",
"DATA_DIR",
".",
"joinpath",
"(",
"'test.sqlite'",
")",
"logger",
".",
"info",
"(",
"'Using test database.'",
")",
"else",
":",
"DATABASE_FILE",
"=",
"DATA_DIR",
".",
"joinpath",
"(",
"'chronophore.sqlite'",
")",
"logger",
".",
"debug",
"(",
"'Database File: {}'",
".",
"format",
"(",
"DATABASE_FILE",
")",
")",
"engine",
"=",
"create_engine",
"(",
"'sqlite:///{}'",
".",
"format",
"(",
"str",
"(",
"DATABASE_FILE",
")",
")",
")",
"Base",
".",
"metadata",
".",
"create_all",
"(",
"engine",
")",
"Session",
".",
"configure",
"(",
"bind",
"=",
"engine",
")",
"if",
"args",
".",
"log_sql",
":",
"logging",
".",
"getLogger",
"(",
"'sqlalchemy.engine'",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"if",
"args",
".",
"testdb",
":",
"add_test_users",
"(",
"session",
"=",
"Session",
"(",
")",
")",
"controller",
".",
"flag_forgotten_entries",
"(",
"session",
"=",
"Session",
"(",
")",
")",
"if",
"args",
".",
"tk",
":",
"from",
"chronophore",
".",
"tkview",
"import",
"TkChronophoreUI",
"TkChronophoreUI",
"(",
")",
"else",
":",
"try",
":",
"from",
"PyQt5",
".",
"QtWidgets",
"import",
"QApplication",
"except",
"ImportError",
":",
"print",
"(",
"'Error: PyQt5, which chronophore uses for its'",
"+",
"' graphical interface, is not installed.'",
"+",
"\"\\nInstall it with 'pip install PyQt5'\"",
"+",
"\" or use the old Tk ui with 'chronophore --tk'.\"",
")",
"raise",
"SystemExit",
"else",
":",
"from",
"chronophore",
".",
"qtview",
"import",
"QtChronophoreUI",
"app",
"=",
"QApplication",
"(",
"sys",
".",
"argv",
")",
"chrono_ui",
"=",
"QtChronophoreUI",
"(",
")",
"chrono_ui",
".",
"show",
"(",
")",
"sys",
".",
"exit",
"(",
"app",
".",
"exec_",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"'{} stopping'",
".",
"format",
"(",
"__title__",
")",
")"
]
| Run Chronophore based on the command line arguments. | [
"Run",
"Chronophore",
"based",
"on",
"the",
"command",
"line",
"arguments",
"."
]
| ee140c61b4dfada966f078de8304bac737cec6f7 | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/chronophore.py#L67-L134 | train |
zsimic/runez | src/runez/logsetup.py | _ContextFilter.filter | python

def filter(self, record):
"""Determines if the record should be logged and injects context info into the record. Always returns True"""
fmt = LogManager.spec.context_format
if fmt:
data = self.context.to_dict()
if data:
record.context = fmt % ",".join("%s=%s" % (key, val) for key, val in sorted(data.items()) if key and val)
else:
record.context = ""
    return True
"""Determines if the record should be logged and injects context info into the record. Always returns True"""
fmt = LogManager.spec.context_format
if fmt:
data = self.context.to_dict()
if data:
record.context = fmt % ",".join("%s=%s" % (key, val) for key, val in sorted(data.items()) if key and val)
else:
record.context = ""
return True | [
"def",
"filter",
"(",
"self",
",",
"record",
")",
":",
"fmt",
"=",
"LogManager",
".",
"spec",
".",
"context_format",
"if",
"fmt",
":",
"data",
"=",
"self",
".",
"context",
".",
"to_dict",
"(",
")",
"if",
"data",
":",
"record",
".",
"context",
"=",
"fmt",
"%",
"\",\"",
".",
"join",
"(",
"\"%s=%s\"",
"%",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
")",
"if",
"key",
"and",
"val",
")",
"else",
":",
"record",
".",
"context",
"=",
"\"\"",
"return",
"True"
]
| Determines if the record should be logged and injects context info into the record. Always returns True | [
"Determines",
"if",
"the",
"record",
"should",
"be",
"logged",
"and",
"injects",
"context",
"info",
"into",
"the",
"record",
".",
"Always",
"returns",
"True"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L142-L151 | train |
zsimic/runez | src/runez/logsetup.py | LogManager.enable_faulthandler | python

def enable_faulthandler(cls, signum=signal.SIGUSR1):
"""
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
"""
with cls._lock:
if not signum:
cls._disable_faulthandler()
return
if not cls.file_handler or faulthandler is None:
return
cls.faulthandler_signum = signum
dump_file = cls.file_handler.stream
faulthandler.enable(file=dump_file, all_threads=True)
        faulthandler.register(signum, file=dump_file, all_threads=True, chain=False)
"""
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
"""
with cls._lock:
if not signum:
cls._disable_faulthandler()
return
if not cls.file_handler or faulthandler is None:
return
cls.faulthandler_signum = signum
dump_file = cls.file_handler.stream
faulthandler.enable(file=dump_file, all_threads=True)
faulthandler.register(signum, file=dump_file, all_threads=True, chain=False) | [
"def",
"enable_faulthandler",
"(",
"cls",
",",
"signum",
"=",
"signal",
".",
"SIGUSR1",
")",
":",
"with",
"cls",
".",
"_lock",
":",
"if",
"not",
"signum",
":",
"cls",
".",
"_disable_faulthandler",
"(",
")",
"return",
"if",
"not",
"cls",
".",
"file_handler",
"or",
"faulthandler",
"is",
"None",
":",
"return",
"cls",
".",
"faulthandler_signum",
"=",
"signum",
"dump_file",
"=",
"cls",
".",
"file_handler",
".",
"stream",
"faulthandler",
".",
"enable",
"(",
"file",
"=",
"dump_file",
",",
"all_threads",
"=",
"True",
")",
"faulthandler",
".",
"register",
"(",
"signum",
",",
"file",
"=",
"dump_file",
",",
"all_threads",
"=",
"True",
",",
"chain",
"=",
"False",
")"
]
| Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable) | [
"Enable",
"dumping",
"thread",
"stack",
"traces",
"when",
"specified",
"signals",
"are",
"received",
"similar",
"to",
"java",
"s",
"handling",
"of",
"SIGQUIT"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L383-L401 | train |
zsimic/runez | src/runez/logsetup.py | LogManager.override_spec | python

def override_spec(cls, **kwargs):
"""OVerride 'spec' and '_default_spec' with given values"""
cls._default_spec.set(**kwargs)
    cls.spec.set(**kwargs)
"""OVerride 'spec' and '_default_spec' with given values"""
cls._default_spec.set(**kwargs)
cls.spec.set(**kwargs) | [
"def",
"override_spec",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"_default_spec",
".",
"set",
"(",
"*",
"*",
"kwargs",
")",
"cls",
".",
"spec",
".",
"set",
"(",
"*",
"*",
"kwargs",
")"
]
| OVerride 'spec' and '_default_spec' with given values | [
"OVerride",
"spec",
"and",
"_default_spec",
"with",
"given",
"values"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L404-L407 | train |
zsimic/runez | src/runez/logsetup.py | LogManager._fix_logging_shortcuts | python

def _fix_logging_shortcuts(cls):
"""
Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello")
"""
if cls.is_using_format("%(pathname)s %(filename)s %(funcName)s %(module)s"):
logging._srcfile = cls._logging_snapshot._srcfile
else:
logging._srcfile = None
logging.logProcesses = cls.is_using_format("%(process)d")
logging.logThreads = cls.is_using_format("%(thread)d %(threadName)s")
def getframe():
return sys._getframe(4)
def log(level, msg, *args, **kwargs):
"""Wrapper to make logging.info() etc report the right module %(name)"""
name = get_caller_name()
logger = logging.getLogger(name)
try:
logging.currentframe = getframe
logger.log(level, msg, *args, **kwargs)
finally:
logging.currentframe = ORIGINAL_CF
def wrap(level, **kwargs):
"""Wrap corresponding logging shortcut function"""
original = getattr(logging, logging.getLevelName(level).lower())
f = partial(log, level, **kwargs)
f.__doc__ = original.__doc__
return f
logging.critical = wrap(logging.CRITICAL)
logging.fatal = logging.critical
logging.error = wrap(logging.ERROR)
logging.exception = partial(logging.error, exc_info=True)
logging.warning = wrap(logging.WARNING)
logging.info = wrap(logging.INFO)
logging.debug = wrap(logging.DEBUG)
    logging.log = log
"""
Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello")
"""
if cls.is_using_format("%(pathname)s %(filename)s %(funcName)s %(module)s"):
logging._srcfile = cls._logging_snapshot._srcfile
else:
logging._srcfile = None
logging.logProcesses = cls.is_using_format("%(process)d")
logging.logThreads = cls.is_using_format("%(thread)d %(threadName)s")
def getframe():
return sys._getframe(4)
def log(level, msg, *args, **kwargs):
"""Wrapper to make logging.info() etc report the right module %(name)"""
name = get_caller_name()
logger = logging.getLogger(name)
try:
logging.currentframe = getframe
logger.log(level, msg, *args, **kwargs)
finally:
logging.currentframe = ORIGINAL_CF
def wrap(level, **kwargs):
"""Wrap corresponding logging shortcut function"""
original = getattr(logging, logging.getLevelName(level).lower())
f = partial(log, level, **kwargs)
f.__doc__ = original.__doc__
return f
logging.critical = wrap(logging.CRITICAL)
logging.fatal = logging.critical
logging.error = wrap(logging.ERROR)
logging.exception = partial(logging.error, exc_info=True)
logging.warning = wrap(logging.WARNING)
logging.info = wrap(logging.INFO)
logging.debug = wrap(logging.DEBUG)
logging.log = log | [
"def",
"_fix_logging_shortcuts",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"is_using_format",
"(",
"\"%(pathname)s %(filename)s %(funcName)s %(module)s\"",
")",
":",
"logging",
".",
"_srcfile",
"=",
"cls",
".",
"_logging_snapshot",
".",
"_srcfile",
"else",
":",
"logging",
".",
"_srcfile",
"=",
"None",
"logging",
".",
"logProcesses",
"=",
"cls",
".",
"is_using_format",
"(",
"\"%(process)d\"",
")",
"logging",
".",
"logThreads",
"=",
"cls",
".",
"is_using_format",
"(",
"\"%(thread)d %(threadName)s\"",
")",
"def",
"getframe",
"(",
")",
":",
"return",
"sys",
".",
"_getframe",
"(",
"4",
")",
"def",
"log",
"(",
"level",
",",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrapper to make logging.info() etc report the right module %(name)\"\"\"",
"name",
"=",
"get_caller_name",
"(",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"try",
":",
"logging",
".",
"currentframe",
"=",
"getframe",
"logger",
".",
"log",
"(",
"level",
",",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"logging",
".",
"currentframe",
"=",
"ORIGINAL_CF",
"def",
"wrap",
"(",
"level",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap corresponding logging shortcut function\"\"\"",
"original",
"=",
"getattr",
"(",
"logging",
",",
"logging",
".",
"getLevelName",
"(",
"level",
")",
".",
"lower",
"(",
")",
")",
"f",
"=",
"partial",
"(",
"log",
",",
"level",
",",
"*",
"*",
"kwargs",
")",
"f",
".",
"__doc__",
"=",
"original",
".",
"__doc__",
"return",
"f",
"logging",
".",
"critical",
"=",
"wrap",
"(",
"logging",
".",
"CRITICAL",
")",
"logging",
".",
"fatal",
"=",
"logging",
".",
"critical",
"logging",
".",
"error",
"=",
"wrap",
"(",
"logging",
".",
"ERROR",
")",
"logging",
".",
"exception",
"=",
"partial",
"(",
"logging",
".",
"error",
",",
"exc_info",
"=",
"True",
")",
"logging",
".",
"warning",
"=",
"wrap",
"(",
"logging",
".",
"WARNING",
")",
"logging",
".",
"info",
"=",
"wrap",
"(",
"logging",
".",
"INFO",
")",
"logging",
".",
"debug",
"=",
"wrap",
"(",
"logging",
".",
"DEBUG",
")",
"logging",
".",
"log",
"=",
"log"
]
| Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello") | [
"Fix",
"standard",
"logging",
"shortcuts",
"to",
"correctly",
"report",
"logging",
"module",
"."
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L439-L491 | train |
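
The problem the patch solves can be seen with plain stdlib logging; the module-level shortcuts always use the root logger, so `%(name)s` is useless:

```python
import logging

logging.basicConfig(format="%(name)s: %(message)s")
logging.warning("hello")  # -> "root: hello"; the calling module is lost
# After _fix_logging_shortcuts() patches the shortcuts, the same call goes
# through logging.getLogger(<caller module>), so %(name)s names the caller.
```
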
dfm/casjobs | casjobs.py | CasJobs._parse_single | python

def _parse_single(self, text, tagname):
"""
A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag.
"""
return minidom.parseString(text)\
            .getElementsByTagName(tagname)[0].firstChild.data
"""
A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag.
"""
return minidom.parseString(text)\
.getElementsByTagName(tagname)[0].firstChild.data | [
"def",
"_parse_single",
"(",
"self",
",",
"text",
",",
"tagname",
")",
":",
"return",
"minidom",
".",
"parseString",
"(",
"text",
")",
".",
"getElementsByTagName",
"(",
"tagname",
")",
"[",
"0",
"]",
".",
"firstChild",
".",
"data"
]
| A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag. | [
"A",
"hack",
"to",
"get",
"the",
"content",
"of",
"the",
"XML",
"responses",
"from",
"the",
"CAS",
"server",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L93-L108 | train |
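
The extraction itself is plain stdlib minidom and can be checked with a hypothetical response body shaped like the CAS server's XML replies:

```python
from xml.dom import minidom

payload = "<root><string>42</string></root>"  # hypothetical server response
value = minidom.parseString(payload).getElementsByTagName("string")[0].firstChild.data
print(value)  # '42'
```
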
dfm/casjobs | casjobs.py | CasJobs.quick | python

def quick(self, q, context=None, task_name="quickie", system=False):
"""
Run a quick job.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `system` (bool) : Whether or not to run this job as a system job (not
visible in the web UI or history)
## Returns
* `results` (str): The result of the job as a long string.
"""
if not context:
context = self.context
params = {"qry": q, "context": context, "taskname": task_name,
"isSystem": system}
r = self._send_request("ExecuteQuickJob", params=params)
return self._parse_single(r.text, "string") | python | def quick(self, q, context=None, task_name="quickie", system=False):
"""
Run a quick job.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `system` (bool) : Whether or not to run this job as a system job (not
visible in the web UI or history)
## Returns
* `results` (str): The result of the job as a long string.
"""
if not context:
context = self.context
params = {"qry": q, "context": context, "taskname": task_name,
"isSystem": system}
r = self._send_request("ExecuteQuickJob", params=params)
return self._parse_single(r.text, "string") | [
"def",
"quick",
"(",
"self",
",",
"q",
",",
"context",
"=",
"None",
",",
"task_name",
"=",
"\"quickie\"",
",",
"system",
"=",
"False",
")",
":",
"if",
"not",
"context",
":",
"context",
"=",
"self",
".",
"context",
"params",
"=",
"{",
"\"qry\"",
":",
"q",
",",
"\"context\"",
":",
"context",
",",
"\"taskname\"",
":",
"task_name",
",",
"\"isSystem\"",
":",
"system",
"}",
"r",
"=",
"self",
".",
"_send_request",
"(",
"\"ExecuteQuickJob\"",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_parse_single",
"(",
"r",
".",
"text",
",",
"\"string\"",
")"
]
| Run a quick job.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `system` (bool) : Whether or not to run this job as a system job (not
visible in the web UI or history)
## Returns
* `results` (str): The result of the job as a long string. | [
"Run",
"a",
"quick",
"job",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L110-L135 | train |
dfm/casjobs | casjobs.py | CasJobs.submit | python

def submit(self, q, context=None, task_name="casjobs", estimate=30):
"""
Submit a job to CasJobs.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `estimate` (int): Estimate of the time this job will take (in minutes).
## Returns
* `job_id` (int): The submission ID.
"""
if not context:
context = self.context
params = {"qry": q, "context": context, "taskname": task_name,
"estimate": estimate}
r = self._send_request("SubmitJob", params=params)
job_id = int(self._parse_single(r.text, "long"))
return job_id | python | def submit(self, q, context=None, task_name="casjobs", estimate=30):
"""
Submit a job to CasJobs.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `estimate` (int): Estimate of the time this job will take (in minutes).
## Returns
* `job_id` (int): The submission ID.
"""
if not context:
context = self.context
params = {"qry": q, "context": context, "taskname": task_name,
"estimate": estimate}
r = self._send_request("SubmitJob", params=params)
job_id = int(self._parse_single(r.text, "long"))
return job_id | [
"def",
"submit",
"(",
"self",
",",
"q",
",",
"context",
"=",
"None",
",",
"task_name",
"=",
"\"casjobs\"",
",",
"estimate",
"=",
"30",
")",
":",
"if",
"not",
"context",
":",
"context",
"=",
"self",
".",
"context",
"params",
"=",
"{",
"\"qry\"",
":",
"q",
",",
"\"context\"",
":",
"context",
",",
"\"taskname\"",
":",
"task_name",
",",
"\"estimate\"",
":",
"estimate",
"}",
"r",
"=",
"self",
".",
"_send_request",
"(",
"\"SubmitJob\"",
",",
"params",
"=",
"params",
")",
"job_id",
"=",
"int",
"(",
"self",
".",
"_parse_single",
"(",
"r",
".",
"text",
",",
"\"long\"",
")",
")",
"return",
"job_id"
]
| Submit a job to CasJobs.
## Arguments
* `q` (str): The SQL query.
## Keyword Arguments
* `context` (str): Casjobs context used for this query.
* `task_name` (str): The task name.
* `estimate` (int): Estimate of the time this job will take (in minutes).
## Returns
* `job_id` (int): The submission ID. | [
"Submit",
"a",
"job",
"to",
"CasJobs",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L137-L162 | train |
dfm/casjobs | casjobs.py | CasJobs.status | def status(self, job_id):
"""
Check the status of a job.
## Arguments
* `job_id` (int): The job to check.
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status.
"""
params = {"jobid": job_id}
r = self._send_request("GetJobStatus", params=params)
status = int(self._parse_single(r.text, "int"))
return status, self.status_codes[status] | python | def status(self, job_id):
"""
Check the status of a job.
## Arguments
* `job_id` (int): The job to check.
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status.
"""
params = {"jobid": job_id}
r = self._send_request("GetJobStatus", params=params)
status = int(self._parse_single(r.text, "int"))
return status, self.status_codes[status] | [
"def",
"status",
"(",
"self",
",",
"job_id",
")",
":",
"params",
"=",
"{",
"\"jobid\"",
":",
"job_id",
"}",
"r",
"=",
"self",
".",
"_send_request",
"(",
"\"GetJobStatus\"",
",",
"params",
"=",
"params",
")",
"status",
"=",
"int",
"(",
"self",
".",
"_parse_single",
"(",
"r",
".",
"text",
",",
"\"int\"",
")",
")",
"return",
"status",
",",
"self",
".",
"status_codes",
"[",
"status",
"]"
]
| Check the status of a job.
## Arguments
* `job_id` (int): The job to check.
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status. | [
"Check",
"the",
"status",
"of",
"a",
"job",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L164-L181 | train |
dfm/casjobs | casjobs.py | CasJobs.monitor | def monitor(self, job_id, timeout=5):
"""
Monitor the status of a job.
## Arguments
* `job_id` (int): The job to check.
* `timeout` (float): The time to wait between checks (in sec).
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status.
"""
while True:
status = self.status(job_id)
logging.info("Monitoring job: %d - Status: %d, %s"
%(job_id, status[0], status[1]))
if status[0] in [3, 4, 5]:
return status
time.sleep(timeout) | python | def monitor(self, job_id, timeout=5):
"""
Monitor the status of a job.
## Arguments
* `job_id` (int): The job to check.
* `timeout` (float): The time to wait between checks (in sec).
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status.
"""
while True:
status = self.status(job_id)
logging.info("Monitoring job: %d - Status: %d, %s"
%(job_id, status[0], status[1]))
if status[0] in [3, 4, 5]:
return status
time.sleep(timeout) | [
"def",
"monitor",
"(",
"self",
",",
"job_id",
",",
"timeout",
"=",
"5",
")",
":",
"while",
"True",
":",
"status",
"=",
"self",
".",
"status",
"(",
"job_id",
")",
"logging",
".",
"info",
"(",
"\"Monitoring job: %d - Status: %d, %s\"",
"%",
"(",
"job_id",
",",
"status",
"[",
"0",
"]",
",",
"status",
"[",
"1",
"]",
")",
")",
"if",
"status",
"[",
"0",
"]",
"in",
"[",
"3",
",",
"4",
",",
"5",
"]",
":",
"return",
"status",
"time",
".",
"sleep",
"(",
"timeout",
")"
]
| Monitor the status of a job.
## Arguments
* `job_id` (int): The job to check.
* `timeout` (float): The time to wait between checks (in sec).
## Returns
* `code` (int): The status.
* `status` (str): The human-readable name of the current status. | [
"Monitor",
"the",
"status",
"of",
"a",
"job",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L195-L216 | train |
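Illustrative usage: submitting a long-running query and polling it to completion, reusing the hypothetical `jobs` client from the sketch above; the query text and target table are placeholders.
# submit() returns immediately with a job id; monitor() polls every `timeout`
# seconds and returns once the status is 3, 4 or 5 (get_output() below shows
# that 5 means successfully finished).
job_id = jobs.submit("SELECT ra, dec INTO mydb.bright FROM PhotoObj WHERE r < 16",
                     task_name="bright-stars", estimate=30)
code, name = jobs.monitor(job_id, timeout=10)
if code != 5:
    raise RuntimeError("job %d ended with status %s" % (job_id, name))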
dfm/casjobs | casjobs.py | CasJobs.request_output | def request_output(self, table, outtype):
"""
Request the output for a given table.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE
"""
job_types = ["CSV", "DataSet", "FITS", "VOTable"]
assert outtype in job_types
params = {"tableName": table, "type": outtype}
r = self._send_request("SubmitExtractJob", params=params)
job_id = int(self._parse_single(r.text, "long"))
return job_id | python | def request_output(self, table, outtype):
"""
Request the output for a given table.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE
"""
job_types = ["CSV", "DataSet", "FITS", "VOTable"]
assert outtype in job_types
params = {"tableName": table, "type": outtype}
r = self._send_request("SubmitExtractJob", params=params)
job_id = int(self._parse_single(r.text, "long"))
return job_id | [
"def",
"request_output",
"(",
"self",
",",
"table",
",",
"outtype",
")",
":",
"job_types",
"=",
"[",
"\"CSV\"",
",",
"\"DataSet\"",
",",
"\"FITS\"",
",",
"\"VOTable\"",
"]",
"assert",
"outtype",
"in",
"job_types",
"params",
"=",
"{",
"\"tableName\"",
":",
"table",
",",
"\"type\"",
":",
"outtype",
"}",
"r",
"=",
"self",
".",
"_send_request",
"(",
"\"SubmitExtractJob\"",
",",
"params",
"=",
"params",
")",
"job_id",
"=",
"int",
"(",
"self",
".",
"_parse_single",
"(",
"r",
".",
"text",
",",
"\"long\"",
")",
")",
"return",
"job_id"
]
| Request the output for a given table.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE | [
"Request",
"the",
"output",
"for",
"a",
"given",
"table",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L238-L257 | train |
dfm/casjobs | casjobs.py | CasJobs.get_output | def get_output(self, job_id, outfn):
"""
Download an output file given the id of the output request job.
## Arguments
* `job_id` (int): The id of the _output_ job.
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method.
"""
job_info = self.job_info(jobid=job_id)[0]
# Make sure that the job is finished.
status = int(job_info["Status"])
if status != 5:
raise Exception("The status of job %d is %d (%s)"
%(job_id, status, self.status_codes[status]))
# Try to download the output file.
remotefn = job_info["OutputLoc"]
r = requests.get(remotefn)
# Make sure that the request went through.
code = r.status_code
if code != 200:
raise Exception("Getting file %s yielded status: %d"
%(remotefn, code))
# Save the data to a file.
try:
outfn.write(r.content)
except AttributeError:
f = open(outfn, "wb")
f.write(r.content)
f.close() | python | def get_output(self, job_id, outfn):
"""
Download an output file given the id of the output request job.
## Arguments
* `job_id` (int): The id of the _output_ job.
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method.
"""
job_info = self.job_info(jobid=job_id)[0]
# Make sure that the job is finished.
status = int(job_info["Status"])
if status != 5:
raise Exception("The status of job %d is %d (%s)"
%(job_id, status, self.status_codes[status]))
# Try to download the output file.
remotefn = job_info["OutputLoc"]
r = requests.get(remotefn)
# Make sure that the request went through.
code = r.status_code
if code != 200:
raise Exception("Getting file %s yielded status: %d"
%(remotefn, code))
# Save the data to a file.
try:
outfn.write(r.content)
except AttributeError:
f = open(outfn, "wb")
f.write(r.content)
f.close() | [
"def",
"get_output",
"(",
"self",
",",
"job_id",
",",
"outfn",
")",
":",
"job_info",
"=",
"self",
".",
"job_info",
"(",
"jobid",
"=",
"job_id",
")",
"[",
"0",
"]",
"# Make sure that the job is finished.",
"status",
"=",
"int",
"(",
"job_info",
"[",
"\"Status\"",
"]",
")",
"if",
"status",
"!=",
"5",
":",
"raise",
"Exception",
"(",
"\"The status of job %d is %d (%s)\"",
"%",
"(",
"job_id",
",",
"status",
",",
"self",
".",
"status_codes",
"[",
"status",
"]",
")",
")",
"# Try to download the output file.",
"remotefn",
"=",
"job_info",
"[",
"\"OutputLoc\"",
"]",
"r",
"=",
"requests",
".",
"get",
"(",
"remotefn",
")",
"# Make sure that the request went through.",
"code",
"=",
"r",
".",
"status_code",
"if",
"code",
"!=",
"200",
":",
"raise",
"Exception",
"(",
"\"Getting file %s yielded status: %d\"",
"%",
"(",
"remotefn",
",",
"code",
")",
")",
"# Save the data to a file.",
"try",
":",
"outfn",
".",
"write",
"(",
"r",
".",
"content",
")",
"except",
"AttributeError",
":",
"f",
"=",
"open",
"(",
"outfn",
",",
"\"wb\"",
")",
"f",
".",
"write",
"(",
"r",
".",
"content",
")",
"f",
".",
"close",
"(",
")"
]
| Download an output file given the id of the output request job.
## Arguments
* `job_id` (int): The id of the _output_ job.
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method. | [
"Download",
"an",
"output",
"file",
"given",
"the",
"id",
"of",
"the",
"output",
"request",
"job",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L259-L294 | train |
dfm/casjobs | casjobs.py | CasJobs.request_and_get_output | def request_and_get_output(self, table, outtype, outfn):
"""
Shorthand for requesting an output file and then downloading it when
ready.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method.
"""
job_id = self.request_output(table, outtype)
status = self.monitor(job_id)
if status[0] != 5:
raise Exception("Output request failed.")
self.get_output(job_id, outfn) | python | def request_and_get_output(self, table, outtype, outfn):
"""
Shorthand for requesting an output file and then downloading it when
ready.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method.
"""
job_id = self.request_output(table, outtype)
status = self.monitor(job_id)
if status[0] != 5:
raise Exception("Output request failed.")
self.get_output(job_id, outfn) | [
"def",
"request_and_get_output",
"(",
"self",
",",
"table",
",",
"outtype",
",",
"outfn",
")",
":",
"job_id",
"=",
"self",
".",
"request_output",
"(",
"table",
",",
"outtype",
")",
"status",
"=",
"self",
".",
"monitor",
"(",
"job_id",
")",
"if",
"status",
"[",
"0",
"]",
"!=",
"5",
":",
"raise",
"Exception",
"(",
"\"Output request failed.\"",
")",
"self",
".",
"get_output",
"(",
"job_id",
",",
"outfn",
")"
]
| Shorthand for requesting an output file and then downloading it when
ready.
## Arguments
* `table` (str): The name of the table to export.
* `outtype` (str): The type of output. Must be one of:
CSV - Comma Separated Values
DataSet - XML DataSet
FITS - Flexible Image Transport System (FITS Binary)
VOTable - XML Virtual Observatory VOTABLE
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method. | [
"Shorthand",
"for",
"requesting",
"an",
"output",
"file",
"and",
"then",
"downloading",
"it",
"when",
"ready",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L296-L317 | train |
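Illustrative usage: exporting a MyDB table and downloading it in one step, again with the hypothetical `jobs` client; the table and file names are placeholders.
# Equivalent to request_output() + monitor() + get_output() chained by hand.
jobs.request_and_get_output("bright", "FITS", "bright.fits")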
dfm/casjobs | casjobs.py | CasJobs.drop_table | def drop_table(self, table):
"""
Drop a table from the MyDB context.
## Arguments
* `table` (str): The name of the table to drop.
"""
job_id = self.submit("DROP TABLE %s"%table, context="MYDB")
status = self.monitor(job_id)
if status[0] != 5:
raise Exception("Couldn't drop table %s"%table) | python | def drop_table(self, table):
"""
Drop a table from the MyDB context.
## Arguments
* `table` (str): The name of the table to drop.
"""
job_id = self.submit("DROP TABLE %s"%table, context="MYDB")
status = self.monitor(job_id)
if status[0] != 5:
raise Exception("Couldn't drop table %s"%table) | [
"def",
"drop_table",
"(",
"self",
",",
"table",
")",
":",
"job_id",
"=",
"self",
".",
"submit",
"(",
"\"DROP TABLE %s\"",
"%",
"table",
",",
"context",
"=",
"\"MYDB\"",
")",
"status",
"=",
"self",
".",
"monitor",
"(",
"job_id",
")",
"if",
"status",
"[",
"0",
"]",
"!=",
"5",
":",
"raise",
"Exception",
"(",
"\"Couldn't drop table %s\"",
"%",
"table",
")"
]
| Drop a table from the MyDB context.
## Arguments
* `table` (str): The name of the table to drop. | [
"Drop",
"a",
"table",
"from",
"the",
"MyDB",
"context",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L319-L331 | train |
dfm/casjobs | casjobs.py | CasJobs.count | def count(self, q):
"""
Shorthand for counting the results of a specific query.
## Arguments
* `q` (str): The query to count. This will be executed as:
`"SELECT COUNT(*) %s" % q`.
## Returns
* `count` (int): The resulting count.
"""
q = "SELECT COUNT(*) %s"%q
return int(self.quick(q).split("\n")[1]) | python | def count(self, q):
"""
Shorthand for counting the results of a specific query.
## Arguments
* `q` (str): The query to count. This will be executed as:
`"SELECT COUNT(*) %s" % q`.
## Returns
* `count` (int): The resulting count.
"""
q = "SELECT COUNT(*) %s"%q
return int(self.quick(q).split("\n")[1]) | [
"def",
"count",
"(",
"self",
",",
"q",
")",
":",
"q",
"=",
"\"SELECT COUNT(*) %s\"",
"%",
"q",
"return",
"int",
"(",
"self",
".",
"quick",
"(",
"q",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"1",
"]",
")"
]
| Shorthand for counting the results of a specific query.
## Arguments
* `q` (str): The query to count. This will be executed as:
`"SELECT COUNT(*) %s" % q`.
## Returns
* `count` (int): The resulting count. | [
"Shorthand",
"for",
"counting",
"the",
"results",
"of",
"a",
"specific",
"query",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L333-L348 | train |
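Illustrative usage: count() prepends "SELECT COUNT(*)", so the argument starts at the FROM clause; the table and predicate are placeholders.
n = jobs.count("FROM PhotoObj WHERE ra BETWEEN 10 AND 11")
print(n)  # an int parsed from the second line of the returned CSV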
dfm/casjobs | casjobs.py | CasJobs.list_tables | def list_tables(self):
"""
Lists the tables in mydb.
## Returns
* `tables` (list): A list of strings with all the table names from mydb.
"""
q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'
res = self.quick(q, context='MYDB', task_name='listtables', system=True)
# the first line is a header and the last is always empty
# also, the table names have " as the first and last characters
return [l[1:-1] for l in res.split('\n')[1:-1]]
"""
Lists the tables in mydb.
## Returns
* `tables` (list): A list of strings with all the table names from mydb.
"""
q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'
res = self.quick(q, context='MYDB', task_name='listtables', system=True)
# the first line is a header and the last is always empty
# also, the table names have " as the first and last characters
return [l[1:-1] for l in res.split('\n')[1:-1]]
"def",
"list_tables",
"(",
"self",
")",
":",
"q",
"=",
"'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'",
"res",
"=",
"self",
".",
"quick",
"(",
"q",
",",
"context",
"=",
"'MYDB'",
",",
"task_name",
"=",
"'listtables'",
",",
"system",
"=",
"True",
")",
"# the first line is a header and the last is always empty",
"# also, the table names have \" as the first and last characters",
"return",
"[",
"l",
"[",
"1",
":",
"-",
"1",
"]",
"for",
"l",
"in",
"res",
".",
"split",
"(",
"'\\n'",
")",
"[",
"1",
":",
"-",
"1",
"]",
"]"
]
| Lists the tables in mydb.
## Returns
* `tables` (list): A list of strings with all the table names from mydb. | [
"Lists",
"the",
"tables",
"in",
"mydb",
"."
]
| 1cc3f5511cc254d776082909221787e3c037ac16 | https://github.com/dfm/casjobs/blob/1cc3f5511cc254d776082909221787e3c037ac16/casjobs.py#L350-L362 | train |
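Illustrative usage: listing the MyDB tables and dropping one of them; the table name is a placeholder.
for table in jobs.list_tables():
    print(table)
jobs.drop_table("bright")  # submits DROP TABLE in the MYDB context and waits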
ncraike/fang | examples/multiple_dependencies.py | multiply_and_add | def multiply_and_add(n):
'''Multiply the given number n by some configured multiplier, and
then add a configured offset.'''
multiplier, offset = di.resolver.unpack(multiply_and_add)
return (multiplier * n) + offset | python | def multiply_and_add(n):
'''Multiply the given number n by some configured multiplier, and
then add a configured offset.'''
multiplier, offset = di.resolver.unpack(multiply_and_add)
return (multiplier * n) + offset | [
"def",
"multiply_and_add",
"(",
"n",
")",
":",
"multiplier",
",",
"offset",
"=",
"di",
".",
"resolver",
".",
"unpack",
"(",
"multiply_and_add",
")",
"return",
"(",
"multiplier",
"*",
"n",
")",
"+",
"offset"
]
| Multiply the given number n by some configured multiplier, and
then add a configured offset. | [
"Multiply",
"the",
"given",
"number",
"n",
"by",
"some",
"configured",
"multiplier",
"and",
"then",
"add",
"a",
"configured",
"offset",
"."
]
| 2d9e1216c866e450059017f83ab775f7716eda7a | https://github.com/ncraike/fang/blob/2d9e1216c866e450059017f83ab775f7716eda7a/examples/multiple_dependencies.py#L13-L17 | train |
mastro35/flows | flows/Actions/InputTailAction.py | TailAction.flush_buffer | def flush_buffer(self):
''' Flush the buffer of the tail '''
if len(self.buffer) > 0:
return_value = ''.join(self.buffer)
self.buffer.clear()
self.send_message(return_value)
self.last_flush_date = datetime.datetime.now() | python | def flush_buffer(self):
''' Flush the buffer of the tail '''
if len(self.buffer) > 0:
return_value = ''.join(self.buffer)
self.buffer.clear()
self.send_message(return_value)
self.last_flush_date = datetime.datetime.now() | [
"def",
"flush_buffer",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"buffer",
")",
">",
"0",
":",
"return_value",
"=",
"''",
".",
"join",
"(",
"self",
".",
"buffer",
")",
"self",
".",
"buffer",
".",
"clear",
"(",
")",
"self",
".",
"send_message",
"(",
"return_value",
")",
"self",
".",
"last_flush_date",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")"
]
| Flush the buffer of the tail | [
"Flush",
"the",
"buffer",
"of",
"the",
"tail"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputTailAction.py#L64-L70 | train |
zsimic/runez | src/runez/base.py | Slotted.set | def set(self, *args, **kwargs):
"""Conveniently set one or more fields at a time.
Args:
*args: Optionally set from other objects; available fields from each passed object are used in order
**kwargs: Set from given key/value pairs (only names defined in __slots__ are used)
"""
if args:
for arg in args:
if arg is not None:
for name in self.__slots__:
self._set(name, getattr(arg, name, UNSET))
for name in kwargs:
self._set(name, kwargs.get(name, UNSET)) | python | def set(self, *args, **kwargs):
"""Conveniently set one or more fields at a time.
Args:
*args: Optionally set from other objects; available fields from each passed object are used in order
**kwargs: Set from given key/value pairs (only names defined in __slots__ are used)
"""
if args:
for arg in args:
if arg is not None:
for name in self.__slots__:
self._set(name, getattr(arg, name, UNSET))
for name in kwargs:
self._set(name, kwargs.get(name, UNSET)) | [
"def",
"set",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
":",
"for",
"arg",
"in",
"args",
":",
"if",
"arg",
"is",
"not",
"None",
":",
"for",
"name",
"in",
"self",
".",
"__slots__",
":",
"self",
".",
"_set",
"(",
"name",
",",
"getattr",
"(",
"arg",
",",
"name",
",",
"UNSET",
")",
")",
"for",
"name",
"in",
"kwargs",
":",
"self",
".",
"_set",
"(",
"name",
",",
"kwargs",
".",
"get",
"(",
"name",
",",
"UNSET",
")",
")"
]
| Conveniently set one or more fields at a time.
Args:
*args: Optionally set from other objects; available fields from each passed object are used in order
**kwargs: Set from given key/value pairs (only names defined in __slots__ are used) | [
"Conveniently",
"set",
"one",
"or",
"more",
"fields",
"at",
"a",
"time",
"."
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L105-L118 | train |
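Illustrative sketch of a hypothetical Slotted subclass. It assumes Slotted can be instantiated without arguments and that _set() ignores UNSET values; both are inferences from the method shown, not verified against the rest of the library.
from runez.base import Slotted

class Point(Slotted):
    __slots__ = ["x", "y"]

p = Point()
p.set(x=1, y=2)  # kwargs path: only names listed in __slots__ are applied
q = Point()
q.set(p, y=5)    # object path runs first, then kwargs override field by field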
zsimic/runez | src/runez/base.py | ThreadGlobalContext.enable | def enable(self):
"""Enable contextual logging"""
with self._lock:
if self.filter is None:
self.filter = self._filter_type(self) | python | def enable(self):
"""Enable contextual logging"""
with self._lock:
if self.filter is None:
self.filter = self._filter_type(self) | [
"def",
"enable",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"filter",
"is",
"None",
":",
"self",
".",
"filter",
"=",
"self",
".",
"_filter_type",
"(",
"self",
")"
]
| Enable contextual logging | [
"Enable",
"contextual",
"logging"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L161-L165 | train |
zsimic/runez | src/runez/base.py | ThreadGlobalContext.set_threadlocal | def set_threadlocal(self, **values):
"""Set current thread's logging context to specified `values`"""
with self._lock:
self._ensure_threadlocal()
self._tpayload.context = values | python | def set_threadlocal(self, **values):
"""Set current thread's logging context to specified `values`"""
with self._lock:
self._ensure_threadlocal()
self._tpayload.context = values | [
"def",
"set_threadlocal",
"(",
"self",
",",
"*",
"*",
"values",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_ensure_threadlocal",
"(",
")",
"self",
".",
"_tpayload",
".",
"context",
"=",
"values"
]
| Set current thread's logging context to specified `values` | [
"Set",
"current",
"thread",
"s",
"logging",
"context",
"to",
"specified",
"values"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L175-L179 | train |
zsimic/runez | src/runez/base.py | ThreadGlobalContext.add_threadlocal | def add_threadlocal(self, **values):
"""Add `values` to current thread's logging context"""
with self._lock:
self._ensure_threadlocal()
self._tpayload.context.update(**values) | python | def add_threadlocal(self, **values):
"""Add `values` to current thread's logging context"""
with self._lock:
self._ensure_threadlocal()
self._tpayload.context.update(**values) | [
"def",
"add_threadlocal",
"(",
"self",
",",
"*",
"*",
"values",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_ensure_threadlocal",
"(",
")",
"self",
".",
"_tpayload",
".",
"context",
".",
"update",
"(",
"*",
"*",
"values",
")"
]
| Add `values` to current thread's logging context | [
"Add",
"values",
"to",
"current",
"thread",
"s",
"logging",
"context"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L181-L185 | train |
zsimic/runez | src/runez/base.py | ThreadGlobalContext.add_global | def add_global(self, **values):
"""Add `values` to global logging context"""
with self._lock:
self._ensure_global()
self._gpayload.update(**values) | python | def add_global(self, **values):
"""Add `values` to global logging context"""
with self._lock:
self._ensure_global()
self._gpayload.update(**values) | [
"def",
"add_global",
"(",
"self",
",",
"*",
"*",
"values",
")",
":",
"with",
"self",
".",
"_lock",
":",
"self",
".",
"_ensure_global",
"(",
")",
"self",
".",
"_gpayload",
".",
"update",
"(",
"*",
"*",
"values",
")"
]
| Add `values` to global logging context | [
"Add",
"values",
"to",
"global",
"logging",
"context"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L209-L213 | train |
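Illustrative sketch for the ThreadGlobalContext methods above. The constructor argument (a logging.Filter-like class stored as _filter_type) is an inference from self._filter_type(self) in enable(); MyFilter is a hypothetical stand-in.
import logging

from runez.base import ThreadGlobalContext

class MyFilter(logging.Filter):
    def __init__(self, context):
        super(MyFilter, self).__init__()
        self.context = context  # the ThreadGlobalContext that created this filter

ctx = ThreadGlobalContext(MyFilter)
ctx.enable()                              # lazily instantiates the filter
ctx.add_global(app="myapp")               # values shared across all threads
ctx.add_threadlocal(request_id="abc123")  # values for the current thread only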
mojaie/chorus | chorus/draw/helper.py | display_terminal_carbon | def display_terminal_carbon(mol):
"""Set visible=True to the terminal carbon atoms.
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
a.visible = True | python | def display_terminal_carbon(mol):
"""Set visible=True to the terminal carbon atoms.
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
a.visible = True | [
"def",
"display_terminal_carbon",
"(",
"mol",
")",
":",
"for",
"i",
",",
"a",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
":",
"if",
"mol",
".",
"neighbor_count",
"(",
"i",
")",
"==",
"1",
":",
"a",
".",
"visible",
"=",
"True"
]
| Set visible=True to the terminal carbon atoms. | [
"Set",
"visible",
"=",
"True",
"to",
"the",
"terminal",
"carbon",
"atoms",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/helper.py#L16-L21 | train |
mojaie/chorus | chorus/draw/helper.py | equalize_terminal_double_bond | def equalize_terminal_double_bond(mol):
"""Show equalized double bond if it is connected to terminal atom.
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
nb = list(mol.neighbors(i).values())[0]
if nb.order == 2:
nb.type = 2 | python | def equalize_terminal_double_bond(mol):
"""Show equalized double bond if it is connected to terminal atom.
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
nb = list(mol.neighbors(i).values())[0]
if nb.order == 2:
nb.type = 2 | [
"def",
"equalize_terminal_double_bond",
"(",
"mol",
")",
":",
"for",
"i",
",",
"a",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
":",
"if",
"mol",
".",
"neighbor_count",
"(",
"i",
")",
"==",
"1",
":",
"nb",
"=",
"list",
"(",
"mol",
".",
"neighbors",
"(",
"i",
")",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"if",
"nb",
".",
"order",
"==",
"2",
":",
"nb",
".",
"type",
"=",
"2"
]
| Show equalized double bond if it is connected to terminal atom. | [
"Show",
"equalized",
"double",
"bond",
"if",
"it",
"is",
"connected",
"to",
"terminal",
"atom",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/helper.py#L24-L31 | train |
mojaie/chorus | chorus/draw/helper.py | spine_to_terminal_wedge | def spine_to_terminal_wedge(mol):
"""Arrange stereo wedge direction from spine to terminal atom
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
ni, nb = list(mol.neighbors(i).items())[0]
if nb.order == 1 and nb.type in (1, 2) \
and ni > i != nb.is_lower_first:
nb.is_lower_first = not nb.is_lower_first
nb.type = {1: 2, 2: 1}[nb.type] | python | def spine_to_terminal_wedge(mol):
"""Arrange stereo wedge direction from spine to terminal atom
"""
for i, a in mol.atoms_iter():
if mol.neighbor_count(i) == 1:
ni, nb = list(mol.neighbors(i).items())[0]
if nb.order == 1 and nb.type in (1, 2) \
and ni > i != nb.is_lower_first:
nb.is_lower_first = not nb.is_lower_first
nb.type = {1: 2, 2: 1}[nb.type] | [
"def",
"spine_to_terminal_wedge",
"(",
"mol",
")",
":",
"for",
"i",
",",
"a",
"in",
"mol",
".",
"atoms_iter",
"(",
")",
":",
"if",
"mol",
".",
"neighbor_count",
"(",
"i",
")",
"==",
"1",
":",
"ni",
",",
"nb",
"=",
"list",
"(",
"mol",
".",
"neighbors",
"(",
"i",
")",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"if",
"nb",
".",
"order",
"==",
"1",
"and",
"nb",
".",
"type",
"in",
"(",
"1",
",",
"2",
")",
"and",
"ni",
">",
"i",
"!=",
"nb",
".",
"is_lower_first",
":",
"nb",
".",
"is_lower_first",
"=",
"not",
"nb",
".",
"is_lower_first",
"nb",
".",
"type",
"=",
"{",
"1",
":",
"2",
",",
"2",
":",
"1",
"}",
"[",
"nb",
".",
"type",
"]"
]
| Arrange stereo wedge direction from spine to terminal atom | [
"Arrange",
"stereo",
"wedge",
"direction",
"from",
"spine",
"to",
"terminal",
"atom"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/helper.py#L34-L43 | train |
mojaie/chorus | chorus/draw/helper.py | format_ring_double_bond | def format_ring_double_bond(mol):
"""Set double bonds around the ring.
"""
mol.require("Topology")
mol.require("ScaleAndCenter")
for r in sorted(mol.rings, key=len, reverse=True):
vertices = [mol.atom(n).coords for n in r]
try:
if geometry.is_clockwise(vertices):
cpath = iterator.consecutive(itertools.cycle(r), 2)
else:
cpath = iterator.consecutive(itertools.cycle(reversed(r)), 2)
except ValueError:
continue
for _ in r:
u, v = next(cpath)
b = mol.bond(u, v)
if b.order == 2:
b.type = int((u > v) == b.is_lower_first) | python | def format_ring_double_bond(mol):
"""Set double bonds around the ring.
"""
mol.require("Topology")
mol.require("ScaleAndCenter")
for r in sorted(mol.rings, key=len, reverse=True):
vertices = [mol.atom(n).coords for n in r]
try:
if geometry.is_clockwise(vertices):
cpath = iterator.consecutive(itertools.cycle(r), 2)
else:
cpath = iterator.consecutive(itertools.cycle(reversed(r)), 2)
except ValueError:
continue
for _ in r:
u, v = next(cpath)
b = mol.bond(u, v)
if b.order == 2:
b.type = int((u > v) == b.is_lower_first) | [
"def",
"format_ring_double_bond",
"(",
"mol",
")",
":",
"mol",
".",
"require",
"(",
"\"Topology\"",
")",
"mol",
".",
"require",
"(",
"\"ScaleAndCenter\"",
")",
"for",
"r",
"in",
"sorted",
"(",
"mol",
".",
"rings",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
":",
"vertices",
"=",
"[",
"mol",
".",
"atom",
"(",
"n",
")",
".",
"coords",
"for",
"n",
"in",
"r",
"]",
"try",
":",
"if",
"geometry",
".",
"is_clockwise",
"(",
"vertices",
")",
":",
"cpath",
"=",
"iterator",
".",
"consecutive",
"(",
"itertools",
".",
"cycle",
"(",
"r",
")",
",",
"2",
")",
"else",
":",
"cpath",
"=",
"iterator",
".",
"consecutive",
"(",
"itertools",
".",
"cycle",
"(",
"reversed",
"(",
"r",
")",
")",
",",
"2",
")",
"except",
"ValueError",
":",
"continue",
"for",
"_",
"in",
"r",
":",
"u",
",",
"v",
"=",
"next",
"(",
"cpath",
")",
"b",
"=",
"mol",
".",
"bond",
"(",
"u",
",",
"v",
")",
"if",
"b",
".",
"order",
"==",
"2",
":",
"b",
".",
"type",
"=",
"int",
"(",
"(",
"u",
">",
"v",
")",
"==",
"b",
".",
"is_lower_first",
")"
]
| Set double bonds around the ring. | [
"Set",
"double",
"bonds",
"around",
"the",
"ring",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/helper.py#L46-L64 | train |
mojaie/chorus | chorus/draw/helper.py | ready_to_draw | def ready_to_draw(mol):
"""Shortcut function to prepare molecule to draw.
Overwrite this function for customized appearance.
It is recommended to clone the molecule before draw
because all the methods above are destructive.
"""
copied = molutil.clone(mol)
# display_terminal_carbon(mol)
equalize_terminal_double_bond(copied)
# spine_to_terminal_wedge(copied)
scale_and_center(copied)
format_ring_double_bond(copied)
return copied | python | def ready_to_draw(mol):
"""Shortcut function to prepare molecule to draw.
Overwrite this function for customized appearance.
It is recommended to clone the molecule before draw
because all the methods above are destructive.
"""
copied = molutil.clone(mol)
# display_terminal_carbon(mol)
equalize_terminal_double_bond(copied)
# spine_to_terminal_wedge(copied)
scale_and_center(copied)
format_ring_double_bond(copied)
return copied | [
"def",
"ready_to_draw",
"(",
"mol",
")",
":",
"copied",
"=",
"molutil",
".",
"clone",
"(",
"mol",
")",
"# display_terminal_carbon(mol)",
"equalize_terminal_double_bond",
"(",
"copied",
")",
"# spine_to_terminal_wedge(copied)",
"scale_and_center",
"(",
"copied",
")",
"format_ring_double_bond",
"(",
"copied",
")",
"return",
"copied"
]
| Shortcut function to prepare molecule to draw.
Overwrite this function for customized appearance.
It is recommended to clone the molecule before draw
because all the methods above are destructive. | [
"Shortcut",
"function",
"to",
"prepare",
"molecule",
"to",
"draw",
".",
"Overwrite",
"this",
"function",
"for",
"customized",
"appearance",
".",
"It",
"is",
"recommended",
"to",
"clone",
"the",
"molecule",
"before",
"draw",
"because",
"all",
"the",
"methods",
"above",
"are",
"destructive",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/helper.py#L113-L125 | train |
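A hedged sketch of the kind of customization the ready_to_draw docstring invites: the same destructive helpers composed into a variant that also shows terminal carbons. Every name used here appears in the module above; `mol` is assumed to be a parsed chorus molecule.
def ready_to_draw_verbose(mol):
    copied = molutil.clone(mol)  # clone first: every helper mutates in place
    display_terminal_carbon(copied)
    equalize_terminal_double_bond(copied)
    scale_and_center(copied)
    format_ring_double_bond(copied)
    return copied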
adblair/configloader | configloader/__init__.py | ConfigLoader.update_from_object | def update_from_object(self, obj, criterion=lambda key: key.isupper()):
"""
Update dict from the attributes of a module, class or other object.
By default only attributes with all-uppercase names will be retrieved.
Use the ``criterion`` argument to modify that behaviour.
:arg obj: Either the actual module/object, or its absolute name, e.g.
'my_app.settings'.
:arg criterion: Callable that must return True when passed the name
of an attribute, if that attribute is to be used.
:type criterion: :py:class:`function`
.. versionadded:: 1.0
"""
log.debug('Loading config from {0}'.format(obj))
if isinstance(obj, basestring):
if '.' in obj:
path, name = obj.rsplit('.', 1)
mod = __import__(path, globals(), locals(), [name], 0)
obj = getattr(mod, name)
else:
obj = __import__(obj, globals(), locals(), [], 0)
self.update(
(key, getattr(obj, key))
for key in filter(criterion, dir(obj))
) | python | def update_from_object(self, obj, criterion=lambda key: key.isupper()):
"""
Update dict from the attributes of a module, class or other object.
By default only attributes with all-uppercase names will be retrieved.
Use the ``criterion`` argument to modify that behaviour.
:arg obj: Either the actual module/object, or its absolute name, e.g.
'my_app.settings'.
:arg criterion: Callable that must return True when passed the name
of an attribute, if that attribute is to be used.
:type criterion: :py:class:`function`
.. versionadded:: 1.0
"""
log.debug('Loading config from {0}'.format(obj))
if isinstance(obj, basestring):
if '.' in obj:
path, name = obj.rsplit('.', 1)
mod = __import__(path, globals(), locals(), [name], 0)
obj = getattr(mod, name)
else:
obj = __import__(obj, globals(), locals(), [], 0)
self.update(
(key, getattr(obj, key))
for key in filter(criterion, dir(obj))
) | [
"def",
"update_from_object",
"(",
"self",
",",
"obj",
",",
"criterion",
"=",
"lambda",
"key",
":",
"key",
".",
"isupper",
"(",
")",
")",
":",
"log",
".",
"debug",
"(",
"'Loading config from {0}'",
".",
"format",
"(",
"obj",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"basestring",
")",
":",
"if",
"'.'",
"in",
"obj",
":",
"path",
",",
"name",
"=",
"obj",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"mod",
"=",
"__import__",
"(",
"path",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"name",
"]",
",",
"0",
")",
"obj",
"=",
"getattr",
"(",
"mod",
",",
"name",
")",
"else",
":",
"obj",
"=",
"__import__",
"(",
"obj",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"]",
",",
"0",
")",
"self",
".",
"update",
"(",
"(",
"key",
",",
"getattr",
"(",
"obj",
",",
"key",
")",
")",
"for",
"key",
"in",
"filter",
"(",
"criterion",
",",
"dir",
"(",
"obj",
")",
")",
")"
]
| Update dict from the attributes of a module, class or other object.
By default only attributes with all-uppercase names will be retrieved.
Use the ``criterion`` argument to modify that behaviour.
:arg obj: Either the actual module/object, or its absolute name, e.g.
'my_app.settings'.
:arg criterion: Callable that must return True when passed the name
of an attribute, if that attribute is to be used.
:type criterion: :py:class:`function`
.. versionadded:: 1.0 | [
"Update",
"dict",
"from",
"the",
"attributes",
"of",
"a",
"module",
"class",
"or",
"other",
"object",
"."
]
| c56eb568a376243400bb72992ca927c35922c827 | https://github.com/adblair/configloader/blob/c56eb568a376243400bb72992ca927c35922c827/configloader/__init__.py#L46-L73 | train |
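Illustrative usage: loading config from a hypothetical settings module; the module path and attribute names are placeholders.
# my_app/settings.py (hypothetical):
#     DEBUG = True
#     SECRET_KEY = "dev"
#     _helper = "skipped"  # not upper-case, so the default criterion ignores it
from configloader import ConfigLoader

config = ConfigLoader()
config.update_from_object('my_app.settings')
# config is now ConfigLoader({'DEBUG': True, 'SECRET_KEY': 'dev'})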
adblair/configloader | configloader/__init__.py | ConfigLoader.update_from_env_namespace | def update_from_env_namespace(self, namespace):
"""
Update dict from any environment variables that have a given prefix.
The common prefix is removed when converting the variable names to
dictionary keys. For example, if the following environment variables
were set::
MY_APP_SETTING1=foo
MY_APP_SETTING2=bar
Then calling ``.update_from_env_namespace('MY_APP')`` would be
equivalent to calling
``.update({'SETTING1': 'foo', 'SETTING2': 'bar'})``.
:arg namespace: Common environment variable prefix.
:type env_var: :py:class:`str`
"""
self.update(ConfigLoader(os.environ).namespace(namespace)) | python | def update_from_env_namespace(self, namespace):
"""
Update dict from any environment variables that have a given prefix.
The common prefix is removed when converting the variable names to
dictionary keys. For example, if the following environment variables
were set::
MY_APP_SETTING1=foo
MY_APP_SETTING2=bar
Then calling ``.update_from_env_namespace('MY_APP')`` would be
equivalent to calling
``.update({'SETTING1': 'foo', 'SETTING2': 'bar'})``.
:arg namespace: Common environment variable prefix.
:type namespace: :py:class:`str`
"""
self.update(ConfigLoader(os.environ).namespace(namespace)) | [
"def",
"update_from_env_namespace",
"(",
"self",
",",
"namespace",
")",
":",
"self",
".",
"update",
"(",
"ConfigLoader",
"(",
"os",
".",
"environ",
")",
".",
"namespace",
"(",
"namespace",
")",
")"
]
| Update dict from any environment variables that have a given prefix.
The common prefix is removed when converting the variable names to
dictionary keys. For example, if the following environment variables
were set::
MY_APP_SETTING1=foo
MY_APP_SETTING2=bar
Then calling ``.update_from_env_namespace('MY_APP')`` would be
equivalent to calling
``.update({'SETTING1': 'foo', 'SETTING2': 'bar'})``.
:arg namespace: Common environment variable prefix.
:type namespace: :py:class:`str` | [
"Update",
"dict",
"from",
"any",
"environment",
"variables",
"that",
"have",
"a",
"given",
"prefix",
"."
]
| c56eb568a376243400bb72992ca927c35922c827 | https://github.com/adblair/configloader/blob/c56eb568a376243400bb72992ca927c35922c827/configloader/__init__.py#L119-L137 | train |
adblair/configloader | configloader/__init__.py | ConfigLoader.update_from | def update_from(
self,
obj=None,
yaml_env=None,
yaml_file=None,
json_env=None,
json_file=None,
env_namespace=None,
):
"""
Update dict from several sources at once.
This is simply a convenience method that can be used as an alternative
to making several calls to the various
:meth:`~ConfigLoader.update_from_*` methods.
Updates will be applied in the order that the parameters are listed
below, with each source taking precedence over those before it.
:arg obj: Object or name of object, e.g. 'myapp.settings'.
:arg yaml_env: Name of an environment variable containing the path to
a YAML config file.
:arg yaml_file: Path to a YAML config file, or a file-like object.
:arg json_env: Name of an environment variable containing the path to
a JSON config file.
:arg json_file: Path to a JSON config file, or a file-like object.
:arg env_namespace: Common prefix of the environment variables
containing the desired config.
"""
if obj:
self.update_from_object(obj)
if yaml_env:
self.update_from_yaml_env(yaml_env)
if yaml_file:
self.update_from_yaml_file(yaml_file)
if json_env:
self.update_from_json_env(json_env)
if json_file:
self.update_from_json_file(json_file)
if env_namespace:
self.update_from_env_namespace(env_namespace) | python | def update_from(
self,
obj=None,
yaml_env=None,
yaml_file=None,
json_env=None,
json_file=None,
env_namespace=None,
):
"""
Update dict from several sources at once.
This is simply a convenience method that can be used as an alternative
to making several calls to the various
:meth:`~ConfigLoader.update_from_*` methods.
Updates will be applied in the order that the parameters are listed
below, with each source taking precedence over those before it.
:arg obj: Object or name of object, e.g. 'myapp.settings'.
:arg yaml_env: Name of an environment variable containing the path to
a YAML config file.
:arg yaml_file: Path to a YAML config file, or a file-like object.
:arg json_env: Name of an environment variable containing the path to
a JSON config file.
:arg json_file: Path to a JSON config file, or a file-like object.
:arg env_namespace: Common prefix of the environment variables
containing the desired config.
"""
if obj:
self.update_from_object(obj)
if yaml_env:
self.update_from_yaml_env(yaml_env)
if yaml_file:
self.update_from_yaml_file(yaml_file)
if json_env:
self.update_from_json_env(json_env)
if json_file:
self.update_from_json_file(json_file)
if env_namespace:
self.update_from_env_namespace(env_namespace) | [
"def",
"update_from",
"(",
"self",
",",
"obj",
"=",
"None",
",",
"yaml_env",
"=",
"None",
",",
"yaml_file",
"=",
"None",
",",
"json_env",
"=",
"None",
",",
"json_file",
"=",
"None",
",",
"env_namespace",
"=",
"None",
",",
")",
":",
"if",
"obj",
":",
"self",
".",
"update_from_object",
"(",
"obj",
")",
"if",
"yaml_env",
":",
"self",
".",
"update_from_yaml_env",
"(",
"yaml_env",
")",
"if",
"yaml_file",
":",
"self",
".",
"update_from_yaml_file",
"(",
"yaml_file",
")",
"if",
"json_env",
":",
"self",
".",
"update_from_json_env",
"(",
"json_env",
")",
"if",
"json_file",
":",
"self",
".",
"update_from_json_file",
"(",
"json_file",
")",
"if",
"env_namespace",
":",
"self",
".",
"update_from_env_namespace",
"(",
"env_namespace",
")"
]
| Update dict from several sources at once.
This is simply a convenience method that can be used as an alternative
to making several calls to the various
:meth:`~ConfigLoader.update_from_*` methods.
Updates will be applied in the order that the parameters are listed
below, with each source taking precedence over those before it.
:arg obj: Object or name of object, e.g. 'myapp.settings'.
:arg yaml_env: Name of an environment variable containing the path to
a YAML config file.
:arg yaml_file: Path to a YAML config file, or a file-like object.
:arg json_env: Name of an environment variable containing the path to
a JSON config file.
:arg json_file: Path to a JSON config file, or a file-like object.
:arg env_namespace: Common prefix of the environment variables
containing the desired config. | [
"Update",
"dict",
"from",
"several",
"sources",
"at",
"once",
"."
]
| c56eb568a376243400bb72992ca927c35922c827 | https://github.com/adblair/configloader/blob/c56eb568a376243400bb72992ca927c35922c827/configloader/__init__.py#L139-L179 | train |
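Illustrative usage: combining several sources in one call, with later-listed sources taking precedence; the module path, file name and namespace are placeholders.
config = ConfigLoader()
config.update_from(
    obj='my_app.settings',    # defaults, lowest precedence
    yaml_file='config.yml',   # overrides values from the module
    env_namespace='MY_APP',   # environment variables win over everything else
)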
adblair/configloader | configloader/__init__.py | ConfigLoader.namespace | def namespace(self, namespace, key_transform=lambda key: key):
"""
Return a copy with only the keys from a given namespace.
The common prefix will be removed in the returned dict. Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace('EXTERNAL_LIB')
ConfigLoader({'SETTING1': 'b', 'SETTING2': 'c'})
:arg namespace: Common prefix.
:arg key_transform: Function through which to pass each key when
creating the new dictionary.
:return: New config dict.
:rtype: :class:`ConfigLoader`
"""
namespace = namespace.rstrip('_') + '_'
return ConfigLoader(
(key_transform(key[len(namespace):]), value)
for key, value in self.items()
if key[:len(namespace)] == namespace
) | python | def namespace(self, namespace, key_transform=lambda key: key):
"""
Return a copy with only the keys from a given namespace.
The common prefix will be removed in the returned dict. Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace('EXTERNAL_LIB')
ConfigLoader({'SETTING1': 'b', 'SETTING2': 'c'})
:arg namespace: Common prefix.
:arg key_transform: Function through which to pass each key when
creating the new dictionary.
:return: New config dict.
:rtype: :class:`ConfigLoader`
"""
namespace = namespace.rstrip('_') + '_'
return ConfigLoader(
(key_transform(key[len(namespace):]), value)
for key, value in self.items()
if key[:len(namespace)] == namespace
) | [
"def",
"namespace",
"(",
"self",
",",
"namespace",
",",
"key_transform",
"=",
"lambda",
"key",
":",
"key",
")",
":",
"namespace",
"=",
"namespace",
".",
"rstrip",
"(",
"'_'",
")",
"+",
"'_'",
"return",
"ConfigLoader",
"(",
"(",
"key_transform",
"(",
"key",
"[",
"len",
"(",
"namespace",
")",
":",
"]",
")",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
"if",
"key",
"[",
":",
"len",
"(",
"namespace",
")",
"]",
"==",
"namespace",
")"
]
| Return a copy with only the keys from a given namespace.
The common prefix will be removed in the returned dict. Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace('EXTERNAL_LIB')
ConfigLoader({'SETTING1': 'b', 'SETTING2': 'c'})
:arg namespace: Common prefix.
:arg key_transform: Function through which to pass each key when
creating the new dictionary.
:return: New config dict.
:rtype: :class:`ConfigLoader` | [
"Return",
"a",
"copy",
"with",
"only",
"the",
"keys",
"from",
"a",
"given",
"namespace",
"."
]
| c56eb568a376243400bb72992ca927c35922c827 | https://github.com/adblair/configloader/blob/c56eb568a376243400bb72992ca927c35922c827/configloader/__init__.py#L181-L208 | train |
adblair/configloader | configloader/__init__.py | ConfigLoader.namespace_lower | def namespace_lower(self, namespace):
"""
Return a copy with only the keys from a given namespace, lower-cased.
The keys in the returned dict will be transformed to lower case after
filtering, so they can be easily passed as keyword arguments to other
functions. This is just syntactic sugar for calling
:meth:`~ConfigLoader.namespace` with
``key_transform=lambda key: key.lower()``.
Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace_lower('EXTERNAL_LIB')
ConfigLoader({'setting1': 'b', 'setting2': 'c'})
:arg namespace: Common prefix.
:return: New config dict.
:rtype: :class:`ConfigLoader`
"""
return self.namespace(namespace, key_transform=lambda key: key.lower()) | python | def namespace_lower(self, namespace):
"""
Return a copy with only the keys from a given namespace, lower-cased.
The keys in the returned dict will be transformed to lower case after
filtering, so they can be easily passed as keyword arguments to other
functions. This is just syntactic sugar for calling
:meth:`~ConfigLoader.namespace` with
``key_transform=lambda key: key.lower()``.
Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace_lower('EXTERNAL_LIB')
ConfigLoader({'setting1': 'b', 'setting2': 'c'})
:arg namespace: Common prefix.
:return: New config dict.
:rtype: :class:`ConfigLoader`
"""
return self.namespace(namespace, key_transform=lambda key: key.lower()) | [
"def",
"namespace_lower",
"(",
"self",
",",
"namespace",
")",
":",
"return",
"self",
".",
"namespace",
"(",
"namespace",
",",
"key_transform",
"=",
"lambda",
"key",
":",
"key",
".",
"lower",
"(",
")",
")"
]
| Return a copy with only the keys from a given namespace, lower-cased.
The keys in the returned dict will be transformed to lower case after
filtering, so they can be easily passed as keyword arguments to other
functions. This is just syntactic sugar for calling
:meth:`~ConfigLoader.namespace` with
``key_transform=lambda key: key.lower()``.
Example::
>>> from configloader import ConfigLoader
>>> config = ConfigLoader(
... MY_APP_SETTING1='a',
... EXTERNAL_LIB_SETTING1='b',
... EXTERNAL_LIB_SETTING2='c',
... )
>>> config.namespace_lower('EXTERNAL_LIB')
ConfigLoader({'setting1': 'b', 'setting2': 'c'})
:arg namespace: Common prefix.
:return: New config dict.
:rtype: :class:`ConfigLoader` | [
"Return",
"a",
"copy",
"with",
"only",
"the",
"keys",
"from",
"a",
"given",
"namespace",
"lower",
"-",
"cased",
"."
]
| c56eb568a376243400bb72992ca927c35922c827 | https://github.com/adblair/configloader/blob/c56eb568a376243400bb72992ca927c35922c827/configloader/__init__.py#L210-L236 | train |
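Illustrative usage: a lower-cased namespace is convenient to pass as keyword arguments; smtplib.SMTP is used only as a familiar callable accepting host= and port=.
import smtplib

from configloader import ConfigLoader

config = ConfigLoader(SMTP_HOST='localhost', SMTP_PORT=25, APP_DEBUG=True)
smtp_kwargs = config.namespace_lower('SMTP')
# -> ConfigLoader({'host': 'localhost', 'port': 25})
server = smtplib.SMTP(**smtp_kwargs)  # attempts a connection to localhost:25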
ehansis/ozelot | ozelot/etl/util.py | render_diagram | def render_diagram(root_task, out_base, max_param_len=20, horizontal=False, colored=False):
"""Render a diagram of the ETL pipeline
All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered.
Nodes are, by default, styled as simple rects. This style is augmented by any
:attr:`diagram_style` attributes of the tasks.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline
out_base (str): base output file name (file endings will be appended)
max_param_len (int): Maximum shown length of task parameter values
horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom
colored (bool): If True, show task completion status by color of nodes
"""
import re
import codecs
import subprocess
from ozelot import config
from ozelot.etl.tasks import get_task_name, get_task_param_string
# the graph - lines in dot file
lines = [u"digraph G {"]
if horizontal:
lines.append(u"rankdir=LR;")
# helper function: make unique task id from task name and parameters:
# task name + parameter string, with spaces replaced with _ and all non-alphanumerical characters stripped
def get_id(task):
s = get_task_name(task) + "_" + get_task_param_string(task)
return re.sub(r'\W+', '', re.sub(' ', '_', s))
# node names of tasks that have already been added to the graph
existing_nodes = set()
# edge sets (tuples of two node names) that have already been added
existing_edges = set()
# recursion function for generating the pipeline graph
def _build(task, parent_id=None):
tid = get_id(task)
# add node if it's not already there
if tid not in existing_nodes:
# build task label: task name plus dictionary of parameters as table
params = task.to_str_params()
param_list = ""
for k, v in params.items():
# truncate param value if necessary, and add "..."
if len(v) > max_param_len:
v = v[:max_param_len] + "..."
param_list += "<TR><TD ALIGN=\"LEFT\">" \
"<FONT POINT-SIZE=\"10\">{:s}</FONT>" \
"</TD><TD ALIGN=\"LEFT\">" \
"<FONT POINT-SIZE=\"10\">{:s}</FONT>" \
"</TD></TR>".format(k, v)
label = "<TABLE BORDER=\"0\" CELLSPACING=\"1\" CELLPADDING=\"1\">" \
"<TR><TD COLSPAN=\"2\" ALIGN=\"CENTER\">" \
"<FONT POINT-SIZE=\"12\">{:s}</FONT>" \
"</TD></TR>" \
"".format(get_task_name(task)) + param_list + "</TABLE>"
style = getattr(task, 'diagram_style', [])
if colored:
color = ', color="{:s}"'.format("green" if task.complete() else "red")
else:
color = ''
# add a node for the task
lines.append(u"{name:s} [label=< {label:s} >, shape=\"rect\" {color:s}, style=\"{style:s}\"];\n"
u"".format(name=tid,
label=label,
color=color,
style=','.join(style)))
existing_nodes.add(tid)
# recurse over requirements
for req in task.requires():
_build(req, parent_id=tid)
# add edge from current node to (upstream) parent, if it doesn't already exist
if parent_id is not None and (tid, parent_id) not in existing_edges:
lines.append(u"{source:s} -> {target:s};\n".format(source=tid, target=parent_id))
existing_edges.add((tid, parent_id))  # remember the edge so it is only emitted once
# generate pipeline graph
_build(root_task)
# close the graph definition
lines.append(u"}")
# write description in DOT format
with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
f.write(u"\n".join(lines))
# check existence of DOT_EXECUTABLE variable and file
if not hasattr(config, 'DOT_EXECUTABLE'):
raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
if not os.path.exists(config.DOT_EXECUTABLE):
raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))
# render to image using DOT
# noinspection PyUnresolvedReferences
subprocess.check_call([
config.DOT_EXECUTABLE,
'-T', 'png',
'-o', out_base + '.png',
out_base + '.dot'
]) | python | def render_diagram(root_task, out_base, max_param_len=20, horizontal=False, colored=False):
"""Render a diagram of the ETL pipeline
All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered.
Nodes are, by default, styled as simple rects. This style is augmented by any
:attr:`diagram_style` attributes of the tasks.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline
out_base (str): base output file name (file endings will be appended)
max_param_len (int): Maximum shown length of task parameter values
horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom
colored (bool): If True, show task completion status by color of nodes
"""
import re
import codecs
import subprocess
from ozelot import config
from ozelot.etl.tasks import get_task_name, get_task_param_string
# the graph - lines in dot file
lines = [u"digraph G {"]
if horizontal:
lines.append(u"rankdir=LR;")
# helper function: make unique task id from task name and parameters:
# task name + parameter string, with spaces replaced with _ and all non-alphanumerical characters stripped
def get_id(task):
s = get_task_name(task) + "_" + get_task_param_string(task)
return re.sub(r'\W+', '', re.sub(' ', '_', s))
# node names of tasks that have already been added to the graph
existing_nodes = set()
# edge sets (tuples of two node names) that have already been added
existing_edges = set()
# recursion function for generating the pipeline graph
def _build(task, parent_id=None):
tid = get_id(task)
# add node if it's not already there
if tid not in existing_nodes:
# build task label: task name plus dictionary of parameters as table
params = task.to_str_params()
param_list = ""
for k, v in params.items():
# truncate param value if necessary, and add "..."
if len(v) > max_param_len:
v = v[:max_param_len] + "..."
param_list += "<TR><TD ALIGN=\"LEFT\">" \
"<FONT POINT-SIZE=\"10\">{:s}</FONT>" \
"</TD><TD ALIGN=\"LEFT\">" \
"<FONT POINT-SIZE=\"10\">{:s}</FONT>" \
"</TD></TR>".format(k, v)
label = "<TABLE BORDER=\"0\" CELLSPACING=\"1\" CELLPADDING=\"1\">" \
"<TR><TD COLSPAN=\"2\" ALIGN=\"CENTER\">" \
"<FONT POINT-SIZE=\"12\">{:s}</FONT>" \
"</TD></TR>" \
"".format(get_task_name(task)) + param_list + "</TABLE>"
style = getattr(task, 'diagram_style', [])
if colored:
color = ', color="{:s}"'.format("green" if task.complete() else "red")
else:
color = ''
# add a node for the task
lines.append(u"{name:s} [label=< {label:s} >, shape=\"rect\" {color:s}, style=\"{style:s}\"];\n"
u"".format(name=tid,
label=label,
color=color,
style=','.join(style)))
existing_nodes.add(tid)
# recurse over requirements
for req in task.requires():
_build(req, parent_id=tid)
# add edge from current node to (upstream) parent, if it doesn't already exist
if parent_id is not None and (tid, parent_id) not in existing_edges:
lines.append(u"{source:s} -> {target:s};\n".format(source=tid, target=parent_id))
# generate pipeline graph
_build(root_task)
# close the graph definition
lines.append(u"}")
# write description in DOT format
with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
f.write(u"\n".join(lines))
# check existence of DOT_EXECUTABLE variable and file
if not hasattr(config, 'DOT_EXECUTABLE'):
raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
if not os.path.exists(config.DOT_EXECUTABLE):
raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))
# render to image using DOT
# noinspection PyUnresolvedReferences
subprocess.check_call([
config.DOT_EXECUTABLE,
'-T', 'png',
'-o', out_base + '.png',
out_base + '.dot'
]) | [
"def",
"render_diagram",
"(",
"root_task",
",",
"out_base",
",",
"max_param_len",
"=",
"20",
",",
"horizontal",
"=",
"False",
",",
"colored",
"=",
"False",
")",
":",
"import",
"re",
"import",
"codecs",
"import",
"subprocess",
"from",
"ozelot",
"import",
"config",
"from",
"ozelot",
".",
"etl",
".",
"tasks",
"import",
"get_task_name",
",",
"get_task_param_string",
"# the graph - lines in dot file",
"lines",
"=",
"[",
"u\"digraph G {\"",
"]",
"if",
"horizontal",
":",
"lines",
".",
"append",
"(",
"u\"rankdir=LR;\"",
")",
"# helper function: make unique task id from task name and parameters:",
"# task name + parameter string, with spaces replaced with _ and all non-alphanumerical characters stripped",
"def",
"get_id",
"(",
"task",
")",
":",
"s",
"=",
"get_task_name",
"(",
"task",
")",
"+",
"\"_\"",
"+",
"get_task_param_string",
"(",
"task",
")",
"return",
"re",
".",
"sub",
"(",
"r'\\W+'",
",",
"''",
",",
"re",
".",
"sub",
"(",
"' '",
",",
"'_'",
",",
"s",
")",
")",
"# node names of tasks that have already been added to the graph",
"existing_nodes",
"=",
"set",
"(",
")",
"# edge sets (tuples of two node names) that have already been added",
"existing_edges",
"=",
"set",
"(",
")",
"# recursion function for generating the pipeline graph",
"def",
"_build",
"(",
"task",
",",
"parent_id",
"=",
"None",
")",
":",
"tid",
"=",
"get_id",
"(",
"task",
")",
"# add node if it's not already there",
"if",
"tid",
"not",
"in",
"existing_nodes",
":",
"# build task label: task name plus dictionary of parameters as table",
"params",
"=",
"task",
".",
"to_str_params",
"(",
")",
"param_list",
"=",
"\"\"",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
":",
"# truncate param value if necessary, and add \"...\"",
"if",
"len",
"(",
"v",
")",
">",
"max_param_len",
":",
"v",
"=",
"v",
"[",
":",
"max_param_len",
"]",
"+",
"\"...\"",
"param_list",
"+=",
"\"<TR><TD ALIGN=\\\"LEFT\\\">\"",
"\"<FONT POINT-SIZE=\\\"10\\\">{:s}</FONT>\"",
"\"</TD><TD ALIGN=\\\"LEFT\\\">\"",
"\"<FONT POINT-SIZE=\\\"10\\\">{:s}</FONT>\"",
"\"</TD></TR>\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"label",
"=",
"\"<TABLE BORDER=\\\"0\\\" CELLSPACING=\\\"1\\\" CELLPADDING=\\\"1\\\">\"",
"\"<TR><TD COLSPAN=\\\"2\\\" ALIGN=\\\"CENTER\\\">\"",
"\"<FONT POINT-SIZE=\\\"12\\\">{:s}</FONT>\"",
"\"</TD></TR>\"",
"\"\"",
".",
"format",
"(",
"get_task_name",
"(",
"task",
")",
")",
"+",
"param_list",
"+",
"\"</TABLE>\"",
"style",
"=",
"getattr",
"(",
"task",
",",
"'diagram_style'",
",",
"[",
"]",
")",
"if",
"colored",
":",
"color",
"=",
"', color=\"{:s}\"'",
".",
"format",
"(",
"\"green\"",
"if",
"task",
".",
"complete",
"(",
")",
"else",
"\"red\"",
")",
"else",
":",
"color",
"=",
"''",
"# add a node for the task",
"lines",
".",
"append",
"(",
"u\"{name:s} [label=< {label:s} >, shape=\\\"rect\\\" {color:s}, style=\\\"{style:s}\\\"];\\n\"",
"u\"\"",
".",
"format",
"(",
"name",
"=",
"tid",
",",
"label",
"=",
"label",
",",
"color",
"=",
"color",
",",
"style",
"=",
"','",
".",
"join",
"(",
"style",
")",
")",
")",
"existing_nodes",
".",
"add",
"(",
"tid",
")",
"# recurse over requirements",
"for",
"req",
"in",
"task",
".",
"requires",
"(",
")",
":",
"_build",
"(",
"req",
",",
"parent_id",
"=",
"tid",
")",
"# add edge from current node to (upstream) parent, if it doesn't already exist",
"if",
"parent_id",
"is",
"not",
"None",
"and",
"(",
"tid",
",",
"parent_id",
")",
"not",
"in",
"existing_edges",
":",
"lines",
".",
"append",
"(",
"u\"{source:s} -> {target:s};\\n\"",
".",
"format",
"(",
"source",
"=",
"tid",
",",
"target",
"=",
"parent_id",
")",
")",
"# generate pipeline graph",
"_build",
"(",
"root_task",
")",
"# close the graph definition",
"lines",
".",
"append",
"(",
"u\"}\"",
")",
"# write description in DOT format",
"with",
"codecs",
".",
"open",
"(",
"out_base",
"+",
"'.dot'",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"u\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
"# check existence of DOT_EXECUTABLE variable and file",
"if",
"not",
"hasattr",
"(",
"config",
",",
"'DOT_EXECUTABLE'",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config",
".",
"DOT_EXECUTABLE",
")",
":",
"raise",
"IOError",
"(",
"\"Could not find file pointed to by 'DOT_EXECUTABLE': \"",
"+",
"str",
"(",
"config",
".",
"DOT_EXECUTABLE",
")",
")",
"# render to image using DOT",
"# noinspection PyUnresolvedReferences",
"subprocess",
".",
"check_call",
"(",
"[",
"config",
".",
"DOT_EXECUTABLE",
",",
"'-T'",
",",
"'png'",
",",
"'-o'",
",",
"out_base",
"+",
"'.png'",
",",
"out_base",
"+",
"'.dot'",
"]",
")"
]
| Render a diagram of the ETL pipeline
All upstream tasks (i.e. requirements) of :attr:`root_task` are rendered.
Nodes are, by default, styled as simple rects. This style is augmented by any
:attr:`diagram_style` attributes of the tasks.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
root_task (luigi.Task): Task instance that defines the 'upstream root' of the pipeline
out_base (str): base output file name (file endings will be appended)
max_param_len (int): Maximum shown length of task parameter values
horizontal (bool): If True, layout graph left-to-right instead of top-to-bottom
colored (bool): If True, show task completion status by color of nodes | [
"Render",
"a",
"diagram",
"of",
"the",
"ETL",
"pipeline"
]
| 948675e02eb6fca940450f5cb814f53e97159e5b | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/util.py#L12-L127 | train |
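A minimal usage sketch for render_diagram, assuming a hypothetical luigi root task LoadEverything and a GraphViz 'dot' binary configured as DOT_EXECUTABLE in project_config.py:

# Usage sketch (LoadEverything and its import path are hypothetical):
from mypipeline.tasks import LoadEverything  # hypothetical import

render_diagram(LoadEverything(), out_base='build/pipeline', colored=True)
# writes build/pipeline.dot, then renders build/pipeline.png via 'dot'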
ehansis/ozelot | ozelot/etl/util.py | sanitize | def sanitize(s,
normalize_whitespace=True,
normalize_unicode=True,
form='NFKC',
enforce_encoding=True,
encoding='utf-8'):
"""Normalize a string
Args:
s (unicode string): input unicode string
normalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines),
strip whitespace at start/end
normalize_unicode (bool): if True, normalize unicode form to 'form'
form (str): unicode form
enforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors
and stripping all characters not part of the encoding
encoding (str): target encoding for the above
Returns:
str: unicode output string
"""
if enforce_encoding:
s = s.encode(encoding, errors='ignore').decode(encoding, errors='ignore')
if normalize_unicode:
s = unicodedata.normalize(form, s)
if normalize_whitespace:
s = re.sub(r'\s+', ' ', s).strip()
return s | python | def sanitize(s,
normalize_whitespace=True,
normalize_unicode=True,
form='NFKC',
enforce_encoding=True,
encoding='utf-8'):
"""Normalize a string
Args:
s (unicode string): input unicode string
normalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines),
strip whitespace at start/end
normalize_unicode (bool): if True, normalize unicode form to 'form'
form (str): unicode form
enforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors
and stripping all characters not part of the encoding
encoding (str): target encoding for the above
Returns:
str: unicode output string
"""
if enforce_encoding:
s = s.encode(encoding, errors='ignore').decode(encoding, errors='ignore')
if normalize_unicode:
s = unicodedata.normalize(form, s)
if normalize_whitespace:
s = re.sub(r'\s+', ' ', s).strip()
return s | [
"def",
"sanitize",
"(",
"s",
",",
"normalize_whitespace",
"=",
"True",
",",
"normalize_unicode",
"=",
"True",
",",
"form",
"=",
"'NFKC'",
",",
"enforce_encoding",
"=",
"True",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"enforce_encoding",
":",
"s",
"=",
"s",
".",
"encode",
"(",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
".",
"decode",
"(",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
"if",
"normalize_unicode",
":",
"s",
"=",
"unicodedata",
".",
"normalize",
"(",
"form",
",",
"s",
")",
"if",
"normalize_whitespace",
":",
"s",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"' '",
",",
"s",
")",
".",
"strip",
"(",
")",
"return",
"s"
]
| Normalize a string
Args:
s (unicode string): input unicode string
normalize_whitespace (bool): if True, normalize all whitespace to single spaces (including newlines),
strip whitespace at start/end
normalize_unicode (bool): if True, normalize unicode form to 'form'
form (str): unicode form
enforce_encoding (bool): if True, encode string to target encoding and re-decode, ignoring errors
and stripping all characters not part of the encoding
encoding (str): target encoding for the above
Returns:
str: unicode output string | [
"Normalize",
"a",
"string"
]
| 948675e02eb6fca940450f5cb814f53e97159e5b | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/util.py#L130-L161 | train |
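A quick worked example of sanitize; the input below mixes an ideographic space (U+3000), a newline, and a tab, all of which end up as single ASCII spaces:

messy = u"Hello\u3000 World\n\t!"
clean = sanitize(messy)
# NFKC folds U+3000 to a plain space, then whitespace runs collapse:
assert clean == u"Hello World !"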
gmdzy2010/dingtalk_sdk_gmdzy2010 | dingtalk_sdk_gmdzy2010/authority_request.py | PersistentCodeRequest.get_ticket_for_sns_token | def get_ticket_for_sns_token(self):
"""This is a shortcut for getting the sns_token, as a post data of
request body."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return {
"openid": self.get_openid(),
"persistent_code": self.get_persistent_code(),
} | python | def get_ticket_for_sns_token(self):
"""This is a shortcut for getting the sns_token, as a post data of
request body."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return {
"openid": self.get_openid(),
"persistent_code": self.get_persistent_code(),
} | [
"def",
"get_ticket_for_sns_token",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s\\t%s\"",
"%",
"(",
"self",
".",
"request_method",
",",
"self",
".",
"request_url",
")",
")",
"return",
"{",
"\"openid\"",
":",
"self",
".",
"get_openid",
"(",
")",
",",
"\"persistent_code\"",
":",
"self",
".",
"get_persistent_code",
"(",
")",
",",
"}"
]
| This is a shortcut for getting the sns_token, as the POST data of
the request body. | [
"This",
"is",
"a",
"shortcut",
"for",
"getting",
"the",
"sns_token",
"as",
"a",
"post",
"data",
"of",
"request",
"body",
"."
]
| b06cb1f78f89be9554dcb6101af8bc72718a9ecd | https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/authority_request.py#L86-L93 | train |
ehansis/ozelot | examples/superheroes/superheroes/models.py | reinitialize | def reinitialize():
"""Drop all tables for all models, then re-create them
"""
from ozelot import client
# import all additional models needed in this project
# noinspection PyUnresolvedReferences
from ozelot.orm.target import ORMTargetMarker
client = client.get_client()
base.Base.drop_all(client)
base.Base.create_all(client) | python | def reinitialize():
"""Drop all tables for all models, then re-create them
"""
from ozelot import client
# import all additional models needed in this project
# noinspection PyUnresolvedReferences
from ozelot.orm.target import ORMTargetMarker
client = client.get_client()
base.Base.drop_all(client)
base.Base.create_all(client) | [
"def",
"reinitialize",
"(",
")",
":",
"from",
"ozelot",
"import",
"client",
"# import all additional models needed in this project",
"# noinspection PyUnresolvedReferences",
"from",
"ozelot",
".",
"orm",
".",
"target",
"import",
"ORMTargetMarker",
"client",
"=",
"client",
".",
"get_client",
"(",
")",
"base",
".",
"Base",
".",
"drop_all",
"(",
"client",
")",
"base",
".",
"Base",
".",
"create_all",
"(",
"client",
")"
]
| Drop all tables for all models, then re-create them | [
"Drop",
"all",
"tables",
"for",
"all",
"models",
"then",
"re",
"-",
"create",
"them"
]
| 948675e02eb6fca940450f5cb814f53e97159e5b | https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/models.py#L130-L141 | train |
ncraike/fang | fang/dependency_register.py | DependencyRegister._unwrap_func | def _unwrap_func(cls, decorated_func):
'''
This unwraps a decorated func, returning the inner wrapped func.
This may become unnecessary with Python 3.4's inspect.unwrap().
'''
if click is not None:
# Workaround for click.command() decorator not setting
# __wrapped__
if isinstance(decorated_func, click.Command):
return cls._unwrap_func(decorated_func.callback)
if hasattr(decorated_func, '__wrapped__'):
# Recursion: unwrap more if needed
return cls._unwrap_func(decorated_func.__wrapped__)
else:
# decorated_func isn't actually decorated, no more
# unwrapping to do
return decorated_func | python | def _unwrap_func(cls, decorated_func):
'''
This unwraps a decorated func, returning the inner wrapped func.
This may become unnecessary with Python 3.4's inspect.unwrap().
'''
if click is not None:
# Workaround for click.command() decorator not setting
# __wrapped__
if isinstance(decorated_func, click.Command):
return cls._unwrap_func(decorated_func.callback)
if hasattr(decorated_func, '__wrapped__'):
# Recursion: unwrap more if needed
return cls._unwrap_func(decorated_func.__wrapped__)
else:
# decorated_func isn't actually decorated, no more
# unwrapping to do
return decorated_func | [
"def",
"_unwrap_func",
"(",
"cls",
",",
"decorated_func",
")",
":",
"if",
"click",
"is",
"not",
"None",
":",
"# Workaround for click.command() decorator not setting",
"# __wrapped__",
"if",
"isinstance",
"(",
"decorated_func",
",",
"click",
".",
"Command",
")",
":",
"return",
"cls",
".",
"_unwrap_func",
"(",
"decorated_func",
".",
"callback",
")",
"if",
"hasattr",
"(",
"decorated_func",
",",
"'__wrapped__'",
")",
":",
"# Recursion: unwrap more if needed",
"return",
"cls",
".",
"_unwrap_func",
"(",
"decorated_func",
".",
"__wrapped__",
")",
"else",
":",
"# decorated_func isn't actually decorated, no more",
"# unwrapping to do",
"return",
"decorated_func"
]
| This unwraps a decorated func, returning the inner wrapped func.
This may become unnecessary with Python 3.4's inspect.unwrap(). | [
"This",
"unwraps",
"a",
"decorated",
"func",
"returning",
"the",
"inner",
"wrapped",
"func",
"."
]
| 2d9e1216c866e450059017f83ab775f7716eda7a | https://github.com/ncraike/fang/blob/2d9e1216c866e450059017f83ab775f7716eda7a/fang/dependency_register.py#L21-L39 | train |
ncraike/fang | fang/dependency_register.py | DependencyRegister._register_dependent | def _register_dependent(self, dependent, resource_name):
'''
Register a mapping of the dependent to resource name.
After calling, dependency_register.dependents[dependent] should
contain resource_name.
'''
if dependent not in self.dependents:
self.dependents[dependent] = []
self.dependents[dependent].insert(0, resource_name) | python | def _register_dependent(self, dependent, resource_name):
'''
Register a mapping of the dependent to resource name.
After calling, dependency_register.dependents[dependent] should
contain resource_name.
'''
if dependent not in self.dependents:
self.dependents[dependent] = []
self.dependents[dependent].insert(0, resource_name) | [
"def",
"_register_dependent",
"(",
"self",
",",
"dependent",
",",
"resource_name",
")",
":",
"if",
"dependent",
"not",
"in",
"self",
".",
"dependents",
":",
"self",
".",
"dependents",
"[",
"dependent",
"]",
"=",
"[",
"]",
"self",
".",
"dependents",
"[",
"dependent",
"]",
".",
"insert",
"(",
"0",
",",
"resource_name",
")"
]
| Register a mapping of the dependent to resource name.
After calling, dependency_register.dependents[dependent] should
contain resource_name. | [
"Register",
"a",
"mapping",
"of",
"the",
"dependent",
"to",
"resource",
"name",
"."
]
| 2d9e1216c866e450059017f83ab775f7716eda7a | https://github.com/ncraike/fang/blob/2d9e1216c866e450059017f83ab775f7716eda7a/fang/dependency_register.py#L50-L59 | train |
ncraike/fang | fang/dependency_register.py | DependencyRegister.register | def register(self, resource_name, dependent=None):
'''
Register the given dependent as depending on the "resource"
named by resource_name.
'''
if dependent is None:
# Give a partial usable as a decorator
return partial(self.register, resource_name)
dependent = self._unwrap_dependent(dependent)
self._register_dependent(dependent, resource_name)
self._register_resource_dependency(resource_name, dependent)
# Return dependent to ease use as decorator
return dependent | python | def register(self, resource_name, dependent=None):
'''
Register the given dependent as depending on the "resource"
named by resource_name.
'''
if dependent is None:
# Give a partial usable as a decorator
return partial(self.register, resource_name)
dependent = self._unwrap_dependent(dependent)
self._register_dependent(dependent, resource_name)
self._register_resource_dependency(resource_name, dependent)
# Return dependent to ease use as decorator
return dependent | [
"def",
"register",
"(",
"self",
",",
"resource_name",
",",
"dependent",
"=",
"None",
")",
":",
"if",
"dependent",
"is",
"None",
":",
"# Give a partial usable as a decorator",
"return",
"partial",
"(",
"self",
".",
"register",
",",
"resource_name",
")",
"dependent",
"=",
"self",
".",
"_unwrap_dependent",
"(",
"dependent",
")",
"self",
".",
"_register_dependent",
"(",
"dependent",
",",
"resource_name",
")",
"self",
".",
"_register_resource_dependency",
"(",
"resource_name",
",",
"dependent",
")",
"# Return dependent to ease use as decorator",
"return",
"dependent"
]
| Register the given dependent as depending on the "resource"
named by resource_name. | [
"Register",
"the",
"given",
"dependent",
"as",
"depending",
"on",
"the",
"resource",
"named",
"by",
"resource_name",
"."
]
| 2d9e1216c866e450059017f83ab775f7716eda7a | https://github.com/ncraike/fang/blob/2d9e1216c866e450059017f83ab775f7716eda7a/fang/dependency_register.py#L66-L80 | train |
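A usage sketch for the register method used as a decorator; the resource name is hypothetical, and it is assumed that a freshly constructed DependencyRegister initializes its dependents mapping:

deps = DependencyRegister()

@deps.register('config.database_url')  # hypothetical resource name
def connect_db():
    pass

# The decorator unwraps and records the function, then returns it:
assert deps.dependents[connect_db] == ['config.database_url']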
thorgate/django-esteid | esteid/middleware.py | BaseIdCardMiddleware.verify_ocsp | def verify_ocsp(cls, certificate, issuer):
""" Runs OCSP verification and returns error code - 0 means success
"""
return OCSPVerifier(certificate, issuer,
cls.get_ocsp_url(),
cls.get_ocsp_responder_certificate_path()).verify() | python | def verify_ocsp(cls, certificate, issuer):
""" Runs OCSP verification and returns error code - 0 means success
"""
return OCSPVerifier(certificate, issuer,
cls.get_ocsp_url(),
cls.get_ocsp_responder_certificate_path()).verify() | [
"def",
"verify_ocsp",
"(",
"cls",
",",
"certificate",
",",
"issuer",
")",
":",
"return",
"OCSPVerifier",
"(",
"certificate",
",",
"issuer",
",",
"cls",
".",
"get_ocsp_url",
"(",
")",
",",
"cls",
".",
"get_ocsp_responder_certificate_path",
"(",
")",
")",
".",
"verify",
"(",
")"
]
| Runs OCSP verification and returns error code - 0 means success | [
"Runs",
"OCSP",
"verification",
"and",
"returns",
"error",
"code",
"-",
"0",
"means",
"success"
]
| 407ae513e357fedea0e3e42198df8eb9d9ff0646 | https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/middleware.py#L144-L150 | train |
lsst-sqre/documenteer | documenteer/requestsutils.py | requests_retry_session | def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None):
"""Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | python | def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None):
"""Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session | [
"def",
"requests_retry_session",
"(",
"retries",
"=",
"3",
",",
"backoff_factor",
"=",
"0.3",
",",
"status_forcelist",
"=",
"(",
"500",
",",
"502",
",",
"504",
")",
",",
"session",
"=",
"None",
")",
":",
"session",
"=",
"session",
"or",
"requests",
".",
"Session",
"(",
")",
"retry",
"=",
"Retry",
"(",
"total",
"=",
"retries",
",",
"read",
"=",
"retries",
",",
"connect",
"=",
"retries",
",",
"backoff_factor",
"=",
"backoff_factor",
",",
"status_forcelist",
"=",
"status_forcelist",
",",
")",
"adapter",
"=",
"HTTPAdapter",
"(",
"max_retries",
"=",
"retry",
")",
"session",
".",
"mount",
"(",
"'http://'",
",",
"adapter",
")",
"session",
".",
"mount",
"(",
"'https://'",
",",
"adapter",
")",
"return",
"session"
]
| Create a requests session that handles errors by retrying.
Parameters
----------
retries : `int`, optional
Number of retries to attempt.
backoff_factor : `float`, optional
Backoff factor.
status_forcelist : sequence of `str`, optional
Status codes that must be retried.
session : `requests.Session`
An existing requests session to configure.
Returns
-------
session : `requests.Session`
Requests session that can take ``get`` and ``post`` methods, for
example.
Notes
-----
This function is based on
https://www.peterbe.com/plog/best-practice-with-retries-with-requests
by Peter Bengtsson. | [
"Create",
"a",
"requests",
"session",
"that",
"handles",
"errors",
"by",
"retrying",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/requestsutils.py#L11-L52 | train |
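A usage sketch (the URL is hypothetical); retried status codes and backoff follow the defaults documented above:

session = requests_retry_session(retries=5, backoff_factor=0.5)
response = session.get('https://example.com/api/data', timeout=10)  # hypothetical URL
response.raise_for_status()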
lsst-sqre/documenteer | documenteer/sphinxconfig/technoteconf.py | configure_technote | def configure_technote(meta_stream):
"""Builds a ``dict`` of Sphinx configuration variables given a central
configuration for LSST Design Documents and a metadata YAML file.
This function refactors the common Sphinx ``conf.py`` script so that basic
configurations are managed centrally in this module, while author-updatable
metadata is stored in a ``metadata.yaml`` file in the document's
repository. To use this function, a ``conf.py`` need only look like
.. code:: python
import os
from documenteer.sphinxconfig.technoteconf import configure_technote
metadata_path = os.path.join(os.path.dirname(__file__), 'metadata.yaml')
with open(metadata_path, 'r') as f:
confs = configure_technote(f)
_g = globals()
_g.update(confs)
And ``metadata.yaml`` looks like:
.. code-block:: yaml
doc_id: 'LDM-152'
doc_title: 'Data Management Middleware Design'
copyright: '2015, AURA/LSST'
authors:
- 'Kian-Tat Lim'
- 'Ray Plante'
- 'Gregory Dubois-Felsmann'
# Current document version
last_revised: 'October 10, 2013'
version: '10.0'
# dev_version_suffix: None # e.g. 'alpha'/'beta'/'rc' if necessary
Parameters
----------
meta_stream : file handle
A file stream (e.g., from :func:`open`) for the ``metadata.yaml``
document in a design document's repository.
Returns
-------
confs : `dict`
Dictionary of configurations that should be added to the ``conf.py``
global namespace.
"""
_metadata = yaml.load(meta_stream)
confs = _build_confs(_metadata)
return confs | python | def configure_technote(meta_stream):
"""Builds a ``dict`` of Sphinx configuration variables given a central
configuration for LSST Design Documents and a metadata YAML file.
This function refactors the common Sphinx ``conf.py`` script so that basic
configurations are managed centrally in this module, while author-updatable
metadata is stored in a ``metadata.yaml`` file in the document's
repository. To use this function, a ``conf.py`` need only look like
.. code:: python
import os
from documenteer.sphinxconfig.technoteconf import configure_technote
metadata_path = os.path.join(os.path.dirname(__file__), 'metadata.yaml')
with open(metadata_path, 'r') as f:
confs = configure_technote(f)
_g = globals()
_g.update(confs)
And ``metadata.yaml`` looks like:
.. code-block:: yaml
doc_id: 'LDM-152'
doc_title: 'Data Management Middleware Design'
copyright: '2015, AURA/LSST'
authors:
- 'Kian-Tat Lim'
- 'Ray Plante'
- 'Gregory Dubois-Felsmann'
# Current document version
last_revised: 'October 10, 2013'
version: '10.0'
# dev_version_suffix: None # e.g. 'alpha'/'beta'/'rc' if necessary
Parameters
----------
meta_stream : file handle
A file stream (e.g., from :func:`open`) for the ``metadata.yaml``
document in a design document's repository.
Returns
-------
confs : `dict`
Dictionary of configurations that should be added to the ``conf.py``
global namespace.
"""
_metadata = yaml.load(meta_stream)
confs = _build_confs(_metadata)
return confs | [
"def",
"configure_technote",
"(",
"meta_stream",
")",
":",
"_metadata",
"=",
"yaml",
".",
"load",
"(",
"meta_stream",
")",
"confs",
"=",
"_build_confs",
"(",
"_metadata",
")",
"return",
"confs"
]
| Builds a ``dict`` of Sphinx configuration variables given a central
configuration for LSST Design Documents and a metadata YAML file.
This function refactors the common Sphinx ``conf.py`` script so that basic
configurations are managed centrally in this module, while author-updatable
metadata is stored in a ``metadata.yaml`` file in the document's
repository. To use this function, a ``conf.py`` need only look like
.. code:: python
import os
from documenteer.sphinxconfig.technoteconf import configure_technote
metadata_path = os.path.join(os.path.dirname(__file__), 'metadata.yaml')
with open(metadata_path, 'r') as f:
confs = configure_technote(f)
_g = globals()
_g.update(confs)
And ``metadata.yaml`` looks like:
.. code-block:: yaml
doc_id: 'LDM-152'
doc_title: 'Data Management Middleware Design'
copyright: '2015, AURA/LSST'
authors:
- 'Kian-Tat Lim'
- 'Ray Plante'
- 'Gregory Dubois-Felsmann'
# Current document version
last_revised: 'October 10, 2013'
version: '10.0'
# dev_version_suffix: None # e.g. 'alpha'/'beta'/'rc' if necessary
Parameters
----------
meta_stream : file handle
A file stream (e.g., from :func:`open`) for the ``metadata.yaml``
document in a design document's repository.
Returns
-------
confs : `dict`
Dictionary of configurations that should be added to the ``conf.py``
global namespace. | [
"Builds",
"a",
"dict",
"of",
"Sphinx",
"configuration",
"variables",
"given",
"a",
"central",
"configuration",
"for",
"LSST",
"Design",
"Documents",
"and",
"a",
"metadata",
"YAML",
"file",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxconfig/technoteconf.py#L17-L67 | train |
thorgate/django-esteid | esteid/config.py | ocsp_responder_certificate_path | def ocsp_responder_certificate_path():
"""Get ocsp responder certificate path
Test: TEST_of_SK_OCSP_RESPONDER_2011.pem
Live: sk-ocsp-responder-certificates.pem
Note: These files are distributed under esteid/certs
:return:
"""
certificate_path = getattr(settings, 'ESTEID_OCSP_RESPONDER_CERTIFICATE_PATH', 'TEST_of_SK_OCSP_RESPONDER_2011.pem')
if certificate_path in ['TEST_of_SK_OCSP_RESPONDER_2011.pem', 'sk-ocsp-responder-certificates.pem']:
return os.path.join(os.path.dirname(__file__), 'certs', certificate_path)
return certificate_path | python | def ocsp_responder_certificate_path():
"""Get ocsp responder certificate path
Test: TEST_of_SK_OCSP_RESPONDER_2011.pem
Live: sk-ocsp-responder-certificates.pem
Note: These files are distributed under esteid/certs
:return:
"""
certificate_path = getattr(settings, 'ESTEID_OCSP_RESPONDER_CERTIFICATE_PATH', 'TEST_of_SK_OCSP_RESPONDER_2011.pem')
if certificate_path in ['TEST_of_SK_OCSP_RESPONDER_2011.pem', 'sk-ocsp-responder-certificates.pem']:
return os.path.join(os.path.dirname(__file__), 'certs', certificate_path)
return certificate_path | [
"def",
"ocsp_responder_certificate_path",
"(",
")",
":",
"certificate_path",
"=",
"getattr",
"(",
"settings",
",",
"'ESTEID_OCSP_RESPONDER_CERTIFICATE_PATH'",
",",
"'TEST_of_SK_OCSP_RESPONDER_2011.pem'",
")",
"if",
"certificate_path",
"in",
"[",
"'TEST_of_SK_OCSP_RESPONDER_2011.pem'",
",",
"'sk-ocsp-responder-certificates.pem'",
"]",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'certs'",
",",
"certificate_path",
")",
"return",
"certificate_path"
]
| Get ocsp responder certificate path
Test: TEST_of_SK_OCSP_RESPONDER_2011.pem
Live: sk-ocsp-responder-certificates.pem
Note: These files are distributed under esteid/certs
:return: | [
"Get",
"ocsp",
"responder",
"certificate",
"path"
]
| 407ae513e357fedea0e3e42198df8eb9d9ff0646 | https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/config.py#L40-L55 | train |
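A sketch of the fallback behaviour: with no ESTEID_OCSP_RESPONDER_CERTIFICATE_PATH in Django settings, the bundled test certificate is resolved relative to the package:

path = ocsp_responder_certificate_path()
# -> <package dir>/esteid/certs/TEST_of_SK_OCSP_RESPONDER_2011.pem
# A custom absolute path set in Django settings is returned unchanged.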
miguelgrinberg/Flask-MarrowMailer | flask_marrowmailer.py | Mailer.new | def new(self, **kwargs):
'''Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
msg = mailer.new(**kwargs)
msg.__class__ = Message
return msg | python | def new(self, **kwargs):
'''Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
msg = mailer.new(**kwargs)
msg.__class__ = Message
return msg | [
"def",
"new",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"app",
"=",
"self",
".",
"app",
"or",
"current_app",
"mailer",
"=",
"app",
".",
"extensions",
"[",
"'marrowmailer'",
"]",
"msg",
"=",
"mailer",
".",
"new",
"(",
"*",
"*",
"kwargs",
")",
"msg",
".",
"__class__",
"=",
"Message",
"return",
"msg"
]
| Return a new ``Message`` instance. The arguments are
passed to the ``marrow.mailer.Message`` constructor. | [
"Return",
"a",
"new",
"Message",
"instance",
".",
"The",
"arguments",
"are",
"passed",
"to",
"the",
"marrow",
".",
"mailer",
".",
"Message",
"constructor",
"."
]
| daf1ac0745fb31db2f43f4f7dc24c6f50ae96764 | https://github.com/miguelgrinberg/Flask-MarrowMailer/blob/daf1ac0745fb31db2f43f4f7dc24c6f50ae96764/flask_marrowmailer.py#L62-L69 | train |
miguelgrinberg/Flask-MarrowMailer | flask_marrowmailer.py | Mailer.send | def send(self, msg):
'''Send the message. If message is an iterable, then send
all the messages.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
mailer.start()
if not hasattr(msg, '__iter__'):
result = mailer.send(msg)
else:
result = map(lambda message: mailer.send(message), msg)
mailer.stop()
return result | python | def send(self, msg):
'''Send the message. If message is an iterable, then send
all the messages.'''
app = self.app or current_app
mailer = app.extensions['marrowmailer']
mailer.start()
if not hasattr(msg, '__iter__'):
result = mailer.send(msg)
else:
result = map(lambda message: mailer.send(message), msg)
mailer.stop()
return result | [
"def",
"send",
"(",
"self",
",",
"msg",
")",
":",
"app",
"=",
"self",
".",
"app",
"or",
"current_app",
"mailer",
"=",
"app",
".",
"extensions",
"[",
"'marrowmailer'",
"]",
"mailer",
".",
"start",
"(",
")",
"if",
"not",
"hasattr",
"(",
"msg",
",",
"'__iter__'",
")",
":",
"result",
"=",
"mailer",
".",
"send",
"(",
"msg",
")",
"else",
":",
"result",
"=",
"map",
"(",
"lambda",
"message",
":",
"mailer",
".",
"send",
"(",
"message",
")",
",",
"msg",
")",
"mailer",
".",
"stop",
"(",
")",
"return",
"result"
]
| Send the message. If message is an iterable, then send
all the messages. | [
"Send",
"the",
"message",
".",
"If",
"message",
"is",
"an",
"iterable",
"then",
"send",
"all",
"the",
"messages",
"."
]
| daf1ac0745fb31db2f43f4f7dc24c6f50ae96764 | https://github.com/miguelgrinberg/Flask-MarrowMailer/blob/daf1ac0745fb31db2f43f4f7dc24c6f50ae96764/flask_marrowmailer.py#L71-L82 | train |
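A usage sketch for the extension, assuming the usual Flask init pattern (Mailer(app)) and marrow.mailer's standard Message keywords (author, to, subject):

from flask import Flask

app = Flask(__name__)
mailer = Mailer(app)  # assumes Mailer accepts the app at construction

with app.app_context():
    msg = mailer.new(author='noreply@example.com', to='user@example.com',
                     subject='Hello')  # hypothetical addresses
    msg.plain = 'Plain-text body'
    mailer.send(msg)  # starts the marrow mailer, delivers, then stops it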
brutus/wdiffhtml | wdiffhtml/__init__.py | wdiff | def wdiff(
settings, wrap_with_html=False, fold_breaks=False, hard_breaks=False
):
"""
Returns the results of `wdiff` in a HTML compatible format.
Needs a :cls:`settings.Settings` object.
If *wrap_with_html* is set, the *diff* is returned in a full HTML document
structure.
If *fold_breaks* is set, `<ins>` and `<del>` tags are allowed to span line
breaks.
If *hard_breaks* is set, line breaks are replaced with `<br />` tags.
"""
diff = generate_wdiff(settings.org_file, settings.new_file, fold_breaks)
if wrap_with_html:
return wrap_content(diff, settings, hard_breaks)
else:
return diff | python | def wdiff(
settings, wrap_with_html=False, fold_breaks=False, hard_breaks=False
):
"""
Returns the results of `wdiff` in a HTML compatible format.
Needs a :cls:`settings.Settings` object.
If *wrap_with_html* is set, the *diff* is returned in a full HTML document
structure.
If *fold_breaks* is set, `<ins>` and `<del>` tags are allowed to span line
breaks.
If *hard_breaks* is set, line breaks are replaced with `<br />` tags.
"""
diff = generate_wdiff(settings.org_file, settings.new_file, fold_breaks)
if wrap_with_html:
return wrap_content(diff, settings, hard_breaks)
else:
return diff | [
"def",
"wdiff",
"(",
"settings",
",",
"wrap_with_html",
"=",
"False",
",",
"fold_breaks",
"=",
"False",
",",
"hard_breaks",
"=",
"False",
")",
":",
"diff",
"=",
"generate_wdiff",
"(",
"settings",
".",
"org_file",
",",
"settings",
".",
"new_file",
",",
"fold_breaks",
")",
"if",
"wrap_with_html",
":",
"return",
"wrap_content",
"(",
"diff",
",",
"settings",
",",
"hard_breaks",
")",
"else",
":",
"return",
"diff"
]
| Returns the results of `wdiff` in a HTML compatible format.
Needs a :cls:`settings.Settings` object.
If *wrap_with_html* is set, the *diff* is returned in a full HTML document
structure.
If *fold_breaks* is set, `<ins>` and `<del>` tags are allowed to span line
breaks.
If *hard_breaks* is set, line breaks are replaced with `<br />` tags. | [
"Returns",
"the",
"results",
"of",
"wdiff",
"in",
"a",
"HTML",
"compatible",
"format",
"."
]
| e97b524a7945f7a626e33ec141343120c524d9fa | https://github.com/brutus/wdiffhtml/blob/e97b524a7945f7a626e33ec141343120c524d9fa/wdiffhtml/__init__.py#L62-L83 | train |
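A usage sketch; the Settings constructor signature is an assumption (see wdiffhtml.settings for the real one), but wdiff itself only needs org_file and new_file on it:

from wdiffhtml.settings import Settings  # assumed import path

settings = Settings(org_file='draft_v1.txt', new_file='draft_v2.txt')  # assumed signature
html = wdiff(settings, wrap_with_html=True, hard_breaks=True)
with open('diff.html', 'w') as fh:
    fh.write(html)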
mesbahamin/chronophore | chronophore/config.py | _load_config | def _load_config(config_file):
"""Load settings from config file and return them as a dict. If the
config file is not found, or if it is invalid, create and use a
default config file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: Dictionary of config options.
"""
logger.debug('Config file: {}'.format(config_file))
parser = configparser.ConfigParser()
try:
with config_file.open('r') as f:
parser.read_file(f)
except FileNotFoundError as e:
logger.warning('Config file not found')
parser = _use_default(config_file)
except configparser.ParsingError as e:
logger.warning('Error in config file: {}'.format(e))
parser = _use_default(config_file)
finally:
try:
config = _load_options(parser)
except (configparser.NoOptionError):
parser = _use_default(config_file)
config = _load_options(parser)
logger.debug('Config loaded: {}'.format(config_file))
return config | python | def _load_config(config_file):
"""Load settings from config file and return them as a dict. If the
config file is not found, or if it is invalid, create and use a
default config file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: Dictionary of config options.
"""
logger.debug('Config file: {}'.format(config_file))
parser = configparser.ConfigParser()
try:
with config_file.open('r') as f:
parser.read_file(f)
except FileNotFoundError as e:
logger.warning('Config file not found')
parser = _use_default(config_file)
except configparser.ParsingError as e:
logger.warning('Error in config file: {}'.format(e))
parser = _use_default(config_file)
finally:
try:
config = _load_options(parser)
except (configparser.NoOptionError):
parser = _use_default(config_file)
config = _load_options(parser)
logger.debug('Config loaded: {}'.format(config_file))
return config | [
"def",
"_load_config",
"(",
"config_file",
")",
":",
"logger",
".",
"debug",
"(",
"'Config file: {}'",
".",
"format",
"(",
"config_file",
")",
")",
"parser",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"try",
":",
"with",
"config_file",
".",
"open",
"(",
"'r'",
")",
"as",
"f",
":",
"parser",
".",
"read_file",
"(",
"f",
")",
"except",
"FileNotFoundError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Config file not found'",
")",
"parser",
"=",
"_use_default",
"(",
"config_file",
")",
"except",
"configparser",
".",
"ParsingError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Error in config file: {}'",
".",
"format",
"(",
"e",
")",
")",
"parser",
"=",
"_use_default",
"(",
"config_file",
")",
"finally",
":",
"try",
":",
"config",
"=",
"_load_options",
"(",
"parser",
")",
"except",
"(",
"configparser",
".",
"NoOptionError",
")",
":",
"parser",
"=",
"_use_default",
"(",
"config_file",
")",
"config",
"=",
"_load_options",
"(",
"parser",
")",
"logger",
".",
"debug",
"(",
"'Config loaded: {}'",
".",
"format",
"(",
"config_file",
")",
")",
"return",
"config"
]
| Load settings from config file and return them as a dict. If the
config file is not found, or if it is invalid, create and use a
default config file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: Dictionary of config options. | [
"Load",
"settings",
"from",
"config",
"file",
"and",
"return",
"them",
"as",
"a",
"dict",
".",
"If",
"the",
"config",
"file",
"is",
"not",
"found",
"or",
"if",
"it",
"is",
"invalid",
"create",
"and",
"use",
"a",
"default",
"config",
"file",
"."
]
| ee140c61b4dfada966f078de8304bac737cec6f7 | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/config.py#L13-L44 | train |
mesbahamin/chronophore | chronophore/config.py | _load_options | def _load_options(parser):
"""Load config options from parser and return them as a dict.
:param parser: `ConfigParser` object with the values loaded.
:return: Dictionary of config options.
"""
config = dict(
MESSAGE_DURATION=parser.getint('gui', 'message_duration'),
GUI_WELCOME_LABLE=parser.get('gui', 'gui_welcome_label'),
FULL_USER_NAMES=parser.getboolean('gui', 'full_user_names'),
LARGE_FONT_SIZE=parser.getint('gui', 'large_font_size'),
MEDIUM_FONT_SIZE=parser.getint('gui', 'medium_font_size'),
SMALL_FONT_SIZE=parser.getint('gui', 'small_font_size'),
TINY_FONT_SIZE=parser.getint('gui', 'tiny_font_size'),
MAX_INPUT_LENGTH=parser.getint('gui', 'max_input_length'),
)
return config | python | def _load_options(parser):
"""Load config options from parser and return them as a dict.
:param parser: `ConfigParser` object with the values loaded.
:return: Dictionary of config options.
"""
config = dict(
MESSAGE_DURATION=parser.getint('gui', 'message_duration'),
GUI_WELCOME_LABLE=parser.get('gui', 'gui_welcome_label'),
FULL_USER_NAMES=parser.getboolean('gui', 'full_user_names'),
LARGE_FONT_SIZE=parser.getint('gui', 'large_font_size'),
MEDIUM_FONT_SIZE=parser.getint('gui', 'medium_font_size'),
SMALL_FONT_SIZE=parser.getint('gui', 'small_font_size'),
TINY_FONT_SIZE=parser.getint('gui', 'tiny_font_size'),
MAX_INPUT_LENGTH=parser.getint('gui', 'max_input_length'),
)
return config | [
"def",
"_load_options",
"(",
"parser",
")",
":",
"config",
"=",
"dict",
"(",
"MESSAGE_DURATION",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'message_duration'",
")",
",",
"GUI_WELCOME_LABLE",
"=",
"parser",
".",
"get",
"(",
"'gui'",
",",
"'gui_welcome_label'",
")",
",",
"FULL_USER_NAMES",
"=",
"parser",
".",
"getboolean",
"(",
"'gui'",
",",
"'full_user_names'",
")",
",",
"LARGE_FONT_SIZE",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'large_font_size'",
")",
",",
"MEDIUM_FONT_SIZE",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'medium_font_size'",
")",
",",
"SMALL_FONT_SIZE",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'small_font_size'",
")",
",",
"TINY_FONT_SIZE",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'tiny_font_size'",
")",
",",
"MAX_INPUT_LENGTH",
"=",
"parser",
".",
"getint",
"(",
"'gui'",
",",
"'max_input_length'",
")",
",",
")",
"return",
"config"
]
| Load config options from parser and return them as a dict.
:param parser: `ConfigParser` object with the values loaded.
:return: Dictionary of config options. | [
"Load",
"config",
"options",
"from",
"parser",
"and",
"return",
"them",
"as",
"a",
"dict",
"."
]
| ee140c61b4dfada966f078de8304bac737cec6f7 | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/config.py#L47-L63 | train |
mesbahamin/chronophore | chronophore/config.py | _use_default | def _use_default(config_file):
"""Write default values to a config file. If another config file
already exists, back it up before replacing it with the new file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: `ConfigParser` object with the values loaded.
"""
default_config = OrderedDict((
(
'gui',
OrderedDict(
(
('message_duration', 5),
('gui_welcome_label', 'Welcome to the STEM Learning Center!'),
('full_user_names', True),
('large_font_size', 30),
('medium_font_size', 18),
('small_font_size', 15),
('tiny_font_size', 10),
('max_input_length', 9),
)
),
),
))
parser = configparser.ConfigParser()
parser.read_dict(default_config)
if config_file.exists():
backup = config_file.with_suffix('.bak')
os.rename(str(config_file), str(backup))
logger.info('{} moved to {}.'.format(config_file, backup))
with config_file.open('w') as f:
parser.write(f)
logger.info('Default config file created.')
return parser | python | def _use_default(config_file):
"""Write default values to a config file. If another config file
already exists, back it up before replacing it with the new file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: `ConfigParser` object with the values loaded.
"""
default_config = OrderedDict((
(
'gui',
OrderedDict(
(
('message_duration', 5),
('gui_welcome_label', 'Welcome to the STEM Learning Center!'),
('full_user_names', True),
('large_font_size', 30),
('medium_font_size', 18),
('small_font_size', 15),
('tiny_font_size', 10),
('max_input_length', 9),
)
),
),
))
parser = configparser.ConfigParser()
parser.read_dict(default_config)
if config_file.exists():
backup = config_file.with_suffix('.bak')
os.rename(str(config_file), str(backup))
logger.info('{} moved to {}.'.format(config_file, backup))
with config_file.open('w') as f:
parser.write(f)
logger.info('Default config file created.')
return parser | [
"def",
"_use_default",
"(",
"config_file",
")",
":",
"default_config",
"=",
"OrderedDict",
"(",
"(",
"(",
"'gui'",
",",
"OrderedDict",
"(",
"(",
"(",
"'message_duration'",
",",
"5",
")",
",",
"(",
"'gui_welcome_label'",
",",
"'Welcome to the STEM Learning Center!'",
")",
",",
"(",
"'full_user_names'",
",",
"True",
")",
",",
"(",
"'large_font_size'",
",",
"30",
")",
",",
"(",
"'medium_font_size'",
",",
"18",
")",
",",
"(",
"'small_font_size'",
",",
"15",
")",
",",
"(",
"'tiny_font_size'",
",",
"10",
")",
",",
"(",
"'max_input_length'",
",",
"9",
")",
",",
")",
")",
",",
")",
",",
")",
")",
"parser",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"parser",
".",
"read_dict",
"(",
"default_config",
")",
"if",
"config_file",
".",
"exists",
"(",
")",
":",
"backup",
"=",
"config_file",
".",
"with_suffix",
"(",
"'.bak'",
")",
"os",
".",
"rename",
"(",
"str",
"(",
"config_file",
")",
",",
"str",
"(",
"backup",
")",
")",
"logger",
".",
"info",
"(",
"'{} moved to {}.'",
".",
"format",
"(",
"config_file",
",",
"backup",
")",
")",
"with",
"config_file",
".",
"open",
"(",
"'w'",
")",
"as",
"f",
":",
"parser",
".",
"write",
"(",
"f",
")",
"logger",
".",
"info",
"(",
"'Default config file created.'",
")",
"return",
"parser"
]
| Write default values to a config file. If another config file
already exists, back it up before replacing it with the new file.
:param config_file: `pathlib.Path` object. Path to config file.
:return: `ConfigParser` object with the values loaded. | [
"Write",
"default",
"values",
"to",
"a",
"config",
"file",
".",
"If",
"another",
"config",
"file",
"already",
"exists",
"back",
"it",
"up",
"before",
"replacing",
"it",
"with",
"the",
"new",
"file",
"."
]
| ee140c61b4dfada966f078de8304bac737cec6f7 | https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/config.py#L66-L104 | train |
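A usage sketch tying the three config helpers together; the config file path is hypothetical:

import pathlib

config_file = pathlib.Path('~/.config/chronophore/config.ini').expanduser()  # hypothetical path
config = _load_config(config_file)  # falls back to _use_default on any problem
print(config['MESSAGE_DURATION'])   # 5 when the default config is in effect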
mojaie/chorus | chorus/model/graphmol.py | Compound.add_atom | def add_atom(self, key, atom):
"""Set an atom. Existing atom will be overwritten."""
self.graph.add_node(key, atom=atom) | python | def add_atom(self, key, atom):
"""Set an atom. Existing atom will be overwritten."""
self.graph.add_node(key, atom=atom) | [
"def",
"add_atom",
"(",
"self",
",",
"key",
",",
"atom",
")",
":",
"self",
".",
"graph",
".",
"add_node",
"(",
"key",
",",
"atom",
"=",
"atom",
")"
]
| Set an atom. Existing atom will be overwritten. | [
"Set",
"an",
"atom",
".",
"Existing",
"atom",
"will",
"be",
"overwritten",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L79-L81 | train |
mojaie/chorus | chorus/model/graphmol.py | Compound.atoms_iter | def atoms_iter(self):
"""Iterate over atoms."""
for n, atom in self.graph.nodes.data("atom"):
yield n, atom | python | def atoms_iter(self):
"""Iterate over atoms."""
for n, atom in self.graph.nodes.data("atom"):
yield n, atom | [
"def",
"atoms_iter",
"(",
"self",
")",
":",
"for",
"n",
",",
"atom",
"in",
"self",
".",
"graph",
".",
"nodes",
".",
"data",
"(",
"\"atom\"",
")",
":",
"yield",
"n",
",",
"atom"
]
| Iterate over atoms. | [
"Iterate",
"over",
"atoms",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L87-L90 | train |
mojaie/chorus | chorus/model/graphmol.py | Compound.add_bond | def add_bond(self, key1, key2, bond):
"""Set a bond. Existing bond will be overwritten."""
self.graph.add_edge(key1, key2, bond=bond) | python | def add_bond(self, key1, key2, bond):
"""Set a bond. Existing bond will be overwritten."""
self.graph.add_edge(key1, key2, bond=bond) | [
"def",
"add_bond",
"(",
"self",
",",
"key1",
",",
"key2",
",",
"bond",
")",
":",
"self",
".",
"graph",
".",
"add_edge",
"(",
"key1",
",",
"key2",
",",
"bond",
"=",
"bond",
")"
]
| Set a bond. Existing bond will be overwritten. | [
"Set",
"a",
"bond",
".",
"Existing",
"bond",
"will",
"be",
"overwritten",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L100-L102 | train |
mojaie/chorus | chorus/model/graphmol.py | Compound.bonds_iter | def bonds_iter(self):
"""Iterate over bonds."""
for u, v, bond in self.graph.edges.data("bond"):
yield u, v, bond | python | def bonds_iter(self):
"""Iterate over bonds."""
for u, v, bond in self.graph.edges.data("bond"):
yield u, v, bond | [
"def",
"bonds_iter",
"(",
"self",
")",
":",
"for",
"u",
",",
"v",
",",
"bond",
"in",
"self",
".",
"graph",
".",
"edges",
".",
"data",
"(",
"\"bond\"",
")",
":",
"yield",
"u",
",",
"v",
",",
"bond"
]
| Iterate over bonds. | [
"Iterate",
"over",
"bonds",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L108-L111 | train |
mojaie/chorus | chorus/model/graphmol.py | Compound.neighbors | def neighbors(self, key):
"""Return dict of neighbor atom index and connecting bond."""
return {n: attr["bond"] for n, attr in self.graph[key].items()} | python | def neighbors(self, key):
"""Return dict of neighbor atom index and connecting bond."""
return {n: attr["bond"] for n, attr in self.graph[key].items()} | [
"def",
"neighbors",
"(",
"self",
",",
"key",
")",
":",
"return",
"{",
"n",
":",
"attr",
"[",
"\"bond\"",
"]",
"for",
"n",
",",
"attr",
"in",
"self",
".",
"graph",
"[",
"key",
"]",
".",
"items",
"(",
")",
"}"
]
| Return dict of neighbor atom index and connecting bond. | [
"Return",
"dict",
"of",
"neighbor",
"atom",
"index",
"and",
"connecting",
"bond",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L121-L123 | train |
mojaie/chorus | chorus/model/graphmol.py | Compound.neighbors_iter | def neighbors_iter(self):
"""Iterate over atoms and return its neighbors."""
for n, adj in self.graph.adj.items():
yield n, {n: attr["bond"] for n, attr in adj.items()} | python | def neighbors_iter(self):
"""Iterate over atoms and return its neighbors."""
for n, adj in self.graph.adj.items():
yield n, {n: attr["bond"] for n, attr in adj.items()} | [
"def",
"neighbors_iter",
"(",
"self",
")",
":",
"for",
"n",
",",
"adj",
"in",
"self",
".",
"graph",
".",
"adj",
".",
"items",
"(",
")",
":",
"yield",
"n",
",",
"{",
"n",
":",
"attr",
"[",
"\"bond\"",
"]",
"for",
"n",
",",
"attr",
"in",
"adj",
".",
"items",
"(",
")",
"}"
]
| Iterate over atoms and return its neighbors. | [
"Iterate",
"over",
"atoms",
"and",
"return",
"its",
"neighbors",
"."
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L129-L132 | train |
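A sketch exercising the accessors above on a two-atom molecule; the Atom/Bond constructors and their import paths are assumptions:

from chorus.model.graphmol import Compound
from chorus.model.atom import Atom  # assumed import path
from chorus.model.bond import Bond  # assumed import path

mol = Compound()
mol.add_atom(1, Atom("C"))  # assumes Atom takes an element symbol
mol.add_atom(2, Atom("O"))
mol.add_bond(1, 2, Bond())  # assumes a no-argument Bond constructor
for n, nbrs in mol.neighbors_iter():
    print(n, sorted(nbrs))  # 1 [2], then 2 [1]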
mojaie/chorus | chorus/model/graphmol.py | Compound.clear | def clear(self):
"""Empty the instance """
# self.graph = nx.Graph()
self.graph.clear()
self.data.clear()
self.descriptors.clear()
self.size2d = None
self.rings = None
self.scaffolds = None
self.isolated = None | python | def clear(self):
"""Empty the instance """
# self.graph = nx.Graph()
self.graph.clear()
self.data.clear()
self.descriptors.clear()
self.size2d = None
self.rings = None
self.scaffolds = None
self.isolated = None | [
"def",
"clear",
"(",
"self",
")",
":",
"# self.graph = nx.Graph()",
"self",
".",
"graph",
".",
"clear",
"(",
")",
"self",
".",
"data",
".",
"clear",
"(",
")",
"self",
".",
"descriptors",
".",
"clear",
"(",
")",
"self",
".",
"size2d",
"=",
"None",
"self",
".",
"rings",
"=",
"None",
"self",
".",
"scaffolds",
"=",
"None",
"self",
".",
"isolated",
"=",
"None"
]
| Empty the instance | [
"Empty",
"the",
"instance"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L134-L143 | train |
mojaie/chorus | chorus/draw/qt.py | Qt._convert | def _convert(self, pos):
""" For QPainter coordinate system, reflect over X axis and
translate from center to top-left
"""
px = pos[0] + self.logical_size.width() / 2
py = self.logical_size.height() / 2 - pos[1]
return px, py | python | def _convert(self, pos):
""" For QPainter coordinate system, reflect over X axis and
translate from center to top-left
"""
px = pos[0] + self.logical_size.width() / 2
py = self.logical_size.height() / 2 - pos[1]
return px, py | [
"def",
"_convert",
"(",
"self",
",",
"pos",
")",
":",
"px",
"=",
"pos",
"[",
"0",
"]",
"+",
"self",
".",
"logical_size",
".",
"width",
"(",
")",
"/",
"2",
"py",
"=",
"self",
".",
"logical_size",
".",
"height",
"(",
")",
"/",
"2",
"-",
"pos",
"[",
"1",
"]",
"return",
"px",
",",
"py"
]
| For QPainter coordinate system, reflect over X axis and
translate from center to top-left | [
"For",
"QPainter",
"coordinate",
"system",
"reflect",
"over",
"X",
"axis",
"and",
"translate",
"from",
"center",
"to",
"top",
"-",
"left"
]
| fc7fe23a0272554c67671645ab07830b315eeb1b | https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/qt.py#L181-L187 | train |
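The mapping is easy to sanity-check without Qt; a standalone re-statement of the arithmetic for a 400x300 logical canvas:

def convert(pos, width=400, height=300):
    # same arithmetic as Qt._convert, with the canvas size inlined
    return pos[0] + width / 2, height / 2 - pos[1]

assert convert((0, 0)) == (200.0, 150.0)    # molecule center -> widget center
assert convert((50, 100)) == (250.0, 50.0)  # +y points up, so py shrinks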
lsst-sqre/documenteer | documenteer/sphinxrunner.py | run_sphinx | def run_sphinx(root_dir):
"""Run the Sphinx build process.
Parameters
----------
root_dir : `str`
Root directory of the Sphinx project and content source. This directory
contains both the root ``index.rst`` file and the ``conf.py``
configuration file.
Returns
-------
status : `int`
Sphinx status code. ``0`` is expected. Greater than ``0`` indicates
an error.
Notes
-----
This function implements similar internals to Sphinx's own ``sphinx-build``
command. Most configurations are hard-coded to defaults appropriate for
building stack documentation, but flexibility can be added later as
needs are identified.
"""
logger = logging.getLogger(__name__)
# This replicates what Sphinx's internal command line handler does in
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/cmd/build.py
# build_main()
# configuration
root_dir = os.path.abspath(root_dir)
srcdir = root_dir # root directory of Sphinx content
confdir = root_dir # directory where conf.py is located
outdir = os.path.join(root_dir, '_build', 'html')
doctreedir = os.path.join(root_dir, '_build', 'doctree')
builder = 'html'
confoverrides = {}
status = sys.stdout # set to None for 'quiet' mode
warning = sys.stderr
error = sys.stderr
freshenv = False # attempt to re-use existing build artifacts
warningiserror = False
tags = []
verbosity = 0
jobs = 1 # number of processes
force_all = True
filenames = []
logger.debug('Sphinx config: srcdir={0}'.format(srcdir))
logger.debug('Sphinx config: confdir={0}'.format(confdir))
logger.debug('Sphinx config: outdir={0}'.format(outdir))
logger.debug('Sphinx config: doctreedir={0}'.format(doctreedir))
logger.debug('Sphinx config: builder={0}'.format(builder))
logger.debug('Sphinx config: freshenv={0:b}'.format(freshenv))
logger.debug('Sphinx config: warningiserror={0:b}'.format(warningiserror))
logger.debug('Sphinx config: verbosity={0:d}'.format(verbosity))
logger.debug('Sphinx config: jobs={0:d}'.format(jobs))
logger.debug('Sphinx config: force_all={0:b}'.format(force_all))
app = None
try:
with patch_docutils(), docutils_namespace():
app = Sphinx(
srcdir, confdir, outdir, doctreedir, builder,
confoverrides, status, warning, freshenv,
warningiserror, tags, verbosity, jobs)
app.build(force_all, filenames)
return app.statuscode
except (Exception, KeyboardInterrupt) as exc:
args = MockSphinxNamespace(verbosity=verbosity, traceback=True)
handle_exception(app, args, exc, error)
return 1 | python | def run_sphinx(root_dir):
"""Run the Sphinx build process.
Parameters
----------
root_dir : `str`
Root directory of the Sphinx project and content source. This directory
contains both the root ``index.rst`` file and the ``conf.py``
configuration file.
Returns
-------
status : `int`
Sphinx status code. ``0`` is expected. Greater than ``0`` indicates
an error.
Notes
-----
This function implements similar internals to Sphinx's own ``sphinx-build``
command. Most configurations are hard-coded to defaults appropriate for
building stack documentation, but flexibility can be added later as
needs are identified.
"""
logger = logging.getLogger(__name__)
# This replicates what Sphinx's internal command line handler does in
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/cmd/build.py
# build_main()
# configuration
root_dir = os.path.abspath(root_dir)
srcdir = root_dir # root directory of Sphinx content
confdir = root_dir # directory where conf.py is located
outdir = os.path.join(root_dir, '_build', 'html')
doctreedir = os.path.join(root_dir, '_build', 'doctree')
builder = 'html'
confoverrides = {}
status = sys.stdout # set to None for 'quiet' mode
warning = sys.stderr
error = sys.stderr
freshenv = False # attempt to re-use existing build artifacts
warningiserror = False
tags = []
verbosity = 0
jobs = 1 # number of processes
force_all = True
filenames = []
logger.debug('Sphinx config: srcdir={0}'.format(srcdir))
logger.debug('Sphinx config: confdir={0}'.format(confdir))
logger.debug('Sphinx config: outdir={0}'.format(outdir))
logger.debug('Sphinx config: doctreedir={0}'.format(doctreedir))
logger.debug('Sphinx config: builder={0}'.format(builder))
logger.debug('Sphinx config: freshenv={0:b}'.format(freshenv))
logger.debug('Sphinx config: warningiserror={0:b}'.format(warningiserror))
logger.debug('Sphinx config: verbosity={0:d}'.format(verbosity))
logger.debug('Sphinx config: jobs={0:d}'.format(jobs))
logger.debug('Sphinx config: force_all={0:b}'.format(force_all))
app = None
try:
with patch_docutils(), docutils_namespace():
app = Sphinx(
srcdir, confdir, outdir, doctreedir, builder,
confoverrides, status, warning, freshenv,
warningiserror, tags, verbosity, jobs)
app.build(force_all, filenames)
return app.statuscode
except (Exception, KeyboardInterrupt) as exc:
args = MockSphinxNamespace(verbosity=verbosity, traceback=True)
handle_exception(app, args, exc, error)
return 1 | [
"def",
"run_sphinx",
"(",
"root_dir",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"# This replicates what Sphinx's internal command line hander does in",
"# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/cmd/build.py",
"# build_main()",
"# configuration",
"root_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"root_dir",
")",
"srcdir",
"=",
"root_dir",
"# root directory of Sphinx content",
"confdir",
"=",
"root_dir",
"# directory where conf.py is located",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'_build'",
",",
"'html'",
")",
"doctreedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'_build'",
",",
"'doctree'",
")",
"builder",
"=",
"'html'",
"confoverrides",
"=",
"{",
"}",
"status",
"=",
"sys",
".",
"stdout",
"# set to None for 'quiet' mode",
"warning",
"=",
"sys",
".",
"stderr",
"error",
"=",
"sys",
".",
"stderr",
"freshenv",
"=",
"False",
"# attempt to re-use existing build artificats",
"warningiserror",
"=",
"False",
"tags",
"=",
"[",
"]",
"verbosity",
"=",
"0",
"jobs",
"=",
"1",
"# number of processes",
"force_all",
"=",
"True",
"filenames",
"=",
"[",
"]",
"logger",
".",
"debug",
"(",
"'Sphinx config: srcdir={0}'",
".",
"format",
"(",
"srcdir",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: confdir={0}'",
".",
"format",
"(",
"confdir",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: outdir={0}'",
".",
"format",
"(",
"outdir",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: doctreedir={0}'",
".",
"format",
"(",
"doctreedir",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: builder={0}'",
".",
"format",
"(",
"builder",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: freshenv={0:b}'",
".",
"format",
"(",
"freshenv",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: warningiserror={0:b}'",
".",
"format",
"(",
"warningiserror",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: verbosity={0:d}'",
".",
"format",
"(",
"verbosity",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: jobs={0:d}'",
".",
"format",
"(",
"jobs",
")",
")",
"logger",
".",
"debug",
"(",
"'Sphinx config: force_all={0:b}'",
".",
"format",
"(",
"force_all",
")",
")",
"app",
"=",
"None",
"try",
":",
"with",
"patch_docutils",
"(",
")",
",",
"docutils_namespace",
"(",
")",
":",
"app",
"=",
"Sphinx",
"(",
"srcdir",
",",
"confdir",
",",
"outdir",
",",
"doctreedir",
",",
"builder",
",",
"confoverrides",
",",
"status",
",",
"warning",
",",
"freshenv",
",",
"warningiserror",
",",
"tags",
",",
"verbosity",
",",
"jobs",
")",
"app",
".",
"build",
"(",
"force_all",
",",
"filenames",
")",
"return",
"app",
".",
"statuscode",
"except",
"(",
"Exception",
",",
"KeyboardInterrupt",
")",
"as",
"exc",
":",
"args",
"=",
"MockSphinxNamespace",
"(",
"verbosity",
"=",
"verbosity",
",",
"traceback",
"=",
"True",
")",
"handle_exception",
"(",
"app",
",",
"args",
",",
"exc",
",",
"error",
")",
"return",
"1"
]
| Run the Sphinx build process.
Parameters
----------
root_dir : `str`
Root directory of the Sphinx project and content source. This directory
contains both the root ``index.rst`` file and the ``conf.py``
configuration file.
Returns
-------
status : `int`
Sphinx status code. ``0`` is expected. Greater than ``0`` indicates
an error.
Notes
-----
This function implements similar internals to Sphinx's own ``sphinx-build``
command. Most configurations are hard-coded to defaults appropriate for
building stack documentation, but flexibility can be added later as
needs are identified. | [
"Run",
"the",
"Sphinx",
"build",
"process",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxrunner.py#L19-L90 | train |
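A minimal usage sketch for the record above. It assumes documenteer is installed and that the target directory holds both the root index.rst and conf.py, as the docstring requires:

import sys
from documenteer.sphinxrunner import run_sphinx

# Build the HTML site rooted at ./docs; a status greater than 0
# means the Sphinx build failed.
status = run_sphinx('docs')
sys.exit(status)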
Antojitos/guacamole-cli | src/guacamole_cli/__init__.py | get_settings | def get_settings(config_file):
"""Search and load a configuration file."""
default_settings = {
'general': {
'endpoint': 'http://guacamole.antojitos.io/files/',
'shortener': 'http://t.antojitos.io/api/v1/urls',
}
}
settings = configparser.ConfigParser()
try:
settings.read_dict(default_settings)
except AttributeError:
# using python 2.7
for section, options in default_settings.items():
settings.add_section(section)
for option, value in options.items():
settings.set(section, option, value)
if config_file is not None and os.path.exists(config_file):
settings.read(config_file)
return settings
if os.path.exists(CONFIG_FILE):
settings.read(CONFIG_FILE)
return settings
return settings | python | def get_settings(config_file):
"""Search and load a configuration file."""
default_settings = {
'general': {
'endpoint': 'http://guacamole.antojitos.io/files/',
'shortener': 'http://t.antojitos.io/api/v1/urls',
}
}
settings = configparser.ConfigParser()
try:
settings.read_dict(default_settings)
except AttributeError:
# using python 2.7
for section, options in default_settings.items():
settings.add_section(section)
for option, value in options.items():
settings.set(section, option, value)
if config_file is not None and os.path.exists(config_file):
settings.read(config_file)
return settings
if os.path.exists(CONFIG_FILE):
settings.read(CONFIG_FILE)
return settings
return settings | [
"def",
"get_settings",
"(",
"config_file",
")",
":",
"default_settings",
"=",
"{",
"'general'",
":",
"{",
"'endpoint'",
":",
"'http://guacamole.antojitos.io/files/'",
",",
"'shortener'",
":",
"'http://t.antojitos.io/api/v1/urls'",
",",
"}",
"}",
"settings",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"try",
":",
"settings",
".",
"read_dict",
"(",
"default_settings",
")",
"except",
"AttributeError",
":",
"# using python 2.7",
"for",
"section",
",",
"options",
"in",
"default_settings",
".",
"items",
"(",
")",
":",
"settings",
".",
"add_section",
"(",
"section",
")",
"for",
"option",
",",
"value",
"in",
"options",
".",
"items",
"(",
")",
":",
"settings",
".",
"set",
"(",
"section",
",",
"option",
",",
"value",
")",
"if",
"config_file",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"config_file",
")",
":",
"settings",
".",
"read",
"(",
"config_file",
")",
"return",
"settings",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"CONFIG_FILE",
")",
":",
"settings",
".",
"read",
"(",
"CONFIG_FILE",
")",
"return",
"settings",
"return",
"settings"
]
| Search and load a configuration file. | [
"Search",
"and",
"load",
"a",
"configuration",
"file",
"."
]
| e3ae6b8eb08379ffb784978587bf24b168af73d0 | https://github.com/Antojitos/guacamole-cli/blob/e3ae6b8eb08379ffb784978587bf24b168af73d0/src/guacamole_cli/__init__.py#L16-L41 | train |
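A brief usage sketch, assuming the function is importable from the guacamole_cli package shown in the record path. A missing or nonexistent path falls through to the module-level CONFIG_FILE and then to the built-in defaults:

from guacamole_cli import get_settings

# Reads guacamole.ini if it exists; otherwise the defaults above apply.
settings = get_settings('guacamole.ini')
print(settings.get('general', 'endpoint'))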
ShadowBlip/Neteria | neteria/encryption.py | Encryption.encrypt | def encrypt(self, message, public_key):
"""Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
message. Only the paired private key can decrypt it.
Returns:
A json string of the list of encrypted lines of the message.
"""
# Get the maximum message length based on the key
max_str_len = rsa.common.byte_size(public_key.n) - 11
# If the message is longer than the key size, split it into a list to
# be encrypted
if len(message) > max_str_len:
message = textwrap.wrap(message, width=max_str_len)
else:
message = [message]
# Create a list for the encrypted message to send
enc_msg = []
# If we have a long message, loop through and encrypt each part of the
# string
for line in message:
# Encrypt the line in the message into a bytestring
enc_line = rsa.encrypt(line, public_key)
# Convert the encrypted bytestring into ASCII, so we can send it
# over the network
enc_line_converted = binascii.b2a_base64(enc_line)
enc_msg.append(enc_line_converted)
# Serialize the encrypted message again with json
enc_msg = json.dumps(enc_msg)
# Return the list of encrypted strings
return enc_msg | python | def encrypt(self, message, public_key):
"""Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
message. Only the paired private key can decrypt it.
Returns:
A json string of the list of encrypted lines of the message.
"""
# Get the maximum message length based on the key
max_str_len = rsa.common.byte_size(public_key.n) - 11
# If the message is longer than the key size, split it into a list to
# be encrypted
if len(message) > max_str_len:
message = textwrap.wrap(message, width=max_str_len)
else:
message = [message]
# Create a list for the encrypted message to send
enc_msg = []
# If we have a long message, loop through and encrypt each part of the
# string
for line in message:
# Encrypt the line in the message into a bytestring
enc_line = rsa.encrypt(line, public_key)
# Convert the encrypted bytestring into ASCII, so we can send it
# over the network
enc_line_converted = binascii.b2a_base64(enc_line)
enc_msg.append(enc_line_converted)
# Serialize the encrypted message again with json
enc_msg = json.dumps(enc_msg)
# Return the list of encrypted strings
return enc_msg | [
"def",
"encrypt",
"(",
"self",
",",
"message",
",",
"public_key",
")",
":",
"# Get the maximum message length based on the key",
"max_str_len",
"=",
"rsa",
".",
"common",
".",
"byte_size",
"(",
"public_key",
".",
"n",
")",
"-",
"11",
"# If the message is longer than the key size, split it into a list to",
"# be encrypted",
"if",
"len",
"(",
"message",
")",
">",
"max_str_len",
":",
"message",
"=",
"textwrap",
".",
"wrap",
"(",
"message",
",",
"width",
"=",
"max_str_len",
")",
"else",
":",
"message",
"=",
"[",
"message",
"]",
"# Create a list for the encrypted message to send",
"enc_msg",
"=",
"[",
"]",
"# If we have a long message, loop through and encrypt each part of the",
"# string",
"for",
"line",
"in",
"message",
":",
"# Encrypt the line in the message into a bytestring",
"enc_line",
"=",
"rsa",
".",
"encrypt",
"(",
"line",
",",
"public_key",
")",
"# Convert the encrypted bytestring into ASCII, so we can send it",
"# over the network",
"enc_line_converted",
"=",
"binascii",
".",
"b2a_base64",
"(",
"enc_line",
")",
"enc_msg",
".",
"append",
"(",
"enc_line_converted",
")",
"# Serialize the encrypted message again with json",
"enc_msg",
"=",
"json",
".",
"dumps",
"(",
"enc_msg",
")",
"# Return the list of encrypted strings",
"return",
"enc_msg"
]
| Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
message. Only the paired private key can decrypt it.
Returns:
A json string of the list of encrypted lines of the message. | [
"Encrypts",
"a",
"string",
"using",
"a",
"given",
"rsa",
".",
"PublicKey",
"object",
".",
"If",
"the",
"message",
"is",
"larger",
"than",
"the",
"key",
"it",
"will",
"split",
"it",
"up",
"into",
"a",
"list",
"and",
"encrypt",
"each",
"line",
"in",
"the",
"list",
"."
]
| 1a8c976eb2beeca0a5a272a34ac58b2c114495a4 | https://github.com/ShadowBlip/Neteria/blob/1a8c976eb2beeca0a5a272a34ac58b2c114495a4/neteria/encryption.py#L43-L87 | train |
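The chunk size used above comes straight from PKCS#1 v1.5 padding overhead: byte_size(n) minus 11 bytes of plaintext per block. A small sketch with the rsa library, the same dependency the method uses:

import rsa
import rsa.common

# A 512-bit key gives a 64-byte modulus, so at most 53 plaintext
# bytes fit in each encrypted block.
public_key, private_key = rsa.newkeys(512)
max_str_len = rsa.common.byte_size(public_key.n) - 11
print(max_str_len)  # 53

Note that the method passes str lines to rsa.encrypt, which points at a Python 2 target; under Python 3 the rsa library expects bytes.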
ShadowBlip/Neteria | neteria/encryption.py | Encryption.decrypt | def decrypt(self, message):
"""Decrypts a string using our own private key object.
Args:
message (string): The string of the message to decrypt.
Returns:
The unencrypted string.
"""
# Unserialize the encrypted message
message = json.loads(message)
# Set up a list for the unencrypted lines of the message
unencrypted_msg = []
for line in message:
# Convert from ascii back to bytestring
enc_line = binascii.a2b_base64(line)
# Decrypt the line using our private key
unencrypted_line = rsa.decrypt(enc_line, self.private_key)
unencrypted_msg.append(unencrypted_line)
# Convert the message from a list back into a string
unencrypted_msg = "".join(unencrypted_msg)
return unencrypted_msg | python | def decrypt(self, message):
"""Decrypts a string using our own private key object.
Args:
message (string): The string of the message to decrypt.
Returns:
The unencrypted string.
"""
# Unserialize the encrypted message
message = json.loads(message)
# Set up a list for the unencrypted lines of the message
unencrypted_msg = []
for line in message:
# Convert from ascii back to bytestring
enc_line = binascii.a2b_base64(line)
# Decrypt the line using our private key
unencrypted_line = rsa.decrypt(enc_line, self.private_key)
unencrypted_msg.append(unencrypted_line)
# Convert the message from a list back into a string
unencrypted_msg = "".join(unencrypted_msg)
return unencrypted_msg | [
"def",
"decrypt",
"(",
"self",
",",
"message",
")",
":",
"# Unserialize the encrypted message",
"message",
"=",
"json",
".",
"loads",
"(",
"message",
")",
"# Set up a list for the unencrypted lines of the message",
"unencrypted_msg",
"=",
"[",
"]",
"for",
"line",
"in",
"message",
":",
"# Convert from ascii back to bytestring",
"enc_line",
"=",
"binascii",
".",
"a2b_base64",
"(",
"line",
")",
"# Decrypt the line using our private key",
"unencrypted_line",
"=",
"rsa",
".",
"decrypt",
"(",
"enc_line",
",",
"self",
".",
"private_key",
")",
"unencrypted_msg",
".",
"append",
"(",
"unencrypted_line",
")",
"# Convert the message from a list back into a string",
"unencrypted_msg",
"=",
"\"\"",
".",
"join",
"(",
"unencrypted_msg",
")",
"return",
"unencrypted_msg"
]
| Decrypts a string using our own private key object.
Args:
message (string): The string of the message to decrypt.
Returns:
The unencrypted string. | [
"Decrypts",
"a",
"string",
"using",
"our",
"own",
"private",
"key",
"object",
"."
]
| 1a8c976eb2beeca0a5a272a34ac58b2c114495a4 | https://github.com/ShadowBlip/Neteria/blob/1a8c976eb2beeca0a5a272a34ac58b2c114495a4/neteria/encryption.py#L90-L120 | train |
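A hypothetical round trip pairing this method with encrypt above. The constructor behavior and the public_key attribute are assumptions; only self.private_key is confirmed by the code in this record:

from neteria.encryption import Encryption

# Assumed: Encryption() generates a keypair on construction and
# exposes the public half as .public_key.
crypto = Encryption()
ciphertext = crypto.encrypt("hello world", crypto.public_key)
assert crypto.decrypt(ciphertext) == "hello world"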
mastro35/flows | flows/Actions/InputWatchdogAction.py | DannyFileSystemEventHandler.on_any_event | def on_any_event(self, event):
"""On any event method"""
for delegate in self.delegates:
if hasattr(delegate, "on_any_event"):
delegate.on_any_event(event) | python | def on_any_event(self, event):
"""On any event method"""
for delegate in self.delegates:
if hasattr(delegate, "on_any_event"):
delegate.on_any_event(event) | [
"def",
"on_any_event",
"(",
"self",
",",
"event",
")",
":",
"for",
"delegate",
"in",
"self",
".",
"delegates",
":",
"if",
"hasattr",
"(",
"delegate",
",",
"\"on_any_event\"",
")",
":",
"delegate",
".",
"on_any_event",
"(",
"event",
")"
]
| On any event method | [
"On",
"any",
"event",
"method"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L28-L32 | train |
mastro35/flows | flows/Actions/InputWatchdogAction.py | DannyFileSystemEventHandler.on_created | def on_created(self, event):
"""On created method"""
for delegate in self.delegates:
if hasattr(delegate, "on_created"):
delegate.on_created(event) | python | def on_created(self, event):
"""On created method"""
for delegate in self.delegates:
if hasattr(delegate, "on_created"):
delegate.on_created(event) | [
"def",
"on_created",
"(",
"self",
",",
"event",
")",
":",
"for",
"delegate",
"in",
"self",
".",
"delegates",
":",
"if",
"hasattr",
"(",
"delegate",
",",
"\"on_created\"",
")",
":",
"delegate",
".",
"on_created",
"(",
"event",
")"
]
| On created method | [
"On",
"created",
"method"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L34-L38 | train |
mastro35/flows | flows/Actions/InputWatchdogAction.py | DannyFileSystemEventHandler.on_deleted | def on_deleted(self, event):
"""On deleted method"""
for delegate in self.delegates:
if hasattr(delegate, "on_deleted"):
delegate.on_deleted(event) | python | def on_deleted(self, event):
"""On deleted method"""
for delegate in self.delegates:
if hasattr(delegate, "on_deleted"):
delegate.on_deleted(event) | [
"def",
"on_deleted",
"(",
"self",
",",
"event",
")",
":",
"for",
"delegate",
"in",
"self",
".",
"delegates",
":",
"if",
"hasattr",
"(",
"delegate",
",",
"\"on_deleted\"",
")",
":",
"delegate",
".",
"on_deleted",
"(",
"event",
")"
]
| On deleted method | [
"On",
"deleted",
"method"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L40-L44 | train |
mastro35/flows | flows/Actions/InputWatchdogAction.py | DannyFileSystemEventHandler.on_modified | def on_modified(self, event):
"""On modified method"""
for delegate in self.delegates:
if hasattr(delegate, "on_modified"):
delegate.on_modified(event) | python | def on_modified(self, event):
"""On modified method"""
for delegate in self.delegates:
if hasattr(delegate, "on_modified"):
delegate.on_modified(event) | [
"def",
"on_modified",
"(",
"self",
",",
"event",
")",
":",
"for",
"delegate",
"in",
"self",
".",
"delegates",
":",
"if",
"hasattr",
"(",
"delegate",
",",
"\"on_modified\"",
")",
":",
"delegate",
".",
"on_modified",
"(",
"event",
")"
]
| On modified method | [
"On",
"modified",
"method"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L46-L50 | train |
mastro35/flows | flows/Actions/InputWatchdogAction.py | DannyFileSystemEventHandler.on_moved | def on_moved(self, event):
"""On moved method"""
for delegate in self.delegates:
if hasattr(delegate, "on_moved"):
delegate.on_moved(event) | python | def on_moved(self, event):
"""On moved method"""
for delegate in self.delegates:
if hasattr(delegate, "on_moved"):
delegate.on_moved(event) | [
"def",
"on_moved",
"(",
"self",
",",
"event",
")",
":",
"for",
"delegate",
"in",
"self",
".",
"delegates",
":",
"if",
"hasattr",
"(",
"delegate",
",",
"\"on_moved\"",
")",
":",
"delegate",
".",
"on_moved",
"(",
"event",
")"
]
| On moved method | [
"On",
"moved",
"method"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L52-L56 | train |
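The five handlers above all follow the same fan-out pattern: forward each watchdog event to every registered delegate that defines the matching hook. A sketch of that pattern in isolation; how delegates get attached to DannyFileSystemEventHandler is an assumption here:

from flows.Actions.InputWatchdogAction import DannyFileSystemEventHandler

class PrintOnCreate(object):
    def on_created(self, event):
        print('created: {0}'.format(event))

class PrintOnModify(object):
    def on_modified(self, event):
        print('modified: {0}'.format(event))

handler = DannyFileSystemEventHandler()  # constructor signature assumed
handler.delegates = [PrintOnCreate(), PrintOnModify()]
# The hasattr() guard means each event only reaches delegates that
# implement the corresponding hook.
handler.on_created('some/path')   # handled by PrintOnCreate only
handler.on_modified('some/path')  # handled by PrintOnModify only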
mastro35/flows | flows/Actions/InputWatchdogAction.py | WatchdogAction.on_created | def on_created(self, event):
'''Fired when something's been created'''
if self.trigger != "create":
return
action_input = ActionInput(event, "", self.name)
flows.Global.MESSAGE_DISPATCHER.send_message(action_input) | python | def on_created(self, event):
'''Fired when something's been created'''
if self.trigger != "create":
return
action_input = ActionInput(event, "", self.name)
flows.Global.MESSAGE_DISPATCHER.send_message(action_input) | [
"def",
"on_created",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"trigger",
"!=",
"\"create\"",
":",
"return",
"action_input",
"=",
"ActionInput",
"(",
"event",
",",
"\"\"",
",",
"self",
".",
"name",
")",
"flows",
".",
"Global",
".",
"MESSAGE_DISPATCHER",
".",
"send_message",
"(",
"action_input",
")"
]
| Fired when something's been created | [
"Fired",
"when",
"something",
"s",
"been",
"created"
]
| 05e488385673a69597b5b39c7728795aa4d5eb18 | https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputWatchdogAction.py#L118-L123 | train |
zsimic/runez | src/runez/heartbeat.py | Heartbeat.start | def start(cls):
"""Start background thread if not already started"""
if cls._thread is None:
cls._thread = threading.Thread(target=cls._run, name="Heartbeat")
cls._thread.daemon = True
cls._thread.start() | python | def start(cls):
"""Start background thread if not already started"""
if cls._thread is None:
cls._thread = threading.Thread(target=cls._run, name="Heartbeat")
cls._thread.daemon = True
cls._thread.start() | [
"def",
"start",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_thread",
"is",
"None",
":",
"cls",
".",
"_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"cls",
".",
"_run",
",",
"name",
"=",
"\"Heartbeat\"",
")",
"cls",
".",
"_thread",
".",
"daemon",
"=",
"True",
"cls",
".",
"_thread",
".",
"start",
"(",
")"
]
| Start background thread if not already started | [
"Start",
"background",
"thread",
"if",
"not",
"already",
"started"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/heartbeat.py#L82-L87 | train |
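The guard in start() is a common lazy-singleton idiom for background threads. A self-contained sketch of the same idea, independent of the runez internals:

import threading
import time

class Poller(object):
    _thread = None

    @classmethod
    def start(cls):
        # Idempotent: repeat calls reuse the single daemon thread,
        # mirroring the `if cls._thread is None` guard above.
        if cls._thread is None:
            cls._thread = threading.Thread(target=cls._run, name='Poller')
            cls._thread.daemon = True
            cls._thread.start()

    @classmethod
    def _run(cls):
        while True:
            time.sleep(1)

Poller.start()
Poller.start()  # no second thread is created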
zsimic/runez | src/runez/heartbeat.py | Heartbeat.resolved_task | def resolved_task(cls, task):
"""Task instance representing 'task', if any"""
for t in cls.tasks:
if t is task or t.execute is task:
return t | python | def resolved_task(cls, task):
"""Task instance representing 'task', if any"""
for t in cls.tasks:
if t is task or t.execute is task:
return t | [
"def",
"resolved_task",
"(",
"cls",
",",
"task",
")",
":",
"for",
"t",
"in",
"cls",
".",
"tasks",
":",
"if",
"t",
"is",
"task",
"or",
"t",
".",
"execute",
"is",
"task",
":",
"return",
"t"
]
| Task instance representing 'task', if any | [
"Task",
"instance",
"representing",
"task",
"if",
"any"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/heartbeat.py#L118-L122 | train |
zsimic/runez | src/runez/heartbeat.py | Heartbeat._run | def _run(cls):
"""Background thread's main function, execute registered tasks accordingly to their frequencies"""
if cls._thread:
with cls._lock:
# First run: execute each task once to get it started
for task in cls.tasks:
cls._execute_task(task)
cls.tasks.sort()
cls._last_execution = time.time()
while cls._thread:
with cls._lock:
if cls.tasks:
for task in cls.tasks:
if task.next_execution - cls._last_execution < 0.5:
cls._execute_task(task)
else:
break
cls.tasks.sort()
cls._last_execution = time.time()
cls._sleep_delay = cls.tasks[0].next_execution - cls._last_execution
else:
cls._sleep_delay = 1
sleep_delay = max(0.1, cls._sleep_delay)
# Don't hold cls._lock while sleeping, sleep delay should be 1 second when no tasks are present
time.sleep(sleep_delay) | python | def _run(cls):
"""Background thread's main function, execute registered tasks accordingly to their frequencies"""
if cls._thread:
with cls._lock:
# First run: execute each task once to get it started
for task in cls.tasks:
cls._execute_task(task)
cls.tasks.sort()
cls._last_execution = time.time()
while cls._thread:
with cls._lock:
if cls.tasks:
for task in cls.tasks:
if task.next_execution - cls._last_execution < 0.5:
cls._execute_task(task)
else:
break
cls.tasks.sort()
cls._last_execution = time.time()
cls._sleep_delay = cls.tasks[0].next_execution - cls._last_execution
else:
cls._sleep_delay = 1
sleep_delay = max(0.1, cls._sleep_delay)
# Don't hold cls._lock while sleeping, sleep delay should be 1 second when no tasks are present
time.sleep(sleep_delay) | [
"def",
"_run",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_thread",
":",
"with",
"cls",
".",
"_lock",
":",
"# First run: execute each task once to get it started",
"for",
"task",
"in",
"cls",
".",
"tasks",
":",
"cls",
".",
"_execute_task",
"(",
"task",
")",
"cls",
".",
"tasks",
".",
"sort",
"(",
")",
"cls",
".",
"_last_execution",
"=",
"time",
".",
"time",
"(",
")",
"while",
"cls",
".",
"_thread",
":",
"with",
"cls",
".",
"_lock",
":",
"if",
"cls",
".",
"tasks",
":",
"for",
"task",
"in",
"cls",
".",
"tasks",
":",
"if",
"task",
".",
"next_execution",
"-",
"cls",
".",
"_last_execution",
"<",
"0.5",
":",
"cls",
".",
"_execute_task",
"(",
"task",
")",
"else",
":",
"break",
"cls",
".",
"tasks",
".",
"sort",
"(",
")",
"cls",
".",
"_last_execution",
"=",
"time",
".",
"time",
"(",
")",
"cls",
".",
"_sleep_delay",
"=",
"cls",
".",
"tasks",
"[",
"0",
"]",
".",
"next_execution",
"-",
"cls",
".",
"_last_execution",
"else",
":",
"cls",
".",
"_sleep_delay",
"=",
"1",
"sleep_delay",
"=",
"max",
"(",
"0.1",
",",
"cls",
".",
"_sleep_delay",
")",
"# Don't hold cls._lock while sleeping, sleep delay should be 1 second when no tasks are present",
"time",
".",
"sleep",
"(",
"sleep_delay",
")"
]
| Background thread's main function, execute registered tasks according to their frequencies | [
"Background",
"thread",
"s",
"main",
"function",
"execute",
"registered",
"tasks",
"accordingly",
"to",
"their",
"frequencies"
]
| 14363b719a1aae1528859a501a22d075ce0abfcc | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/heartbeat.py#L149-L177 | train |
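The loop above keeps tasks sorted by next execution time, runs everything due within half a second, then sleeps only until the earliest remaining task. A toy, bounded version of that scheduling idea:

import time

tasks = [
    {'name': 'fast', 'freq': 1.0, 'next': time.time()},
    {'name': 'slow', 'freq': 3.0, 'next': time.time()},
]
for _ in range(5):  # bounded so the sketch terminates
    tasks.sort(key=lambda t: t['next'])
    now = time.time()
    for task in tasks:
        if task['next'] - now < 0.5:
            print('executing', task['name'])
            task['next'] = now + task['freq']
        else:
            break  # sorted order: nothing later is due either
    tasks.sort(key=lambda t: t['next'])
    time.sleep(max(0.1, tasks[0]['next'] - time.time()))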
wesleybeckner/salty | salty/visualization.py | parity_plot | def parity_plot(X, Y, model, devmodel, axes_labels=None):
"""
A standard method of creating parity plots between predicted and
experimental values for trained models.
Parameters
----------
X: array
experimental input data
Y: array
experimental output data
model: model object
either sklearn or keras ML model
devmodel: dev_model object
salty dev_model
axes_labels: dict
optional. Default behavior is to use the labels in the dev_model
object.
Returns
------------------
plt: matplotlib object
parity plot of predicted vs experimental values
"""
model_outputs = Y.shape[1]
with plt.style.context('seaborn-whitegrid'):
fig = plt.figure(figsize=(2.5 * model_outputs, 2.5), dpi=300)
for i in range(model_outputs):
ax = fig.add_subplot(1, model_outputs, i + 1)
minval = np.min([np.exp(model.predict(X)[:, i]), np.exp(Y)[:, i]])
maxval = np.max([np.exp(model.predict(X)[:, i]), np.exp(Y)[:, i]])
buffer = (maxval - minval) / 100 * 2
minval = minval - buffer
maxval = maxval + buffer
ax.plot([minval, maxval], [minval, maxval], linestyle="-",
label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:, i], np.exp(model.predict(X))[:, i],
marker="*", linestyle="", alpha=0.4)
if axes_labels:
ax.set_ylabel("Predicted {}".format(
axes_labels['{}'.format(i)]))
ax.set_xlabel("Actual {}".format(
axes_labels['{}'.format(i)]))
else:
ax.set_ylabel("Predicted {}".format(
devmodel.Data.columns[-(6 - i)].split("<")[0]),
wrap=True, fontsize=5)
ax.set_xlabel("Actual {}".format(
devmodel.Data.columns[-(6 - i)].split("<")[0]),
wrap=True, fontsize=5)
plt.xlim(minval, maxval)
plt.ylim(minval, maxval)
ax.grid()
plt.tight_layout()
return plt | python | def parity_plot(X, Y, model, devmodel, axes_labels=None):
"""
A standard method of creating parity plots between predicted and
experimental values for trained models.
Parameters
----------
X: array
experimental input data
Y: array
experimental output data
model: model object
either sklearn or keras ML model
devmodel: dev_model object
salty dev_model
axes_labels: dict
optional. Default behavior is to use the labels in the dev_model
object.
Returns
------------------
plt: matplotlib object
parity plot of predicted vs experimental values
"""
model_outputs = Y.shape[1]
with plt.style.context('seaborn-whitegrid'):
fig = plt.figure(figsize=(2.5 * model_outputs, 2.5), dpi=300)
for i in range(model_outputs):
ax = fig.add_subplot(1, model_outputs, i + 1)
minval = np.min([np.exp(model.predict(X)[:, i]), np.exp(Y)[:, i]])
maxval = np.max([np.exp(model.predict(X)[:, i]), np.exp(Y)[:, i]])
buffer = (maxval - minval) / 100 * 2
minval = minval - buffer
maxval = maxval + buffer
ax.plot([minval, maxval], [minval, maxval], linestyle="-",
label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:, i], np.exp(model.predict(X))[:, i],
marker="*", linestyle="", alpha=0.4)
if axes_labels:
ax.set_ylabel("Predicted {}".format(
axes_labels['{}'.format(i)]))
ax.set_xlabel("Actual {}".format(
axes_labels['{}'.format(i)]))
else:
ax.set_ylabel("Predicted {}".format(
devmodel.Data.columns[-(6 - i)].split("<")[0]),
wrap=True, fontsize=5)
ax.set_xlabel("Actual {}".format(
devmodel.Data.columns[-(6 - i)].split("<")[0]),
wrap=True, fontsize=5)
plt.xlim(minval, maxval)
plt.ylim(minval, maxval)
ax.grid()
plt.tight_layout()
return plt | [
"def",
"parity_plot",
"(",
"X",
",",
"Y",
",",
"model",
",",
"devmodel",
",",
"axes_labels",
"=",
"None",
")",
":",
"model_outputs",
"=",
"Y",
".",
"shape",
"[",
"1",
"]",
"with",
"plt",
".",
"style",
".",
"context",
"(",
"'seaborn-whitegrid'",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"2.5",
"*",
"model_outputs",
",",
"2.5",
")",
",",
"dpi",
"=",
"300",
")",
"for",
"i",
"in",
"range",
"(",
"model_outputs",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"model_outputs",
",",
"i",
"+",
"1",
")",
"minval",
"=",
"np",
".",
"min",
"(",
"[",
"np",
".",
"exp",
"(",
"model",
".",
"predict",
"(",
"X",
")",
"[",
":",
",",
"i",
"]",
")",
",",
"np",
".",
"exp",
"(",
"Y",
")",
"[",
":",
",",
"i",
"]",
"]",
")",
"maxval",
"=",
"np",
".",
"max",
"(",
"[",
"np",
".",
"exp",
"(",
"model",
".",
"predict",
"(",
"X",
")",
"[",
":",
",",
"i",
"]",
")",
",",
"np",
".",
"exp",
"(",
"Y",
")",
"[",
":",
",",
"i",
"]",
"]",
")",
"buffer",
"=",
"(",
"maxval",
"-",
"minval",
")",
"/",
"100",
"*",
"2",
"minval",
"=",
"minval",
"-",
"buffer",
"maxval",
"=",
"maxval",
"+",
"buffer",
"ax",
".",
"plot",
"(",
"[",
"minval",
",",
"maxval",
"]",
",",
"[",
"minval",
",",
"maxval",
"]",
",",
"linestyle",
"=",
"\"-\"",
",",
"label",
"=",
"None",
",",
"c",
"=",
"\"black\"",
",",
"linewidth",
"=",
"1",
")",
"ax",
".",
"plot",
"(",
"np",
".",
"exp",
"(",
"Y",
")",
"[",
":",
",",
"i",
"]",
",",
"np",
".",
"exp",
"(",
"model",
".",
"predict",
"(",
"X",
")",
")",
"[",
":",
",",
"i",
"]",
",",
"marker",
"=",
"\"*\"",
",",
"linestyle",
"=",
"\"\"",
",",
"alpha",
"=",
"0.4",
")",
"if",
"axes_labels",
":",
"ax",
".",
"set_ylabel",
"(",
"\"Predicted {}\"",
".",
"format",
"(",
"axes_labels",
"[",
"'{}'",
".",
"format",
"(",
"i",
")",
"]",
")",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Actual {}\"",
".",
"format",
"(",
"axes_labels",
"[",
"'{}'",
".",
"format",
"(",
"i",
")",
"]",
")",
")",
"else",
":",
"ax",
".",
"set_ylabel",
"(",
"\"Predicted {}\"",
".",
"format",
"(",
"devmodel",
".",
"Data",
".",
"columns",
"[",
"-",
"(",
"6",
"-",
"i",
")",
"]",
".",
"split",
"(",
"\"<\"",
")",
"[",
"0",
"]",
")",
",",
"wrap",
"=",
"True",
",",
"fontsize",
"=",
"5",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Actual {}\"",
".",
"format",
"(",
"devmodel",
".",
"Data",
".",
"columns",
"[",
"-",
"(",
"6",
"-",
"i",
")",
"]",
".",
"split",
"(",
"\"<\"",
")",
"[",
"0",
"]",
")",
",",
"wrap",
"=",
"True",
",",
"fontsize",
"=",
"5",
")",
"plt",
".",
"xlim",
"(",
"minval",
",",
"maxval",
")",
"plt",
".",
"ylim",
"(",
"minval",
",",
"maxval",
")",
"ax",
".",
"grid",
"(",
")",
"plt",
".",
"tight_layout",
"(",
")",
"return",
"plt"
]
| A standard method of creating parity plots between predicted and
experimental values for trained models.
Parameters
----------
X: array
experimental input data
Y: array
experimental output data
model: model object
either sklearn or keras ML model
devmodel: dev_model object
salty dev_model
axes_labels: dict
optional. Default behavior is to use the labels in the dev_model
object.
Returns
------------------
plt: matplotlib object
parity plot of predicted vs experimental values | [
"A",
"standard",
"method",
"of",
"creating",
"parity",
"plots",
"between",
"predicted",
"and",
"experimental",
"values",
"for",
"trained",
"models",
"."
]
| ef17a97aea3e4f81fcd0359ce85b3438c0e6499b | https://github.com/wesleybeckner/salty/blob/ef17a97aea3e4f81fcd0359ce85b3438c0e6499b/salty/visualization.py#L6-L60 | train |
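A usage sketch with scikit-learn. X and Y are assumed to be log-transformed, since the function exponentiates both targets and predictions, and devmodel can be None when axes_labels is supplied because it is only consulted for labels. This also assumes a matplotlib version old enough to still ship the 'seaborn-whitegrid' style:

import numpy as np
from sklearn.linear_model import LinearRegression
from salty.visualization import parity_plot

X = np.random.rand(100, 3)
Y = np.log(np.random.rand(100, 1) + 1.0)  # one log-space output
model = LinearRegression().fit(X, Y)
plot = parity_plot(X, Y, model, None, axes_labels={'0': 'viscosity'})
plot.show()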
potash/drain | drain/data.py | expand_dates | def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2 | python | def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2 | [
"def",
"expand_dates",
"(",
"df",
",",
"columns",
"=",
"[",
"]",
")",
":",
"columns",
"=",
"df",
".",
"columns",
".",
"intersection",
"(",
"columns",
")",
"df2",
"=",
"df",
".",
"reindex",
"(",
"columns",
"=",
"set",
"(",
"df",
".",
"columns",
")",
".",
"difference",
"(",
"columns",
")",
")",
"for",
"column",
"in",
"columns",
":",
"df2",
"[",
"column",
"+",
"'_year'",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"year",
")",
"df2",
"[",
"column",
"+",
"'_month'",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"month",
")",
"df2",
"[",
"column",
"+",
"'_day'",
"]",
"=",
"df",
"[",
"column",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"day",
")",
"return",
"df2"
]
| generate year, month, day features from specified date features | [
"generate",
"year",
"month",
"day",
"features",
"from",
"specified",
"date",
"features"
]
| ddd62081cb9317beb5d21f86c8b4bb196ca3d222 | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L203-L213 | train |
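A quick demonstration, run against a pandas version contemporary with the library; the date column is replaced by _year, _month and _day features while other columns pass through:

import pandas as pd
from drain.data import expand_dates

df = pd.DataFrame({'opened': pd.to_datetime(['2015-01-02', '2016-03-04']),
                   'value': [1, 2]})
# 'opened' is gone; opened_year, opened_month and opened_day are added.
print(expand_dates(df, columns=['opened']))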
potash/drain | drain/data.py | binarize | def binarize(df, category_classes, all_classes=True, drop=True,
astype=None, inplace=True, min_freq=None):
"""
Binarize specified categoricals. Works inplace!
Args:
- df: the DataFrame whose columns to binarize
- category_classes: either a dict of (column : [class1, class2, ...]) pairs
or a collection of column names, in which case classes are
given using df[column].unique()
- all_classes: when False, the last class is skipped
- drop: when True, the original categorical columns are dropped
- astype: a type for the resulting binaries, e.g. np.float32.
When None, use the default (bool).
- inplace: whether to modify the DataFrame inplace
Returns:
the DataFrame with binarized columns
"""
if type(category_classes) is not dict:
columns = set(category_classes)
category_classes = {column: df[column].unique() for column in columns}
else:
columns = category_classes.keys()
df_new = df if inplace else df.drop(columns, axis=1)
for category in columns:
classes = category_classes[category]
for i in range(len(classes)-1 if not all_classes else len(classes)):
c = df[category] == classes[i]
if not min_freq or c.sum() >= min_freq:
if astype is not None:
c = c.astype(astype)
df_new['%s_%s' % (category, str(classes[i]).replace(' ', '_'))] = c
if drop and inplace:
df_new.drop(columns, axis=1, inplace=True)
return df_new | python | def binarize(df, category_classes, all_classes=True, drop=True,
astype=None, inplace=True, min_freq=None):
"""
Binarize specified categoricals. Works inplace!
Args:
- df: the DataFrame whose columns to binarize
- category_classes: either a dict of (column : [class1, class2, ...]) pairs
or a collection of column names, in which case classes are
given using df[column].unique()
- all_classes: when False, the last class is skipped
- drop: when True, the original categorical columns are dropped
- astype: a type for the resulting binaries, e.g. np.float32.
When None, use the default (bool).
- inplace: whether to modify the DataFrame inplace
Returns:
the DataFrame with binarized columns
"""
if type(category_classes) is not dict:
columns = set(category_classes)
category_classes = {column: df[column].unique() for column in columns}
else:
columns = category_classes.keys()
df_new = df if inplace else df.drop(columns, axis=1)
for category in columns:
classes = category_classes[category]
for i in range(len(classes)-1 if not all_classes else len(classes)):
c = df[category] == classes[i]
if not min_freq or c.sum() >= min_freq:
if astype is not None:
c = c.astype(astype)
df_new['%s_%s' % (category, str(classes[i]).replace(' ', '_'))] = c
if drop and inplace:
df_new.drop(columns, axis=1, inplace=True)
return df_new | [
"def",
"binarize",
"(",
"df",
",",
"category_classes",
",",
"all_classes",
"=",
"True",
",",
"drop",
"=",
"True",
",",
"astype",
"=",
"None",
",",
"inplace",
"=",
"True",
",",
"min_freq",
"=",
"None",
")",
":",
"if",
"type",
"(",
"category_classes",
")",
"is",
"not",
"dict",
":",
"columns",
"=",
"set",
"(",
"category_classes",
")",
"category_classes",
"=",
"{",
"column",
":",
"df",
"[",
"column",
"]",
".",
"unique",
"(",
")",
"for",
"column",
"in",
"columns",
"}",
"else",
":",
"columns",
"=",
"category_classes",
".",
"keys",
"(",
")",
"df_new",
"=",
"df",
"if",
"inplace",
"else",
"df",
".",
"drop",
"(",
"columns",
",",
"axis",
"=",
"1",
")",
"for",
"category",
"in",
"columns",
":",
"classes",
"=",
"category_classes",
"[",
"category",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"classes",
")",
"-",
"1",
"if",
"not",
"all_classes",
"else",
"len",
"(",
"classes",
")",
")",
":",
"c",
"=",
"df",
"[",
"category",
"]",
"==",
"classes",
"[",
"i",
"]",
"if",
"not",
"min_freq",
"or",
"c",
".",
"sum",
"(",
")",
">=",
"min_freq",
":",
"if",
"astype",
"is",
"not",
"None",
":",
"c",
"=",
"c",
".",
"astype",
"(",
"astype",
")",
"df_new",
"[",
"'%s_%s'",
"%",
"(",
"category",
",",
"str",
"(",
"classes",
"[",
"i",
"]",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
")",
"]",
"=",
"c",
"if",
"drop",
"and",
"inplace",
":",
"df_new",
".",
"drop",
"(",
"columns",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"return",
"df_new"
]
| Binarize specified categoricals. Works inplace!
Args:
- df: the DataFrame whose columns to binarize
- category_classes: either a dict of (column : [class1, class2, ...]) pairs
or a collection of column names, in which case classes are
given using df[column].unique()
- all_classes: when False, the last class is skipped
- drop: when True, the original categorical columns are dropped
- astype: a type for the resulting binaries, e.g. np.float32.
When None, use the default (bool).
- inplace: whether to modify the DataFrame inplace
Returns:
the DataFrame with binarized columns | [
"Binarize",
"specified",
"categoricals",
".",
"Works",
"inplace!"
]
| ddd62081cb9317beb5d21f86c8b4bb196ca3d222 | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L216-L255 | train |
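A small in-place example; passing a plain list of column names makes the function derive the classes from df[column].unique():

import pandas as pd
from drain.data import binarize

df = pd.DataFrame({'color': ['red', 'blue', 'red'], 'n': [1, 2, 3]})
binarize(df, ['color'])
# 'color' is dropped; boolean color_red and color_blue columns appear.
print(df.columns.tolist())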
potash/drain | drain/data.py | select_regexes | def select_regexes(strings, regexes):
"""
select subset of strings matching a regex
treats strings as a set
"""
strings = set(strings)
select = set()
if isinstance(strings, collections.Iterable):
for r in regexes:
s = set(filter(re.compile('^' + r + '$').search, strings))
strings -= s
select |= s
return select
else:
raise ValueError("exclude should be iterable") | python | def select_regexes(strings, regexes):
"""
select subset of strings matching a regex
treats strings as a set
"""
strings = set(strings)
select = set()
if isinstance(strings, collections.Iterable):
for r in regexes:
s = set(filter(re.compile('^' + r + '$').search, strings))
strings -= s
select |= s
return select
else:
raise ValueError("exclude should be iterable") | [
"def",
"select_regexes",
"(",
"strings",
",",
"regexes",
")",
":",
"strings",
"=",
"set",
"(",
"strings",
")",
"select",
"=",
"set",
"(",
")",
"if",
"isinstance",
"(",
"strings",
",",
"collections",
".",
"Iterable",
")",
":",
"for",
"r",
"in",
"regexes",
":",
"s",
"=",
"set",
"(",
"filter",
"(",
"re",
".",
"compile",
"(",
"'^'",
"+",
"r",
"+",
"'$'",
")",
".",
"search",
",",
"strings",
")",
")",
"strings",
"-=",
"s",
"select",
"|=",
"s",
"return",
"select",
"else",
":",
"raise",
"ValueError",
"(",
"\"exclude should be iterable\"",
")"
]
| select subset of strings matching a regex
treats strings as a set | [
"select",
"subset",
"of",
"strings",
"matching",
"a",
"regex",
"treats",
"strings",
"as",
"a",
"set"
]
| ddd62081cb9317beb5d21f86c8b4bb196ca3d222 | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L399-L413 | train |
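Each pattern is anchored with ^ and $ before matching, so the regexes describe whole names. A short example, noting that the collections.Iterable check above requires a Python version before 3.10:

from drain.data import select_regexes

columns = ['age', 'age_mean', 'income', 'income_mean']
print(select_regexes(columns, ['age.*']))  # {'age', 'age_mean'}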
potash/drain | drain/data.py | parse_delta | def parse_delta(s):
"""
parse a string to a delta
'all' is represented by None
"""
if s == 'all':
return None
else:
ls = delta_regex.findall(s)
if len(ls) == 1:
return relativedelta(**{delta_chars[ls[0][1]]: int(ls[0][0])})
else:
raise ValueError('Invalid delta string: %s' % s) | python | def parse_delta(s):
"""
parse a string to a delta
'all' is represented by None
"""
if s == 'all':
return None
else:
ls = delta_regex.findall(s)
if len(ls) == 1:
return relativedelta(**{delta_chars[ls[0][1]]: int(ls[0][0])})
else:
raise ValueError('Invalid delta string: %s' % s) | [
"def",
"parse_delta",
"(",
"s",
")",
":",
"if",
"s",
"==",
"'all'",
":",
"return",
"None",
"else",
":",
"ls",
"=",
"delta_regex",
".",
"findall",
"(",
"s",
")",
"if",
"len",
"(",
"ls",
")",
"==",
"1",
":",
"return",
"relativedelta",
"(",
"*",
"*",
"{",
"delta_chars",
"[",
"ls",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
":",
"int",
"(",
"ls",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"}",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid delta string: %s'",
"%",
"s",
")"
]
| parse a string to a delta
'all' is represented by None | [
"parse",
"a",
"string",
"to",
"a",
"delta",
"all",
"is",
"represented",
"by",
"None"
]
| ddd62081cb9317beb5d21f86c8b4bb196ca3d222 | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L623-L635 | train |
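A hedged example; the delta_chars table is not shown in this record, so the 'd' to days mapping below is an assumption about it:

from drain.data import parse_delta

print(parse_delta('30d'))  # relativedelta(days=+30), assuming 'd' maps to days
print(parse_delta('all'))  # None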
potash/drain | drain/data.py | Column.apply | def apply(self, df):
"""Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`.
"""
if hasattr(self.definition, '__call__'):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r | python | def apply(self, df):
"""Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`.
"""
if hasattr(self.definition, '__call__'):
r = self.definition(df)
elif self.definition in df.columns:
r = df[self.definition]
elif not isinstance(self.definition, string_types):
r = pd.Series(self.definition, index=df.index)
else:
raise ValueError("Invalid column definition: %s" % str(self.definition))
return r.astype(self.astype) if self.astype else r | [
"def",
"apply",
"(",
"self",
",",
"df",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"definition",
",",
"'__call__'",
")",
":",
"r",
"=",
"self",
".",
"definition",
"(",
"df",
")",
"elif",
"self",
".",
"definition",
"in",
"df",
".",
"columns",
":",
"r",
"=",
"df",
"[",
"self",
".",
"definition",
"]",
"elif",
"not",
"isinstance",
"(",
"self",
".",
"definition",
",",
"string_types",
")",
":",
"r",
"=",
"pd",
".",
"Series",
"(",
"self",
".",
"definition",
",",
"index",
"=",
"df",
".",
"index",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid column definition: %s\"",
"%",
"str",
"(",
"self",
".",
"definition",
")",
")",
"return",
"r",
".",
"astype",
"(",
"self",
".",
"astype",
")",
"if",
"self",
".",
"astype",
"else",
"r"
]
| Takes a pd.DataFrame and returns the newly defined column, i.e.
a pd.Series that has the same index as `df`. | [
"Takes",
"a",
"pd",
".",
"DataFrame",
"and",
"returns",
"the",
"newly",
"defined",
"column",
"i",
".",
"e",
".",
"a",
"pd",
".",
"Series",
"that",
"has",
"the",
"same",
"index",
"as",
"df",
"."
]
| ddd62081cb9317beb5d21f86c8b4bb196ca3d222 | https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/data.py#L44-L56 | train |
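A hypothetical sketch of two of the definition kinds the method dispatches on: an existing column name and a callable (a non-string scalar would become a constant Series). The Column constructor signature is an assumption here:

import pandas as pd
from drain.data import Column

df = pd.DataFrame({'a': [1, 2, 3]})
col = Column('a', astype=float)         # hypothetical signature
print(col.apply(df))                    # df['a'] cast to float
doubled = Column(lambda d: d['a'] * 2)  # hypothetical signature
print(doubled.apply(df))                # the callable applied to df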
untwisted/untwisted | untwisted/iputils.py | ip_to_long | def ip_to_long (ip):
"""
Convert ip address to a network byte order 32-bit integer.
"""
quad = ip.split('.')
if len(quad) == 1:
quad = quad + [0, 0, 0]
elif len(quad) < 4:
host = quad[-1:]
quad = quad[:-1] + [0,] * (4 - len(quad)) + host
lip = 0
for q in quad:
lip = (lip << 8) | int(q)
return lip | python | def ip_to_long (ip):
"""
Convert ip address to a network byte order 32-bit integer.
"""
quad = ip.split('.')
if len(quad) == 1:
quad = quad + [0, 0, 0]
elif len(quad) < 4:
host = quad[-1:]
quad = quad[:-1] + [0,] * (4 - len(quad)) + host
lip = 0
for q in quad:
lip = (lip << 8) | int(q)
return lip | [
"def",
"ip_to_long",
"(",
"ip",
")",
":",
"quad",
"=",
"ip",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"quad",
")",
"==",
"1",
":",
"quad",
"=",
"quad",
"+",
"[",
"0",
",",
"0",
",",
"0",
"]",
"elif",
"len",
"(",
"quad",
")",
"<",
"4",
":",
"host",
"=",
"quad",
"[",
"-",
"1",
":",
"]",
"quad",
"=",
"quad",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"0",
",",
"]",
"*",
"(",
"4",
"-",
"len",
"(",
"quad",
")",
")",
"+",
"host",
"lip",
"=",
"0",
"for",
"q",
"in",
"quad",
":",
"lip",
"=",
"(",
"lip",
"<<",
"8",
")",
"|",
"int",
"(",
"q",
")",
"return",
"lip"
]
| Convert ip address to a network byte order 32-bit integer. | [
"Convert",
"ip",
"address",
"to",
"a",
"network",
"byte",
"order",
"32",
"-",
"bit",
"integer",
"."
]
| 8a8d9c8a8d0f3452d5de67cd760297bb5759f637 | https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/iputils.py#L1-L15 | train |
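Two worked values; note the short-form branch pads the interior octets with zeros, keeping the last quad as the host part:

from untwisted.iputils import ip_to_long

print(ip_to_long('192.168.0.1'))  # 3232235521 == 0xC0A80001
print(ip_to_long('10.1'))         # 167772161 == 0x0A000001, i.e. 10.0.0.1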
lsst-sqre/documenteer | documenteer/sphinxext/lsstdocushare.py | lsst_doc_shortlink_role | def lsst_doc_shortlink_role(name, rawtext, text, lineno, inliner,
options=None, content=None):
"""Link to LSST documents given their handle using LSST's ls.st link
shortener.
Example::
:ldm:`151`
"""
options = options or {}
content = content or []
node = nodes.reference(
text='{0}-{1}'.format(name.upper(), text),
refuri='https://ls.st/{0}-{1}'.format(name, text),
**options)
return [node], [] | python | def lsst_doc_shortlink_role(name, rawtext, text, lineno, inliner,
options=None, content=None):
"""Link to LSST documents given their handle using LSST's ls.st link
shortener.
Example::
:ldm:`151`
"""
options = options or {}
content = content or []
node = nodes.reference(
text='{0}-{1}'.format(name.upper(), text),
refuri='https://ls.st/{0}-{1}'.format(name, text),
**options)
return [node], [] | [
"def",
"lsst_doc_shortlink_role",
"(",
"name",
",",
"rawtext",
",",
"text",
",",
"lineno",
",",
"inliner",
",",
"options",
"=",
"None",
",",
"content",
"=",
"None",
")",
":",
"options",
"=",
"options",
"or",
"{",
"}",
"content",
"=",
"content",
"or",
"[",
"]",
"node",
"=",
"nodes",
".",
"reference",
"(",
"text",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"name",
".",
"upper",
"(",
")",
",",
"text",
")",
",",
"refuri",
"=",
"'https://ls.st/{0}-{1}'",
".",
"format",
"(",
"name",
",",
"text",
")",
",",
"*",
"*",
"options",
")",
"return",
"[",
"node",
"]",
",",
"[",
"]"
]
| Link to LSST documents given their handle using LSST's ls.st link
shortener.
Example::
:ldm:`151` | [
"Link",
"to",
"LSST",
"documents",
"given",
"their",
"handle",
"using",
"LSST",
"s",
"ls",
".",
"st",
"link",
"shortener",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lsstdocushare.py#L6-L21 | train |
lsst-sqre/documenteer | documenteer/sphinxext/lsstdocushare.py | lsst_doc_shortlink_titlecase_display_role | def lsst_doc_shortlink_titlecase_display_role(
name, rawtext, text, lineno, inliner, options=None, content=None):
"""Link to LSST documents given their handle using LSST's ls.st link
shortener with the document handle displayed in title case.
This role is useful for Document, Report, Minutes, and Collection
DocuShare handles.
Example::
:document:`1`
"""
options = options or {}
content = content or []
node = nodes.reference(
text='{0}-{1}'.format(name.title(), text),
refuri='https://ls.st/{0}-{1}'.format(name, text),
**options)
return [node], [] | python | def lsst_doc_shortlink_titlecase_display_role(
name, rawtext, text, lineno, inliner, options=None, content=None):
"""Link to LSST documents given their handle using LSST's ls.st link
shortener with the document handle displayed in title case.
This role is useful for Document, Report, Minutes, and Collection
DocuShare handles.
Example::
:document:`1`
"""
options = options or {}
content = content or []
node = nodes.reference(
text='{0}-{1}'.format(name.title(), text),
refuri='https://ls.st/{0}-{1}'.format(name, text),
**options)
return [node], [] | [
"def",
"lsst_doc_shortlink_titlecase_display_role",
"(",
"name",
",",
"rawtext",
",",
"text",
",",
"lineno",
",",
"inliner",
",",
"options",
"=",
"None",
",",
"content",
"=",
"None",
")",
":",
"options",
"=",
"options",
"or",
"{",
"}",
"content",
"=",
"content",
"or",
"[",
"]",
"node",
"=",
"nodes",
".",
"reference",
"(",
"text",
"=",
"'{0}-{1}'",
".",
"format",
"(",
"name",
".",
"title",
"(",
")",
",",
"text",
")",
",",
"refuri",
"=",
"'https://ls.st/{0}-{1}'",
".",
"format",
"(",
"name",
",",
"text",
")",
",",
"*",
"*",
"options",
")",
"return",
"[",
"node",
"]",
",",
"[",
"]"
]
| Link to LSST documents given their handle using LSST's ls.st link
shortener with the document handle displayed in title case.
This role is useful for Document, Report, Minutes, and Collection
DocuShare handles.
Example::
:document:`1` | [
"Link",
"to",
"LSST",
"documents",
"given",
"their",
"handle",
"using",
"LSST",
"s",
"ls",
".",
"st",
"link",
"shortener",
"with",
"the",
"document",
"handle",
"displayed",
"in",
"title",
"case",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lsstdocushare.py#L24-L42 | train |
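Both roles are ordinary docutils role functions, so wiring them up follows the standard registration pattern. How the real extension registers them (presumably a Sphinx setup() hook) is not shown in these records, so this is only a sketch:

from docutils.parsers.rst import roles
from documenteer.sphinxext.lsstdocushare import (
    lsst_doc_shortlink_role, lsst_doc_shortlink_titlecase_display_role)

roles.register_local_role('ldm', lsst_doc_shortlink_role)
roles.register_local_role('document', lsst_doc_shortlink_titlecase_display_role)
# After registration, :ldm:`151` renders as a link titled LDM-151
# pointing at https://ls.st/ldm-151.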
lsst-sqre/documenteer | documenteer/bin/refreshlsstbib.py | run | def run():
"""Command line entrypoint for the ``refresh-lsst-bib`` program.
"""
args = parse_args()
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
if not args.verbose:
# Manage third-party loggers
req_logger = logging.getLogger('requests')
req_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.info('refresh-lsst-bib version {}'.format(__version__))
error_count = process_bib_files(args.dir)
sys.exit(error_count) | python | def run():
"""Command line entrypoint for the ``refresh-lsst-bib`` program.
"""
args = parse_args()
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
if not args.verbose:
# Manage third-party loggers
req_logger = logging.getLogger('requests')
req_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.info('refresh-lsst-bib version {}'.format(__version__))
error_count = process_bib_files(args.dir)
sys.exit(error_count) | [
"def",
"run",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"args",
".",
"verbose",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"else",
":",
"log_level",
"=",
"logging",
".",
"INFO",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"log_level",
",",
"format",
"=",
"'%(asctime)s %(levelname)s %(name)s: %(message)s'",
")",
"if",
"not",
"args",
".",
"verbose",
":",
"# Manage third-party loggers",
"req_logger",
"=",
"logging",
".",
"getLogger",
"(",
"'requests'",
")",
"req_logger",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'refresh-lsst-bib version {}'",
".",
"format",
"(",
"__version__",
")",
")",
"error_count",
"=",
"process_bib_files",
"(",
"args",
".",
"dir",
")",
"sys",
".",
"exit",
"(",
"error_count",
")"
]
| Command line entrypoint for the ``refresh-lsst-bib`` program. | [
"Command",
"line",
"entrypoint",
"for",
"the",
"refresh",
"-",
"lsst",
"-",
"bib",
"program",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/bin/refreshlsstbib.py#L20-L43 | train |
lsst-sqre/documenteer | documenteer/stackdocs/build.py | run_build_cli | def run_build_cli():
"""Command line entrypoint for the ``build-stack-docs`` program.
"""
args = parse_args()
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
logger = logging.getLogger(__name__)
logger.info('build-stack-docs version {0}'.format(__version__))
return_code = build_stack_docs(args.root_project_dir)
if return_code == 0:
logger.info('build-stack-docs succeeded')
sys.exit(0)
else:
logger.error('Sphinx errored: code {0:d}'.format(return_code))
sys.exit(1) | python | def run_build_cli():
"""Command line entrypoint for the ``build-stack-docs`` program.
"""
args = parse_args()
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
logger = logging.getLogger(__name__)
logger.info('build-stack-docs version {0}'.format(__version__))
return_code = build_stack_docs(args.root_project_dir)
if return_code == 0:
logger.info('build-stack-docs succeeded')
sys.exit(0)
else:
logger.error('Sphinx errored: code {0:d}'.format(return_code))
sys.exit(1) | [
"def",
"run_build_cli",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"args",
".",
"verbose",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"else",
":",
"log_level",
"=",
"logging",
".",
"INFO",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"log_level",
",",
"format",
"=",
"'%(asctime)s %(levelname)s %(name)s: %(message)s'",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"'build-stack-docs version {0}'",
".",
"format",
"(",
"__version__",
")",
")",
"return_code",
"=",
"build_stack_docs",
"(",
"args",
".",
"root_project_dir",
")",
"if",
"return_code",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'build-stack-docs succeeded'",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Sphinx errored: code {0:d}'",
".",
"format",
"(",
"return_code",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
]
| Command line entrypoint for the ``build-stack-docs`` program. | [
"Command",
"line",
"entrypoint",
"for",
"the",
"build",
"-",
"stack",
"-",
"docs",
"program",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L25-L48 | train |
lsst-sqre/documenteer | documenteer/stackdocs/build.py | parse_args | def parse_args():
"""Create an argument parser for the ``build-stack-docs`` program.
Returns
-------
args : `argparse.Namespace`
Parsed argument object.
"""
parser = argparse.ArgumentParser(
description="Build a Sphinx documentation site for an EUPS stack, "
"such as pipelines.lsst.io.",
epilog="Version {0}".format(__version__)
)
parser.add_argument(
'-d', '--dir',
dest='root_project_dir',
help="Root Sphinx project directory")
parser.add_argument(
'-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable Verbose output (debug level logging)'
)
return parser.parse_args() | python | def parse_args():
"""Create an argument parser for the ``build-stack-docs`` program.
Returns
-------
args : `argparse.Namespace`
Parsed argument object.
"""
parser = argparse.ArgumentParser(
description="Build a Sphinx documentation site for an EUPS stack, "
"such as pipelines.lsst.io.",
epilog="Version {0}".format(__version__)
)
parser.add_argument(
'-d', '--dir',
dest='root_project_dir',
help="Root Sphinx project directory")
parser.add_argument(
'-v', '--verbose',
dest='verbose',
action='store_true', default=False,
help='Enable Verbose output (debug level logging)'
)
return parser.parse_args() | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Build a Sphinx documentation site for an EUPS stack, \"",
"\"such as pipelines.lsst.io.\"",
",",
"epilog",
"=",
"\"Version {0}\"",
".",
"format",
"(",
"__version__",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--dir'",
",",
"dest",
"=",
"'root_project_dir'",
",",
"help",
"=",
"\"Root Sphinx project directory\"",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"dest",
"=",
"'verbose'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Enable Verbose output (debug level logging)'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
]
| Create an argument parser for the ``build-stack-docs`` program.
Returns
-------
args : `argparse.Namespace`
Parsed argument object. | [
"Create",
"an",
"argument",
"parser",
"for",
"the",
"build",
"-",
"stack",
"-",
"docs",
"program",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L133-L156 | train |
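Because parse_args reads sys.argv, it can be exercised directly by substituting the argument vector. A small self-contained sketch, assuming the function is importable from the module path given in this entry's URL:

import sys
from documenteer.stackdocs.build import parse_args

# Simulate: build-stack-docs --dir /path/to/pipelines_lsst_io -v
sys.argv = ['build-stack-docs', '--dir', '/path/to/pipelines_lsst_io', '-v']
args = parse_args()
assert args.root_project_dir == '/path/to/pipelines_lsst_io'
assert args.verbose is True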
lsst-sqre/documenteer | documenteer/stackdocs/build.py | discover_setup_packages | def discover_setup_packages():
"""Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process.
"""
logger = logging.getLogger(__name__)
# Not a PyPI dependency; assumed to be available in the build environment.
import eups
eups_client = eups.Eups()
products = eups_client.getSetupProducts()
packages = {}
for package in products:
name = package.name
info = {
'dir': package.dir,
'version': package.version
}
packages[name] = info
logger.debug('Found setup package: {name} {version} {dir}'.format(
name=name, **info))
return packages | python | def discover_setup_packages():
"""Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process.
"""
logger = logging.getLogger(__name__)
# Not a PyPI dependency; assumed to be available in the build environment.
import eups
eups_client = eups.Eups()
products = eups_client.getSetupProducts()
packages = {}
for package in products:
name = package.name
info = {
'dir': package.dir,
'version': package.version
}
packages[name] = info
logger.debug('Found setup package: {name} {version} {dir}'.format(
name=name, **info))
return packages | [
"def",
"discover_setup_packages",
"(",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"# Not a PyPI dependency; assumed to be available in the build environment.",
"import",
"eups",
"eups_client",
"=",
"eups",
".",
"Eups",
"(",
")",
"products",
"=",
"eups_client",
".",
"getSetupProducts",
"(",
")",
"packages",
"=",
"{",
"}",
"for",
"package",
"in",
"products",
":",
"name",
"=",
"package",
".",
"name",
"info",
"=",
"{",
"'dir'",
":",
"package",
".",
"dir",
",",
"'version'",
":",
"package",
".",
"version",
"}",
"packages",
"[",
"name",
"]",
"=",
"info",
"logger",
".",
"debug",
"(",
"'Found setup package: {name} {version} {dir}'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"*",
"*",
"info",
")",
")",
"return",
"packages"
]
| Summarize packages currently set up by EUPS, listing their
set up directories and EUPS version names.
Returns
-------
packages : `dict`
Dictionary with keys that are EUPS package names. Values are
dictionaries with fields:
- ``'dir'``: absolute directory path of the set up package.
- ``'version'``: EUPS version string for package.
Notes
-----
This function imports the ``eups`` Python package, which is assumed to
be available in the build environment. This function is designed to
encapsulate all direct EUPS interactions needed by the stack documentation
build process. | [
"Summarize",
"packages",
"currently",
"set",
"up",
"by",
"EUPS",
"listing",
"their",
"set",
"up",
"directories",
"and",
"EUPS",
"version",
"names",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L159-L198 | train |
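Calling discover_setup_packages only makes sense inside an EUPS-enabled shell (for example after `setup lsst_distrib` in a stack environment), since it imports the `eups` package. A sketch of consuming its return value, offered as an illustration rather than documenteer's own build flow:

from documenteer.stackdocs.build import discover_setup_packages

# Requires a working EUPS environment with products set up.
for name, info in discover_setup_packages().items():
    print('{name} {version} at {dir}'.format(name=name, **info))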
lsst-sqre/documenteer | documenteer/stackdocs/build.py | find_table_file | def find_table_file(root_project_dir):
"""Find the EUPS table file for a project.
Parameters
----------
root_project_dir : `str`
Path to the root directory of the main documentation project. This
is the directory containing the ``conf.py`` file and a ``ups``
directory.
Returns
-------
table_path : `str`
Path to the EUPS table file.
"""
ups_dir_path = os.path.join(root_project_dir, 'ups')
table_path = None
for name in os.listdir(ups_dir_path):
if name.endswith('.table'):
table_path = os.path.join(ups_dir_path, name)
break
if not os.path.exists(table_path):
raise RuntimeError(
'Could not find the EUPS table file at {}'.format(table_path))
return table_path | python | def find_table_file(root_project_dir):
"""Find the EUPS table file for a project.
Parameters
----------
root_project_dir : `str`
Path to the root directory of the main documentation project. This
is the directory containing the ``conf.py`` file and a ``ups``
directory.
Returns
-------
table_path : `str`
Path to the EUPS table file.
"""
ups_dir_path = os.path.join(root_project_dir, 'ups')
table_path = None
for name in os.listdir(ups_dir_path):
if name.endswith('.table'):
table_path = os.path.join(ups_dir_path, name)
break
if not os.path.exists(table_path):
raise RuntimeError(
'Could not find the EUPS table file at {}'.format(table_path))
return table_path | [
"def",
"find_table_file",
"(",
"root_project_dir",
")",
":",
"ups_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_project_dir",
",",
"'ups'",
")",
"table_path",
"=",
"None",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"ups_dir_path",
")",
":",
"if",
"name",
".",
"endswith",
"(",
"'.table'",
")",
":",
"table_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ups_dir_path",
",",
"name",
")",
"break",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"table_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'Could not find the EUPS table file at {}'",
".",
"format",
"(",
"table_path",
")",
")",
"return",
"table_path"
]
| Find the EUPS table file for a project.
Parameters
----------
root_project_dir : `str`
Path to the root directory of the main documentation project. This
is the directory containing the ``conf.py`` file and a ``ups``
directory.
Returns
-------
table_path : `str`
Path to the EUPS table file. | [
"Find",
"the",
"EUPS",
"table",
"file",
"for",
"a",
"project",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L201-L225 | train |
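One edge case worth noting in find_table_file: if the `ups` directory contains no `*.table` file, `table_path` remains None, so on Python 3 the os.path.exists(None) call raises TypeError before the intended RuntimeError. A defensive variant of the same scan, offered as a sketch rather than the upstream implementation:

import os

def find_table_file_safe(root_project_dir):
    # Same directory scan as above, but the no-match case is guarded
    # explicitly instead of falling through to os.path.exists(None).
    ups_dir_path = os.path.join(root_project_dir, 'ups')
    for name in os.listdir(ups_dir_path):
        if name.endswith('.table'):
            return os.path.join(ups_dir_path, name)
    raise RuntimeError(
        'Could not find an EUPS table file in {}'.format(ups_dir_path))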
lsst-sqre/documenteer | documenteer/stackdocs/build.py | list_packages_in_eups_table | def list_packages_in_eups_table(table_text):
"""List the names of packages that are required by an EUPS table file.
Parameters
----------
table_text : `str`
The text content of an EUPS table file.
Returns
-------
names : `list` [`str`]
List of package names that are required by the EUPS table file.
"""
logger = logging.getLogger(__name__)
# This pattern matches required product names in EUPS table files.
pattern = re.compile(r'setupRequired\((?P<name>\w+)\)')
listed_packages = [m.group('name') for m in pattern.finditer(table_text)]
logger.debug('Packages listed in the table file: %r', listed_packages)
return listed_packages | python | def list_packages_in_eups_table(table_text):
"""List the names of packages that are required by an EUPS table file.
Parameters
----------
table_text : `str`
The text content of an EUPS table file.
Returns
-------
names : `list` [`str`]
List of package names that are required by the EUPS table file.
"""
logger = logging.getLogger(__name__)
# This pattern matches required product names in EUPS table files.
pattern = re.compile(r'setupRequired\((?P<name>\w+)\)')
listed_packages = [m.group('name') for m in pattern.finditer(table_text)]
logger.debug('Packages listed in the table file: %r', listed_packages)
return listed_packages | [
"def",
"list_packages_in_eups_table",
"(",
"table_text",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"# This pattern matches required product names in EUPS table files.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'setupRequired\\((?P<name>\\w+)\\)'",
")",
"listed_packages",
"=",
"[",
"m",
".",
"group",
"(",
"'name'",
")",
"for",
"m",
"in",
"pattern",
".",
"finditer",
"(",
"table_text",
")",
"]",
"logger",
".",
"debug",
"(",
"'Packages listed in the table file: %r'",
",",
"listed_packages",
")",
"return",
"listed_packages"
]
| List the names of packages that are required by an EUPS table file.
Parameters
----------
table_text : `str`
The text content of an EUPS table file.
Returns
-------
names : `list` [`str`]
List of package names that are required by the EUPS table file. | [
"List",
"the",
"names",
"of",
"packages",
"that",
"are",
"required",
"by",
"an",
"EUPS",
"table",
"file",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L228-L246 | train |
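Because list_packages_in_eups_table operates on the table text alone, the setupRequired regex can be exercised without EUPS installed at all. A minimal sketch with a made-up table body (the package names are illustrative, not taken from a real table file):

from documenteer.stackdocs.build import list_packages_in_eups_table

table_text = '''
setupRequired(afw)
setupRequired(daf_butler)
envPrepend(PYTHONPATH, ${PRODUCT_DIR}/python)
'''
print(list_packages_in_eups_table(table_text))
# -> ['afw', 'daf_butler']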
lsst-sqre/documenteer | documenteer/stackdocs/build.py | find_package_docs | def find_package_docs(package_dir, skippedNames=None):
"""Find documentation directories in a package using ``manifest.yaml``.
Parameters
----------
package_dir : `str`
Directory of an EUPS package.
skippedNames : `list` of `str`, optional
List of package or module names to skip when creating links.
Returns
-------
doc_dirs : namedtuple
Attributes of the namedtuple are:
- ``package_dirs`` (`dict`). Keys are package names (for example,
``'afw'``). Values are absolute directory paths to the package's
documentation directory inside the package's ``doc`` directory. If
there is no package-level documentation the dictionary will be empty.
- ``module_dirs`` (`dict`). Keys are module names (for example,
``'lsst.afw.table'``). Values are absolute directory paths to the
module's directory inside the package's ``doc`` directory. If a
package has no modules the returned dictionary will be empty.
- ``static_dirs`` (`dict`). Keys are directory names relative to
the ``_static`` directory. Values are absolute directory paths to
the static documentation directory in the package. If there
isn't a declared ``_static`` directory, this dictionary is empty.
Raises
------
NoPackageDocs
Raised when the ``manifest.yaml`` file cannot be found in a package.
Notes
-----
Stack packages have documentation in subdirectories of their `doc`
directory. The ``manifest.yaml`` file declares what these directories are
so that they can be symlinked into the root project.
There are three types of documentation directories:
1. Package doc directories contain documentation for the EUPS package
aspect. This is optional.
2. Module doc directories contain documentation for a Python package
aspect. These are optional.
3. Static doc directories are root directories inside the package's
``doc/_static/`` directory. These are optional.
These are declared in a package's ``doc/manifest.yaml`` file. For example:
.. code-block:: yaml
package: "afw"
modules:
- "lsst.afw.image"
- "lsst.afw.geom"
statics:
- "_static/afw"
This YAML declares *module* documentation directories:
- ``afw/doc/lsst.afw.image/``
- ``afw/doc/lsst.afw.geom/``
It also declares a *package* documentation directory:
- ``afw/doc/afw``
And a static documentation directory:
- ``afw/doc/_static/afw``
"""
logger = logging.getLogger(__name__)
if skippedNames is None:
skippedNames = []
doc_dir = os.path.join(package_dir, 'doc')
modules_yaml_path = os.path.join(doc_dir, 'manifest.yaml')
if not os.path.exists(modules_yaml_path):
raise NoPackageDocs(
'Manifest YAML not found: {0}'.format(modules_yaml_path))
with open(modules_yaml_path) as f:
manifest_data = yaml.safe_load(f)
module_dirs = {}
package_dirs = {}
static_dirs = {}
if 'modules' in manifest_data:
for module_name in manifest_data['modules']:
if module_name in skippedNames:
logger.debug('Skipping module {0}'.format(module_name))
continue
module_dir = os.path.join(doc_dir, module_name)
# validate that the module's documentation directory does exist
if not os.path.isdir(module_dir):
message = 'module doc dir not found: {0}'.format(module_dir)
logger.warning(message)
continue
module_dirs[module_name] = module_dir
logger.debug('Found module doc dir {0}'.format(module_dir))
if 'package' in manifest_data:
package_name = manifest_data['package']
full_package_dir = os.path.join(doc_dir, package_name)
# validate the directory exists
if os.path.isdir(full_package_dir) \
and package_name not in skippedNames:
package_dirs[package_name] = full_package_dir
logger.debug('Found package doc dir {0}'.format(full_package_dir))
else:
logger.warning('package doc dir excluded or not found: {0}'.format(
full_package_dir))
if 'statics' in manifest_data:
for static_dirname in manifest_data['statics']:
full_static_dir = os.path.join(doc_dir, static_dirname)
# validate the directory exists
if not os.path.isdir(full_static_dir):
message = '_static doc dir not found: {0}'.format(
full_static_dir)
logger.warning(message)
continue
# Make a relative path to `_static` that's used as the
# link source in the root docproject's _static/ directory
relative_static_dir = os.path.relpath(
full_static_dir,
os.path.join(doc_dir, '_static'))
static_dirs[relative_static_dir] = full_static_dir
logger.debug('Found _static doc dir: {0}'.format(full_static_dir))
Dirs = namedtuple('Dirs', ['module_dirs', 'package_dirs', 'static_dirs'])
return Dirs(module_dirs=module_dirs,
package_dirs=package_dirs,
static_dirs=static_dirs) | python | def find_package_docs(package_dir, skippedNames=None):
"""Find documentation directories in a package using ``manifest.yaml``.
Parameters
----------
package_dir : `str`
Directory of an EUPS package.
skippedNames : `list` of `str`, optional
List of package or module names to skip when creating links.
Returns
-------
doc_dirs : namedtuple
Attributes of the namedtuple are:
- ``package_dirs`` (`dict`). Keys are package names (for example,
``'afw'``). Values are absolute directory paths to the package's
documentation directory inside the package's ``doc`` directory. If
there is no package-level documentation the dictionary will be empty.
- ``module_dirs`` (`dict`). Keys are module names (for example,
``'lsst.afw.table'``). Values are absolute directory paths to the
module's directory inside the package's ``doc`` directory. If a
package has no modules the returned dictionary will be empty.
- ``static_dirs`` (`dict`). Keys are directory names relative to
the ``_static`` directory. Values are absolute directory paths to
the static documentation directory in the package. If there
isn't a declared ``_static`` directory, this dictionary is empty.
Raises
------
NoPackageDocs
Raised when the ``manifest.yaml`` file cannot be found in a package.
Notes
-----
Stack packages have documentation in subdirectories of their `doc`
directory. The ``manifest.yaml`` file declares what these directories are
so that they can be symlinked into the root project.
There are three types of documentation directories:
1. Package doc directories contain documentation for the EUPS package
aspect. This is optional.
2. Module doc directories contain documentation for a Python package
aspect. These are optional.
3. Static doc directories are root directories inside the package's
``doc/_static/`` directory. These are optional.
These are declared in a package's ``doc/manifest.yaml`` file. For example:
.. code-block:: yaml
package: "afw"
modules:
- "lsst.afw.image"
- "lsst.afw.geom"
statics:
- "_static/afw"
This YAML declares *module* documentation directories:
- ``afw/doc/lsst.afw.image/``
- ``afw/doc/lsst.afw.geom/``
It also declares a *package* documentation directory:
- ``afw/doc/afw``
And a static documentation directory:
- ``afw/doc/_static/afw``
"""
logger = logging.getLogger(__name__)
if skippedNames is None:
skippedNames = []
doc_dir = os.path.join(package_dir, 'doc')
modules_yaml_path = os.path.join(doc_dir, 'manifest.yaml')
if not os.path.exists(modules_yaml_path):
raise NoPackageDocs(
'Manifest YAML not found: {0}'.format(modules_yaml_path))
with open(modules_yaml_path) as f:
manifest_data = yaml.safe_load(f)
module_dirs = {}
package_dirs = {}
static_dirs = {}
if 'modules' in manifest_data:
for module_name in manifest_data['modules']:
if module_name in skippedNames:
logger.debug('Skipping module {0}'.format(module_name))
continue
module_dir = os.path.join(doc_dir, module_name)
# validate that the module's documentation directory does exist
if not os.path.isdir(module_dir):
message = 'module doc dir not found: {0}'.format(module_dir)
logger.warning(message)
continue
module_dirs[module_name] = module_dir
logger.debug('Found module doc dir {0}'.format(module_dir))
if 'package' in manifest_data:
package_name = manifest_data['package']
full_package_dir = os.path.join(doc_dir, package_name)
# validate the directory exists
if os.path.isdir(full_package_dir) \
and package_name not in skippedNames:
package_dirs[package_name] = full_package_dir
logger.debug('Found package doc dir {0}'.format(full_package_dir))
else:
logger.warning('package doc dir excluded or not found: {0}'.format(
full_package_dir))
if 'statics' in manifest_data:
for static_dirname in manifest_data['statics']:
full_static_dir = os.path.join(doc_dir, static_dirname)
# validate the directory exists
if not os.path.isdir(full_static_dir):
message = '_static doc dir not found: {0}'.format(
full_static_dir)
logger.warning(message)
continue
# Make a relative path to `_static` that's used as the
# link source in the root docproject's _static/ directory
relative_static_dir = os.path.relpath(
full_static_dir,
os.path.join(doc_dir, '_static'))
static_dirs[relative_static_dir] = full_static_dir
logger.debug('Found _static doc dir: {0}'.format(full_static_dir))
Dirs = namedtuple('Dirs', ['module_dirs', 'package_dirs', 'static_dirs'])
return Dirs(module_dirs=module_dirs,
package_dirs=package_dirs,
static_dirs=static_dirs) | [
"def",
"find_package_docs",
"(",
"package_dir",
",",
"skippedNames",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"if",
"skippedNames",
"is",
"None",
":",
"skippedNames",
"=",
"[",
"]",
"doc_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package_dir",
",",
"'doc'",
")",
"modules_yaml_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"doc_dir",
",",
"'manifest.yaml'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"modules_yaml_path",
")",
":",
"raise",
"NoPackageDocs",
"(",
"'Manifest YAML not found: {0}'",
".",
"format",
"(",
"modules_yaml_path",
")",
")",
"with",
"open",
"(",
"modules_yaml_path",
")",
"as",
"f",
":",
"manifest_data",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"module_dirs",
"=",
"{",
"}",
"package_dirs",
"=",
"{",
"}",
"static_dirs",
"=",
"{",
"}",
"if",
"'modules'",
"in",
"manifest_data",
":",
"for",
"module_name",
"in",
"manifest_data",
"[",
"'modules'",
"]",
":",
"if",
"module_name",
"in",
"skippedNames",
":",
"logger",
".",
"debug",
"(",
"'Skipping module {0}'",
".",
"format",
"(",
"module_name",
")",
")",
"continue",
"module_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"doc_dir",
",",
"module_name",
")",
"# validate that the module's documentation directory does exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"module_dir",
")",
":",
"message",
"=",
"'module doc dir not found: {0}'",
".",
"format",
"(",
"module_dir",
")",
"logger",
".",
"warning",
"(",
"message",
")",
"continue",
"module_dirs",
"[",
"module_name",
"]",
"=",
"module_dir",
"logger",
".",
"debug",
"(",
"'Found module doc dir {0}'",
".",
"format",
"(",
"module_dir",
")",
")",
"if",
"'package'",
"in",
"manifest_data",
":",
"package_name",
"=",
"manifest_data",
"[",
"'package'",
"]",
"full_package_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"doc_dir",
",",
"package_name",
")",
"# validate the directory exists",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"full_package_dir",
")",
"and",
"package_name",
"not",
"in",
"skippedNames",
":",
"package_dirs",
"[",
"package_name",
"]",
"=",
"full_package_dir",
"logger",
".",
"debug",
"(",
"'Found package doc dir {0}'",
".",
"format",
"(",
"full_package_dir",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'package doc dir excluded or not found: {0}'",
".",
"format",
"(",
"full_package_dir",
")",
")",
"if",
"'statics'",
"in",
"manifest_data",
":",
"for",
"static_dirname",
"in",
"manifest_data",
"[",
"'statics'",
"]",
":",
"full_static_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"doc_dir",
",",
"static_dirname",
")",
"# validate the directory exists",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"full_static_dir",
")",
":",
"message",
"=",
"'_static doc dir not found: {0}'",
".",
"format",
"(",
"full_static_dir",
")",
"logger",
".",
"warning",
"(",
"message",
")",
"continue",
"# Make a relative path to `_static` that's used as the",
"# link source in the root docproject's _static/ directory",
"relative_static_dir",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"full_static_dir",
",",
"os",
".",
"path",
".",
"join",
"(",
"doc_dir",
",",
"'_static'",
")",
")",
"static_dirs",
"[",
"relative_static_dir",
"]",
"=",
"full_static_dir",
"logger",
".",
"debug",
"(",
"'Found _static doc dir: {0}'",
".",
"format",
"(",
"full_static_dir",
")",
")",
"Dirs",
"=",
"namedtuple",
"(",
"'Dirs'",
",",
"[",
"'module_dirs'",
",",
"'package_dirs'",
",",
"'static_dirs'",
"]",
")",
"return",
"Dirs",
"(",
"module_dirs",
"=",
"module_dirs",
",",
"package_dirs",
"=",
"package_dirs",
",",
"static_dirs",
"=",
"static_dirs",
")"
]
| Find documentation directories in a package using ``manifest.yaml``.
Parameters
----------
package_dir : `str`
Directory of an EUPS package.
skippedNames : `list` of `str`, optional
List of package or module names to skip when creating links.
Returns
-------
doc_dirs : namedtuple
Attributes of the namedtuple are:
- ``package_dirs`` (`dict`). Keys are package names (for example,
``'afw'``). Values are absolute directory paths to the package's
documentation directory inside the package's ``doc`` directory. If
there is no package-level documentation the dictionary will be empty.
- ``module_dirs`` (`dict`). Keys are module names (for example,
``'lsst.afw.table'``). Values are absolute directory paths to the
module's directory inside the package's ``doc`` directory. If a
package has no modules the returned dictionary will be empty.
- ``static_dirs`` (`dict`). Keys are directory names relative to
the ``_static`` directory. Values are absolute directory paths to
the static documentation directory in the package. If there
isn't a declared ``_static`` directory, this dictionary is empty.
Raises
------
NoPackageDocs
Raised when the ``manifest.yaml`` file cannot be found in a package.
Notes
-----
Stack packages have documentation in subdirectories of their `doc`
directory. The ``manifest.yaml`` file declares what these directories are
so that they can be symlinked into the root project.
There are three types of documentation directories:
1. Package doc directories contain documentation for the EUPS package
aspect. This is optional.
2. Module doc directories contain documentation for a Python package
aspect. These are optional.
3. Static doc directories are root directories inside the package's
``doc/_static/`` directory. These are optional.
These are declared in a package's ``doc/manifest.yaml`` file. For example:
.. code-block:: yaml
package: "afw"
modules:
- "lsst.afw.image"
- "lsst.afw.geom"
statics:
- "_static/afw"
This YAML declares *module* documentation directories:
- ``afw/doc/lsst.afw.image/``
- ``afw/doc/lsst.afw.geom/``
It also declares a *package* documentation directory:
- ``afw/doc/afw``
And a static documentation directory:
- ``afw/doc/_static/afw`` | [
"Find",
"documentation",
"directories",
"in",
"a",
"package",
"using",
"manifest",
".",
"yaml",
"."
]
| 75f02901a80042b28d074df1cc1dca32eb8e38c8 | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/build.py#L249-L395 | train |
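A sketch of driving find_package_docs over several package directories, skipping packages that ship no doc/manifest.yaml. It assumes NoPackageDocs is importable from the same module, as the raise above implies; the directory paths and skipped module name are placeholders:

from documenteer.stackdocs.build import NoPackageDocs, find_package_docs

for package_dir in ['/stack/afw', '/stack/daf_butler']:
    try:
        dirs = find_package_docs(package_dir, skippedNames=['lsst.afw.geom'])
    except NoPackageDocs:
        continue  # no doc/manifest.yaml: nothing to link for this package
    for module_name, module_dir in dirs.module_dirs.items():
        print('would link {0} -> {1}'.format(module_dir, module_name))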