Dataset schema: repo (string, 7–55 chars) | path (string, 4–127) | func_name (string, 1–88) | original_string (string, 75–19.8k) | language (1 class: "python") | code (string, 75–19.8k; duplicates original_string) | code_tokens (list) | docstring (string, 3–17.3k) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87–242) | partition (1 class: "train")

Each record below is shown as a repo | path | func_name | language header, followed by the function source (docstring inline) and a sha/partition/url footer.

Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | DBBenchmarkRun.get_experimental_ddg_values | python

def get_experimental_ddg_values(self, record, dataframe_record):
    '''Adds the mean experimental value associated with each analysis set to the dataframe row.'''
    new_idxs = []
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        new_idxs.append(exp_ddg_fieldname)
        dataframe_record[exp_ddg_fieldname] = None
        if ddg_details:
            dataframe_record[exp_ddg_fieldname] = ddg_details['MeanDDG']

    # Update the CSV headers
    try:
        idx = self.csv_headers.index('Experimental')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:  # no 'Experimental' placeholder column to expand
        pass
sha: 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | partition: train
url: https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L2314-L2329
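The try/except at the end is a reusable idiom: a placeholder column name in `csv_headers` is spliced out and replaced by one column per analysis set. A minimal, self-contained sketch of that idiom (the names here are illustrative, not from klab):

```python
def splice_headers(headers, placeholder, new_columns):
    """Replace `placeholder` in `headers` with `new_columns`.

    Leaves `headers` untouched when the placeholder is absent,
    mirroring the except-ValueError-pass behaviour above.
    """
    try:
        idx = headers.index(placeholder)
    except ValueError:  # placeholder already expanded or never present
        return headers
    return headers[:idx] + new_columns + headers[idx + 1:]

# One 'Experimental' placeholder becomes one column per analysis set.
headers = ['CaseID', 'Experimental', 'Predicted']
print(splice_headers(headers, 'Experimental',
                     ['Experimental_SetA', 'Experimental_SetB']))
# ['CaseID', 'Experimental_SetA', 'Experimental_SetB', 'Predicted']
```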

Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | DBBenchmarkRun.compute_stability_classification | python

def compute_stability_classification(self, predicted_data, record, dataframe_record):
    '''Calculate the stability classification for the analysis cases. Must be called after get_experimental_ddg_values.'''
    new_idxs = []
    stability_classication_x_cutoff, stability_classication_y_cutoff = self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        stability_classification_fieldname = BenchmarkRun.get_analysis_set_fieldname('StabilityClassification', analysis_set)
        new_idxs.append(stability_classification_fieldname)
        dataframe_record[stability_classification_fieldname] = None
        if ddg_details:
            stability_classification = None
            if dataframe_record[exp_ddg_fieldname] is not None:
                stability_classification = fraction_correct(
                    [dataframe_record[exp_ddg_fieldname]],
                    [predicted_data[self.ddg_analysis_type]],
                    x_cutoff=stability_classication_x_cutoff,
                    y_cutoff=stability_classication_y_cutoff)
                stability_classification = int(stability_classification)
                assert(stability_classification == 0 or stability_classification == 1)
                dataframe_record[stability_classification_fieldname] = stability_classification

    # Update the CSV headers
    try:
        idx = self.csv_headers.index('StabilityClassification')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:  # no 'StabilityClassification' placeholder column to expand
        pass
sha: 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | partition: train
url: https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L2332-L2356
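`fraction_correct` comes from klab's statistics helpers; called with single-element lists it effectively asks whether one experimental/predicted pair agrees on "stabilizing vs. destabilizing". A sketch of that per-pair check under the usual sign-agreement interpretation — an assumption, since klab's actual `fraction_correct` may treat boundary cases differently:

```python
def pair_is_correct(experimental, predicted, x_cutoff=1.0, y_cutoff=1.0):
    """1 if both values fall in the same class, else 0.

    A value above its cutoff counts as destabilizing, below the negated
    cutoff as stabilizing, and in between as neutral.
    """
    def classify(value, cutoff):
        if value >= cutoff:
            return 1       # destabilizing
        if value <= -cutoff:
            return -1      # stabilizing
        return 0           # neutral
    return int(classify(experimental, x_cutoff) == classify(predicted, y_cutoff))

print(pair_is_correct(1.8, 2.3))   # 1: both destabilizing
print(pair_is_correct(-1.5, 0.4))  # 0: stabilizing vs. neutral
```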

Kortemme-Lab/klab | klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py | DBBenchmarkRun.compute_absolute_error | python

def compute_absolute_error(self, predicted_data, record, dataframe_record):
    '''Calculate the absolute error for the analysis cases. Must be called after get_experimental_ddg_values.'''
    new_idxs = []
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        absolute_error_fieldname = BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set)
        new_idxs.append(absolute_error_fieldname)
        dataframe_record[absolute_error_fieldname] = None
        if ddg_details and predicted_data[self.ddg_analysis_type] is not None:
            absolute_error = abs(dataframe_record[exp_ddg_fieldname] - predicted_data[self.ddg_analysis_type])
            dataframe_record[absolute_error_fieldname] = absolute_error

    # Update the CSV headers
    try:
        idx = self.csv_headers.index('AbsoluteError')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:  # no 'AbsoluteError' placeholder column to expand
        pass
sha: 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | partition: train
url: https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L2359-L2378

brunato/lograptor | lograptor/application.py | AppRule.add_result | python

def add_result(self, values):
    """
    Add a tuple or increment the value of an existing one
    in the rule results dictionary.
    """
    idx = [values['host']]
    for gid in self.key_gids[1:]:
        idx.append(values[gid])
    idx = tuple(idx)
    try:
        self.results[idx] += 1
    except KeyError:
        self.results[idx] = 1
    self._last_idx = idx
"""
Add a tuple or increment the value of an existing one
in the rule results dictionary.
"""
idx = [values['host']]
for gid in self.key_gids[1:]:
idx.append(values[gid])
idx = tuple(idx)
try:
self.results[idx] += 1
except KeyError:
self.results[idx] = 1
self._last_idx = idx | [
"def",
"add_result",
"(",
"self",
",",
"values",
")",
":",
"idx",
"=",
"[",
"values",
"[",
"'host'",
"]",
"]",
"for",
"gid",
"in",
"self",
".",
"key_gids",
"[",
"1",
":",
"]",
":",
"idx",
".",
"append",
"(",
"values",
"[",
"gid",
"]",
")",
"idx",
"=",
"tuple",
"(",
"idx",
")",
"try",
":",
"self",
".",
"results",
"[",
"idx",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"self",
".",
"results",
"[",
"idx",
"]",
"=",
"1",
"self",
".",
"_last_idx",
"=",
"idx"
]
| Add a tuple or increment the value of an existing one
in the rule results dictionary. | [
"Add",
"a",
"tuple",
"or",
"increment",
"the",
"value",
"of",
"an",
"existing",
"one",
"in",
"the",
"rule",
"results",
"dictionary",
"."
]
sha: b1f09fe1b429ed15110610092704ef12d253f3c9 | partition: train
url: https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/application.py#L103-L117
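The results dictionary is a plain tuple-keyed counter; the try/except increment is equivalent to `collections.Counter`'s default-to-zero behaviour. A standalone sketch (the key-group names are illustrative):

```python
from collections import Counter

results = Counter()

def add_result(values, key_gids=('host', 'user')):
    """Count one event, keyed by the tuple of its key-group values."""
    idx = tuple(values[gid] for gid in key_gids)
    results[idx] += 1  # Counter supplies the missing-key zero
    return idx

add_result({'host': 'web01', 'user': 'alice'})
add_result({'host': 'web01', 'user': 'alice'})
print(results[('web01', 'alice')])  # 2
```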

brunato/lograptor | lograptor/application.py | AppRule.increase_last | python

def increase_last(self, k):
    """
    Increase the last result by k.
    """
    idx = self._last_idx
    if idx is not None:
        self.results[idx] += k
"""
Increase the last result by k.
"""
idx = self._last_idx
if idx is not None:
self.results[idx] += k | [
"def",
"increase_last",
"(",
"self",
",",
"k",
")",
":",
"idx",
"=",
"self",
".",
"_last_idx",
"if",
"idx",
"is",
"not",
"None",
":",
"self",
".",
"results",
"[",
"idx",
"]",
"+=",
"k"
]
| Increase the last result by k. | [
"Increase",
"the",
"last",
"result",
"by",
"k",
"."
]
sha: b1f09fe1b429ed15110610092704ef12d253f3c9 | partition: train
url: https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/application.py#L119-L125

brunato/lograptor | lograptor/application.py | AppLogParser.parse_rules | python

def parse_rules(self):
    """
    Add a set of rules to the app, dividing between filter and other rule set
    """
    # Load patterns: an app is removed when it has no defined patterns.
    try:
        rule_options = self.config.items('rules')
    except configparser.NoSectionError:
        raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)

    rules = []
    for option, value in rule_options:
        pattern = value.replace('\n', '')  # Strip newlines for multi-line declarations
        if not self.args.filters:
            # No filters case: substitute the filter fields with the corresponding patterns.
            pattern = string.Template(pattern).safe_substitute(self.fields)
            rules.append(AppRule(option, pattern, self.args))
            continue
        for filter_group in self.args.filters:
            _pattern, filter_keys = exact_sub(pattern, filter_group)
            _pattern = string.Template(_pattern).safe_substitute(self.fields)
            if len(filter_keys) >= len(filter_group):
                rules.append(AppRule(option, _pattern, self.args, filter_keys))
            elif self._thread:
                rules.append(AppRule(option, _pattern, self.args))
    return rules
"""
Add a set of rules to the app, dividing between filter and other rule set
"""
# Load patterns: an app is removed when has no defined patterns.
try:
rule_options = self.config.items('rules')
except configparser.NoSectionError:
raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)
rules = []
for option, value in rule_options:
pattern = value.replace('\n', '') # Strip newlines for multi-line declarations
if not self.args.filters:
# No filters case: substitute the filter fields with the corresponding patterns.
pattern = string.Template(pattern).safe_substitute(self.fields)
rules.append(AppRule(option, pattern, self.args))
continue
for filter_group in self.args.filters:
_pattern, filter_keys = exact_sub(pattern, filter_group)
_pattern = string.Template(_pattern).safe_substitute(self.fields)
if len(filter_keys) >= len(filter_group):
rules.append(AppRule(option, _pattern, self.args, filter_keys))
elif self._thread:
rules.append(AppRule(option, _pattern, self.args))
return rules | [
"def",
"parse_rules",
"(",
"self",
")",
":",
"# Load patterns: an app is removed when has no defined patterns.\r",
"try",
":",
"rule_options",
"=",
"self",
".",
"config",
".",
"items",
"(",
"'rules'",
")",
"except",
"configparser",
".",
"NoSectionError",
":",
"raise",
"LogRaptorConfigError",
"(",
"\"the app %r has no defined rules!\"",
"%",
"self",
".",
"name",
")",
"rules",
"=",
"[",
"]",
"for",
"option",
",",
"value",
"in",
"rule_options",
":",
"pattern",
"=",
"value",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"# Strip newlines for multi-line declarations\r",
"if",
"not",
"self",
".",
"args",
".",
"filters",
":",
"# No filters case: substitute the filter fields with the corresponding patterns.\r",
"pattern",
"=",
"string",
".",
"Template",
"(",
"pattern",
")",
".",
"safe_substitute",
"(",
"self",
".",
"fields",
")",
"rules",
".",
"append",
"(",
"AppRule",
"(",
"option",
",",
"pattern",
",",
"self",
".",
"args",
")",
")",
"continue",
"for",
"filter_group",
"in",
"self",
".",
"args",
".",
"filters",
":",
"_pattern",
",",
"filter_keys",
"=",
"exact_sub",
"(",
"pattern",
",",
"filter_group",
")",
"_pattern",
"=",
"string",
".",
"Template",
"(",
"_pattern",
")",
".",
"safe_substitute",
"(",
"self",
".",
"fields",
")",
"if",
"len",
"(",
"filter_keys",
")",
">=",
"len",
"(",
"filter_group",
")",
":",
"rules",
".",
"append",
"(",
"AppRule",
"(",
"option",
",",
"_pattern",
",",
"self",
".",
"args",
",",
"filter_keys",
")",
")",
"elif",
"self",
".",
"_thread",
":",
"rules",
".",
"append",
"(",
"AppRule",
"(",
"option",
",",
"_pattern",
",",
"self",
".",
"args",
")",
")",
"return",
"rules"
]
| Add a set of rules to the app, dividing between filter and other rule set | [
"Add",
"a",
"set",
"of",
"rules",
"to",
"the",
"app",
"dividing",
"between",
"filter",
"and",
"other",
"rule",
"set"
]
sha: b1f09fe1b429ed15110610092704ef12d253f3c9 | partition: train
url: https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/application.py#L385-L411
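`string.Template.safe_substitute` is what lets rule patterns reference named fields with `$field` placeholders while leaving unknown placeholders intact rather than raising. A minimal illustration (the field names are invented for the example):

```python
import string

# A rule pattern as it might appear in an app config file.
pattern = r'user=(?P<user>$user) from $client'
fields = {'user': r'[\w.-]+'}  # only one of the two placeholders is known

# safe_substitute fills $user and leaves $client untouched instead of raising.
print(string.Template(pattern).safe_substitute(fields))
# user=(?P<user>[\w.-]+) from $client
```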

brunato/lograptor | lograptor/application.py | AppLogParser.increase_last | python

def increase_last(self, k):
    """
    Increase the counter of the last matched rule by k.
    """
    rule = self._last_rule
    if rule is not None:
        rule.increase_last(k)
"""
Increase the counter of the last matched rule by k.
"""
rule = self._last_rule
if rule is not None:
rule.increase_last(k) | [
"def",
"increase_last",
"(",
"self",
",",
"k",
")",
":",
"rule",
"=",
"self",
".",
"_last_rule",
"if",
"rule",
"is",
"not",
"None",
":",
"rule",
".",
"increase_last",
"(",
"k",
")"
]
| Increase the counter of the last matched rule by k. | [
"Increase",
"the",
"counter",
"of",
"the",
"last",
"matched",
"rule",
"by",
"k",
"."
]
sha: b1f09fe1b429ed15110610092704ef12d253f3c9 | partition: train
url: https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/application.py#L413-L419

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_sections_by_delegate_and_term | python

def get_sections_by_delegate_and_term(person,
                                      term,
                                      future_terms=0,
                                      include_secondaries=True,
                                      transcriptable_course='yes',
                                      delete_flag=['active']):
    """
    Returns a list of uw_sws.models.SectionReference objects
    for the passed grade submission delegate and term.
    @param: future_terms: 0..400
    @param: transcriptable_course: 'yes', 'no', 'all'
    @param: delete_flag: ['active', 'suspended', 'withdrawn']
    """
    data = _get_sections_by_person_and_term(person,
                                            term,
                                            "GradeSubmissionDelegate",
                                            include_secondaries,
                                            future_terms,
                                            transcriptable_course,
                                            delete_flag)
    return _json_to_sectionref(data)
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L65-L85
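A hedged usage sketch for the delegate lookup. The PWS import, the `javerage` netid, and `get_current_term` are illustrative assumptions about the surrounding uw-restclients setup, not taken from this module:

```python
from uw_pws import PWS                       # assumed companion client
from uw_sws.term import get_current_term    # assumed helper in uw_sws.term
from uw_sws.section import get_sections_by_delegate_and_term

delegate = PWS().get_person_by_netid('javerage')
term = get_current_term()
sections = get_sections_by_delegate_and_term(
    delegate, term,
    future_terms=1,                # also look one term ahead (0..400 allowed)
    transcriptable_course='yes')
for ref in sections:
    print(ref.curriculum_abbr, ref.course_number, ref.section_id)
```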

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_sections_by_curriculum_and_term | python

def get_sections_by_curriculum_and_term(curriculum, term):
    """
    Returns a list of uw_sws.models.SectionReference objects
    for the passed curriculum and term.
    """
    url = "{}?{}".format(
        section_res_url_prefix,
        urlencode([("curriculum_abbreviation", curriculum.label,),
                   ("quarter", term.quarter.lower(),),
                   ("year", term.year,), ]))
    return _json_to_sectionref(get_resource(url))
"""
Returns a list of uw_sws.models.SectionReference objects
for the passed curriculum and term.
"""
url = "{}?{}".format(
section_res_url_prefix,
urlencode([("curriculum_abbreviation", curriculum.label,),
("quarter", term.quarter.lower(),),
("year", term.year,), ]))
return _json_to_sectionref(get_resource(url)) | [
"def",
"get_sections_by_curriculum_and_term",
"(",
"curriculum",
",",
"term",
")",
":",
"url",
"=",
"\"{}?{}\"",
".",
"format",
"(",
"section_res_url_prefix",
",",
"urlencode",
"(",
"[",
"(",
"\"curriculum_abbreviation\"",
",",
"curriculum",
".",
"label",
",",
")",
",",
"(",
"\"quarter\"",
",",
"term",
".",
"quarter",
".",
"lower",
"(",
")",
",",
")",
",",
"(",
"\"year\"",
",",
"term",
".",
"year",
",",
")",
",",
"]",
")",
")",
"return",
"_json_to_sectionref",
"(",
"get_resource",
"(",
"url",
")",
")"
]
| Returns a list of uw_sws.models.SectionReference objects
for the passed curriculum and term. | [
"Returns",
"a",
"list",
"of",
"uw_sws",
".",
"models",
".",
"SectionReference",
"objects",
"for",
"the",
"passed",
"curriculum",
"and",
"term",
"."
]
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L88-L98

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_sections_by_building_and_term | python

def get_sections_by_building_and_term(building, term):
    """
    Returns a list of uw_sws.models.SectionReference objects
    for the passed building and term.
    """
    url = "{}?{}".format(
        section_res_url_prefix,
        urlencode([("quarter", term.quarter.lower(),),
                   ("facility_code", building,),
                   ("year", term.year,), ]))
    return _json_to_sectionref(get_resource(url))
"""
Returns a list of uw_sws.models.SectionReference objects
for the passed building and term.
"""
url = "{}?{}".format(
section_res_url_prefix,
urlencode([("quarter", term.quarter.lower(),),
("facility_code", building,),
("year", term.year,), ]))
return _json_to_sectionref(get_resource(url)) | [
"def",
"get_sections_by_building_and_term",
"(",
"building",
",",
"term",
")",
":",
"url",
"=",
"\"{}?{}\"",
".",
"format",
"(",
"section_res_url_prefix",
",",
"urlencode",
"(",
"[",
"(",
"\"quarter\"",
",",
"term",
".",
"quarter",
".",
"lower",
"(",
")",
",",
")",
",",
"(",
"\"facility_code\"",
",",
"building",
",",
")",
",",
"(",
"\"year\"",
",",
"term",
".",
"year",
",",
")",
",",
"]",
")",
")",
"return",
"_json_to_sectionref",
"(",
"get_resource",
"(",
"url",
")",
")"
]
| Returns a list of uw_sws.models.SectionReference objects
for the passed building and term. | [
"Returns",
"a",
"list",
"of",
"uw_sws",
".",
"models",
".",
"SectionReference",
"objects",
"for",
"the",
"passed",
"building",
"and",
"term",
"."
]
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L101-L111

uw-it-aca/uw-restclients-sws | uw_sws/section.py | _json_to_sectionref | python

def _json_to_sectionref(data):
    """
    Returns a list of SectionReference objects created from
    the passed json data.
    """
    section_term = None
    sections = []
    for section_data in data.get("Sections", []):
        if (section_term is None or
                section_data["Year"] != section_term.year or
                section_data["Quarter"] != section_term.quarter):
            section_term = get_term_by_year_and_quarter(
                section_data["Year"], section_data["Quarter"])
        section = SectionReference(
            term=section_term,
            curriculum_abbr=section_data["CurriculumAbbreviation"],
            course_number=section_data["CourseNumber"],
            section_id=section_data["SectionID"],
            url=section_data["Href"])
        sections.append(section)
    return sections
"""
Returns a list of SectionReference object created from
the passed json data.
"""
section_term = None
sections = []
for section_data in data.get("Sections", []):
if (section_term is None or
section_data["Year"] != section_term.year or
section_data["Quarter"] != section_term.quarter):
section_term = get_term_by_year_and_quarter(
section_data["Year"], section_data["Quarter"])
section = SectionReference(
term=section_term,
curriculum_abbr=section_data["CurriculumAbbreviation"],
course_number=section_data["CourseNumber"],
section_id=section_data["SectionID"],
url=section_data["Href"])
sections.append(section)
return sections | [
"def",
"_json_to_sectionref",
"(",
"data",
")",
":",
"section_term",
"=",
"None",
"sections",
"=",
"[",
"]",
"for",
"section_data",
"in",
"data",
".",
"get",
"(",
"\"Sections\"",
",",
"[",
"]",
")",
":",
"if",
"(",
"section_term",
"is",
"None",
"or",
"section_data",
"[",
"\"Year\"",
"]",
"!=",
"section_term",
".",
"year",
"or",
"section_data",
"[",
"\"Quarter\"",
"]",
"!=",
"section_term",
".",
"quarter",
")",
":",
"section_term",
"=",
"get_term_by_year_and_quarter",
"(",
"section_data",
"[",
"\"Year\"",
"]",
",",
"section_data",
"[",
"\"Quarter\"",
"]",
")",
"section",
"=",
"SectionReference",
"(",
"term",
"=",
"section_term",
",",
"curriculum_abbr",
"=",
"section_data",
"[",
"\"CurriculumAbbreviation\"",
"]",
",",
"course_number",
"=",
"section_data",
"[",
"\"CourseNumber\"",
"]",
",",
"section_id",
"=",
"section_data",
"[",
"\"SectionID\"",
"]",
",",
"url",
"=",
"section_data",
"[",
"\"Href\"",
"]",
")",
"sections",
".",
"append",
"(",
"section",
")",
"return",
"sections"
]
| Returns a list of SectionReference object created from
the passed json data. | [
"Returns",
"a",
"list",
"of",
"SectionReference",
"object",
"created",
"from",
"the",
"passed",
"json",
"data",
"."
]
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L138-L158
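The JSON shape the function expects can be read off its field accesses; a fabricated payload (all values illustrative) showing the term-caching behaviour:

```python
# Field names taken from the accesses above; values and Hrefs are invented.
payload = {
    "Sections": [
        {"Year": 2013, "Quarter": "spring", "CurriculumAbbreviation": "CSE",
         "CourseNumber": "142", "SectionID": "A",
         "Href": "/student/v5/course/2013,spring,CSE,142/A.json"},
        {"Year": 2013, "Quarter": "spring", "CurriculumAbbreviation": "CSE",
         "CourseNumber": "143", "SectionID": "B",
         "Href": "/student/v5/course/2013,spring,CSE,143/B.json"},
    ]
}
# Because both entries share Year/Quarter, _json_to_sectionref fetches the
# Term object once and reuses it for the second SectionReference.
```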

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_section_by_url | python

def get_section_by_url(url,
                       include_instructor_not_on_time_schedule=True):
    """
    Returns a uw_sws.models.Section object
    for the passed section url.
    """
    if not course_url_pattern.match(url):
        raise InvalidSectionURL(url)
    return _json_to_section(
        get_resource(url),
        include_instructor_not_on_time_schedule=(
            include_instructor_not_on_time_schedule))
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L221-L233

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_section_by_label | python

def get_section_by_label(label,
                         include_instructor_not_on_time_schedule=True):
    """
    Returns a uw_sws.models.Section object for
    the passed section label.
    """
    validate_section_label(label)
    url = "{}/{}.json".format(course_res_url_prefix,
                              encode_section_label(label))
    return get_section_by_url(url,
                              include_instructor_not_on_time_schedule)
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L236-L248
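`get_section_by_label` validates a human-readable label and URL-encodes it before delegating to `get_section_by_url`. Judging from SWS course URLs, the label shape is `year,quarter,curriculum,course/section`; treat that exact format as an assumption:

```python
# Assumed label format: "year,quarter,CURRICULUM,course/section".
section = get_section_by_label('2013,spring,TRAIN,101/A')
print(section.section_id)
```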

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_linked_sections | python

def get_linked_sections(section,
                        include_instructor_not_on_time_schedule=True):
    """
    Returns a list of uw_sws.models.Section objects,
    representing linked sections for the passed section.
    """
    linked_sections = []
    for url in section.linked_section_urls:
        linked = get_section_by_url(url,
                                    include_instructor_not_on_time_schedule)
        linked_sections.append(linked)
    return linked_sections
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L251-L264

uw-it-aca/uw-restclients-sws | uw_sws/section.py | get_joint_sections | python

def get_joint_sections(section,
                       include_instructor_not_on_time_schedule=True):
    """
    Returns a list of uw_sws.models.Section objects,
    representing joint sections for the passed section.
    """
    joint_sections = []
    for url in section.joint_section_urls:
        joint = get_section_by_url(url,
                                   include_instructor_not_on_time_schedule)
        joint_sections.append(joint)
    return joint_sections
sha: 4d36776dcca36855fc15c1b8fe7650ae045194cf | partition: train
url: https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section.py#L267-L280

Kortemme-Lab/klab | klab/bio/scop.py | SCOPeDatabase.get_chain_details_by_related_pdb_chains | python

def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
    ''' Returns a dict of SCOPe details using info from related PDB chains.
        This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
        If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
        pretty complete information.
    '''
    if not pfam_accs:
        return None

    associated_pdb_chains = set()
    pfam_api = self.get_pfam_api()
    for pfam_acc in pfam_accs:
        associated_pdb_chains = associated_pdb_chains.union(pfam_api.get_pdb_chains_from_pfam_accession_number(pfam_acc))

    hits = []
    pfam_scop_mapping = {}
    for pdb_chain_pair in associated_pdb_chains:
        ass_pdb_id, ass_chain_id = pdb_chain_pair[0], pdb_chain_pair[1]
        hit = self.get_chain_details(ass_pdb_id, chain=ass_chain_id, internal_function_call=True, pfam_scop_mapping=pfam_scop_mapping)
        if hit and hit.get('chains'):
            assert(len(hit['chains']) == 1)
            hits.append(hit['chains'][ass_chain_id])

    # Keep only the sunids common to every associated Pfam accession, plus any
    # sunids mapped directly from the Pfam accessions themselves.
    common_sunids = set.intersection(*pfam_scop_mapping.values()) if pfam_scop_mapping else set()
    allowed_scop_domains = list(map(int, common_sunids))
    allowed_scop_domains = list(set((allowed_scop_domains or []) + (self.get_sunid_for_pfam_accs(pfam_accs) or [])))

    filtered_hits = []
    print(pfam_accs)
    print(allowed_scop_domains)
    print('%d hits' % len(hits))
    for hit in hits:
        for k, v in hit['domains'].items():
            if v['sunid'] in allowed_scop_domains:
                filtered_hits.append(v)
    print('%d filtered_hits' % len(filtered_hits))
    if not filtered_hits:
        return None

    d = self.get_basic_pdb_chain_information(pdb_id, chain_id)
    d.update(self.get_common_fields(filtered_hits))
    d.update(dict(
        SCOPe_sources='Pfam + SCOPe',
        SCOPe_search_fields='Pfam + link_pdb.pdb_chain_id',
        SCOPe_trust_level=3
    ))
    # Add the lowest common classification over all related Pfam families
    for k, v in sorted(self.levels.items()):
        d[v] = None
    d.update(dict(self.get_common_hierarchy(filtered_hits)))
    return d
sha: 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | partition: train
url: https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/scop.py#L216-L275
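The filtering step hinges on intersecting the per-Pfam sunid sets collected by `get_chain_details`. A standalone sketch of that intersection; the Pfam accessions are real-looking but the sunid values are fabricated:

```python
# pfam_scop_mapping: Pfam accession -> set of SCOPe sunids seen for it.
pfam_scop_mapping = {
    'PF00069': {'46456', '56112'},
    'PF07714': {'56112', '48726'},
}

# Keep only sunids common to every Pfam family.
common_sunids = sorted(map(int, set.intersection(*pfam_scop_mapping.values())))
print(common_sunids)  # [56112]
```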

TUNE-Archive/freight_forwarder | freight_forwarder/container_ship.py | ContainerShip.recall_service | python

def recall_service(self, service):
    """
    This method assumes that it's a rollback during a deployment. If not used during a deployment session,
    this method should be extended later to be more useful.
    """
    if not isinstance(service, Service):
        raise TypeError("service must be of type Service.")

    logger.warning("The deployment for {0} on {1} failed; starting the rollback.".format(service.alias, self.url.geturl()))

    def anonymous(anonymous_service):
        if not isinstance(anonymous_service, Service):
            raise TypeError("service must be an instance of Service.")

        containers = self.find_previous_service_containers(anonymous_service)
        if containers:
            for name in list(anonymous_service.containers.keys()):
                del anonymous_service.containers[name]
            anonymous_service.cargo.delete()

            for name, container in six.iteritems(containers):
                # TODO: add a function to the container obj to see if it's running.
                if container.state().get('running'):
                    logger.info(
                        "is already running... Might want to investigate.",
                        extra={'formatter': 'container', 'container': container.name}
                    )
                else:
                    if container.start():
                        logger.info(
                            "is restarted and healthy.",
                            extra={'formatter': 'container', 'container': container.name}
                        )
                    else:
                        logger.error(
                            "failed to start.",
                            extra={'formatter': 'container', 'container': container.name}
                        )
                        container.dump_logs()
                        raise Exception(
                            "The deployment for {0} on {1} went horribly wrong".format(container.name, self.url.geturl())
                        )

    self._service_map(service, anonymous, descending=False)
"""
This method assumes that its a roll back during a deployment. If not used during a deployment session
This method should be extended later to be more useful.
"""
if not isinstance(service, Service):
raise TypeError("service must be of type Service.")
logger.warning("The deployment for {0} on {1} failed starting the rollback.".format(service.alias, self.url.geturl()))
def anonymous(anonymous_service):
if not isinstance(anonymous_service, Service):
raise TypeError("service must be an instance of Service.")
containers = self.find_previous_service_containers(anonymous_service)
if containers:
for name in list(anonymous_service.containers.keys()):
del anonymous_service.containers[name]
anonymous_service.cargo.delete()
for name, container in six.iteritems(containers):
# TODO: add function to container obj to see if its running.
if container.state().get('running'):
logger.info(
"is already running... Might want to investigate.",
extra={'formatter': 'container', 'container': container.name}
)
else:
if container.start():
logger.info(
"is restarted and healthy.",
extra={'formatter': 'container', 'container': container.name}
)
else:
logger.error(
"failed to start.",
extra={'formatter': 'container', 'container': container.name}
)
container.dump_logs()
raise Exception(
"The deployment for {0} on {1} went horribly wrong".format(container.name, self.url.geturl())
)
self._service_map(service, anonymous, descending=False) | [
"def",
"recall_service",
"(",
"self",
",",
"service",
")",
":",
"if",
"not",
"isinstance",
"(",
"service",
",",
"Service",
")",
":",
"raise",
"TypeError",
"(",
"\"service must be of type Service.\"",
")",
"logger",
".",
"warning",
"(",
"\"The deployment for {0} on {1} failed starting the rollback.\"",
".",
"format",
"(",
"service",
".",
"alias",
",",
"self",
".",
"url",
".",
"geturl",
"(",
")",
")",
")",
"def",
"anonymous",
"(",
"anonymous_service",
")",
":",
"if",
"not",
"isinstance",
"(",
"anonymous_service",
",",
"Service",
")",
":",
"raise",
"TypeError",
"(",
"\"service must be an instance of Service.\"",
")",
"containers",
"=",
"self",
".",
"find_previous_service_containers",
"(",
"anonymous_service",
")",
"if",
"containers",
":",
"for",
"name",
"in",
"list",
"(",
"anonymous_service",
".",
"containers",
".",
"keys",
"(",
")",
")",
":",
"del",
"anonymous_service",
".",
"containers",
"[",
"name",
"]",
"anonymous_service",
".",
"cargo",
".",
"delete",
"(",
")",
"for",
"name",
",",
"container",
"in",
"six",
".",
"iteritems",
"(",
"containers",
")",
":",
"# TODO: add function to container obj to see if its running.",
"if",
"container",
".",
"state",
"(",
")",
".",
"get",
"(",
"'running'",
")",
":",
"logger",
".",
"info",
"(",
"\"is already running... Might want to investigate.\"",
",",
"extra",
"=",
"{",
"'formatter'",
":",
"'container'",
",",
"'container'",
":",
"container",
".",
"name",
"}",
")",
"else",
":",
"if",
"container",
".",
"start",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"is restarted and healthy.\"",
",",
"extra",
"=",
"{",
"'formatter'",
":",
"'container'",
",",
"'container'",
":",
"container",
".",
"name",
"}",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"failed to start.\"",
",",
"extra",
"=",
"{",
"'formatter'",
":",
"'container'",
",",
"'container'",
":",
"container",
".",
"name",
"}",
")",
"container",
".",
"dump_logs",
"(",
")",
"raise",
"Exception",
"(",
"\"The deployment for {0} on {1} went horribly wrong\"",
".",
"format",
"(",
"container",
".",
"name",
",",
"self",
".",
"url",
".",
"geturl",
"(",
")",
")",
")",
"self",
".",
"_service_map",
"(",
"service",
",",
"anonymous",
",",
"descending",
"=",
"False",
")"
]
| This method assumes that its a roll back during a deployment. If not used during a deployment session
This method should be extended later to be more useful. | [
"This",
"method",
"assumes",
"that",
"its",
"a",
"roll",
"back",
"during",
"a",
"deployment",
".",
"If",
"not",
"used",
"during",
"a",
"deployment",
"session"
]
sha: 6ea4a49f474ec04abb8bb81b175c774a16b5312f | partition: train
url: https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container_ship.py#L88-L134
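For comparison, the check-state-then-restart loop above maps onto the modern Docker SDK roughly as follows. This is an analogy, not freight_forwarder's own client API, and the name filter is illustrative:

```python
import docker

client = docker.from_env()
# Restart any stopped containers from the previous release of a service.
for container in client.containers.list(all=True, filters={'name': 'api-'}):
    container.reload()                # refresh the cached state
    if container.status != 'running':
        container.start()
        print(container.name, 'restarted')
```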

TUNE-Archive/freight_forwarder | freight_forwarder/container_ship.py | ContainerShip.clean_up_dangling_images | python

def clean_up_dangling_images(self):
    """
    Clean up all dangling images.
    """
    cargoes = Image.all(client=self._client_session, filters={'dangling': True})
    for id, cargo in six.iteritems(cargoes):
        logger.info("Removing dangling image: {0}".format(id))
        cargo.delete()
"""
Clean up all dangling images.
"""
cargoes = Image.all(client=self._client_session, filters={'dangling': True})
for id, cargo in six.iteritems(cargoes):
logger.info("Removing dangling image: {0}".format(id))
cargo.delete() | [
"def",
"clean_up_dangling_images",
"(",
"self",
")",
":",
"cargoes",
"=",
"Image",
".",
"all",
"(",
"client",
"=",
"self",
".",
"_client_session",
",",
"filters",
"=",
"{",
"'dangling'",
":",
"True",
"}",
")",
"for",
"id",
",",
"cargo",
"in",
"six",
".",
"iteritems",
"(",
"cargoes",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing dangling image: {0}\"",
".",
"format",
"(",
"id",
")",
")",
"cargo",
".",
"delete",
"(",
")"
]
| Clean up all dangling images. | [
"Clean",
"up",
"all",
"dangling",
"images",
"."
]
sha: 6ea4a49f474ec04abb8bb81b175c774a16b5312f | partition: train
url: https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container_ship.py#L156-L163
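`Image.all(..., filters={'dangling': True})` mirrors `docker images --filter dangling=true`. With the modern Docker SDK the whole loop collapses to a prune call (again an analogy, not this project's API):

```python
import docker

client = docker.from_env()
# Remove images that no tag points at, in one call.
report = client.images.prune(filters={'dangling': True})
print(report.get('ImagesDeleted'), report.get('SpaceReclaimed'))
```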

TUNE-Archive/freight_forwarder | freight_forwarder/container_ship.py | ContainerShip.offload_all_service_containers | python

def offload_all_service_containers(self, service):
    """Deletes all containers related to the service.
    """
    def anonymous(anonymous_service):
        if not isinstance(anonymous_service, Service):
            raise TypeError("service must be an instance of Service.")

        containers = self.find_service_containers(anonymous_service)
        if containers:
            logger.info("Deleting service: {0} containers.".format(anonymous_service.name))
            for container in six.itervalues(containers):
                container.delete()

    self._service_map(service, anonymous, descending=True)
"""Deletes all containers related to the service.
"""
def anonymous(anonymous_service):
if not isinstance(anonymous_service, Service):
raise TypeError("service must be an instance of Service.")
containers = self.find_service_containers(anonymous_service)
if containers:
logger.info("Deleting service: {0} containers.".format(anonymous_service.name))
for container in six.itervalues(containers):
container.delete()
self._service_map(service, anonymous, descending=True) | [
"def",
"offload_all_service_containers",
"(",
"self",
",",
"service",
")",
":",
"def",
"anonymous",
"(",
"anonymous_service",
")",
":",
"if",
"not",
"isinstance",
"(",
"anonymous_service",
",",
"Service",
")",
":",
"raise",
"TypeError",
"(",
"\"service must be an instance of Service.\"",
")",
"containers",
"=",
"self",
".",
"find_service_containers",
"(",
"anonymous_service",
")",
"if",
"containers",
":",
"logger",
".",
"info",
"(",
"\"Deleting service: {0} containers.\"",
".",
"format",
"(",
"anonymous_service",
".",
"name",
")",
")",
"for",
"container",
"in",
"six",
".",
"itervalues",
"(",
"containers",
")",
":",
"container",
".",
"delete",
"(",
")",
"self",
".",
"_service_map",
"(",
"service",
",",
"anonymous",
",",
"descending",
"=",
"True",
")"
]
| Deletes all containers related to the service. | [
"Deletes",
"all",
"containers",
"related",
"to",
"the",
"service",
"."
]
sha: 6ea4a49f474ec04abb8bb81b175c774a16b5312f | partition: train
url: https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container_ship.py#L326-L339

TUNE-Archive/freight_forwarder | freight_forwarder/container_ship.py | ContainerShip._container_registration | python

def _container_registration(self, alias):
    """
    Check for an available name and return that to the caller.
    """
    containers = Container.find_by_name(self._client_session, alias)

    count = 1
    container_name = "{0}-{1:02d}".format(alias, count)  # zero-pad single digits: alias-01
    while container_name in containers:
        count += 1
        container_name = "{0}-{1:02d}".format(alias, count)
    return container_name
"""
Check for an available name and return that to the caller.
"""
containers = Container.find_by_name(self._client_session, alias)
def validate_name(name):
valid = True
if name in containers:
valid = False
return valid
count = 1
container_name = "{0}-0{1}".format(alias, count)
while not validate_name(container_name):
count += 1
container_index = count if count > 10 else "0{0}".format(count)
container_name = "{0}-{1}".format(alias, container_index)
return container_name | [
"def",
"_container_registration",
"(",
"self",
",",
"alias",
")",
":",
"containers",
"=",
"Container",
".",
"find_by_name",
"(",
"self",
".",
"_client_session",
",",
"alias",
")",
"def",
"validate_name",
"(",
"name",
")",
":",
"valid",
"=",
"True",
"if",
"name",
"in",
"containers",
":",
"valid",
"=",
"False",
"return",
"valid",
"count",
"=",
"1",
"container_name",
"=",
"\"{0}-0{1}\"",
".",
"format",
"(",
"alias",
",",
"count",
")",
"while",
"not",
"validate_name",
"(",
"container_name",
")",
":",
"count",
"+=",
"1",
"container_index",
"=",
"count",
"if",
"count",
">",
"10",
"else",
"\"0{0}\"",
".",
"format",
"(",
"count",
")",
"container_name",
"=",
"\"{0}-{1}\"",
".",
"format",
"(",
"alias",
",",
"container_index",
")",
"return",
"container_name"
]
| Check for an available name and return that to the caller. | [
"Check",
"for",
"an",
"available",
"name",
"and",
"return",
"that",
"to",
"the",
"caller",
"."
]
sha: 6ea4a49f474ec04abb8bb81b175c774a16b5312f | partition: train
url: https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container_ship.py#L529-L549

jpulec/django-protractor | protractor/management/commands/protractor.py | Command.teardown_databases | python

def teardown_databases(self, old_config, options):
    """
    Destroys all the non-mirror databases.
    """
    if len(old_config) > 1:
        old_names, mirrors = old_config
    else:
        old_names = old_config
    for connection, old_name, destroy in old_names:
        if destroy:
            connection.creation.destroy_test_db(old_name, options['verbosity'])
"""
Destroys all the non-mirror databases.
"""
if len(old_config) > 1:
old_names, mirrors = old_config
else:
old_names = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, options['verbosity']) | [
"def",
"teardown_databases",
"(",
"self",
",",
"old_config",
",",
"options",
")",
":",
"if",
"len",
"(",
"old_config",
")",
">",
"1",
":",
"old_names",
",",
"mirrors",
"=",
"old_config",
"else",
":",
"old_names",
"=",
"old_config",
"for",
"connection",
",",
"old_name",
",",
"destroy",
"in",
"old_names",
":",
"if",
"destroy",
":",
"connection",
".",
"creation",
".",
"destroy_test_db",
"(",
"old_name",
",",
"options",
"[",
"'verbosity'",
"]",
")"
]
| Destroys all the non-mirror databases. | [
"Destroys",
"all",
"the",
"non",
"-",
"mirror",
"databases",
"."
]
sha: 3857d651612fd988ff0ab17264d367db5345664a | partition: train
url: https://github.com/jpulec/django-protractor/blob/3857d651612fd988ff0ab17264d367db5345664a/protractor/management/commands/protractor.py#L114-L124
mardix/Mocha | mocha/extras/jinja_helpers.py | oembed | def oembed(url, class_=""):
"""
Create OEmbed link
{{ url | oembed }}
:param url:
:param class_:
:return:
"""
o = "<a href=\"{url}\" class=\"oembed {class_}\" ></a>".format(url=url,
class_=class_)
return Markup(o) | python | def oembed(url, class_=""):
"""
Create OEmbed link
{{ url | oembed }}
:param url:
:param class_:
:return:
"""
o = "<a href=\"{url}\" class=\"oembed {class_}\" ></a>".format(url=url,
class_=class_)
return Markup(o) | [
"def",
"oembed",
"(",
"url",
",",
"class_",
"=",
"\"\"",
")",
":",
"o",
"=",
"\"<a href=\\\"{url}\\\" class=\\\"oembed {class_}\\\" ></a>\"",
".",
"format",
"(",
"url",
"=",
"url",
",",
"class_",
"=",
"class_",
")",
"return",
"Markup",
"(",
"o",
")"
]
| Create OEmbed link
{{ url | oembed }}
:param url:
:param class_:
:return: | [
"Create",
"OEmbed",
"link"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/jinja_helpers.py#L31-L42 | train |
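Usage sketch (illustrative, not part of the record): a hypothetical call showing the markup the filter emits, assuming `Markup` is imported in the defining module as the code implies.

print(oembed("https://vimeo.com/1234", class_="video"))
# -> <a href="https://vimeo.com/1234" class="oembed video" ></a>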
mardix/Mocha | mocha/extras/jinja_helpers.py | img_src | def img_src(url, class_="", responsive=False, lazy_load=False, id_=""):
"""
Create an image src
{{ xyz.jpg | img_src }}
:param url:
:param class_:
:param responsive:
:param lazy_load:
:param id_:
:return:
"""
if not url.startswith("http://") and not url.startswith("https://"):
url = static_url(url)
data_src = ""
if responsive:
class_ += " responsive"
if lazy_load:
data_src = url
# 1x1 image
url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII="
class_ += " lazy"
img = "<img src=\"{src}\" class=\"{class_}\" id=\"{id_}\" data-src={data_src}>" \
.format(src=url, class_=class_, id_=id_, data_src=data_src)
return Markup(img) | python | def img_src(url, class_="", responsive=False, lazy_load=False, id_=""):
"""
Create an image src
{{ xyz.jpg | img_src }}
:param url:
:param class_:
:param responsive:
:param lazy_load:
:param id_:
:return:
"""
if not url.startswith("http://") and not url.startswith("https://"):
url = static_url(url)
data_src = ""
if responsive:
class_ += " responsive"
if lazy_load:
data_src = url
# 1x1 image
url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII="
class_ += " lazy"
img = "<img src=\"{src}\" class=\"{class_}\" id=\"{id_}\" data-src={data_src}>" \
.format(src=url, class_=class_, id_=id_, data_src=data_src)
return Markup(img) | [
"def",
"img_src",
"(",
"url",
",",
"class_",
"=",
"\"\"",
",",
"responsive",
"=",
"False",
",",
"lazy_load",
"=",
"False",
",",
"id_",
"=",
"\"\"",
")",
":",
"if",
"not",
"url",
".",
"startswith",
"(",
"\"http://\"",
")",
"and",
"not",
"url",
".",
"startswith",
"(",
"\"https://\"",
")",
":",
"url",
"=",
"static_url",
"(",
"url",
")",
"data_src",
"=",
"\"\"",
"if",
"responsive",
":",
"class_",
"+=",
"\" responsive\"",
"if",
"lazy_load",
":",
"data_src",
"=",
"url",
"# 1x1 image",
"url",
"=",
"\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII=\"",
"class_",
"+=",
"\" lazy\"",
"img",
"=",
"\"<img src=\\\"{src}\\\" class=\\\"{class_}\\\" id=\\\"{id_}\\\" data-src={data_src}>\"",
".",
"format",
"(",
"src",
"=",
"url",
",",
"class_",
"=",
"class_",
",",
"id_",
"=",
"id_",
",",
"data_src",
"=",
"data_src",
")",
"return",
"Markup",
"(",
"img",
")"
]
| Create an image src
{{ xyz.jpg | img_src }}
:param url:
:param class_:
:param responsive:
:param lazy_load:
:param id_:
:return: | [
"Create",
"an",
"image",
"src"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/jinja_helpers.py#L45-L72 | train |
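Usage sketch (illustrative, not part of the record): two hypothetical calls covering the plain and lazy-loading paths; the outputs follow directly from the format string above.

# Absolute URL, no options: rendered as-is, with an empty data-src.
print(img_src("https://example.com/pic.jpg", class_="thumb"))
# -> <img src="https://example.com/pic.jpg" class="thumb" id="" data-src=>

# Lazy path: src becomes the embedded 1x1 PNG placeholder, the class list
# gains "responsive lazy", and data-src carries the real URL for a
# lazy-loading script to swap back in.
print(img_src("https://example.com/pic.jpg", responsive=True, lazy_load=True))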
assamite/creamas | creamas/examples/spiro/spiro.py | give_dots_yield | def give_dots_yield(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots without numpy using yield.
'''
def x(theta):
return (R-r) * math.cos(theta) + r_*math.cos((R-r) / r * theta)
def y(theta):
return (R-r) * math.sin(theta) - r_*math.sin((R-r) / r * theta)
theta = 0.0
while theta < 2*PI*spins:
yield (x(theta), y(theta))
theta += resolution | python | def give_dots_yield(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots without numpy using yield.
'''
def x(theta):
return (R-r) * math.cos(theta) + r_*math.cos((R-r) / r * theta)
def y(theta):
return (R-r) * math.sin(theta) - r_*math.sin((R-r) / r * theta)
theta = 0.0
while theta < 2*PI*spins:
yield (x(theta), y(theta))
theta += resolution | [
"def",
"give_dots_yield",
"(",
"R",
",",
"r",
",",
"r_",
",",
"resolution",
"=",
"2",
"*",
"PI",
"/",
"1000",
",",
"spins",
"=",
"50",
")",
":",
"def",
"x",
"(",
"theta",
")",
":",
"return",
"(",
"R",
"-",
"r",
")",
"*",
"math",
".",
"cos",
"(",
"theta",
")",
"+",
"r_",
"*",
"math",
".",
"cos",
"(",
"(",
"R",
"-",
"r",
")",
"/",
"r",
"*",
"theta",
")",
"def",
"y",
"(",
"theta",
")",
":",
"return",
"(",
"R",
"-",
"r",
")",
"*",
"math",
".",
"sin",
"(",
"theta",
")",
"-",
"r_",
"*",
"math",
".",
"sin",
"(",
"(",
"R",
"-",
"r",
")",
"/",
"r",
"*",
"theta",
")",
"theta",
"=",
"0.0",
"while",
"theta",
"<",
"2",
"*",
"PI",
"*",
"spins",
":",
"yield",
"(",
"x",
"(",
"theta",
")",
",",
"y",
"(",
"theta",
")",
")",
"theta",
"+=",
"resolution"
]
| Generate Spirograph dots without numpy using yield. | [
"Generate",
"Spirograph",
"dots",
"without",
"numpy",
"using",
"yield",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro.py#L14-L26 | train |
assamite/creamas | creamas/examples/spiro/spiro.py | give_dots | def give_dots(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots with numpy.
'''
thetas = np.arange(0, 2*PI*spins, resolution)
Rr = R - r
x = Rr * np.cos(thetas) + r_*np.cos(Rr / r * thetas)
y = Rr * np.sin(thetas) - r_*np.sin(Rr / r * thetas)
return x, y | python | def give_dots(R, r, r_, resolution=2*PI/1000, spins=50):
'''Generate Spirograph dots with numpy.
'''
thetas = np.arange(0, 2*PI*spins, resolution)
Rr = R - r
x = Rr * np.cos(thetas) + r_*np.cos(Rr / r * thetas)
y = Rr * np.sin(thetas) - r_*np.sin(Rr / r * thetas)
return x, y | [
"def",
"give_dots",
"(",
"R",
",",
"r",
",",
"r_",
",",
"resolution",
"=",
"2",
"*",
"PI",
"/",
"1000",
",",
"spins",
"=",
"50",
")",
":",
"thetas",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"2",
"*",
"PI",
"*",
"spins",
",",
"resolution",
")",
"Rr",
"=",
"R",
"-",
"r",
"x",
"=",
"Rr",
"*",
"np",
".",
"cos",
"(",
"thetas",
")",
"+",
"r_",
"*",
"np",
".",
"cos",
"(",
"Rr",
"/",
"r",
"*",
"thetas",
")",
"y",
"=",
"Rr",
"*",
"np",
".",
"sin",
"(",
"thetas",
")",
"-",
"r_",
"*",
"np",
".",
"sin",
"(",
"Rr",
"/",
"r",
"*",
"thetas",
")",
"return",
"x",
",",
"y"
]
| Generate Spirograph dots with numpy. | [
"Generate",
"Spirograph",
"dots",
"with",
"numpy",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro.py#L29-L36 | train |
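Usage sketch (illustrative, not part of the record): one way to consume either generator above. matplotlib is an assumption here -- the record itself only computes the dots -- and the parameters are arbitrary.

import matplotlib.pyplot as plt  # assumption: not imported by the record

x, y = give_dots(200, 67, 42, spins=20)  # R, r, r_ chosen for illustration
plt.plot(x, y, linewidth=0.3)
plt.gca().set_aspect("equal")  # keep the hypotrochoid round
plt.show()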
assamite/creamas | creamas/examples/spiro/spiro.py | spiro_image | def spiro_image(R, r, r_, resolution=2*PI/1000, spins=50, size=[32, 32]):
'''Create image with given Spirograph parameters using numpy and scipy.
'''
x, y = give_dots(200, r, r_, spins=20)
xy = np.array([x, y]).T
xy = np.array(np.around(xy), dtype=np.int64)
xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
(xy[:, 0] < 250) & (xy[:, 1] < 250)]
xy = xy + 250
img = np.ones([500, 500], dtype=np.uint8)
img[:] = 255
img[xy[:, 0], xy[:, 1]] = 0
img = misc.imresize(img, size)
fimg = img / 255.0
return fimg | python | def spiro_image(R, r, r_, resolution=2*PI/1000, spins=50, size=[32, 32]):
'''Create image with given Spirograph parameters using numpy and scipy.
'''
x, y = give_dots(200, r, r_, spins=20)
xy = np.array([x, y]).T
xy = np.array(np.around(xy), dtype=np.int64)
xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
(xy[:, 0] < 250) & (xy[:, 1] < 250)]
xy = xy + 250
img = np.ones([500, 500], dtype=np.uint8)
img[:] = 255
img[xy[:, 0], xy[:, 1]] = 0
img = misc.imresize(img, size)
fimg = img / 255.0
return fimg | [
"def",
"spiro_image",
"(",
"R",
",",
"r",
",",
"r_",
",",
"resolution",
"=",
"2",
"*",
"PI",
"/",
"1000",
",",
"spins",
"=",
"50",
",",
"size",
"=",
"[",
"32",
",",
"32",
"]",
")",
":",
"x",
",",
"y",
"=",
"give_dots",
"(",
"200",
",",
"r",
",",
"r_",
",",
"spins",
"=",
"20",
")",
"xy",
"=",
"np",
".",
"array",
"(",
"[",
"x",
",",
"y",
"]",
")",
".",
"T",
"xy",
"=",
"np",
".",
"array",
"(",
"np",
".",
"around",
"(",
"xy",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"xy",
"=",
"xy",
"[",
"(",
"xy",
"[",
":",
",",
"0",
"]",
">=",
"-",
"250",
")",
"&",
"(",
"xy",
"[",
":",
",",
"1",
"]",
">=",
"-",
"250",
")",
"&",
"(",
"xy",
"[",
":",
",",
"0",
"]",
"<",
"250",
")",
"&",
"(",
"xy",
"[",
":",
",",
"1",
"]",
"<",
"250",
")",
"]",
"xy",
"=",
"xy",
"+",
"250",
"img",
"=",
"np",
".",
"ones",
"(",
"[",
"500",
",",
"500",
"]",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"img",
"[",
":",
"]",
"=",
"255",
"img",
"[",
"xy",
"[",
":",
",",
"0",
"]",
",",
"xy",
"[",
":",
",",
"1",
"]",
"]",
"=",
"0",
"img",
"=",
"misc",
".",
"imresize",
"(",
"img",
",",
"size",
")",
"fimg",
"=",
"img",
"/",
"255.0",
"return",
"fimg"
]
| Create image with given Spirograph parameters using numpy and scipy. | [
"Create",
"image",
"with",
"given",
"Spirograph",
"parameters",
"using",
"numpy",
"and",
"scipy",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro.py#L39-L53 | train |
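Portability note: `scipy.misc.imresize` was deprecated and then removed in SciPy 1.3, so on current stacks the resize step in `spiro_image` needs a substitute. A hedged Pillow-based replacement for that one call:

# Suggested stand-in for the misc.imresize(img, size) step only; the rest of
# spiro_image (dot rasterisation, the final /255.0 normalisation) is unchanged.
import numpy as np
from PIL import Image

def resize_uint8(img, size):
    # img: 2-D uint8 array; size: (height, width), as spiro_image passes it.
    # PIL's resize expects (width, height), hence the swap.
    return np.asarray(Image.fromarray(img).resize((size[1], size[0])))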
mardix/Mocha | mocha/extras/md.py | html | def html(text, lazy_images=False):
"""
To render a markdown format text into HTML.
- If you want to also build a Table of Contents inside of the markdown,
add the tags: [TOC]
It will include a <ul><li>...</ul> of all <h*>
:param text:
:param lazy_images: bool - If true, it will activate the LazyImageExtension
:return:
"""
extensions = [
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.toc',
'markdown.extensions.tables',
OEmbedExtension()
]
if lazy_images:
extensions.append(LazyImageExtension())
return markdown.markdown(text, extensions=extensions) | python | def html(text, lazy_images=False):
"""
To render a markdown format text into HTML.
- If you want to also build a Table of Contents inside of the markdown,
add the tags: [TOC]
It will include a <ul><li>...</ul> of all <h*>
:param text:
:param lazy_images: bool - If true, it will activate the LazyImageExtension
:return:
"""
extensions = [
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.toc',
'markdown.extensions.tables',
OEmbedExtension()
]
if lazy_images:
extensions.append(LazyImageExtension())
return markdown.markdown(text, extensions=extensions) | [
"def",
"html",
"(",
"text",
",",
"lazy_images",
"=",
"False",
")",
":",
"extensions",
"=",
"[",
"'markdown.extensions.nl2br'",
",",
"'markdown.extensions.sane_lists'",
",",
"'markdown.extensions.toc'",
",",
"'markdown.extensions.tables'",
",",
"OEmbedExtension",
"(",
")",
"]",
"if",
"lazy_images",
":",
"extensions",
".",
"append",
"(",
"LazyImageExtension",
"(",
")",
")",
"return",
"markdown",
".",
"markdown",
"(",
"text",
",",
"extensions",
"=",
"extensions",
")"
]
| To render a markdown format text into HTML.
- If you want to also build a Table of Contents inside of the markdown,
add the tags: [TOC]
It will include a <ul><li>...</ul> of all <h*>
:param text:
:param lazy_images: bool - If true, it will activate the LazyImageExtension
:return: | [
"To",
"render",
"a",
"markdown",
"format",
"text",
"into",
"HTML",
"."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/md.py#L66-L88 | train |
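Usage sketch (illustrative, not part of the record): a hypothetical input exercising the [TOC] marker described in the docstring. The exact HTML depends on the markdown package version.

text = "[TOC]\n\n# Intro\n\nHello **world**"
print(html(text))
# The toc extension replaces [TOC] with a <div class="toc"> block linking to
# #intro, followed by the rendered body.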
mardix/Mocha | mocha/extras/md.py | ExtractImagesTreeprocessor.run | def run(self, root):
"Find all images and append to markdown.images. "
self.markdown.images = []
for image in root.getiterator("img"):
self.markdown.images.append(image.attrib["src"]) | python | def run(self, root):
"Find all images and append to markdown.images. "
self.markdown.images = []
for image in root.getiterator("img"):
self.markdown.images.append(image.attrib["src"]) | [
"def",
"run",
"(",
"self",
",",
"root",
")",
":",
"self",
".",
"markdown",
".",
"images",
"=",
"[",
"]",
"for",
"image",
"in",
"root",
".",
"getiterator",
"(",
"\"img\"",
")",
":",
"self",
".",
"markdown",
".",
"images",
".",
"append",
"(",
"image",
".",
"attrib",
"[",
"\"src\"",
"]",
")"
]
| Find all images and append to markdown.images. | [
"Find",
"all",
"images",
"and",
"append",
"to",
"markdown",
".",
"images",
"."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/md.py#L25-L29 | train |
jaraco/jaraco.collections | jaraco/collections.py | dict_map | def dict_map(function, dictionary):
"""
dict_map is much like the built-in function map. It takes a dictionary
and applies a function to the values of that dictionary, returning a
new dictionary with the mapped values in the original keys.
>>> d = dict_map(lambda x:x+1, dict(a=1, b=2))
>>> d == dict(a=2,b=3)
True
"""
return dict((key, function(value)) for key, value in dictionary.items()) | python | def dict_map(function, dictionary):
"""
dict_map is much like the built-in function map. It takes a dictionary
and applies a function to the values of that dictionary, returning a
new dictionary with the mapped values in the original keys.
>>> d = dict_map(lambda x:x+1, dict(a=1, b=2))
>>> d == dict(a=2,b=3)
True
"""
return dict((key, function(value)) for key, value in dictionary.items()) | [
"def",
"dict_map",
"(",
"function",
",",
"dictionary",
")",
":",
"return",
"dict",
"(",
"(",
"key",
",",
"function",
"(",
"value",
")",
")",
"for",
"key",
",",
"value",
"in",
"dictionary",
".",
"items",
"(",
")",
")"
]
| dict_map is much like the built-in function map. It takes a dictionary
and applies a function to the values of that dictionary, returning a
new dictionary with the mapped values in the original keys.
>>> d = dict_map(lambda x:x+1, dict(a=1, b=2))
>>> d == dict(a=2,b=3)
True | [
"dict_map",
"is",
"much",
"like",
"the",
"built",
"-",
"in",
"function",
"map",
".",
"It",
"takes",
"a",
"dictionary",
"and",
"applies",
"a",
"function",
"to",
"the",
"values",
"of",
"that",
"dictionary",
"returning",
"a",
"new",
"dictionary",
"with",
"the",
"mapped",
"values",
"in",
"the",
"original",
"keys",
"."
]
| 25db1dab06d7108dc0c2b7e83dc7530fb10718d2 | https://github.com/jaraco/jaraco.collections/blob/25db1dab06d7108dc0c2b7e83dc7530fb10718d2/jaraco/collections.py#L139-L149 | train |
jaraco/jaraco.collections | jaraco/collections.py | sorted_items | def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
"""
# wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse) | python | def sorted_items(d, key=__identity, reverse=False):
"""
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
"""
# wrap the key func so it operates on the first element of each item
def pairkey_key(item):
return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse) | [
"def",
"sorted_items",
"(",
"d",
",",
"key",
"=",
"__identity",
",",
"reverse",
"=",
"False",
")",
":",
"# wrap the key func so it operates on the first element of each item",
"def",
"pairkey_key",
"(",
"item",
")",
":",
"return",
"key",
"(",
"item",
"[",
"0",
"]",
")",
"return",
"sorted",
"(",
"d",
".",
"items",
"(",
")",
",",
"key",
"=",
"pairkey_key",
",",
"reverse",
"=",
"reverse",
")"
]
| Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42)) | [
"Return",
"the",
"items",
"of",
"the",
"dictionary",
"sorted",
"by",
"the",
"keys"
]
| 25db1dab06d7108dc0c2b7e83dc7530fb10718d2 | https://github.com/jaraco/jaraco.collections/blob/25db1dab06d7108dc0c2b7e83dc7530fb10718d2/jaraco/collections.py#L269-L287 | train |
jaraco/jaraco.collections | jaraco/collections.py | invert_map | def invert_map(map):
"""
Given a dictionary, return another dictionary with keys and values
switched. If any of the values resolve to the same key, raises
a ValueError.
>>> numbers = dict(a=1, b=2, c=3)
>>> letters = invert_map(numbers)
>>> letters[1]
'a'
>>> numbers['d'] = 3
>>> invert_map(numbers)
Traceback (most recent call last):
...
ValueError: Key conflict in inverted mapping
"""
res = dict((v, k) for k, v in map.items())
if not len(res) == len(map):
raise ValueError('Key conflict in inverted mapping')
return res | python | def invert_map(map):
"""
Given a dictionary, return another dictionary with keys and values
switched. If any of the values resolve to the same key, raises
a ValueError.
>>> numbers = dict(a=1, b=2, c=3)
>>> letters = invert_map(numbers)
>>> letters[1]
'a'
>>> numbers['d'] = 3
>>> invert_map(numbers)
Traceback (most recent call last):
...
ValueError: Key conflict in inverted mapping
"""
res = dict((v, k) for k, v in map.items())
if not len(res) == len(map):
raise ValueError('Key conflict in inverted mapping')
return res | [
"def",
"invert_map",
"(",
"map",
")",
":",
"res",
"=",
"dict",
"(",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"map",
".",
"items",
"(",
")",
")",
"if",
"not",
"len",
"(",
"res",
")",
"==",
"len",
"(",
"map",
")",
":",
"raise",
"ValueError",
"(",
"'Key conflict in inverted mapping'",
")",
"return",
"res"
]
| Given a dictionary, return another dictionary with keys and values
switched. If any of the values resolve to the same key, raises
a ValueError.
>>> numbers = dict(a=1, b=2, c=3)
>>> letters = invert_map(numbers)
>>> letters[1]
'a'
>>> numbers['d'] = 3
>>> invert_map(numbers)
Traceback (most recent call last):
...
ValueError: Key conflict in inverted mapping | [
"Given",
"a",
"dictionary",
"return",
"another",
"dictionary",
"with",
"keys",
"and",
"values",
"switched",
".",
"If",
"any",
"of",
"the",
"values",
"resolve",
"to",
"the",
"same",
"key",
"raises",
"a",
"ValueError",
"."
]
| 25db1dab06d7108dc0c2b7e83dc7530fb10718d2 | https://github.com/jaraco/jaraco.collections/blob/25db1dab06d7108dc0c2b7e83dc7530fb10718d2/jaraco/collections.py#L506-L525 | train |
jaraco/jaraco.collections | jaraco/collections.py | KeyTransformingDict.matching_key_for | def matching_key_for(self, key):
"""
Given a key, return the actual key stored in self that matches.
Raise KeyError if the key isn't found.
"""
try:
return next(e_key for e_key in self.keys() if e_key == key)
except StopIteration:
raise KeyError(key) | python | def matching_key_for(self, key):
"""
Given a key, return the actual key stored in self that matches.
Raise KeyError if the key isn't found.
"""
try:
return next(e_key for e_key in self.keys() if e_key == key)
except StopIteration:
raise KeyError(key) | [
"def",
"matching_key_for",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"return",
"next",
"(",
"e_key",
"for",
"e_key",
"in",
"self",
".",
"keys",
"(",
")",
"if",
"e_key",
"==",
"key",
")",
"except",
"StopIteration",
":",
"raise",
"KeyError",
"(",
"key",
")"
]
| Given a key, return the actual key stored in self that matches.
Raise KeyError if the key isn't found. | [
"Given",
"a",
"key",
"return",
"the",
"actual",
"key",
"stored",
"in",
"self",
"that",
"matches",
".",
"Raise",
"KeyError",
"if",
"the",
"key",
"isn",
"t",
"found",
"."
]
| 25db1dab06d7108dc0c2b7e83dc7530fb10718d2 | https://github.com/jaraco/jaraco.collections/blob/25db1dab06d7108dc0c2b7e83dc7530fb10718d2/jaraco/collections.py#L335-L343 | train |
cozy/python_cozy_management | cozy_management/backup.py | backup | def backup(backup_filename=None):
'''
Backup a Cozy
'''
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
if not backup_filename:
if not os.path.isdir(BACKUPS_PATH):
print 'Need to create {}'.format(BACKUPS_PATH)
os.makedirs(BACKUPS_PATH, 0700)
backup_filename = '{backups_path}/cozy-{timestamp}.tgz'.format(
backups_path=BACKUPS_PATH,
timestamp=timestamp
)
elif os.path.exists(backup_filename):
print 'Backup file already exists: {}'.format(backup_filename)
return
couchdb_path = _get_couchdb_path()
cmd = 'tar cvzf {backup_filename}'
cmd += ' --exclude stack.token'
cmd += ' --exclude couchdb.login'
cmd += ' --exclude self-hosting.json'
cmd += ' /etc/cozy /usr/local/var/cozy {couchdb_path}/cozy.couch'
cmd = cmd.format(backup_filename=backup_filename,
couchdb_path=couchdb_path)
helpers.cmd_exec(cmd, show_output=True)
print 'Backup file: {}'.format(backup_filename) | python | def backup(backup_filename=None):
'''
Backup a Cozy
'''
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
if not backup_filename:
if not os.path.isdir(BACKUPS_PATH):
print 'Need to create {}'.format(BACKUPS_PATH)
os.makedirs(BACKUPS_PATH, 0700)
backup_filename = '{backups_path}/cozy-{timestamp}.tgz'.format(
backups_path=BACKUPS_PATH,
timestamp=timestamp
)
elif os.path.exists(backup_filename):
print 'Backup file already exists: {}'.format(backup_filename)
return
couchdb_path = _get_couchdb_path()
cmd = 'tar cvzf {backup_filename}'
cmd += ' --exclude stack.token'
cmd += ' --exclude couchdb.login'
cmd += ' --exclude self-hosting.json'
cmd += ' /etc/cozy /usr/local/var/cozy {couchdb_path}/cozy.couch'
cmd = cmd.format(backup_filename=backup_filename,
couchdb_path=couchdb_path)
helpers.cmd_exec(cmd, show_output=True)
print 'Backup file: {}'.format(backup_filename) | [
"def",
"backup",
"(",
"backup_filename",
"=",
"None",
")",
":",
"timestamp",
"=",
"time",
".",
"strftime",
"(",
"\"%Y-%m-%d-%H-%M-%S\"",
",",
"time",
".",
"gmtime",
"(",
")",
")",
"if",
"not",
"backup_filename",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"BACKUPS_PATH",
")",
":",
"print",
"'Need to create {}'",
".",
"format",
"(",
"BACKUPS_PATH",
")",
"os",
".",
"makedirs",
"(",
"BACKUPS_PATH",
",",
"0700",
")",
"backup_filename",
"=",
"'{backups_path}/cozy-{timestamp}.tgz'",
".",
"format",
"(",
"backups_path",
"=",
"BACKUPS_PATH",
",",
"timestamp",
"=",
"timestamp",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"backup_filename",
")",
":",
"print",
"'Backup file already exists: {}'",
".",
"format",
"(",
"backup_filename",
")",
"return",
"couchdb_path",
"=",
"_get_couchdb_path",
"(",
")",
"cmd",
"=",
"'tar cvzf {backup_filename}'",
"cmd",
"+=",
"' --exclude stack.token'",
"cmd",
"+=",
"' --exclude couchdb.login'",
"cmd",
"+=",
"' --exclude self-hosting.json'",
"cmd",
"+=",
"' /etc/cozy /usr/local/var/cozy {couchdb_path}/cozy.couch'",
"cmd",
"=",
"cmd",
".",
"format",
"(",
"backup_filename",
"=",
"backup_filename",
",",
"couchdb_path",
"=",
"couchdb_path",
")",
"helpers",
".",
"cmd_exec",
"(",
"cmd",
",",
"show_output",
"=",
"True",
")",
"print",
"'Backup file: {}'",
".",
"format",
"(",
"backup_filename",
")"
]
| Backup a Cozy | [
"Backup",
"a",
"Cozy"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/backup.py#L24-L52 | train |
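Usage sketch (illustrative, not part of the record): two hypothetical calls, in the record's Python 2 style. Both shell out to tar, so they only succeed on an actual Cozy host.

backup()  # writes BACKUPS_PATH/cozy-<timestamp>.tgz, creating the directory if needed
backup('/tmp/cozy-manual.tgz')  # explicit target; refuses to overwrite an existing file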
projectshift/shift-boiler | boiler/cli/db.py | get_config | def get_config():
"""
Prepare and return alembic config
These configurations used to live in alembic config initialiser, but that
is just tight coupling. Ideally we should move that to userspace and find a
way to pass these into alembic commands.
@todo: think about it
"""
from boiler.migrations.config import MigrationsConfig
# used for errors
map = dict(
path='MIGRATIONS_PATH',
db_url='SQLALCHEMY_DATABASE_URI',
metadata='SQLAlchemy metadata'
)
app = bootstrap.get_app()
params = dict()
params['path'] = app.config.get(map['path'], 'migrations')
params['db_url'] = app.config.get(map['db_url'])
params['metadata'] = db.metadata
for param, value in params.items():
if not value:
msg = 'Configuration error: [{}] is undefined'
raise Exception(msg.format(map[param]))
config = MigrationsConfig(**params)
return config | python | def get_config():
"""
Prepare and return alembic config
These configurations used to live in alembic config initialiser, but that
is just tight coupling. Ideally we should move that to userspace and find a
way to pass these into alembic commands.
@todo: think about it
"""
from boiler.migrations.config import MigrationsConfig
# used for errors
map = dict(
path='MIGRATIONS_PATH',
db_url='SQLALCHEMY_DATABASE_URI',
metadata='SQLAlchemy metadata'
)
app = bootstrap.get_app()
params = dict()
params['path'] = app.config.get(map['path'], 'migrations')
params['db_url'] = app.config.get(map['db_url'])
params['metadata'] = db.metadata
for param, value in params.items():
if not value:
msg = 'Configuration error: [{}] is undefined'
raise Exception(msg.format(map[param]))
config = MigrationsConfig(**params)
return config | [
"def",
"get_config",
"(",
")",
":",
"from",
"boiler",
".",
"migrations",
".",
"config",
"import",
"MigrationsConfig",
"# used for errors",
"map",
"=",
"dict",
"(",
"path",
"=",
"'MIGRATIONS_PATH'",
",",
"db_url",
"=",
"'SQLALCHEMY_DATABASE_URI'",
",",
"metadata",
"=",
"'SQLAlchemy metadata'",
")",
"app",
"=",
"bootstrap",
".",
"get_app",
"(",
")",
"params",
"=",
"dict",
"(",
")",
"params",
"[",
"'path'",
"]",
"=",
"app",
".",
"config",
".",
"get",
"(",
"map",
"[",
"'path'",
"]",
",",
"'migrations'",
")",
"params",
"[",
"'db_url'",
"]",
"=",
"app",
".",
"config",
".",
"get",
"(",
"map",
"[",
"'db_url'",
"]",
")",
"params",
"[",
"'metadata'",
"]",
"=",
"db",
".",
"metadata",
"for",
"param",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
":",
"if",
"not",
"value",
":",
"msg",
"=",
"'Configuration error: [{}] is undefined'",
"raise",
"Exception",
"(",
"msg",
".",
"format",
"(",
"map",
"[",
"param",
"]",
")",
")",
"config",
"=",
"MigrationsConfig",
"(",
"*",
"*",
"params",
")",
"return",
"config"
]
| Prepare and return alembic config
These configurations used to live in alembic config initialiser, but that
is just tight coupling. Ideally we should move that to userspace and find a
way to pass these into alembic commands.
@todo: think about it | [
"Prepare",
"and",
"return",
"alembic",
"config",
"These",
"configurations",
"used",
"to",
"live",
"in",
"alembic",
"config",
"initialiser",
"but",
"that",
"is",
"just",
"tight",
"coupling",
".",
"Ideally",
"we",
"should",
"move",
"that",
"to",
"userspace",
"and",
"find",
"a",
"way",
"to",
"pass",
"these",
"into",
"alembic",
"commands",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L10-L40 | train |
projectshift/shift-boiler | boiler/cli/db.py | init | def init():
""" Initialize new migrations directory """
try:
config = get_config()
print(config.dir)
alembic_command.init(config, config.dir, 'project')
except CommandError as e:
click.echo(red(str(e))) | python | def init():
""" Initialize new migrations directory """
try:
config = get_config()
print(config.dir)
alembic_command.init(config, config.dir, 'project')
except CommandError as e:
click.echo(red(str(e))) | [
"def",
"init",
"(",
")",
":",
"try",
":",
"config",
"=",
"get_config",
"(",
")",
"print",
"(",
"config",
".",
"dir",
")",
"alembic_command",
".",
"init",
"(",
"config",
",",
"config",
".",
"dir",
",",
"'project'",
")",
"except",
"CommandError",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"red",
"(",
"str",
"(",
"e",
")",
")",
")"
]
| Initialize new migrations directory | [
"Initialize",
"new",
"migrations",
"directory"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L59-L66 | train |
projectshift/shift-boiler | boiler/cli/db.py | revision | def revision(revision, path, branch_label, splice, head, sql, autogenerate, message):
""" Create new revision file """
alembic_command.revision(
config=get_config(),
rev_id=revision,
version_path=path,
branch_label=branch_label,
splice=splice,
head=head,
sql=sql,
autogenerate=autogenerate,
message=message
) | python | def revision(revision, path, branch_label, splice, head, sql, autogenerate, message):
""" Create new revision file """
alembic_command.revision(
config=get_config(),
rev_id=revision,
version_path=path,
branch_label=branch_label,
splice=splice,
head=head,
sql=sql,
autogenerate=autogenerate,
message=message
) | [
"def",
"revision",
"(",
"revision",
",",
"path",
",",
"branch_label",
",",
"splice",
",",
"head",
",",
"sql",
",",
"autogenerate",
",",
"message",
")",
":",
"alembic_command",
".",
"revision",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"rev_id",
"=",
"revision",
",",
"version_path",
"=",
"path",
",",
"branch_label",
"=",
"branch_label",
",",
"splice",
"=",
"splice",
",",
"head",
"=",
"head",
",",
"sql",
"=",
"sql",
",",
"autogenerate",
"=",
"autogenerate",
",",
"message",
"=",
"message",
")"
]
| Create new revision file | [
"Create",
"new",
"revision",
"file"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L78-L90 | train |
projectshift/shift-boiler | boiler/cli/db.py | merge | def merge(revision, branch_label, message, list_revisions=''):
""" Merge two revisions together, create new revision file """
alembic_command.merge(
config=get_config(),
revisions=list_revisions,
message=message,
branch_label=branch_label,
rev_id=revision
) | python | def merge(revision, branch_label, message, list_revisions=''):
""" Merge two revisions together, create new revision file """
alembic_command.merge(
config=get_config(),
revisions=list_revisions,
message=message,
branch_label=branch_label,
rev_id=revision
) | [
"def",
"merge",
"(",
"revision",
",",
"branch_label",
",",
"message",
",",
"list_revisions",
"=",
"''",
")",
":",
"alembic_command",
".",
"merge",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"revisions",
"=",
"list_revisions",
",",
"message",
"=",
"message",
",",
"branch_label",
"=",
"branch_label",
",",
"rev_id",
"=",
"revision",
")"
]
| Merge two revisions together, create new revision file | [
"Merge",
"two",
"revisions",
"together",
"create",
"new",
"revision",
"file"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L121-L129 | train |
projectshift/shift-boiler | boiler/cli/db.py | up | def up(tag, sql, revision):
""" Upgrade to revision """
alembic_command.upgrade(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | python | def up(tag, sql, revision):
""" Upgrade to revision """
alembic_command.upgrade(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | [
"def",
"up",
"(",
"tag",
",",
"sql",
",",
"revision",
")",
":",
"alembic_command",
".",
"upgrade",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"revision",
"=",
"revision",
",",
"sql",
"=",
"sql",
",",
"tag",
"=",
"tag",
")"
]
| Upgrade to revision | [
"Upgrade",
"to",
"revision"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L136-L143 | train |
projectshift/shift-boiler | boiler/cli/db.py | down | def down(tag, sql, revision):
""" Downgrade to revision """
alembic_command.downgrade(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | python | def down(tag, sql, revision):
""" Downgrade to revision """
alembic_command.downgrade(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | [
"def",
"down",
"(",
"tag",
",",
"sql",
",",
"revision",
")",
":",
"alembic_command",
".",
"downgrade",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"revision",
"=",
"revision",
",",
"sql",
"=",
"sql",
",",
"tag",
"=",
"tag",
")"
]
| Downgrade to revision | [
"Downgrade",
"to",
"revision"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L150-L157 | train |
projectshift/shift-boiler | boiler/cli/db.py | history | def history(verbose, range):
""" List revision changesets chronologically """
alembic_command.history(
config=get_config(),
rev_range=range,
verbose=verbose
) | python | def history(verbose, range):
""" List revision changesets chronologically """
alembic_command.history(
config=get_config(),
rev_range=range,
verbose=verbose
) | [
"def",
"history",
"(",
"verbose",
",",
"range",
")",
":",
"alembic_command",
".",
"history",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"rev_range",
"=",
"range",
",",
"verbose",
"=",
"verbose",
")"
]
| List revision changesets chronologically | [
"List",
"revision",
"changesets",
"chronologically"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L173-L179 | train |
projectshift/shift-boiler | boiler/cli/db.py | heads | def heads(resolve, verbose):
""" Show available heads """
alembic_command.heads(
config=get_config(),
verbose=verbose,
resolve_dependencies=resolve
) | python | def heads(resolve, verbose):
""" Show available heads """
alembic_command.heads(
config=get_config(),
verbose=verbose,
resolve_dependencies=resolve
) | [
"def",
"heads",
"(",
"resolve",
",",
"verbose",
")",
":",
"alembic_command",
".",
"heads",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"verbose",
"=",
"verbose",
",",
"resolve_dependencies",
"=",
"resolve",
")"
]
| Show available heads | [
"Show",
"available",
"heads"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L185-L191 | train |
projectshift/shift-boiler | boiler/cli/db.py | stamp | def stamp(revision, sql, tag):
""" Stamp db to given revision without migrating """
alembic_command.stamp(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | python | def stamp(revision, sql, tag):
""" Stamp db to given revision without migrating """
alembic_command.stamp(
config=get_config(),
revision=revision,
sql=sql,
tag=tag
) | [
"def",
"stamp",
"(",
"revision",
",",
"sql",
",",
"tag",
")",
":",
"alembic_command",
".",
"stamp",
"(",
"config",
"=",
"get_config",
"(",
")",
",",
"revision",
"=",
"revision",
",",
"sql",
"=",
"sql",
",",
"tag",
"=",
"tag",
")"
]
| Stamp db to given revision without migrating | [
"Stamp",
"db",
"to",
"given",
"revision",
"without",
"migrating"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/cli/db.py#L218-L225 | train |
Kortemme-Lab/klab | klab/db/sqlalchemy_interface.py | get_or_create_in_transaction | def get_or_create_in_transaction(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):
'''
Uses the SQLAlchemy model to retrieve an existing record based on the supplied field values or, if there is no
existing record, to create a new database record.
:param tsession: An SQLAlchemy transactioned session
:param model: The name of the SQLAlchemy class representing the table
:param values: A dict of values which will be used to populate the fields of the model
:param missing_columns: Elements of missing_columns are expected to be fields in the model but are left blank regardless of whether they exist in values. This is useful for auto_increment fields.
:param updatable_columns: If these are specified, they are treated as missing columns in the record matching and if a record is found, these fields will be updated
:param variable_columns: If these are specified, they are treated as missing columns in the record matching but are not updated. A good use of these is for datetime fields which default to the current datetime
:param read_only: If this is set then we query the database and return an instance if one exists but we do not create a new record.
:return:
Note: This function is a convenience function and is NOT efficient. The "tsession.query(model).filter_by(**pruned_values)"
call is only (sometimes) efficient if an index exists on the keys of pruned_values. If any of the fields of pruned_values are
large (even if otherwise deferred/loaded lazily) then you will incur a performance hit on lookup. You may need
to reconsider any calls to this function in inner loops of your code.'''
values = copy.deepcopy(values) # todo: this does not seem to be necessary since we do not seem to be writing
fieldnames = [c.name for c in list(sqlalchemy_inspect(model).columns)]
for c in missing_columns:
fieldnames.remove(c)
for c in updatable_columns:
fieldnames.remove(c)
for c in variable_columns:
if c in fieldnames:
fieldnames.remove(c)
if only_use_supplied_columns:
fieldnames = sorted(set(fieldnames).intersection(set(values.keys())))
else:
unexpected_fields = set(values.keys()).difference(set(fieldnames)).difference(set(variable_columns)).difference(set(updatable_columns))
if unexpected_fields:
raise Exception("The fields '{0}' were passed but not found in the schema for table {1}.".format("', '".join(sorted(unexpected_fields)), model.__dict__['__tablename__']))
pruned_values = {}
for k in set(values.keys()).intersection(set(fieldnames)):
v = values[k]
pruned_values[k] = v
instance = tsession.query(model).filter_by(**pruned_values)
if instance.count() > 1:
raise Exception('Multiple records were found with the search criteria.')
instance = instance.first()
if instance:
if read_only == False:
for c in updatable_columns:
setattr(instance, c, values[c])
tsession.flush()
return instance
else:
if read_only == False:
if sorted(pruned_values.keys()) != sorted(fieldnames):
# When adding new records, we require that all necessary fields are present
raise Exception('Some required fields are missing: {0}. Either supply these fields or add them to the missing_columns list.'.format(set(fieldnames).difference(pruned_values.keys())))
instance = model(**pruned_values)
tsession.add(instance)
tsession.flush()
return instance
return None | python | def get_or_create_in_transaction(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):
'''
Uses the SQLAlchemy model to retrieve an existing record based on the supplied field values or, if there is no
existing record, to create a new database record.
:param tsession: An SQLAlchemy transactioned session
:param model: The name of the SQLAlchemy class representing the table
:param values: A dict of values which will be used to populate the fields of the model
:param missing_columns: Elements of missing_columns are expected to be fields in the model but are left blank regardless of whether they exist in values. This is useful for auto_increment fields.
:param updatable_columns: If these are specified, they are treated as missing columns in the record matching and if a record is found, these fields will be updated
:param variable_columns: If these are specified, they are treated as missing columns in the record matching but are not updated. A good use of these is for datetime fields which default to the current datetime
:param read_only: If this is set then we query the database and return an instance if one exists but we do not create a new record.
:return:
Note: This function is a convenience function and is NOT efficient. The "tsession.query(model).filter_by(**pruned_values)"
call is only (sometimes) efficient if an index exists on the keys of pruned_values. If any of the fields of pruned_values are
large (even if otherwise deferred/loaded lazily) then you will incur a performance hit on lookup. You may need
to reconsider any calls to this function in inner loops of your code.'''
values = copy.deepcopy(values) # todo: this does not seem to be necessary since we do not seem to be writing
fieldnames = [c.name for c in list(sqlalchemy_inspect(model).columns)]
for c in missing_columns:
fieldnames.remove(c)
for c in updatable_columns:
fieldnames.remove(c)
for c in variable_columns:
if c in fieldnames:
fieldnames.remove(c)
if only_use_supplied_columns:
fieldnames = sorted(set(fieldnames).intersection(set(values.keys())))
else:
unexpected_fields = set(values.keys()).difference(set(fieldnames)).difference(set(variable_columns)).difference(set(updatable_columns))
if unexpected_fields:
raise Exception("The fields '{0}' were passed but not found in the schema for table {1}.".format("', '".join(sorted(unexpected_fields)), model.__dict__['__tablename__']))
pruned_values = {}
for k in set(values.keys()).intersection(set(fieldnames)):
v = values[k]
pruned_values[k] = v
instance = tsession.query(model).filter_by(**pruned_values)
if instance.count() > 1:
raise Exception('Multiple records were found with the search criteria.')
instance = instance.first()
if instance:
if read_only == False:
for c in updatable_columns:
setattr(instance, c, values[c])
tsession.flush()
return instance
else:
if read_only == False:
if sorted(pruned_values.keys()) != sorted(fieldnames):
# When adding new records, we require that all necessary fields are present
raise Exception('Some required fields are missing: {0}. Either supply these fields or add them to the missing_columns list.'.format(set(fieldnames).difference(pruned_values.keys())))
instance = model(**pruned_values)
tsession.add(instance)
tsession.flush()
return instance
return None | [
"def",
"get_or_create_in_transaction",
"(",
"tsession",
",",
"model",
",",
"values",
",",
"missing_columns",
"=",
"[",
"]",
",",
"variable_columns",
"=",
"[",
"]",
",",
"updatable_columns",
"=",
"[",
"]",
",",
"only_use_supplied_columns",
"=",
"False",
",",
"read_only",
"=",
"False",
")",
":",
"values",
"=",
"copy",
".",
"deepcopy",
"(",
"values",
")",
"# todo: this does not seem to be necessary since we do not seem to be writing",
"fieldnames",
"=",
"[",
"c",
".",
"name",
"for",
"c",
"in",
"list",
"(",
"sqlalchemy_inspect",
"(",
"model",
")",
".",
"columns",
")",
"]",
"for",
"c",
"in",
"missing_columns",
":",
"fieldnames",
".",
"remove",
"(",
"c",
")",
"for",
"c",
"in",
"updatable_columns",
":",
"fieldnames",
".",
"remove",
"(",
"c",
")",
"for",
"c",
"in",
"variable_columns",
":",
"if",
"c",
"in",
"fieldnames",
":",
"fieldnames",
".",
"remove",
"(",
"c",
")",
"if",
"only_use_supplied_columns",
":",
"fieldnames",
"=",
"sorted",
"(",
"set",
"(",
"fieldnames",
")",
".",
"intersection",
"(",
"set",
"(",
"values",
".",
"keys",
"(",
")",
")",
")",
")",
"else",
":",
"unexpected_fields",
"=",
"set",
"(",
"values",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"fieldnames",
")",
")",
".",
"difference",
"(",
"set",
"(",
"variable_columns",
")",
")",
".",
"difference",
"(",
"set",
"(",
"updatable_columns",
")",
")",
"if",
"unexpected_fields",
":",
"raise",
"Exception",
"(",
"\"The fields '{0}' were passed but not found in the schema for table {1}.\"",
".",
"format",
"(",
"\"', '\"",
".",
"join",
"(",
"sorted",
"(",
"unexpected_fields",
")",
")",
",",
"model",
".",
"__dict__",
"[",
"'__tablename__'",
"]",
")",
")",
"pruned_values",
"=",
"{",
"}",
"for",
"k",
"in",
"set",
"(",
"values",
".",
"keys",
"(",
")",
")",
".",
"intersection",
"(",
"set",
"(",
"fieldnames",
")",
")",
":",
"v",
"=",
"values",
"[",
"k",
"]",
"pruned_values",
"[",
"k",
"]",
"=",
"v",
"instance",
"=",
"tsession",
".",
"query",
"(",
"model",
")",
".",
"filter_by",
"(",
"*",
"*",
"pruned_values",
")",
"if",
"instance",
".",
"count",
"(",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Multiple records were found with the search criteria.'",
")",
"instance",
"=",
"instance",
".",
"first",
"(",
")",
"if",
"instance",
":",
"if",
"read_only",
"==",
"False",
":",
"for",
"c",
"in",
"updatable_columns",
":",
"setattr",
"(",
"instance",
",",
"c",
",",
"values",
"[",
"c",
"]",
")",
"tsession",
".",
"flush",
"(",
")",
"return",
"instance",
"else",
":",
"if",
"read_only",
"==",
"False",
":",
"if",
"sorted",
"(",
"pruned_values",
".",
"keys",
"(",
")",
")",
"!=",
"sorted",
"(",
"fieldnames",
")",
":",
"# When adding new records, we require that all necessary fields are present",
"raise",
"Exception",
"(",
"'Some required fields are missing: {0}. Either supply these fields or add them to the missing_columns list.'",
".",
"format",
"(",
"set",
"(",
"fieldnames",
")",
".",
"difference",
"(",
"pruned_values",
".",
"keys",
"(",
")",
")",
")",
")",
"instance",
"=",
"model",
"(",
"*",
"*",
"pruned_values",
")",
"tsession",
".",
"add",
"(",
"instance",
")",
"tsession",
".",
"flush",
"(",
")",
"return",
"instance",
"return",
"None"
]
| Uses the SQLAlchemy model to retrieve an existing record based on the supplied field values or, if there is no
existing record, to create a new database record.
:param tsession: An SQLAlchemy transactioned session
:param model: The name of the SQLAlchemy class representing the table
:param values: A dict of values which will be used to populate the fields of the model
:param missing_columns: Elements of missing_columns are expected to be fields in the model but are left blank regardless of whether they exist in values. This is useful for auto_increment fields.
:param updatable_columns: If these are specified, they are treated as missing columns in the record matching and if a record is found, these fields will be updated
:param variable_columns: If these are specified, they are treated as missing columns in the record matching but are not updated. A good use of these is for datetime fields which default to the current datetime
:param read_only: If this is set then we query the database and return an instance if one exists but we do not create a new record.
:return:
Note: This function is a convenience function and is NOT efficient. The "tsession.query(model).filter_by(**pruned_values)"
call is only (sometimes) efficient if an index exists on the keys of pruned_values. If any of the fields of pruned_values are
large (even if otherwise deferred/loaded lazily) then you will incur a performance hit on lookup. You may need
to reconsider any calls to this function in inner loops of your code. | [
"Uses",
"the",
"SQLAlchemy",
"model",
"to",
"retrieve",
"an",
"existing",
"record",
"based",
"on",
"the",
"supplied",
"field",
"values",
"or",
"if",
"there",
"is",
"no",
"existing",
"record",
"to",
"create",
"a",
"new",
"database",
"record",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/sqlalchemy_interface.py#L74-L137 | train |
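Usage sketch (illustrative, not part of the record): `session` and `User` are hypothetical stand-ins; assume `User` declares columns id, name, email and created_at.

user = get_or_create_in_transaction(
    session, User,
    values={'name': 'jane', 'email': 'jane@example.org'},
    missing_columns=['id'],           # auto-increment PK, left to the database
    variable_columns=['created_at'],  # ignored when matching, never rewritten
)
# A matching row is returned as-is (updatable_columns, if given, are refreshed);
# otherwise, with read_only=False, a new row is added and flushed.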
Kortemme-Lab/klab | klab/db/sqlalchemy_interface.py | get_or_create_in_transaction_wrapper | def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):
'''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.
Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.'''
return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only) | python | def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):
'''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.
Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.'''
return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only) | [
"def",
"get_or_create_in_transaction_wrapper",
"(",
"tsession",
",",
"model",
",",
"values",
",",
"missing_columns",
"=",
"[",
"]",
",",
"variable_columns",
"=",
"[",
"]",
",",
"updatable_columns",
"=",
"[",
"]",
",",
"only_use_supplied_columns",
"=",
"False",
",",
"read_only",
"=",
"False",
")",
":",
"return",
"get_or_create_in_transaction",
"(",
"tsession",
",",
"model",
",",
"values",
",",
"missing_columns",
"=",
"missing_columns",
",",
"variable_columns",
"=",
"variable_columns",
",",
"updatable_columns",
"=",
"updatable_columns",
",",
"only_use_supplied_columns",
"=",
"only_use_supplied_columns",
",",
"read_only",
"=",
"read_only",
")"
]
| This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.
Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies. | [
"This",
"function",
"can",
"be",
"used",
"to",
"determine",
"which",
"calling",
"method",
"is",
"spending",
"time",
"in",
"get_or_create_in_transaction",
"when",
"profiling",
"the",
"database",
"API",
".",
"Switch",
"out",
"calls",
"to",
"get_or_create_in_transaction",
"to",
"get_or_create_in_transaction_wrapper",
"in",
"the",
"suspected",
"functions",
"to",
"determine",
"where",
"the",
"pain",
"lies",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/sqlalchemy_interface.py#L140-L143 | train |
assamite/creamas | creamas/rules/agent.py | RuleAgent.get_weight | def get_weight(self, rule):
"""Get weight for rule.
If rule is not in :attr:`R`, returns ``None``.
"""
if not issubclass(rule.__class__, (Rule, RuleLeaf)):
raise TypeError("Rule to get weight ({}) is not subclass "
"of {} or {}.".format(rule, Rule, RuleLeaf))
try:
ind = self._R.index(rule)
return self._W[ind]
except:
return None | python | def get_weight(self, rule):
"""Get weight for rule.
If rule is not in :attr:`R`, returns ``None``.
"""
if not issubclass(rule.__class__, (Rule, RuleLeaf)):
raise TypeError("Rule to get weight ({}) is not subclass "
"of {} or {}.".format(rule, Rule, RuleLeaf))
try:
ind = self._R.index(rule)
return self._W[ind]
except:
return None | [
"def",
"get_weight",
"(",
"self",
",",
"rule",
")",
":",
"if",
"not",
"issubclass",
"(",
"rule",
".",
"__class__",
",",
"(",
"Rule",
",",
"RuleLeaf",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Rule to get weight ({}) is not subclass \"",
"\"of {} or {}.\"",
".",
"format",
"(",
"rule",
",",
"Rule",
",",
"RuleLeaf",
")",
")",
"try",
":",
"ind",
"=",
"self",
".",
"_R",
".",
"index",
"(",
"rule",
")",
"return",
"self",
".",
"_W",
"[",
"ind",
"]",
"except",
":",
"return",
"None"
]
| Get weight for rule.
If rule is not in :attr:`R`, returns ``None``. | [
"Get",
"weight",
"for",
"rule",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/agent.py#L68-L80 | train |
assamite/creamas | creamas/rules/agent.py | RuleAgent.evaluate | def evaluate(self, artifact):
r"""Evaluate artifact with agent's current rules and weights.
:param artifact:
:class:`~creamas.core.artifact.Artifact` to be evaluated
:type artifact:
:py:class:`~creamas.core.artifact.Artifact`
:returns:
Agent's evaluation of the artifact, in [-1,1], and framing. In this
basic implementation framing is always ``None``.
:rtype:
tuple
Actual evaluation formula in this basic implementation is:
.. math::
e(A) = \frac{\sum_{i=1}^{n} r_{i}(A)w_i}
{\sum_{i=1}^{n} \lvert w_i \rvert},
where :math:`r_{i}(A)` is the :math:`i` th rule's evaluation on
artifact :math:`A`, and :math:`w_i` is the weight for rule
:math:`r_i`.
"""
s = 0
w = 0.0
if len(self.R) == 0:
return 0.0, None
for i in range(len(self.R)):
s += self.R[i](artifact) * self.W[i]
w += abs(self.W[i])
if w == 0.0:
return 0.0, None
return s / w, None | python | def evaluate(self, artifact):
r"""Evaluate artifact with agent's current rules and weights.
:param artifact:
:class:`~creamas.core.artifact.Artifact` to be evaluated
:type artifact:
:py:class:`~creamas.core.artifact.Artifact`
:returns:
Agent's evaluation of the artifact, in [-1,1], and framing. In this
basic implementation framing is always ``None``.
:rtype:
tuple
Actual evaluation formula in this basic implementation is:
.. math::
e(A) = \frac{\sum_{i=1}^{n} r_{i}(A)w_i}
{\sum_{i=1}^{n} \lvert w_i \rvert},
where :math:`r_{i}(A)` is the :math:`i` th rule's evaluation on
artifact :math:`A`, and :math:`w_i` is the weight for rule
:math:`r_i`.
"""
s = 0
w = 0.0
if len(self.R) == 0:
return 0.0, None
for i in range(len(self.R)):
s += self.R[i](artifact) * self.W[i]
w += abs(self.W[i])
if w == 0.0:
return 0.0, None
return s / w, None | [
"def",
"evaluate",
"(",
"self",
",",
"artifact",
")",
":",
"s",
"=",
"0",
"w",
"=",
"0.0",
"if",
"len",
"(",
"self",
".",
"R",
")",
"==",
"0",
":",
"return",
"0.0",
",",
"None",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"R",
")",
")",
":",
"s",
"+=",
"self",
".",
"R",
"[",
"i",
"]",
"(",
"artifact",
")",
"*",
"self",
".",
"W",
"[",
"i",
"]",
"w",
"+=",
"abs",
"(",
"self",
".",
"W",
"[",
"i",
"]",
")",
"if",
"w",
"==",
"0.0",
":",
"return",
"0.0",
",",
"None",
"return",
"s",
"/",
"w",
",",
"None"
]
| r"""Evaluate artifact with agent's current rules and weights.
:param artifact:
:class:`~creamas.core.artifact.Artifact` to be evaluated
:type artifact:
:py:class:`~creamas.core.artifact.Artifact`
:returns:
Agent's evaluation of the artifact, in [-1,1], and framing. In this
basic implementation framing is always ``None``.
:rtype:
tuple
Actual evaluation formula in this basic implementation is:
.. math::
e(A) = \frac{\sum_{i=1}^{n} r_{i}(A)w_i}
{\sum_{i=1}^{n} \lvert w_i \rvert},
where :math:`r_{i}(A)` is the :math:`i` th rule's evaluation on
artifact :math:`A`, and :math:`w_i` is the weight for rule
:math:`r_i`. | [
"r",
"Evaluate",
"artifact",
"with",
"agent",
"s",
"current",
"rules",
"and",
"weights",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/agent.py#L129-L167 | train |
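Worked check of the docstring's formula, with hypothetical rule outputs and weights:

s = 0.5 * 1.0 + (-0.2) * 0.5  # sum of r_i(A) * w_i = 0.4
w = abs(1.0) + abs(0.5)       # sum of |w_i| = 1.5
print(s / w)                  # 0.2666... -- the value evaluate() would return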
TheGhouls/oct | oct/results/report.py | ReportResults._init_dates | def _init_dates(self):
"""Initialize all date properties
"""
if self.total_transactions == 0:
return None
self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish)) | python | def _init_dates(self):
"""Initialize all date properties
"""
if self.total_transactions == 0:
return None
self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish)) | [
"def",
"_init_dates",
"(",
"self",
")",
":",
"if",
"self",
".",
"total_transactions",
"==",
"0",
":",
"return",
"None",
"self",
".",
"epoch_start",
"=",
"Result",
".",
"select",
"(",
"Result",
".",
"epoch",
")",
".",
"order_by",
"(",
"Result",
".",
"epoch",
".",
"asc",
"(",
")",
")",
".",
"limit",
"(",
"1",
")",
".",
"get",
"(",
")",
".",
"epoch",
"self",
".",
"epoch_finish",
"=",
"Result",
".",
"select",
"(",
"Result",
".",
"epoch",
")",
".",
"order_by",
"(",
"Result",
".",
"epoch",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"1",
")",
".",
"get",
"(",
")",
".",
"epoch",
"self",
".",
"start_datetime",
"=",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
",",
"time",
".",
"localtime",
"(",
"self",
".",
"epoch_start",
")",
")",
"self",
".",
"finish_datetime",
"=",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
",",
"time",
".",
"localtime",
"(",
"self",
".",
"epoch_finish",
")",
")"
]
| Initialize all dates properties | [
"Initialize",
"all",
"dates",
"properties"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L29-L37 | train |
TheGhouls/oct | oct/results/report.py | ReportResults._init_dataframes | def _init_dataframes(self):
"""Initialise the main dataframe for the results and the custom timers dataframes
"""
df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC",
db.get_conn())
self._get_all_timers(df)
self.main_results = self._get_processed_dataframe(df)
# create all custom timers dataframes
for key, value in six.iteritems(self._timers_values):
df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])
df.index = pd.to_datetime(df['epoch'], unit='s')
timer_results = self._get_processed_dataframe(df)
self.timers_results[key] = timer_results
# clear memory
del self._timers_values | python | def _init_dataframes(self):
"""Initialise the main dataframe for the results and the custom timers dataframes
"""
df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC",
db.get_conn())
self._get_all_timers(df)
self.main_results = self._get_processed_dataframe(df)
# create all custom timers dataframes
for key, value in six.iteritems(self._timers_values):
df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])
df.index = pd.to_datetime(df['epoch'], unit='s')
timer_results = self._get_processed_dataframe(df)
self.timers_results[key] = timer_results
# clear memory
del self._timers_values | [
"def",
"_init_dataframes",
"(",
"self",
")",
":",
"df",
"=",
"pd",
".",
"read_sql_query",
"(",
"\"SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC\"",
",",
"db",
".",
"get_conn",
"(",
")",
")",
"self",
".",
"_get_all_timers",
"(",
"df",
")",
"self",
".",
"main_results",
"=",
"self",
".",
"_get_processed_dataframe",
"(",
"df",
")",
"# create all custom timers dataframes",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_timers_values",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"value",
",",
"columns",
"=",
"[",
"'epoch'",
",",
"'scriptrun_time'",
"]",
")",
"df",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"'epoch'",
"]",
",",
"unit",
"=",
"'s'",
")",
"timer_results",
"=",
"self",
".",
"_get_processed_dataframe",
"(",
"df",
")",
"self",
".",
"timers_results",
"[",
"key",
"]",
"=",
"timer_results",
"# clear memory",
"del",
"self",
".",
"_timers_values"
]
| Initialise the main dataframe for the results and the custom timers dataframes | [
"Initialise",
"the",
"main",
"dataframe",
"for",
"the",
"results",
"and",
"the",
"custom",
"timers",
"dataframes"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L39-L56 | train |
TheGhouls/oct | oct/results/report.py | ReportResults._get_all_timers | def _get_all_timers(self, dataframe):
"""Get all timers and set them in the _timers_values property
:param pandas.DataFrame dataframe: the main dataframe with row results
"""
s = dataframe['custom_timers'].apply(json.loads)
s.index = dataframe['epoch']
for index, value in s.iteritems():
if not value:
continue
for key, value in six.iteritems(value):
self._timers_values[key].append((index, value))
self.total_timers += 1
del dataframe['custom_timers']
del s | python | def _get_all_timers(self, dataframe):
"""Get all timers and set them in the _timers_values property
:param pandas.DataFrame dataframe: the main dataframe with row results
"""
s = dataframe['custom_timers'].apply(json.loads)
s.index = dataframe['epoch']
for index, value in s.iteritems():
if not value:
continue
for key, value in six.iteritems(value):
self._timers_values[key].append((index, value))
self.total_timers += 1
del dataframe['custom_timers']
del s | [
"def",
"_get_all_timers",
"(",
"self",
",",
"dataframe",
")",
":",
"s",
"=",
"dataframe",
"[",
"'custom_timers'",
"]",
".",
"apply",
"(",
"json",
".",
"loads",
")",
"s",
".",
"index",
"=",
"dataframe",
"[",
"'epoch'",
"]",
"for",
"index",
",",
"value",
"in",
"s",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"value",
":",
"continue",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"value",
")",
":",
"self",
".",
"_timers_values",
"[",
"key",
"]",
".",
"append",
"(",
"(",
"index",
",",
"value",
")",
")",
"self",
".",
"total_timers",
"+=",
"1",
"del",
"dataframe",
"[",
"'custom_timers'",
"]",
"del",
"s"
]
| Get all timers and set them in the _timers_values property
:param pandas.DataFrame dataframe: the main dataframe with row results | [
"Get",
"all",
"timers",
"and",
"set",
"them",
"in",
"the",
"_timers_values",
"property"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L58-L72 | train |
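What _get_all_timers unpacks, as a self-contained sketch with toy data (note that Series.iteritems, used above, was removed in pandas 2.0; items() is the current spelling):

import json
import pandas as pd

df = pd.DataFrame({
    'epoch': [1000, 1001],
    'custom_timers': ['{"login": 0.12}', '{"login": 0.10, "search": 0.40}'],
})
s = df['custom_timers'].apply(json.loads)
s.index = df['epoch']
timers = {}
for epoch, mapping in s.items():
    for name, elapsed in mapping.items():
        timers.setdefault(name, []).append((epoch, elapsed))
print(timers)    # {'login': [(1000, 0.12), (1001, 0.1)], 'search': [(1001, 0.4)]}
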
TheGhouls/oct | oct/results/report.py | ReportResults._get_processed_dataframe | def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
return {
"raw": dataframe.round(2),
"compiled": df_final.round(2),
"summary": summary.round(2)
} | python | def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
return {
"raw": dataframe.round(2),
"compiled": df_final.round(2),
"summary": summary.round(2)
} | [
"def",
"_get_processed_dataframe",
"(",
"self",
",",
"dataframe",
")",
":",
"dataframe",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"dataframe",
"[",
"'epoch'",
"]",
",",
"unit",
"=",
"'s'",
",",
"utc",
"=",
"True",
")",
"del",
"dataframe",
"[",
"'epoch'",
"]",
"summary",
"=",
"dataframe",
".",
"describe",
"(",
"percentiles",
"=",
"[",
".80",
",",
".90",
",",
".95",
"]",
")",
".",
"transpose",
"(",
")",
".",
"loc",
"[",
"'scriptrun_time'",
"]",
"df_grp",
"=",
"dataframe",
".",
"groupby",
"(",
"pd",
".",
"TimeGrouper",
"(",
"'{}S'",
".",
"format",
"(",
"self",
".",
"interval",
")",
")",
")",
"df_final",
"=",
"df_grp",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"describe",
"(",
"percentiles",
"=",
"[",
".80",
",",
".90",
",",
".95",
"]",
")",
"[",
"'scriptrun_time'",
"]",
")",
"return",
"{",
"\"raw\"",
":",
"dataframe",
".",
"round",
"(",
"2",
")",
",",
"\"compiled\"",
":",
"df_final",
".",
"round",
"(",
"2",
")",
",",
"\"summary\"",
":",
"summary",
".",
"round",
"(",
"2",
")",
"}"
]
| Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict | [
"Generate",
"required",
"dataframe",
"for",
"results",
"from",
"raw",
"dataframe"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L74-L91 | train |
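pd.TimeGrouper, used above, was deprecated and later removed from pandas; on current versions the same interval-bucketed describe() can be written with pd.Grouper (toy data, assuming the interval is in seconds):

import pandas as pd

df = pd.DataFrame(
    {'scriptrun_time': [0.2, 0.4, 0.3]},
    index=pd.to_datetime([0, 1, 65], unit='s', utc=True),
)
compiled = df.groupby(pd.Grouper(freq='60s'))['scriptrun_time'].describe(
    percentiles=[.80, .90, .95])
print(compiled[['count', 'mean', '95%']])
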
TheGhouls/oct | oct/results/report.py | ReportResults._init_turrets | def _init_turrets(self):
"""Setup data from database
"""
for turret in Turret.select():
self.turrets.append(turret.to_dict()) | python | def _init_turrets(self):
"""Setup data from database
"""
for turret in Turret.select():
self.turrets.append(turret.to_dict()) | [
"def",
"_init_turrets",
"(",
"self",
")",
":",
"for",
"turret",
"in",
"Turret",
".",
"select",
"(",
")",
":",
"self",
".",
"turrets",
".",
"append",
"(",
"turret",
".",
"to_dict",
"(",
")",
")"
]
| Setup data from database | [
"Setup",
"data",
"from",
"database"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L93-L97 | train |
TheGhouls/oct | oct/results/report.py | ReportResults.compile_results | def compile_results(self):
"""Compile all results for the current test
"""
self._init_dataframes()
self.total_transactions = len(self.main_results['raw'])
self._init_dates() | python | def compile_results(self):
"""Compile all results for the current test
"""
self._init_dataframes()
self.total_transactions = len(self.main_results['raw'])
self._init_dates() | [
"def",
"compile_results",
"(",
"self",
")",
":",
"self",
".",
"_init_dataframes",
"(",
")",
"self",
".",
"total_transactions",
"=",
"len",
"(",
"self",
".",
"main_results",
"[",
"'raw'",
"]",
")",
"self",
".",
"_init_dates",
"(",
")"
]
| Compile all results for the current test | [
"Compile",
"all",
"results",
"for",
"the",
"current",
"test"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/report.py#L99-L105 | train |
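How the pieces fit together, as a usage sketch (the constructor arguments are assumed; only compile_results() and the attributes it fills come from the code above):

report = ReportResults(interval=60)      # hypothetical instantiation
report.compile_results()                 # _init_dataframes -> transaction count -> _init_dates
print(report.total_transactions)
print(report.start_datetime, report.finish_datetime)
print(report.main_results['summary'])    # rounded describe() of scriptrun_time
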
Kortemme-Lab/klab | klab/bio/kabsch.py | centroid | def centroid(X):
"""
Calculate the centroid from a matrix X
"""
C = np.sum(X, axis=0) / len(X)
return C | python | def centroid(X):
"""
Calculate the centroid from a matrix X
"""
C = np.sum(X, axis=0) / len(X)
return C | [
"def",
"centroid",
"(",
"X",
")",
":",
"C",
"=",
"np",
".",
"sum",
"(",
"X",
",",
"axis",
"=",
"0",
")",
"/",
"len",
"(",
"X",
")",
"return",
"C"
]
| Calculate the centroid from a matrix X | [
"Calculate",
"the",
"centroid",
"from",
"a",
"matrix",
"X"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/kabsch.py#L30-L35 | train |
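Worked example: centroid() is the column-wise mean, and subtracting it is the centering step that precedes the rotation fit in the Kabsch algorithm:

import numpy as np

X = np.array([[0.0, 0.0, 0.0],
              [2.0, 0.0, 0.0],
              [1.0, 3.0, 0.0]])
C = np.sum(X, axis=0) / len(X)      # equivalent to X.mean(axis=0) -> [1., 1., 0.]
X_centered = X - C                  # centroid of the centered set is the origin
print(C, X_centered.sum(axis=0))    # [1. 1. 0.] [0. 0. 0.]
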
eventbrite/rebar | src/rebar/validators.py | StateValidator.is_valid | def is_valid(self, instance):
"""Return True if no errors are raised when validating instance.
instance can be a dict (ie, form.cleaned_data), a form, or a
model instance. If instance is a form, full_clean() will be
called.
"""
errors = self.errors(instance)
if isinstance(errors, list):
return not any(errors)
return not bool(errors) | python | def is_valid(self, instance):
"""Return True if no errors are raised when validating instance.
instance can be a dict (ie, form.cleaned_data), a form, or a
model instance. If instance is a form, full_clean() will be
called.
"""
errors = self.errors(instance)
if isinstance(errors, list):
return not any(errors)
return not bool(errors) | [
"def",
"is_valid",
"(",
"self",
",",
"instance",
")",
":",
"errors",
"=",
"self",
".",
"errors",
"(",
"instance",
")",
"if",
"isinstance",
"(",
"errors",
",",
"list",
")",
":",
"return",
"not",
"any",
"(",
"errors",
")",
"return",
"not",
"bool",
"(",
"errors",
")"
]
| Return True if no errors are raised when validating instance.
instance can be a dict (ie, form.cleaned_data), a form, or a
model instance. If instance is a form, full_clean() will be
called. | [
"Return",
"True",
"if",
"no",
"errors",
"are",
"raised",
"when",
"validating",
"instance",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/validators.py#L80-L93 | train |
eventbrite/rebar | src/rebar/validators.py | StateValidator._validate | def _validate(self, data):
"""Helper to run validators on the field data."""
errors = {}
# if the validator is not enabled, return the empty error dict
if not self._enabled:
return errors
for field in self.validators:
field_errors = []
for validator in self.validators[field]:
try:
validator(data.get(field, None))
except ValidationError as e:
field_errors += e.messages
# if there were errors, cast to ErrorList for output convenience
if field_errors:
errors[field] = ErrorList(field_errors)
return errors | python | def _validate(self, data):
"""Helper to run validators on the field data."""
errors = {}
# if the validator is not enabled, return the empty error dict
if not self._enabled:
return errors
for field in self.validators:
field_errors = []
for validator in self.validators[field]:
try:
validator(data.get(field, None))
except ValidationError as e:
field_errors += e.messages
# if there were errors, cast to ErrorList for output convenience
if field_errors:
errors[field] = ErrorList(field_errors)
return errors | [
"def",
"_validate",
"(",
"self",
",",
"data",
")",
":",
"errors",
"=",
"{",
"}",
"# if the validator is not enabled, return the empty error dict",
"if",
"not",
"self",
".",
"_enabled",
":",
"return",
"errors",
"for",
"field",
"in",
"self",
".",
"validators",
":",
"field_errors",
"=",
"[",
"]",
"for",
"validator",
"in",
"self",
".",
"validators",
"[",
"field",
"]",
":",
"try",
":",
"validator",
"(",
"data",
".",
"get",
"(",
"field",
",",
"None",
")",
")",
"except",
"ValidationError",
"as",
"e",
":",
"field_errors",
"+=",
"e",
".",
"messages",
"# if there were errors, cast to ErrorList for output convenience",
"if",
"field_errors",
":",
"errors",
"[",
"field",
"]",
"=",
"ErrorList",
"(",
"field_errors",
")",
"return",
"errors"
]
| Helper to run validators on the field data. | [
"Helper",
"to",
"run",
"validators",
"on",
"the",
"field",
"data",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/validators.py#L95-L118 | train |
eventbrite/rebar | src/rebar/validators.py | StateValidator.errors | def errors(self, instance):
"""Run all field validators and return a dict of errors.
The keys of the resulting dict correspond to field
names. instance can be a dict (ie, form.cleaned_data), a form,
a formset, or a model instance.
If instance is a form, full_clean() will be called if the form
is bound.
If instance is a formset, full_clean() will be called on each
member form, if bound.
"""
if isinstance(instance, dict):
return self._validate(instance)
elif isinstance(instance, forms.BaseForm):
if instance.is_bound and instance.is_valid():
return self._validate(instance.cleaned_data)
return self._validate(dict(
[
(f, instance.initial.get(f, instance[f].value()))
for f in self.validators
]
))
elif isinstance(instance, formsets.BaseFormSet):
if instance.can_delete:
validate_forms = [
form for form in instance.initial_forms
if not instance._should_delete_form(form)
] + [
form for form in instance.extra_forms
if (form.has_changed() and
not instance._should_delete_form(form))
]
return [
self.errors(f)
for f in validate_forms
]
else:
validate_forms = instance.initial_forms + [
form for form in instance.extra_forms
if form.has_changed()
]
return [self.errors(f) for f in validate_forms]
elif isinstance(instance, models.Model):
return self._validate(dict(
[(f, getattr(instance, f)) for f in self.validators]
)) | python | def errors(self, instance):
"""Run all field validators and return a dict of errors.
The keys of the resulting dict correspond to field
names. instance can be a dict (ie, form.cleaned_data), a form,
a formset, or a model instance.
If instance is a form, full_clean() will be called if the form
is bound.
If instance is a formset, full_clean() will be called on each
member form, if bound.
"""
if isinstance(instance, dict):
return self._validate(instance)
elif isinstance(instance, forms.BaseForm):
if instance.is_bound and instance.is_valid():
return self._validate(instance.cleaned_data)
return self._validate(dict(
[
(f, instance.initial.get(f, instance[f].value()))
for f in self.validators
]
))
elif isinstance(instance, formsets.BaseFormSet):
if instance.can_delete:
validate_forms = [
form for form in instance.initial_forms
if not instance._should_delete_form(form)
] + [
form for form in instance.extra_forms
if (form.has_changed() and
not instance._should_delete_form(form))
]
return [
self.errors(f)
for f in validate_forms
]
else:
validate_forms = instance.initial_forms + [
form for form in instance.extra_forms
if form.has_changed()
]
return [self.errors(f) for f in validate_forms]
elif isinstance(instance, models.Model):
return self._validate(dict(
[(f, getattr(instance, f)) for f in self.validators]
)) | [
"def",
"errors",
"(",
"self",
",",
"instance",
")",
":",
"if",
"isinstance",
"(",
"instance",
",",
"dict",
")",
":",
"return",
"self",
".",
"_validate",
"(",
"instance",
")",
"elif",
"isinstance",
"(",
"instance",
",",
"forms",
".",
"BaseForm",
")",
":",
"if",
"instance",
".",
"is_bound",
"and",
"instance",
".",
"is_valid",
"(",
")",
":",
"return",
"self",
".",
"_validate",
"(",
"instance",
".",
"cleaned_data",
")",
"return",
"self",
".",
"_validate",
"(",
"dict",
"(",
"[",
"(",
"f",
",",
"instance",
".",
"initial",
".",
"get",
"(",
"f",
",",
"instance",
"[",
"f",
"]",
".",
"value",
"(",
")",
")",
")",
"for",
"f",
"in",
"self",
".",
"validators",
"]",
")",
")",
"elif",
"isinstance",
"(",
"instance",
",",
"formsets",
".",
"BaseFormSet",
")",
":",
"if",
"instance",
".",
"can_delete",
":",
"validate_forms",
"=",
"[",
"form",
"for",
"form",
"in",
"instance",
".",
"initial_forms",
"if",
"not",
"instance",
".",
"_should_delete_form",
"(",
"form",
")",
"]",
"+",
"[",
"form",
"for",
"form",
"in",
"instance",
".",
"extra_forms",
"if",
"(",
"form",
".",
"has_changed",
"(",
")",
"and",
"not",
"instance",
".",
"_should_delete_form",
"(",
"form",
")",
")",
"]",
"return",
"[",
"self",
".",
"errors",
"(",
"f",
")",
"for",
"f",
"in",
"validate_forms",
"]",
"else",
":",
"validate_forms",
"=",
"instance",
".",
"initial_forms",
"+",
"[",
"form",
"for",
"form",
"in",
"instance",
".",
"extra_forms",
"if",
"form",
".",
"has_changed",
"(",
")",
"]",
"return",
"[",
"self",
".",
"errors",
"(",
"f",
")",
"for",
"f",
"in",
"validate_forms",
"]",
"elif",
"isinstance",
"(",
"instance",
",",
"models",
".",
"Model",
")",
":",
"return",
"self",
".",
"_validate",
"(",
"dict",
"(",
"[",
"(",
"f",
",",
"getattr",
"(",
"instance",
",",
"f",
")",
")",
"for",
"f",
"in",
"self",
".",
"validators",
"]",
")",
")"
]
| Run all field validators and return a dict of errors.
The keys of the resulting dict correspond to field
names. instance can be a dict (ie, form.cleaned_data), a form,
a formset, or a model instance.
If instance is a form, full_clean() will be called if the form
is bound.
If instance is a formset, full_clean() will be called on each
member form, if bound. | [
"Run",
"all",
"field",
"validators",
"and",
"return",
"a",
"dict",
"of",
"errors",
"."
]
| 32f8914a2c5529519009d21c85f0d47cc6601901 | https://github.com/eventbrite/rebar/blob/32f8914a2c5529519009d21c85f0d47cc6601901/src/rebar/validators.py#L120-L173 | train |
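A hypothetical end-to-end use of the three methods above; the subclassing style and validator wiring are assumptions for illustration, not rebar's documented API:

from django.core.exceptions import ValidationError

def require_value(value):                    # ordinary Django-style validator
    if not value:
        raise ValidationError('This field is required.')

class PublishValidator(StateValidator):      # hypothetical subclass
    validators = {'title': [require_value]}

validator = PublishValidator()
print(validator.is_valid({'title': ''}))     # False: require_value raises
print(validator.errors({'title': 'Ready'}))  # {}
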
peergradeio/flask-mongo-profiler | flask_mongo_profiler/contrib/flask_admin/formatters/relational.py | queryset_formatter | def queryset_formatter(queryset):
"""
This is used for custom detail fields returning a QuerySet of
admin objects.
"""
return Markup(
base_list_formatter(
None,
[
'<a href="{}">{}</a>'.format(u.get_admin_url(_external=True), u)
for u in queryset
],
)
) | python | def queryset_formatter(queryset):
"""
This is used for custom detail fields returning a QuerySet of
admin objects.
"""
return Markup(
base_list_formatter(
None,
[
'<a href="{}">{}</a>'.format(u.get_admin_url(_external=True), u)
for u in queryset
],
)
) | [
"def",
"queryset_formatter",
"(",
"queryset",
")",
":",
"return",
"Markup",
"(",
"base_list_formatter",
"(",
"None",
",",
"[",
"'<a href=\"{}\">{}</a>'",
".",
"format",
"(",
"u",
".",
"get_admin_url",
"(",
"_external",
"=",
"True",
")",
",",
"u",
")",
"for",
"u",
"in",
"queryset",
"]",
",",
")",
")"
]
| This is used for custom detail fields returning a QuerySet of
admin objects. | [
"This",
"is",
"used",
"for",
"custom",
"detail",
"fields",
"returning",
"a",
"QuerySet",
"of",
"admin",
"objects",
"."
]
| a267eeb49fea07c9a24fb370bd9d7a90ed313ccf | https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/relational.py#L8-L21 | train |
peergradeio/flask-mongo-profiler | flask_mongo_profiler/contrib/flask_admin/formatters/relational.py | qs_field | def qs_field(
model_class,
field,
filters=None,
formatter=queryset_formatter,
manager_name='objects',
):
"""
Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns an object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
]
"""
if filters is None:
filters = {}
def _(view, context, _model, name):
filters[field] = _model # e.g. students: user
# e.g. User.objects, User.deleted_objects
manager = getattr(model_class, manager_name)
return formatter(manager(**filters))
return _ | python | def qs_field(
model_class,
field,
filters=None,
formatter=queryset_formatter,
manager_name='objects',
):
"""
Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns an object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
]
"""
if filters is None:
filters = {}
def _(view, context, _model, name):
filters[field] = _model # e.g. students: user
# e.g. User.objects, User.deleted_objects
manager = getattr(model_class, manager_name)
return formatter(manager(**filters))
return _ | [
"def",
"qs_field",
"(",
"model_class",
",",
"field",
",",
"filters",
"=",
"None",
",",
"formatter",
"=",
"queryset_formatter",
",",
"manager_name",
"=",
"'objects'",
",",
")",
":",
"if",
"filters",
"is",
"None",
":",
"filters",
"=",
"{",
"}",
"def",
"_",
"(",
"view",
",",
"context",
",",
"_model",
",",
"name",
")",
":",
"filters",
"[",
"field",
"]",
"=",
"_model",
"# e.g. students: user",
"# e.g. User.objects, User.deleted_objects",
"manager",
"=",
"getattr",
"(",
"model_class",
",",
"manager_name",
")",
"return",
"formatter",
"(",
"manager",
"(",
"*",
"*",
"filters",
")",
")",
"return",
"_"
]
| Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns an object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
] | [
"Show",
"computed",
"fields",
"based",
"on",
"QuerySet",
"s",
"."
]
| a267eeb49fea07c9a24fb370bd9d7a90ed313ccf | https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/relational.py#L24-L57 | train |
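The closure qs_field returns matches Flask-Admin's column-formatter signature (view, context, model, name). A runnable sketch with stand-ins (DummyManager and Course below are placeholders, not real models):

class DummyManager:
    def __call__(self, **filters):           # stands in for Course.objects(**filters)
        return ['Course A', 'Course B']      # pretend queryset

class Course:                                # placeholder model class
    objects = DummyManager()

fmt = qs_field(Course, 'owner', formatter=lambda qs: list(qs))
print(fmt(None, None, 'some-user', 'courses_owner'))   # ['Course A', 'Course B']
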
Kortemme-Lab/klab | klab/bio/pdbtm.py | PDBTM._get_pdb_id | def _get_pdb_id(self, elem, **kwargs):
'''If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.'''
id = elem.attrib['ID']
if self.restrict_to_transmembrane_proteins:
tmp = elem.attrib['TMP']
assert(tmp == 'no' or tmp == 'yes' or tmp == 'not')
if tmp == 'yes':
self.ids[id] = PDBTM._get_tm_type(elem)
else:
self.ids[id] = self.ids.get(id, 0) + 1 | python | def _get_pdb_id(self, elem, **kwargs):
'''If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.'''
id = elem.attrib['ID']
if self.restrict_to_transmembrane_proteins:
tmp = elem.attrib['TMP']
assert(tmp == 'no' or tmp == 'yes' or tmp == 'not')
if tmp == 'yes':
self.ids[id] = PDBTM._get_tm_type(elem)
else:
self.ids[id] = self.ids.get(id, 0) + 1 | [
"def",
"_get_pdb_id",
"(",
"self",
",",
"elem",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"elem",
".",
"attrib",
"[",
"'ID'",
"]",
"if",
"self",
".",
"restrict_to_transmembrane_proteins",
":",
"tmp",
"=",
"elem",
".",
"attrib",
"[",
"'TMP'",
"]",
"assert",
"(",
"tmp",
"==",
"'no'",
"or",
"tmp",
"==",
"'yes'",
"or",
"tmp",
"==",
"'not'",
")",
"if",
"tmp",
"==",
"'yes'",
":",
"self",
".",
"ids",
"[",
"id",
"]",
"=",
"PDBTM",
".",
"_get_tm_type",
"(",
"elem",
")",
"else",
":",
"self",
".",
"ids",
"[",
"id",
"]",
"=",
"self",
".",
"ids",
".",
"get",
"(",
"id",
",",
"0",
")",
"+",
"1"
]
| If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added. | [
"If",
"self",
".",
"restrict_to_transmembrane_proteins",
"is",
"False",
"then",
"this",
"adds",
"all",
"ids",
"to",
"self",
".",
"ids",
".",
"Otherwise",
"only",
"transmembrane",
"protein",
"ids",
"are",
"added",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdbtm.py#L61-L70 | train |
Kortemme-Lab/klab | klab/bio/pdbtm.py | PDBTM.get_xml | def get_xml(self, pdb_id):
''' Returns the XML for pdb_id if the tag exists.'''
self.tmp_string = None
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
try:
fast_iter(context, self._get_xml, pdb_id = pdb_id.upper())
except EarlyOut: pass
return self.tmp_string | python | def get_xml(self, pdb_id):
''' Returns the XML for pdb_id if the tag exists.'''
self.tmp_string = None
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
try:
fast_iter(context, self._get_xml, pdb_id = pdb_id.upper())
except EarlyOut: pass
return self.tmp_string | [
"def",
"get_xml",
"(",
"self",
",",
"pdb_id",
")",
":",
"self",
".",
"tmp_string",
"=",
"None",
"context",
"=",
"etree",
".",
"iterparse",
"(",
"io",
".",
"BytesIO",
"(",
"self",
".",
"xml_contents",
")",
",",
"events",
"=",
"(",
"'end'",
",",
")",
",",
"tag",
"=",
"self",
".",
"PDBTM_entry_tag_type",
")",
"try",
":",
"fast_iter",
"(",
"context",
",",
"self",
".",
"_get_xml",
",",
"pdb_id",
"=",
"pdb_id",
".",
"upper",
"(",
")",
")",
"except",
"EarlyOut",
":",
"pass",
"return",
"self",
".",
"tmp_string"
]
| Returns the XML for pdb_id if the tag exists. | [
"Returns",
"the",
"XML",
"for",
"pdb_id",
"if",
"the",
"tag",
"exists",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdbtm.py#L117-L124 | train |
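The streaming early-exit pattern behind get_xml (iterparse plus an exception to abort), reduced to a standalone lxml sketch; the tag name and IDs are made up rather than real PDBTM markup:

import io
from lxml import etree

class EarlyOut(Exception):
    pass

xml = b'<pdbtm><pdbtmItem ID="1ABC"/><pdbtmItem ID="2XYZ"/></pdbtm>'
found = {}

context = etree.iterparse(io.BytesIO(xml), events=('end',), tag='pdbtmItem')
try:
    for event, elem in context:
        if elem.attrib['ID'] == '1ABC':
            found['xml'] = etree.tostring(elem)
            raise EarlyOut()                 # stop parsing on the first hit
except EarlyOut:
    pass
print(found['xml'])                          # b'<pdbtmItem ID="1ABC"/>'
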
uogbuji/versa | tools/py/driver/mongo.py | connection.multimatch | def multimatch(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
'''
Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component
origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values
'''
raise NotImplementedError
origin = origin if origin is None or isinstance(origin, set) else set([origin])
rel = rel if rel is None or isinstance(rel, set) else set([rel])
target = target if target is None or isinstance(target, set) else set([target])
for index, curr_rel in enumerate(self._relationships):
matches = True
if origin and curr_rel[ORIGIN] not in origin:
matches = False
if rel and curr_rel[RELATIONSHIP] not in rel:
matches = False
if target and curr_rel[TARGET] not in target:
matches = False
if attrs:
for k, v in attrs.items():
if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
matches = False
if matches:
if include_ids:
yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
else:
yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
return | python | def multimatch(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
'''
Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component
origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values
'''
raise NotImplementedError
origin = origin if origin is None or isinstance(origin, set) else set([origin])
rel = rel if rel is None or isinstance(rel, set) else set([rel])
target = target if target is None or isinstance(target, set) else set([target])
for index, curr_rel in enumerate(self._relationships):
matches = True
if origin and curr_rel[ORIGIN] not in origin:
matches = False
if rel and curr_rel[RELATIONSHIP] not in rel:
matches = False
if target and curr_rel[TARGET] not in target:
matches = False
if attrs:
for k, v in attrs.items():
if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
matches = False
if matches:
if include_ids:
yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
else:
yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
return | [
"def",
"multimatch",
"(",
"self",
",",
"origin",
"=",
"None",
",",
"rel",
"=",
"None",
",",
"target",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"include_ids",
"=",
"False",
")",
":",
"raise",
"NotImplementedError",
"origin",
"=",
"origin",
"if",
"origin",
"is",
"None",
"or",
"isinstance",
"(",
"origin",
",",
"set",
")",
"else",
"set",
"(",
"[",
"origin",
"]",
")",
"rel",
"=",
"rel",
"if",
"rel",
"is",
"None",
"or",
"isinstance",
"(",
"rel",
",",
"set",
")",
"else",
"set",
"(",
"[",
"rel",
"]",
")",
"target",
"=",
"target",
"if",
"target",
"is",
"None",
"or",
"isinstance",
"(",
"target",
",",
"set",
")",
"else",
"set",
"(",
"[",
"target",
"]",
")",
"for",
"index",
",",
"curr_rel",
"in",
"enumerate",
"(",
"self",
".",
"_relationships",
")",
":",
"matches",
"=",
"True",
"if",
"origin",
"and",
"curr_rel",
"[",
"ORIGIN",
"]",
"not",
"in",
"origin",
":",
"matches",
"=",
"False",
"if",
"rel",
"and",
"curr_rel",
"[",
"RELATIONSHIP",
"]",
"not",
"in",
"rel",
":",
"matches",
"=",
"False",
"if",
"target",
"and",
"curr_rel",
"[",
"TARGET",
"]",
"not",
"in",
"target",
":",
"matches",
"=",
"False",
"if",
"attrs",
":",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"curr_rel",
"[",
"ATTRIBUTES",
"]",
"or",
"curr_rel",
"[",
"ATTRIBUTES",
"]",
".",
"get",
"(",
"k",
")",
"!=",
"v",
":",
"matches",
"=",
"False",
"if",
"matches",
":",
"if",
"include_ids",
":",
"yield",
"index",
",",
"(",
"curr_rel",
"[",
"0",
"]",
",",
"curr_rel",
"[",
"1",
"]",
",",
"curr_rel",
"[",
"2",
"]",
",",
"curr_rel",
"[",
"3",
"]",
".",
"copy",
"(",
")",
")",
"else",
":",
"yield",
"(",
"curr_rel",
"[",
"0",
"]",
",",
"curr_rel",
"[",
"1",
"]",
",",
"curr_rel",
"[",
"2",
"]",
",",
"curr_rel",
"[",
"3",
"]",
".",
"copy",
"(",
")",
")",
"return"
]
| Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component
origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.
rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.
target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.
attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
include_ids - If true include statement IDs with yield values | [
"Iterator",
"over",
"relationship",
"IDs",
"that",
"match",
"a",
"pattern",
"of",
"components",
"with",
"multiple",
"options",
"provided",
"for",
"each",
"component"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/mongo.py#L167-L198 | train |
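Since multimatch raises NotImplementedError before its matching loop runs, the code after the raise effectively documents the intended semantics. As a standalone sketch over (origin, rel, target, attrs) tuples:

ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES = 0, 1, 2, 3
rels = [
    ('doc1', 'title', 'Versa', {}),
    ('doc2', 'title', 'RDF', {'lang': 'en'}),
]

def multimatch(origin=None, rel=None, target=None, attrs=None):
    origin = origin if origin is None or isinstance(origin, set) else {origin}
    rel = rel if rel is None or isinstance(rel, set) else {rel}
    target = target if target is None or isinstance(target, set) else {target}
    for r in rels:
        if origin and r[ORIGIN] not in origin:
            continue
        if rel and r[RELATIONSHIP] not in rel:
            continue
        if target and r[TARGET] not in target:
            continue
        if attrs and any(r[ATTRIBUTES].get(k) != v for k, v in attrs.items()):
            continue
        yield r

print(list(multimatch(rel='title', attrs={'lang': 'en'})))   # [('doc2', ...)]
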
uogbuji/versa | tools/py/driver/mongo.py | connection.add | def add(self, origin, rel, target, attrs=None):
'''
Add one relationship to the model
origin - origin of the relationship (similar to an RDF subject)
rel - type IRI of the relationship (similar to an RDF predicate)
target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
'''
if not origin:
raise ValueError('Relationship origin cannot be null')
if not rel:
raise ValueError('Relationship ID cannot be null')
attrs = attrs or {}
origin_item = self._db_coll.find_one({'origin': origin})
rel = self._abbreviate(rel)
target = self._abbreviate(target)
rel_info = {'rid': rel, 'instances': [[target, attrs]]}
if origin_item is None:
self._db_coll.insert_one(
{
'origin': origin,
'rels': [rel_info],
}
)
else:
origin_item['rels'].append(rel_info)
self._db_coll.replace_one(
{'origin': origin}, origin_item
)
return | python | def add(self, origin, rel, target, attrs=None):
'''
Add one relationship to the model
origin - origin of the relationship (similar to an RDF subject)
rel - type IRI of the relationship (similar to an RDF predicate)
target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
'''
if not origin:
raise ValueError('Relationship origin cannot be null')
if not rel:
raise ValueError('Relationship ID cannot be null')
attrs = attrs or {}
origin_item = self._db_coll.find_one({'origin': origin})
rel = self._abbreviate(rel)
target = self._abbreviate(target)
rel_info = {'rid': rel, 'instances': [[target, attrs]]}
if origin_item is None:
self._db_coll.insert_one(
{
'origin': origin,
'rels': [rel_info],
}
)
else:
origin_item['rels'].append(rel_info)
self._db_coll.replace_one(
{'origin': origin}, origin_item
)
return | [
"def",
"add",
"(",
"self",
",",
"origin",
",",
"rel",
",",
"target",
",",
"attrs",
"=",
"None",
")",
":",
"if",
"not",
"origin",
":",
"raise",
"ValueError",
"(",
"'Relationship origin cannot be null'",
")",
"if",
"not",
"rel",
":",
"raise",
"ValueError",
"(",
"'Relationship ID cannot be null'",
")",
"attrs",
"=",
"attrs",
"or",
"{",
"}",
"origin_item",
"=",
"self",
".",
"_db_coll",
".",
"find_one",
"(",
"{",
"'origin'",
":",
"origin",
"}",
")",
"rel",
"=",
"self",
".",
"_abbreviate",
"(",
"rel",
")",
"target",
"=",
"self",
".",
"_abbreviate",
"(",
"target",
")",
"rel_info",
"=",
"{",
"'rid'",
":",
"rel",
",",
"'instances'",
":",
"[",
"[",
"target",
",",
"attrs",
"]",
"]",
"}",
"if",
"origin_item",
"is",
"None",
":",
"self",
".",
"_db_coll",
".",
"insert_one",
"(",
"{",
"'origin'",
":",
"origin",
",",
"'rels'",
":",
"[",
"rel_info",
"]",
",",
"}",
")",
"else",
":",
"origin_item",
"[",
"'rels'",
"]",
".",
"append",
"(",
"rel_info",
")",
"self",
".",
"_db_coll",
".",
"replace_one",
"(",
"{",
"'origin'",
":",
"origin",
"}",
",",
"origin_item",
")",
"return"
]
| Add one relationship to the model
origin - origin of the relationship (similar to an RDF subject)
rel - type IRI of the relationship (similar to an RDF predicate)
target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} | [
"Add",
"one",
"relationship",
"to",
"the",
"model"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/mongo.py#L200-L232 | train |
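After two add() calls with the same origin, the collection holds one document shaped like this (placeholder values; _abbreviate may rewrite the IRIs). Note that add() always appends a fresh rels entry rather than merging instances under an existing rid:

doc = {
    'origin': 'http://example.org/doc1',
    'rels': [
        {'rid': 'http://example.org/title', 'instances': [['Versa', {}]]},
        {'rid': 'http://example.org/author', 'instances': [['Alice', {}]]},
    ],
}
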
uogbuji/versa | tools/py/driver/mongo.py | connection.remove | def remove(self, index):
'''
Delete one or more relationships, by index, from the extent
index - either a single index or a list of indices
'''
raise NotImplementedError
if hasattr(index, '__iter__'):
ind = set(index)
else:
ind = [index]
# Rebuild relationships, excluding the provided indices
self._relationships = [r for i, r in enumerate(self._relationships) if i not in ind] | python | def remove(self, index):
'''
Delete one or more relationships, by index, from the extent
index - either a single index or a list of indices
'''
raise NotImplementedError
if hasattr(index, '__iter__'):
ind = set(index)
else:
ind = [index]
# Rebuild relationships, excluding the provided indices
self._relationships = [r for i, r in enumerate(self._relationships) if i not in ind] | [
"def",
"remove",
"(",
"self",
",",
"index",
")",
":",
"raise",
"NotImplementedError",
"if",
"hasattr",
"(",
"index",
",",
"'__iter__'",
")",
":",
"ind",
"=",
"set",
"(",
"index",
")",
"else",
":",
"ind",
"=",
"[",
"index",
"]",
"# Rebuild relationships, excluding the provided indices",
"self",
".",
"_relationships",
"=",
"[",
"r",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"self",
".",
"_relationships",
")",
"if",
"i",
"not",
"in",
"ind",
"]"
]
| Delete one or more relationships, by index, from the extent
index - either a single index or a list of indices | [
"Delete",
"one",
"or",
"more",
"relationship",
"by",
"index",
"from",
"the",
"extent"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/driver/mongo.py#L260-L273 | train |
assamite/creamas | creamas/examples/spiro/spiro_agent.py | SpiroAgent.act | async def act(self):
'''Agent's main method to create new spirographs.
See Simulation and CreativeAgent documentation for details.
'''
# Learn from domain artifacts.
self.age += 1
self.added_last = False
self.learn_from_domain(method=self.env_learning_method,
amount=self.env_learning_amount)
# Invent new artifact
artifact = self.invent(self.search_width)
args = artifact.framings[self.name]['args']
val = artifact.evals[self.name]
self._log(logging.DEBUG, "Created spirograph with args={}, val={}"
.format(args, val))
self.spiro_args = args
self.arg_history.append(self.spiro_args)
self.add_artifact(artifact)
if val >= self._own_threshold:
artifact.self_criticism = 'pass'
# Train SOM with the invented artifact
self.learn(artifact, self.teaching_iterations)
# Save images if logger is defined
# Add created artifact to voting candidates in the environment
self.add_candidate(artifact)
self.added_last = True
elif self.jump == 'random':
largs = self.spiro_args
self.spiro_args = np.random.uniform(-199, 199,
self.spiro_args.shape)
self._log(logging.DEBUG, "Jumped from {} to {}"
.format(largs, self.spiro_args))
self.save_images(artifact) | python | async def act(self):
'''Agent's main method to create new spirographs.
See Simulation and CreativeAgent documentation for details.
'''
# Learn from domain artifacts.
self.age += 1
self.added_last = False
self.learn_from_domain(method=self.env_learning_method,
amount=self.env_learning_amount)
# Invent new artifact
artifact = self.invent(self.search_width)
args = artifact.framings[self.name]['args']
val = artifact.evals[self.name]
self._log(logging.DEBUG, "Created spirograph with args={}, val={}"
.format(args, val))
self.spiro_args = args
self.arg_history.append(self.spiro_args)
self.add_artifact(artifact)
if val >= self._own_threshold:
artifact.self_criticism = 'pass'
# Train SOM with the invented artifact
self.learn(artifact, self.teaching_iterations)
# Save images if logger is defined
# Add created artifact to voting candidates in the environment
self.add_candidate(artifact)
self.added_last = True
elif self.jump == 'random':
largs = self.spiro_args
self.spiro_args = np.random.uniform(-199, 199,
self.spiro_args.shape)
self._log(logging.DEBUG, "Jumped from {} to {}"
.format(largs, self.spiro_args))
self.save_images(artifact) | [
"async",
"def",
"act",
"(",
"self",
")",
":",
"# Learn from domain artifacts.",
"self",
".",
"age",
"+=",
"1",
"self",
".",
"added_last",
"=",
"False",
"self",
".",
"learn_from_domain",
"(",
"method",
"=",
"self",
".",
"env_learning_method",
",",
"amount",
"=",
"self",
".",
"env_learning_amount",
")",
"# Invent new artifact",
"artifact",
"=",
"self",
".",
"invent",
"(",
"self",
".",
"search_width",
")",
"args",
"=",
"artifact",
".",
"framings",
"[",
"self",
".",
"name",
"]",
"[",
"'args'",
"]",
"val",
"=",
"artifact",
".",
"evals",
"[",
"self",
".",
"name",
"]",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"Created spirograph with args={}, val={}\"",
".",
"format",
"(",
"args",
",",
"val",
")",
")",
"self",
".",
"spiro_args",
"=",
"args",
"self",
".",
"arg_history",
".",
"append",
"(",
"self",
".",
"spiro_args",
")",
"self",
".",
"add_artifact",
"(",
"artifact",
")",
"if",
"val",
">=",
"self",
".",
"_own_threshold",
":",
"artifact",
".",
"self_criticism",
"=",
"'pass'",
"# Train SOM with the invented artifact",
"self",
".",
"learn",
"(",
"artifact",
",",
"self",
".",
"teaching_iterations",
")",
"# Save images if logger is defined",
"# Add created artifact to voting candidates in the environment",
"self",
".",
"add_candidate",
"(",
"artifact",
")",
"self",
".",
"added_last",
"=",
"True",
"elif",
"self",
".",
"jump",
"==",
"'random'",
":",
"largs",
"=",
"self",
".",
"spiro_args",
"self",
".",
"spiro_args",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"199",
",",
"199",
",",
"self",
".",
"spiro_args",
".",
"shape",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"Jumped from {} to {}\"",
".",
"format",
"(",
"largs",
",",
"self",
".",
"spiro_args",
")",
")",
"self",
".",
"save_images",
"(",
"artifact",
")"
]
| Agent's main method to create new spirographs.
See Simulation and CreativeAgent documentation for details. | [
"Agent",
"s",
"main",
"method",
"to",
"create",
"new",
"spirographs",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent.py#L213-L246 | train |
assamite/creamas | creamas/examples/spiro/spiro_agent.py | SpiroAgent.learn_from_domain | def learn_from_domain(self, method='random', amount=10):
'''Learn SOM from artifacts introduced to the environment.
:param str method:
learning method, should be either 'random' or 'closest', where
'random' chooses **amount** random artifacts, and 'closest' samples
closest artifacts based on spirograph generation artifacts.
:param int amount:
Maximum amount of artifacts sampled
:param bool last:
Learn from last domain artifact in any case
'''
if method == 'none':
return
arts = self.env.artifacts
if len(arts) == 0:
return
if 'random' in method:
samples = min(len(arts), amount)
ars = np.random.choice(arts, samples, replace=False)
for a in ars:
self.learn(a, self.teaching_iterations)
if 'closest' in method:
ars = arts
dists = []
for a in ars:
args = a.framings[a.creator]['args']
d = np.sqrt(np.sum(np.square(args - self.spiro_args)))
dists.append((d,a))
dists.sort(key=operator.itemgetter(0))
for d,a in dists[:amount]:
self.learn(a, self.teaching_iterations) | python | def learn_from_domain(self, method='random', amount=10):
'''Learn SOM from artifacts introduced to the environment.
:param str method:
learning method, should be either 'random' or 'closest', where
'random' chooses **amount** random artifacts, and 'closest' samples
closest artifacts based on spirograph generation artifacts.
:param int amount:
Maximum amount of artifacts sampled
:param bool last:
Learn from last domain artifact in any case
'''
if method == 'none':
return
arts = self.env.artifacts
if len(arts) == 0:
return
if 'random' in method:
samples = min(len(arts), amount)
ars = np.random.choice(arts, samples, replace=False)
for a in ars:
self.learn(a, self.teaching_iterations)
if 'closest' in method:
ars = arts
dists = []
for a in ars:
args = a.framings[a.creator]['args']
d = np.sqrt(np.sum(np.square(args - self.spiro_args)))
dists.append((d,a))
dists.sort(key=operator.itemgetter(0))
for d,a in dists[:amount]:
self.learn(a, self.teaching_iterations) | [
"def",
"learn_from_domain",
"(",
"self",
",",
"method",
"=",
"'random'",
",",
"amount",
"=",
"10",
")",
":",
"if",
"method",
"==",
"'none'",
":",
"return",
"arts",
"=",
"self",
".",
"env",
".",
"artifacts",
"if",
"len",
"(",
"arts",
")",
"==",
"0",
":",
"return",
"if",
"'random'",
"in",
"method",
":",
"samples",
"=",
"min",
"(",
"len",
"(",
"arts",
")",
",",
"amount",
")",
"ars",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"arts",
",",
"samples",
",",
"replace",
"=",
"False",
")",
"for",
"a",
"in",
"ars",
":",
"self",
".",
"learn",
"(",
"a",
",",
"self",
".",
"teaching_iterations",
")",
"if",
"'closest'",
"in",
"method",
":",
"ars",
"=",
"arts",
"dists",
"=",
"[",
"]",
"for",
"a",
"in",
"ars",
":",
"args",
"=",
"a",
".",
"framings",
"[",
"a",
".",
"creator",
"]",
"[",
"'args'",
"]",
"d",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"square",
"(",
"args",
"-",
"self",
".",
"spiro_args",
")",
")",
")",
"dists",
".",
"append",
"(",
"(",
"d",
",",
"a",
")",
")",
"dists",
".",
"sort",
"(",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"for",
"d",
",",
"a",
"in",
"dists",
"[",
":",
"amount",
"]",
":",
"self",
".",
"learn",
"(",
"a",
",",
"self",
".",
"teaching_iterations",
")"
]
| Learn SOM from artifacts introduced to the environment.
:param str method:
learning method, should be either 'random' or 'closest', where
'random' chooses **amount** random artifacts, and 'closest' samples
closest artifacts based on spirograph generation artifacts.
:param int amount:
Maximum amount of artifacts sampled
:param bool last:
Learn from last domain artifact in any case | [
"Learn",
"SOM",
"from",
"artifacts",
"introduced",
"to",
"the",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent.py#L248-L279 | train |
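The 'closest' branch above ranks artifacts by Euclidean distance between their generating arguments and the agent's current arguments; in isolation:

import numpy as np

current = np.array([10.0, 20.0])
candidates = {'a': np.array([11.0, 20.0]), 'b': np.array([40.0, 5.0])}
dists = sorted((float(np.sqrt(np.sum(np.square(v - current)))), k)
               for k, v in candidates.items())
print(dists)    # [(1.0, 'a'), (33.54..., 'b')]: 'a' would be learned from first
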
assamite/creamas | creamas/examples/spiro/spiro_agent.py | SpiroAgent.plot_distances | def plot_distances(self, mean_dist, distances, indeces):
'''Plot distances of the generated spirographs w.r.t. the previously
generated spirographs.
'''
from matplotlib import pyplot as plt
x = np.arange(len(distances))
y = [mean_dist for i in x]
fig, ax = plt.subplots()
data_line = ax.plot(indeces, distances, label='Min Distance to previous',
marker='.', color='black', linestyle="")
mean_line = ax.plot(indeces, y, label='Mean', linestyle='--', color='green')
if len(distances) > 0:
z = np.poly1d(np.polyfit(x,distances,2))
f = [z(i) for i in x]
mean_line = ax.plot(indeces, f, label='Fitted', linestyle='-', color='red')
legend = ax.legend(loc='upper right', prop={'size':8})
agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
self.sanitized_name(), self.age, self.env_learning_method, self.env_learning_amount, self.env_learn_on_add,
self.stmem.length, self._novelty_threshold, self._own_threshold,
self.jump, self.search_width, self.move_radius)
ax.set_title("{} min distances: env_learn={} {}"
.format(self.name, self.env_learning_method,
self.env_learning_amount))
ax.set_ylabel('min distance to preceding artifact')
ax.set_xlabel('iteration')
if self.logger is not None:
imname = os.path.join(self.logger.folder, '{}_dists.png'.format(agent_vars))
plt.savefig(imname)
plt.close()
else:
plt.show() | python | def plot_distances(self, mean_dist, distances, indeces):
'''Plot distances of the generated spirographs w.r.t. the previously
generated spirographs.
'''
from matplotlib import pyplot as plt
x = np.arange(len(distances))
y = [mean_dist for i in x]
fig, ax = plt.subplots()
data_line = ax.plot(indeces, distances, label='Min Distance to previous',
marker='.', color='black', linestyle="")
mean_line = ax.plot(indeces, y, label='Mean', linestyle='--', color='green')
if len(distances) > 0:
z = np.poly1d(np.polyfit(x,distances,2))
f = [z(i) for i in x]
mean_line = ax.plot(indeces, f, label='Fitted', linestyle='-', color='red')
legend = ax.legend(loc='upper right', prop={'size':8})
agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
self.sanitized_name(), self.age, self.env_learning_method, self.env_learning_amount, self.env_learn_on_add,
self.stmem.length, self._novelty_threshold, self._own_threshold,
self.jump, self.search_width, self.move_radius)
ax.set_title("{} min distances: env_learn={} {}"
.format(self.name, self.env_learning_method,
self.env_learning_amount))
ax.set_ylabel('min distance to preceding artifact')
ax.set_xlabel('iteration')
if self.logger is not None:
imname = os.path.join(self.logger.folder, '{}_dists.png'.format(agent_vars))
plt.savefig(imname)
plt.close()
else:
plt.show() | [
"def",
"plot_distances",
"(",
"self",
",",
"mean_dist",
",",
"distances",
",",
"indeces",
")",
":",
"from",
"matplotlib",
"import",
"pyplot",
"as",
"plt",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"distances",
")",
")",
"y",
"=",
"[",
"mean_dist",
"for",
"i",
"in",
"x",
"]",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"data_line",
"=",
"ax",
".",
"plot",
"(",
"indeces",
",",
"distances",
",",
"label",
"=",
"'Min Distance to previous'",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'black'",
",",
"linestyle",
"=",
"\"\"",
")",
"mean_line",
"=",
"ax",
".",
"plot",
"(",
"indeces",
",",
"y",
",",
"label",
"=",
"'Mean'",
",",
"linestyle",
"=",
"'--'",
",",
"color",
"=",
"'green'",
")",
"if",
"len",
"(",
"distances",
")",
">",
"0",
":",
"z",
"=",
"np",
".",
"poly1d",
"(",
"np",
".",
"polyfit",
"(",
"x",
",",
"distances",
",",
"2",
")",
")",
"f",
"=",
"[",
"z",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
"mean_line",
"=",
"ax",
".",
"plot",
"(",
"indeces",
",",
"f",
",",
"label",
"=",
"'Fitted'",
",",
"linestyle",
"=",
"'-'",
",",
"color",
"=",
"'red'",
")",
"legend",
"=",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
",",
"prop",
"=",
"{",
"'size'",
":",
"8",
"}",
")",
"agent_vars",
"=",
"\"{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN\"",
".",
"format",
"(",
"self",
".",
"sanitized_name",
"(",
")",
",",
"self",
".",
"age",
",",
"self",
".",
"env_learning_method",
",",
"self",
".",
"env_learning_amount",
",",
"self",
".",
"env_learn_on_add",
",",
"self",
".",
"stmem",
".",
"length",
",",
"self",
".",
"_novelty_threshold",
",",
"self",
".",
"_own_threshold",
",",
"self",
".",
"jump",
",",
"self",
".",
"search_width",
",",
"self",
".",
"move_radius",
")",
"ax",
".",
"set_title",
"(",
"\"{} min distances: env_learn={} {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"self",
".",
"env_learning_method",
",",
"self",
".",
"env_learning_amount",
")",
")",
"ax",
".",
"set_ylabel",
"(",
"'min distance to preceding artifact'",
")",
"ax",
".",
"set_xlabel",
"(",
"'iteration'",
")",
"if",
"self",
".",
"logger",
"is",
"not",
"None",
":",
"imname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"logger",
".",
"folder",
",",
"'{}_dists.png'",
".",
"format",
"(",
"agent_vars",
")",
")",
"plt",
".",
"savefig",
"(",
"imname",
")",
"plt",
".",
"close",
"(",
")",
"else",
":",
"plt",
".",
"show",
"(",
")"
]
| Plot distances of the generated spirographs w.r.t. the previously
generated spirographs. | [
"Plot",
"distances",
"of",
"the",
"generated",
"spirographs",
"w",
".",
"r",
".",
"t",
".",
"the",
"previously",
"generated",
"spirogaphs",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/spiro/spiro_agent.py#L353-L383 | train |
PBR/MQ2 | MQ2/plugins/mapqtl_plugin.py | get_qtls_from_mapqtl_data | def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):
"""Extract the QTLs found by MapQTL reading its file.
This assumes that there is only one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective of the presence of a QTL.
:arg inputfile, name of the inputfile in which the QTLs have been
found
"""
trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]
qtls = []
qtl = None
for entry in matrix[1:]:
if qtl is None:
qtl = entry
if qtl[1] != entry[1]:
if float(qtl[4]) > float(threshold):
qtl[0] = trait_name
qtls.append(qtl)
qtl = entry
if entry[4] == '': # pragma: no cover
entry[4] = 0
if qtl[4] == '': # pragma: no cover
qtl[4] = 0
if float(entry[4]) > float(qtl[4]):
qtl = entry
if float(qtl[4]) > float(threshold):
qtl[0] = trait_name
if qtl not in qtls:
qtls.append(qtl)
return qtls | python | def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):
"""Extract the QTLs found by MapQTL reading its file.
This assumes that there is only one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective of the presence of a QTL.
:arg inputfile, name of the inputfile in which the QTLs have been
found
"""
trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]
qtls = []
qtl = None
for entry in matrix[1:]:
if qtl is None:
qtl = entry
if qtl[1] != entry[1]:
if float(qtl[4]) > float(threshold):
qtl[0] = trait_name
qtls.append(qtl)
qtl = entry
if entry[4] == '': # pragma: no cover
entry[4] = 0
if qtl[4] == '': # pragma: no cover
qtl[4] = 0
if float(entry[4]) > float(qtl[4]):
qtl = entry
if float(qtl[4]) > float(threshold):
qtl[0] = trait_name
if qtl not in qtls:
qtls.append(qtl)
return qtls | [
"def",
"get_qtls_from_mapqtl_data",
"(",
"matrix",
",",
"threshold",
",",
"inputfile",
")",
":",
"trait_name",
"=",
"inputfile",
".",
"split",
"(",
"')_'",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'.mqo'",
")",
"[",
"0",
"]",
"qtls",
"=",
"[",
"]",
"qtl",
"=",
"None",
"for",
"entry",
"in",
"matrix",
"[",
"1",
":",
"]",
":",
"if",
"qtl",
"is",
"None",
":",
"qtl",
"=",
"entry",
"if",
"qtl",
"[",
"1",
"]",
"!=",
"entry",
"[",
"1",
"]",
":",
"if",
"float",
"(",
"qtl",
"[",
"4",
"]",
")",
">",
"float",
"(",
"threshold",
")",
":",
"qtl",
"[",
"0",
"]",
"=",
"trait_name",
"qtls",
".",
"append",
"(",
"qtl",
")",
"qtl",
"=",
"entry",
"if",
"entry",
"[",
"4",
"]",
"==",
"''",
":",
"# pragma: no cover",
"entry",
"[",
"4",
"]",
"=",
"0",
"if",
"qtl",
"[",
"4",
"]",
"==",
"''",
":",
"# pragma: no cover",
"qtl",
"[",
"4",
"]",
"=",
"0",
"if",
"float",
"(",
"entry",
"[",
"4",
"]",
")",
">",
"float",
"(",
"qtl",
"[",
"4",
"]",
")",
":",
"qtl",
"=",
"entry",
"if",
"float",
"(",
"qtl",
"[",
"4",
"]",
")",
">",
"float",
"(",
"threshold",
")",
":",
"qtl",
"[",
"0",
"]",
"=",
"trait_name",
"if",
"qtl",
"not",
"in",
"qtls",
":",
"qtls",
".",
"append",
"(",
"qtl",
")",
"return",
"qtls"
]
| Extract the QTLs found by MapQTL reading its file.
This assumes that there is only one QTL per linkage group.
:arg matrix, the MapQTL file read in memory
:arg threshold, threshold used to determine if a given LOD value is
reflective of the presence of a QTL.
:arg inputfile, name of the inputfile in which the QTLs have been
found | [
"Extract",
"the",
"QTLs",
"found",
"by",
"MapQTL",
"reading",
"its",
"file",
".",
"This",
"assume",
"that",
"there",
"is",
"only",
"one",
"QTL",
"per",
"linkage",
"group",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/mapqtl_plugin.py#L89-L123 | train |
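A toy run of the extraction above: entry[1] is the linkage group and entry[4] the LOD score (the other columns are placeholders here). The peak row of each group is kept when its LOD clears the threshold, and the trait name parsed from the file name is written into column 0:

matrix = [
    ['header', 'group', 'pos', 'marker', 'LOD'],
    ['x', 'LG1', '10', 'm1', '2.1'],
    ['x', 'LG1', '20', 'm2', '4.7'],     # LG1 peak
    ['x', 'LG2', '05', 'm3', '1.2'],     # below threshold, dropped
]
qtls = get_qtls_from_mapqtl_data(
    matrix, threshold=3.0, inputfile='Session 2 (IM)_trait1.mqo')
print(qtls)    # [['trait1', 'LG1', '20', 'm2', '4.7']]
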
PBR/MQ2 | MQ2/plugins/mapqtl_plugin.py | MapQTLPlugin.get_files | def get_files(cls, folder, session_id=''):
""" Retrieve the list of files the plugin can work on.
Find this list based on the file names, file extensions or even
by actually reading the file.
If a session identifier is specified it will restrict the list
of files returned to those with this session identifier in their
name.
:arg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg session_id: the session identifier of the MapQTL output
to process.
"""
filelist = []
if folder is None or not os.path.isdir(folder):
return filelist
if session_id is None:
session_id = ''
for root, dirs, files in os.walk(folder):
for filename in files:
if filename.startswith('Session %s' % session_id) \
and filename.endswith('.mqo'):
filename = os.path.join(root, filename)
filelist.append(filename)
return filelist | python | def get_files(cls, folder, session_id=''):
""" Retrieve the list of files the plugin can work on.
Find this list based on the file names, file extensions or even
by actually reading the file.
If a session identifier is specified it will restrict the list
of files returned to those with this session identifier in their
name.
:arg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg session_id: the session identifier of the MapQTL output
to process.
"""
filelist = []
if folder is None or not os.path.isdir(folder):
return filelist
if session_id is None:
session_id = ''
for root, dirs, files in os.walk(folder):
for filename in files:
if filename.startswith('Session %s' % session_id) \
and filename.endswith('.mqo'):
filename = os.path.join(root, filename)
filelist.append(filename)
return filelist | [
"def",
"get_files",
"(",
"cls",
",",
"folder",
",",
"session_id",
"=",
"''",
")",
":",
"filelist",
"=",
"[",
"]",
"if",
"folder",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"return",
"filelist",
"if",
"session_id",
"is",
"None",
":",
"session_id",
"=",
"''",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"folder",
")",
":",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
".",
"startswith",
"(",
"'Session %s'",
"%",
"session_id",
")",
"and",
"filename",
".",
"endswith",
"(",
"'.mqo'",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"filelist",
".",
"append",
"(",
"filename",
")",
"return",
"filelist"
]
| Retrieve the list of files the plugin can work on.
Find this list based on the file names, file extensions or even
by actually reading the file.
If a session identifier is specified it will restrict the list
of files returned to those with this session identifier in their
name.
:arg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg session_id: the session identifier of the MapQTL output
to process. | [
"Retrieve",
"the",
"list",
"of",
"files",
"the",
"plugin",
"can",
"work",
"on",
".",
"Find",
"this",
"list",
"based",
"on",
"the",
"files",
"name",
"files",
"extension",
"or",
"even",
"actually",
"by",
"reading",
"in",
"the",
"file",
".",
"If",
"a",
"session",
"identifier",
"is",
"specified",
"it",
"will",
"restrict",
"the",
"list",
"of",
"files",
"returned",
"to",
"those",
"with",
"this",
"session",
"identifier",
"in",
"their",
"name",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/mapqtl_plugin.py#L155-L180 | train |
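A hedged sketch of calling the classmethod above; the directory layout is made up, but the name filter is exactly the startswith/endswith pair in the body.

# Assuming MapQTL output such as data/Session 2 (IM)_height.mqo on disk:
files = MapQTLPlugin.get_files('data', session_id='2')
# keeps only names starting with 'Session 2' and ending in '.mqo';
# session_id='' (the default) matches every session.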
PBR/MQ2 | MQ2/plugins/mapqtl_plugin.py | MapQTLPlugin.get_session_identifiers | def get_session_identifiers(cls, folder=None, inputfile=None):
""" Retrieve the list of session identifiers contained in the
data in the folder.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use
"""
sessions = []
if folder is None or not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
if filename.startswith('Session ') \
and filename.endswith('.mqo'):
session = filename.split()[1]
if session not in sessions:
sessions.append(session)
return sessions | python | def get_session_identifiers(cls, folder=None, inputfile=None):
""" Retrieve the list of session identifiers contained in the
data in the folder.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use
"""
sessions = []
if folder is None or not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
if filename.startswith('Session ') \
and filename.endswith('.mqo'):
session = filename.split()[1]
if session not in sessions:
sessions.append(session)
return sessions | [
"def",
"get_session_identifiers",
"(",
"cls",
",",
"folder",
"=",
"None",
",",
"inputfile",
"=",
"None",
")",
":",
"sessions",
"=",
"[",
"]",
"if",
"folder",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"return",
"sessions",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"folder",
")",
":",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
".",
"startswith",
"(",
"'Session '",
")",
"and",
"filename",
".",
"endswith",
"(",
"'.mqo'",
")",
":",
"session",
"=",
"filename",
".",
"split",
"(",
")",
"[",
"1",
"]",
"if",
"session",
"not",
"in",
"sessions",
":",
"sessions",
".",
"append",
"(",
"session",
")",
"return",
"sessions"
]
| Retrieve the list of session identifiers contained in the
data in the folder.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use | [
"Retrieve",
"the",
"list",
"of",
"session",
"identifiers",
"contained",
"in",
"the",
"data",
"on",
"the",
"folder",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/mapqtl_plugin.py#L183-L202 | train |
Kortemme-Lab/klab | klab/general/strutil.py | parse_range | def parse_range(s, range_separator = '-'):
''' Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s.
Written by Laurens Kraal 2014.
'''
return reduce(lambda x,y: x+y, (map(lambda r: (range(int(r.split(range_separator)[0]), int(r.split(range_separator)[1])+1)) if range_separator in r else [int(r)], s.split(',')))) | python | def parse_range(s, range_separator = '-'):
''' Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s.
Written by Laurens Kraal 2014.
'''
return reduce(lambda x,y: x+y, (map(lambda r: (range(int(r.split(range_separator)[0]), int(r.split(range_separator)[1])+1)) if range_separator in r else [int(r)], s.split(',')))) | [
"def",
"parse_range",
"(",
"s",
",",
"range_separator",
"=",
"'-'",
")",
":",
"return",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"+",
"y",
",",
"(",
"map",
"(",
"lambda",
"r",
":",
"(",
"range",
"(",
"int",
"(",
"r",
".",
"split",
"(",
"range_separator",
")",
"[",
"0",
"]",
")",
",",
"int",
"(",
"r",
".",
"split",
"(",
"range_separator",
")",
"[",
"1",
"]",
")",
"+",
"1",
")",
")",
"if",
"range_separator",
"in",
"r",
"else",
"[",
"int",
"(",
"r",
")",
"]",
",",
"s",
".",
"split",
"(",
"','",
")",
")",
")",
")"
]
| Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s.
Written by Laurens Kraal 2014. | [
"Parses",
"the",
"string",
"s",
"which",
"contains",
"indices",
"and",
"ranges",
"and",
"returns",
"the",
"explicit",
"list",
"of",
"integers",
"defined",
"by",
"s",
".",
"Written",
"by",
"Laurens",
"Kraal",
"2014",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L15-L19 | train |
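A behaviour sketch for the one-liner above. Note that it relies on the Python 2 builtin reduce; under Python 3 the module would additionally need "from functools import reduce".

# Expected behaviour (Python 2 semantics; on Python 3, import reduce first):
print(parse_range('1,3-5,9'))      # -> [1, 3, 4, 5, 9]
print(parse_range('2:4,7', ':'))   # custom separator -> [2, 3, 4, 7]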
Kortemme-Lab/klab | klab/general/strutil.py | merge_range_pairs | def merge_range_pairs(prs):
'''Takes in a list of pairs specifying ranges and returns a sorted list of merged, sorted ranges.'''
new_prs = []
sprs = [sorted(p) for p in prs]
sprs = sorted(sprs)
merged = False
x = 0
while x < len(sprs):
newx = x + 1
new_pair = list(sprs[x])
for y in range(x + 1, len(sprs)):
if new_pair[0] <= sprs[y][0] - 1 <= new_pair[1]:
new_pair[0] = min(new_pair[0], sprs[y][0])
new_pair[1] = max(new_pair[1], sprs[y][1])
newx = y + 1
if new_pair not in new_prs:
new_prs.append(new_pair)
x = newx
return new_prs | python | def merge_range_pairs(prs):
'''Takes in a list of pairs specifying ranges and returns a sorted list of merged, sorted ranges.'''
new_prs = []
sprs = [sorted(p) for p in prs]
sprs = sorted(sprs)
merged = False
x = 0
while x < len(sprs):
newx = x + 1
new_pair = list(sprs[x])
for y in range(x + 1, len(sprs)):
if new_pair[0] <= sprs[y][0] - 1 <= new_pair[1]:
new_pair[0] = min(new_pair[0], sprs[y][0])
new_pair[1] = max(new_pair[1], sprs[y][1])
newx = y + 1
if new_pair not in new_prs:
new_prs.append(new_pair)
x = newx
return new_prs | [
"def",
"merge_range_pairs",
"(",
"prs",
")",
":",
"new_prs",
"=",
"[",
"]",
"sprs",
"=",
"[",
"sorted",
"(",
"p",
")",
"for",
"p",
"in",
"prs",
"]",
"sprs",
"=",
"sorted",
"(",
"sprs",
")",
"merged",
"=",
"False",
"x",
"=",
"0",
"while",
"x",
"<",
"len",
"(",
"sprs",
")",
":",
"newx",
"=",
"x",
"+",
"1",
"new_pair",
"=",
"list",
"(",
"sprs",
"[",
"x",
"]",
")",
"for",
"y",
"in",
"range",
"(",
"x",
"+",
"1",
",",
"len",
"(",
"sprs",
")",
")",
":",
"if",
"new_pair",
"[",
"0",
"]",
"<=",
"sprs",
"[",
"y",
"]",
"[",
"0",
"]",
"-",
"1",
"<=",
"new_pair",
"[",
"1",
"]",
":",
"new_pair",
"[",
"0",
"]",
"=",
"min",
"(",
"new_pair",
"[",
"0",
"]",
",",
"sprs",
"[",
"y",
"]",
"[",
"0",
"]",
")",
"new_pair",
"[",
"1",
"]",
"=",
"max",
"(",
"new_pair",
"[",
"1",
"]",
",",
"sprs",
"[",
"y",
"]",
"[",
"1",
"]",
")",
"newx",
"=",
"y",
"+",
"1",
"if",
"new_pair",
"not",
"in",
"new_prs",
":",
"new_prs",
".",
"append",
"(",
"new_pair",
")",
"x",
"=",
"newx",
"return",
"new_prs"
]
| Takes in a list of pairs specifying ranges and returns a sorted list of merged, sorted ranges. | [
"Takes",
"in",
"a",
"list",
"of",
"pairs",
"specifying",
"ranges",
"and",
"returns",
"a",
"sorted",
"list",
"of",
"merged",
"sorted",
"ranges",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L36-L54 | train |
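A quick check of the merge semantics: because the comparison is against sprs[y][0] - 1, ranges that merely touch (e.g. [1, 3] and [4, 6]) are merged as well as overlapping ones.

print(merge_range_pairs([[4, 6], [1, 3], [10, 12]]))
# -> [[1, 6], [10, 12]]: [1, 3] and [4, 6] touch, so they collapse;
#    [10, 12] is disjoint and survives unchanged.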
Kortemme-Lab/klab | klab/general/strutil.py | split_pdb_residue | def split_pdb_residue(s):
'''Splits a PDB residue into the numeric and insertion code components.'''
if s.isdigit():
return (int(s), ' ')
else:
assert(s[:-1].isdigit())
return ((s[:-1], s[-1])) | python | def split_pdb_residue(s):
'''Splits a PDB residue into the numeric and insertion code components.'''
if s.isdigit():
return (int(s), ' ')
else:
assert(s[:-1].isdigit())
return ((s[:-1], s[-1])) | [
"def",
"split_pdb_residue",
"(",
"s",
")",
":",
"if",
"s",
".",
"isdigit",
"(",
")",
":",
"return",
"(",
"int",
"(",
"s",
")",
",",
"' '",
")",
"else",
":",
"assert",
"(",
"s",
"[",
":",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
")",
"return",
"(",
"(",
"s",
"[",
":",
"-",
"1",
"]",
",",
"s",
"[",
"-",
"1",
"]",
")",
")"
]
| Splits a PDB residue into the numeric and insertion code components. | [
"Splits",
"a",
"PDB",
"residue",
"into",
"the",
"numeric",
"and",
"insertion",
"code",
"components",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L57-L63 | train |
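A behaviour sketch. Note the asymmetric return types: a purely numeric residue comes back with an int, while a residue carrying an insertion code keeps its numeric part as a string.

print(split_pdb_residue('182'))    # -> (182, ' ')    int plus blank icode
print(split_pdb_residue('182A'))   # -> ('182', 'A')  numeric part stays str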
brunato/lograptor | lograptor/utils.py | do_chunked_gzip | def do_chunked_gzip(infh, outfh, filename):
"""
A memory-friendly way of compressing the data.
"""
import gzip
gzfh = gzip.GzipFile('rawlogs', mode='wb', fileobj=outfh)
if infh.closed:
infh = open(infh.name, 'r')
else:
infh.seek(0)
readsize = 0
sys.stdout.write('Gzipping {0}: '.format(filename))
if os.stat(infh.name).st_size:
infh.seek(0)
progressbar = ProgressBar(sys.stdout, os.stat(infh.name).st_size, "bytes gzipped")
while True:
chunk = infh.read(GZIP_CHUNK_SIZE)
if not chunk:
break
if sys.version_info[0] >= 3:
# noinspection PyArgumentList
gzfh.write(bytes(chunk, "utf-8"))
else:
gzfh.write(chunk)
readsize += len(chunk)
progressbar.redraw(readsize)
gzfh.close() | python | def do_chunked_gzip(infh, outfh, filename):
"""
A memory-friendly way of compressing the data.
"""
import gzip
gzfh = gzip.GzipFile('rawlogs', mode='wb', fileobj=outfh)
if infh.closed:
infh = open(infh.name, 'r')
else:
infh.seek(0)
readsize = 0
sys.stdout.write('Gzipping {0}: '.format(filename))
if os.stat(infh.name).st_size:
infh.seek(0)
progressbar = ProgressBar(sys.stdout, os.stat(infh.name).st_size, "bytes gzipped")
while True:
chunk = infh.read(GZIP_CHUNK_SIZE)
if not chunk:
break
if sys.version_info[0] >= 3:
# noinspection PyArgumentList
gzfh.write(bytes(chunk, "utf-8"))
else:
gzfh.write(chunk)
readsize += len(chunk)
progressbar.redraw(readsize)
gzfh.close() | [
"def",
"do_chunked_gzip",
"(",
"infh",
",",
"outfh",
",",
"filename",
")",
":",
"import",
"gzip",
"gzfh",
"=",
"gzip",
".",
"GzipFile",
"(",
"'rawlogs'",
",",
"mode",
"=",
"'wb'",
",",
"fileobj",
"=",
"outfh",
")",
"if",
"infh",
".",
"closed",
":",
"infh",
"=",
"open",
"(",
"infh",
".",
"name",
",",
"'r'",
")",
"else",
":",
"infh",
".",
"seek",
"(",
"0",
")",
"readsize",
"=",
"0",
"sys",
".",
"stdout",
".",
"write",
"(",
"'Gzipping {0}: '",
".",
"format",
"(",
"filename",
")",
")",
"if",
"os",
".",
"stat",
"(",
"infh",
".",
"name",
")",
".",
"st_size",
":",
"infh",
".",
"seek",
"(",
"0",
")",
"progressbar",
"=",
"ProgressBar",
"(",
"sys",
".",
"stdout",
",",
"os",
".",
"stat",
"(",
"infh",
".",
"name",
")",
".",
"st_size",
",",
"\"bytes gzipped\"",
")",
"while",
"True",
":",
"chunk",
"=",
"infh",
".",
"read",
"(",
"GZIP_CHUNK_SIZE",
")",
"if",
"not",
"chunk",
":",
"break",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"# noinspection PyArgumentList",
"gzfh",
".",
"write",
"(",
"bytes",
"(",
"chunk",
",",
"\"utf-8\"",
")",
")",
"else",
":",
"gzfh",
".",
"write",
"(",
"chunk",
")",
"readsize",
"+=",
"len",
"(",
"chunk",
")",
"progressbar",
".",
"redraw",
"(",
"readsize",
")",
"gzfh",
".",
"close",
"(",
")"
]
| A memory-friendly way of compressing the data. | [
"A",
"memory",
"-",
"friendly",
"way",
"of",
"compressing",
"the",
"data",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L42-L75 | train |
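A minimal sketch of driving the helper above; the file names are hypothetical. infh must be a real file on disk, since the body calls os.stat(infh.name), while outfh only needs to be writable in binary mode.

with open('rawlogs.txt', 'w') as tmp:                 # hypothetical input file
    tmp.write('line 1\nline 2\n')
with open('rawlogs.txt') as infh, open('rawlogs.txt.gz', 'wb') as outfh:
    do_chunked_gzip(infh, outfh, 'rawlogs.txt')       # GZIP_CHUNK_SIZE bytes at a time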
brunato/lograptor | lograptor/utils.py | mail_message | def mail_message(smtp_server, message, from_address, rcpt_addresses):
"""
Send mail using smtp.
"""
if smtp_server[0] == '/':
# Sending the message with local sendmail
p = os.popen(smtp_server, 'w')
p.write(message)
p.close()
else:
# Sending the message using a smtp server
import smtplib
server = smtplib.SMTP(smtp_server)
server.sendmail(from_address, rcpt_addresses, message)
server.quit() | python | def mail_message(smtp_server, message, from_address, rcpt_addresses):
"""
Send mail using smtp.
"""
if smtp_server[0] == '/':
# Sending the message with local sendmail
p = os.popen(smtp_server, 'w')
p.write(message)
p.close()
else:
# Sending the message using a smtp server
import smtplib
server = smtplib.SMTP(smtp_server)
server.sendmail(from_address, rcpt_addresses, message)
server.quit() | [
"def",
"mail_message",
"(",
"smtp_server",
",",
"message",
",",
"from_address",
",",
"rcpt_addresses",
")",
":",
"if",
"smtp_server",
"[",
"0",
"]",
"==",
"'/'",
":",
"# Sending the message with local sendmail",
"p",
"=",
"os",
".",
"popen",
"(",
"smtp_server",
",",
"'w'",
")",
"p",
".",
"write",
"(",
"message",
")",
"p",
".",
"close",
"(",
")",
"else",
":",
"# Sending the message using a smtp server",
"import",
"smtplib",
"server",
"=",
"smtplib",
".",
"SMTP",
"(",
"smtp_server",
")",
"server",
".",
"sendmail",
"(",
"from_address",
",",
"rcpt_addresses",
",",
"message",
")",
"server",
".",
"quit",
"(",
")"
]
| Send mail using smtp. | [
"Send",
"mail",
"using",
"smtp",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L78-L93 | train |
brunato/lograptor | lograptor/utils.py | get_value_unit | def get_value_unit(value, unit, prefix):
"""
Return a human-readable value with unit specification. Try to
transform the unit prefix to the one passed as parameter. When
transforming to a higher prefix, apply nearest integer rounding.
"""
prefixes = ('', 'K', 'M', 'G', 'T')
if len(unit):
if unit[:1] in prefixes:
valprefix = unit[0]
unit = unit[1:]
else:
valprefix = ''
else:
valprefix = ''
while valprefix != prefix:
uidx = prefixes.index(valprefix)
if uidx > prefixes.index(prefix):
value *= 1024
valprefix = prefixes[uidx-1]
else:
if value < 10240:
return value, '{0}{1}'.format(valprefix, unit)
value = int(round(value/1024.0))
valprefix = prefixes[uidx+1]
return value, '{0}{1}'.format(valprefix, unit) | python | def get_value_unit(value, unit, prefix):
"""
Return a human-readable value with unit specification. Try to
transform the unit prefix to the one passed as parameter. When
transforming to a higher prefix, apply nearest integer rounding.
"""
prefixes = ('', 'K', 'M', 'G', 'T')
if len(unit):
if unit[:1] in prefixes:
valprefix = unit[0]
unit = unit[1:]
else:
valprefix = ''
else:
valprefix = ''
while valprefix != prefix:
uidx = prefixes.index(valprefix)
if uidx > prefixes.index(prefix):
value *= 1024
valprefix = prefixes[uidx-1]
else:
if value < 10240:
return value, '{0}{1}'.format(valprefix, unit)
value = int(round(value/1024.0))
valprefix = prefixes[uidx+1]
return value, '{0}{1}'.format(valprefix, unit) | [
"def",
"get_value_unit",
"(",
"value",
",",
"unit",
",",
"prefix",
")",
":",
"prefixes",
"=",
"(",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
")",
"if",
"len",
"(",
"unit",
")",
":",
"if",
"unit",
"[",
":",
"1",
"]",
"in",
"prefixes",
":",
"valprefix",
"=",
"unit",
"[",
"0",
"]",
"unit",
"=",
"unit",
"[",
"1",
":",
"]",
"else",
":",
"valprefix",
"=",
"''",
"else",
":",
"valprefix",
"=",
"''",
"while",
"valprefix",
"!=",
"prefix",
":",
"uidx",
"=",
"prefixes",
".",
"index",
"(",
"valprefix",
")",
"if",
"uidx",
">",
"prefixes",
".",
"index",
"(",
"prefix",
")",
":",
"value",
"*=",
"1024",
"valprefix",
"=",
"prefixes",
"[",
"uidx",
"-",
"1",
"]",
"else",
":",
"if",
"value",
"<",
"10240",
":",
"return",
"value",
",",
"'{0}{1}'",
".",
"format",
"(",
"valprefix",
",",
"unit",
")",
"value",
"=",
"int",
"(",
"round",
"(",
"value",
"/",
"1024.0",
")",
")",
"valprefix",
"=",
"prefixes",
"[",
"uidx",
"+",
"1",
"]",
"return",
"value",
",",
"'{0}{1}'",
".",
"format",
"(",
"valprefix",
",",
"unit",
")"
]
| Return a human-readable value with unit specification. Try to
transform the unit prefix to the one passed as parameter. When
transforming to a higher prefix, apply nearest integer rounding. | [
"Return",
"a",
"human",
"-",
"readable",
"value",
"with",
"unit",
"specification",
".",
"Try",
"to",
"transform",
"the",
"unit",
"prefix",
"to",
"the",
"one",
"passed",
"as",
"parameter",
".",
"When",
"transform",
"to",
"higher",
"prefix",
"apply",
"nearest",
"integer",
"round",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L96-L124 | train |
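A behaviour sketch, assuming the binary (1024-based) prefixes used in the body. The value < 10240 guard means small values are returned as-is rather than promoted to the requested prefix.

print(get_value_unit(20480, 'KB', 'M'))  # -> (20, 'MB')    promoted and rounded
print(get_value_unit(3, 'MB', 'K'))      # -> (3072, 'KB')  expanded downwards
print(get_value_unit(2048, 'KB', 'M'))   # -> (2048, 'KB')  below 10240: kept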
brunato/lograptor | lograptor/utils.py | get_fmt_results | def get_fmt_results(results, limit=5, sep='::', fmt=None):
"""
Return a list of formatted string representations of a result dictionary.
The elements of the key are divided by a separator string. The result is
appended after the key between parentheses. Apply a format transformation
to odd elements of the key if a fmt parameter is passed.
"""
result_list = []
for key in sorted(results, key=lambda x: results[x], reverse=True):
if len(result_list) >= limit and results[key] <= 1:
break
if fmt is not None:
fmtkey = []
for i in range(len(key)):
if i % 2 == 1:
fmtkey.append(fmt.format(key[i]))
else:
fmtkey.append(key[i])
result_list.append(u'{0}({1})'.format(sep.join(fmtkey), results[key]))
else:
result_list.append(u'{0}({1})'.format(sep.join(key), results[key]))
else:
return result_list
if fmt is not None:
result_list.append(fmt.format(u'[%d more skipped]' % (len(results) - len(result_list))))
else:
result_list.append(u'[%d more skipped]' % (len(results) - len(result_list)))
return result_list | python | def get_fmt_results(results, limit=5, sep='::', fmt=None):
"""
Return a list of formatted string representations of a result dictionary.
The elements of the key are divided by a separator string. The result is
appended after the key between parentheses. Apply a format transformation
to odd elements of the key if a fmt parameter is passed.
"""
result_list = []
for key in sorted(results, key=lambda x: results[x], reverse=True):
if len(result_list) >= limit and results[key] <= 1:
break
if fmt is not None:
fmtkey = []
for i in range(len(key)):
if i % 2 == 1:
fmtkey.append(fmt.format(key[i]))
else:
fmtkey.append(key[i])
result_list.append(u'{0}({1})'.format(sep.join(fmtkey), results[key]))
else:
result_list.append(u'{0}({1})'.format(sep.join(key), results[key]))
else:
return result_list
if fmt is not None:
result_list.append(fmt.format(u'[%d more skipped]' % (len(results) - len(result_list))))
else:
result_list.append(u'[%d more skipped]' % (len(results) - len(result_list)))
return result_list | [
"def",
"get_fmt_results",
"(",
"results",
",",
"limit",
"=",
"5",
",",
"sep",
"=",
"'::'",
",",
"fmt",
"=",
"None",
")",
":",
"result_list",
"=",
"[",
"]",
"for",
"key",
"in",
"sorted",
"(",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"results",
"[",
"x",
"]",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"len",
"(",
"result_list",
")",
">=",
"limit",
"and",
"results",
"[",
"key",
"]",
"<=",
"1",
":",
"break",
"if",
"fmt",
"is",
"not",
"None",
":",
"fmtkey",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"key",
")",
")",
":",
"if",
"i",
"%",
"2",
"==",
"1",
":",
"fmtkey",
".",
"append",
"(",
"fmt",
".",
"format",
"(",
"key",
"[",
"i",
"]",
")",
")",
"else",
":",
"fmtkey",
".",
"append",
"(",
"key",
"[",
"i",
"]",
")",
"result_list",
".",
"append",
"(",
"u'{0}({1})'",
".",
"format",
"(",
"sep",
".",
"join",
"(",
"fmtkey",
")",
",",
"results",
"[",
"key",
"]",
")",
")",
"else",
":",
"result_list",
".",
"append",
"(",
"u'{0}({1})'",
".",
"format",
"(",
"sep",
".",
"join",
"(",
"key",
")",
",",
"results",
"[",
"key",
"]",
")",
")",
"else",
":",
"return",
"result_list",
"if",
"fmt",
"is",
"not",
"None",
":",
"result_list",
".",
"append",
"(",
"fmt",
".",
"format",
"(",
"u'[%d more skipped]'",
"%",
"(",
"len",
"(",
"results",
")",
"-",
"len",
"(",
"result_list",
")",
")",
")",
")",
"else",
":",
"result_list",
".",
"append",
"(",
"u'[%d more skipped]'",
"%",
"(",
"len",
"(",
"results",
")",
"-",
"len",
"(",
"result_list",
")",
")",
")",
"return",
"result_list"
]
| Return a list of formatted string representations of a result dictionary.
The elements of the key are divided by a separator string. The result is
appended after the key between parentheses. Apply a format transformation
to odd elements of the key if a fmt parameter is passed. | [
"Return",
"a",
"list",
"of",
"formatted",
"strings",
"representation",
"on",
"a",
"result",
"dictionary",
".",
"The",
"elements",
"of",
"the",
"key",
"are",
"divided",
"by",
"a",
"separator",
"string",
".",
"The",
"result",
"is",
"appended",
"after",
"the",
"key",
"between",
"parentheses",
".",
"Apply",
"a",
"format",
"transformation",
"to",
"odd",
"elements",
"of",
"the",
"key",
"if",
"a",
"fmt",
"parameter",
"is",
"passed",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L137-L164 | train |
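A hedged sketch with a results dict keyed by tuples, which is what sep.join(key) implies; the keys and counts are invented.

results = {('alice', 'login'): 7, ('bob', 'login'): 3, ('carol', 'login'): 1}
print(get_fmt_results(results, limit=2))
# -> ['alice::login(7)', 'bob::login(3)', '[1 more skipped]']
#    single-count entries past the limit are summarised, not listed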
brunato/lograptor | lograptor/utils.py | safe_expand | def safe_expand(template, mapping):
"""
Safe string template expansion. Raises an error if the provided substitution mapping has circularities.
"""
for _ in range(len(mapping) + 1):
_template = template
template = string.Template(template).safe_substitute(mapping)
if template == _template:
return template
else:
raise ValueError("circular mapping provided!") | python | def safe_expand(template, mapping):
"""
Safe string template expansion. Raises an error if the provided substitution mapping has circularities.
"""
for _ in range(len(mapping) + 1):
_template = template
template = string.Template(template).safe_substitute(mapping)
if template == _template:
return template
else:
raise ValueError("circular mapping provided!") | [
"def",
"safe_expand",
"(",
"template",
",",
"mapping",
")",
":",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"mapping",
")",
"+",
"1",
")",
":",
"_template",
"=",
"template",
"template",
"=",
"string",
".",
"Template",
"(",
"template",
")",
".",
"safe_substitute",
"(",
"mapping",
")",
"if",
"template",
"==",
"_template",
":",
"return",
"template",
"else",
":",
"raise",
"ValueError",
"(",
"\"circular mapping provided!\"",
")"
]
| Safe string template expansion. Raises an error if the provided substitution mapping has circularities. | [
"Safe",
"string",
"template",
"expansion",
".",
"Raises",
"an",
"error",
"if",
"the",
"provided",
"substitution",
"mapping",
"has",
"circularities",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L186-L196 | train |
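A behaviour sketch: string.Template placeholders are expanded transitively for up to len(mapping) + 1 passes, and a self-referencing mapping raises instead of looping forever.

print(safe_expand('$greeting', {'greeting': 'hello $name', 'name': 'world'}))
# -> 'hello world' (two substitution passes, then a fixed point)
safe_expand('$a', {'a': '$b', 'b': '$a'})   # raises ValueError: circular mapping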
brunato/lograptor | lograptor/utils.py | protected_property | def protected_property(func):
"""
Class method decorator that creates a property that returns the protected attribute
or the value returned by the wrapped method, if the protected attribute is not defined.
"""
if func.__name__.startswith('_'):
raise ValueError("%r: Cannot decorate a protected method!" % func)
@property
@wraps(func)
def proxy_wrapper(self):
try:
return getattr(self, '_%s' % func.__name__)
except AttributeError:
pass
return func(self)
return proxy_wrapper | python | def protected_property(func):
"""
Class method decorator that creates a property that returns the protected attribute
or the value returned by the wrapped method, if the protected attribute is not defined.
"""
if func.__name__.startswith('_'):
raise ValueError("%r: Cannot decorate a protected method!" % func)
@property
@wraps(func)
def proxy_wrapper(self):
try:
return getattr(self, '_%s' % func.__name__)
except AttributeError:
pass
return func(self)
return proxy_wrapper | [
"def",
"protected_property",
"(",
"func",
")",
":",
"if",
"func",
".",
"__name__",
".",
"startswith",
"(",
"'_'",
")",
":",
"raise",
"ValueError",
"(",
"\"%r: Cannot decorate a protected method!\"",
"%",
"func",
")",
"@",
"property",
"@",
"wraps",
"(",
"func",
")",
"def",
"proxy_wrapper",
"(",
"self",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"'_%s'",
"%",
"func",
".",
"__name__",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"func",
"(",
"self",
")",
"return",
"proxy_wrapper"
]
| Class method decorator that creates a property that returns the protected attribute
or the value returned by the wrapped method, if the protected attribute is not defined. | [
"Class",
"method",
"decorator",
"that",
"creates",
"a",
"property",
"that",
"returns",
"the",
"protected",
"attribute",
"or",
"the",
"value",
"returned",
"by",
"the",
"wrapped",
"method",
"if",
"the",
"protected",
"attribute",
"is",
"not",
"defined",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L214-L231 | train |
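A usage sketch for the decorator above: the property computes its value from the method body until a same-named protected attribute is set, at which point the attribute wins.

class Job(object):
    @protected_property
    def status(self):
        return 'pending'          # fallback while _status is unset

job = Job()
print(job.status)                 # -> 'pending'  (computed)
job._status = 'done'
print(job.status)                 # -> 'done'     (protected attribute wins)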
brunato/lograptor | lograptor/utils.py | open_resource | def open_resource(source):
"""
Opens a resource in binary reading mode. Wraps the resource with a
context manager when it doesn't have one.
:param source: a filepath or an URL.
"""
try:
return open(source, mode='rb')
except (IOError, OSError) as err:
try:
resource = urlopen(source)
except ValueError:
pass
else:
resource.name = resource.url
if hasattr(resource, '__enter__'):
return resource
else:
return closing(resource)
raise err
except TypeError:
if hasattr(source, 'read') and hasattr(source, 'readlines'):
return source # Source is already a file-like object
raise | python | def open_resource(source):
"""
Opens a resource in binary reading mode. Wraps the resource with a
context manager when it doesn't have one.
:param source: a filepath or an URL.
"""
try:
return open(source, mode='rb')
except (IOError, OSError) as err:
try:
resource = urlopen(source)
except ValueError:
pass
else:
resource.name = resource.url
if hasattr(resource, '__enter__'):
return resource
else:
return closing(resource)
raise err
except TypeError:
if hasattr(source, 'read') and hasattr(source, 'readlines'):
return source # Source is already a file-like object
raise | [
"def",
"open_resource",
"(",
"source",
")",
":",
"try",
":",
"return",
"open",
"(",
"source",
",",
"mode",
"=",
"'rb'",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"err",
":",
"try",
":",
"resource",
"=",
"urlopen",
"(",
"source",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"resource",
".",
"name",
"=",
"resource",
".",
"url",
"if",
"hasattr",
"(",
"resource",
",",
"'__enter__'",
")",
":",
"return",
"resource",
"else",
":",
"return",
"closing",
"(",
"resource",
")",
"raise",
"err",
"except",
"TypeError",
":",
"if",
"hasattr",
"(",
"source",
",",
"'read'",
")",
"and",
"hasattr",
"(",
"source",
",",
"'readlines'",
")",
":",
"return",
"source",
"# Source is already a file-like object",
"raise"
]
| Opens a resource in binary reading mode. Wraps the resource with a
context manager when it doesn't have one.
:param source: a filepath or an URL. | [
"Opens",
"a",
"resource",
"in",
"binary",
"reading",
"mode",
".",
"Wraps",
"the",
"resource",
"with",
"a",
"context",
"manager",
"when",
"it",
"doesn",
"t",
"have",
"one",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/utils.py#L255-L279 | train |
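A hedged sketch of the source kinds the function accepts; the path and URL are made up. File-like objects pass straight through, so callers can treat every case uniformly.

with open_resource('/var/log/maillog.1.gz') as fp:      # local path -> 'rb' file
    head = fp.read(4)
with open_resource('http://example.com/log.gz') as fp:  # URL -> wrapped urlopen
    head = fp.read(4)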
DsixTools/python-smeftrunner | smeftrunner/io.py | load | def load(stream, fmt='lha'):
"""Load a parameter file in DSixTools SLHA-like format or its JSON or
YAML representation."""
if fmt == 'lha':
return pylha.load(stream)
elif fmt == 'json':
if isinstance(stream, str):
return json.loads(stream)
else:
return json.load(stream)
elif fmt == 'yaml':
return yaml.load(stream) | python | def load(stream, fmt='lha'):
"""Load a parameter file in DSixTools SLHA-like format or its JSON or
YAML representation."""
if fmt == 'lha':
return pylha.load(stream)
elif fmt == 'json':
if isinstance(stream, str):
return json.loads(stream)
else:
return json.load(stream)
elif fmt == 'yaml':
return yaml.load(stream) | [
"def",
"load",
"(",
"stream",
",",
"fmt",
"=",
"'lha'",
")",
":",
"if",
"fmt",
"==",
"'lha'",
":",
"return",
"pylha",
".",
"load",
"(",
"stream",
")",
"elif",
"fmt",
"==",
"'json'",
":",
"if",
"isinstance",
"(",
"stream",
",",
"str",
")",
":",
"return",
"json",
".",
"loads",
"(",
"stream",
")",
"else",
":",
"return",
"json",
".",
"load",
"(",
"stream",
")",
"elif",
"fmt",
"==",
"'yaml'",
":",
"return",
"yaml",
".",
"load",
"(",
"stream",
")"
]
| Load a parameter file in DSixTools SLHA-like format or its JSON or
YAML representation. | [
"Load",
"a",
"parameter",
"file",
"in",
"DSixTools",
"SLHA",
"-",
"like",
"format",
"or",
"its",
"JSON",
"or",
"YAML",
"representation",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/io.py#L8-L19 | train |
DsixTools/python-smeftrunner | smeftrunner/io.py | sm_lha2dict | def sm_lha2dict(lha):
"""Convert a dictionary returned by pylha from a DSixTools SM input file
into a dictionary of SM values."""
d = OrderedDict()
v = dict(lha['BLOCK']['GAUGE']['values'])
d['g'] = v[1]
d['gp'] = v[2]
d['gs'] = v[3]
v = dict(lha['BLOCK']['SCALAR']['values'])
d['Lambda'] = v[1]
d['m2'] = v[2]
d['Gu'] = lha2matrix(lha['BLOCK']['GU']['values'], (3,3))
if 'IMGU' in lha['BLOCK']:
d['Gu'] = d['Gu'] + 1j*lha2matrix(lha['BLOCK']['IMGU']['values'], (3,3))
d['Gd'] = lha2matrix(lha['BLOCK']['GD']['values'], (3,3))
if 'IMGD' in lha['BLOCK']:
d['Gd'] = d['Gd'] + 1j*lha2matrix(lha['BLOCK']['IMGD']['values'], (3,3))
d['Ge'] = lha2matrix(lha['BLOCK']['GE']['values'], (3,3))
if 'IMGE' in lha['BLOCK']:
d['Ge'] = d['Ge'] + 1j*lha2matrix(lha['BLOCK']['IMGE']['values'], (3,3))
# thetas default to 0
if 'THETA' in lha['BLOCK']:
v = dict(lha['BLOCK']['THETA']['values'])
d['Theta'] = v.get(1, 0)
d['Thetap'] = v.get(2, 0)
d['Thetas'] = v.get(3, 0)
else:
d['Theta'] = 0
d['Thetap'] = 0
d['Thetas'] = 0
return d | python | def sm_lha2dict(lha):
"""Convert a dictionary returned by pylha from a DSixTools SM input file
into a dictionary of SM values."""
d = OrderedDict()
v = dict(lha['BLOCK']['GAUGE']['values'])
d['g'] = v[1]
d['gp'] = v[2]
d['gs'] = v[3]
v = dict(lha['BLOCK']['SCALAR']['values'])
d['Lambda'] = v[1]
d['m2'] = v[2]
d['Gu'] = lha2matrix(lha['BLOCK']['GU']['values'], (3,3))
if 'IMGU' in lha['BLOCK']:
d['Gu'] = d['Gu'] + 1j*lha2matrix(lha['BLOCK']['IMGU']['values'], (3,3))
d['Gd'] = lha2matrix(lha['BLOCK']['GD']['values'], (3,3))
if 'IMGD' in lha['BLOCK']:
d['Gd'] = d['Gd'] + 1j*lha2matrix(lha['BLOCK']['IMGD']['values'], (3,3))
d['Ge'] = lha2matrix(lha['BLOCK']['GE']['values'], (3,3))
if 'IMGE' in lha['BLOCK']:
d['Ge'] = d['Ge'] + 1j*lha2matrix(lha['BLOCK']['IMGE']['values'], (3,3))
# thetas default to 0
if 'THETA' in lha['BLOCK']:
v = dict(lha['BLOCK']['THETA']['values'])
d['Theta'] = v.get(1, 0)
d['Thetap'] = v.get(2, 0)
d['Thetas'] = v.get(3, 0)
else:
d['Theta'] = 0
d['Thetap'] = 0
d['Thetas'] = 0
return d | [
"def",
"sm_lha2dict",
"(",
"lha",
")",
":",
"d",
"=",
"OrderedDict",
"(",
")",
"v",
"=",
"dict",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'GAUGE'",
"]",
"[",
"'values'",
"]",
")",
"d",
"[",
"'g'",
"]",
"=",
"v",
"[",
"1",
"]",
"d",
"[",
"'gp'",
"]",
"=",
"v",
"[",
"2",
"]",
"d",
"[",
"'gs'",
"]",
"=",
"v",
"[",
"3",
"]",
"v",
"=",
"dict",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'SCALAR'",
"]",
"[",
"'values'",
"]",
")",
"d",
"[",
"'Lambda'",
"]",
"=",
"v",
"[",
"1",
"]",
"d",
"[",
"'m2'",
"]",
"=",
"v",
"[",
"2",
"]",
"d",
"[",
"'Gu'",
"]",
"=",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'GU'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"if",
"'IMGU'",
"in",
"lha",
"[",
"'BLOCK'",
"]",
":",
"d",
"[",
"'Gu'",
"]",
"=",
"d",
"[",
"'Gu'",
"]",
"+",
"1j",
"*",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'IMGU'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"d",
"[",
"'Gd'",
"]",
"=",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'GD'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"if",
"'IMGD'",
"in",
"lha",
"[",
"'BLOCK'",
"]",
":",
"d",
"[",
"'Gd'",
"]",
"=",
"d",
"[",
"'Gd'",
"]",
"+",
"1j",
"*",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'IMGD'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"d",
"[",
"'Ge'",
"]",
"=",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'GE'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"if",
"'IMGE'",
"in",
"lha",
"[",
"'BLOCK'",
"]",
":",
"d",
"[",
"'Ge'",
"]",
"=",
"d",
"[",
"'Ge'",
"]",
"+",
"1j",
"*",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'IMGE'",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"# thetas default to 0",
"if",
"'THETA'",
"in",
"lha",
"[",
"'BLOCK'",
"]",
":",
"v",
"=",
"dict",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'THETA'",
"]",
"[",
"'values'",
"]",
")",
"d",
"[",
"'Theta'",
"]",
"=",
"v",
".",
"get",
"(",
"1",
",",
"0",
")",
"d",
"[",
"'Thetap'",
"]",
"=",
"v",
".",
"get",
"(",
"2",
",",
"0",
")",
"d",
"[",
"'Thetas'",
"]",
"=",
"v",
".",
"get",
"(",
"3",
",",
"0",
")",
"else",
":",
"d",
"[",
"'Theta'",
"]",
"=",
"0",
"d",
"[",
"'Thetap'",
"]",
"=",
"0",
"d",
"[",
"'Thetas'",
"]",
"=",
"0",
"return",
"d"
]
| Convert a dictionary returned by pylha from a DSixTools SM input file
into a dictionary of SM values. | [
"Convert",
"a",
"dictionary",
"returned",
"by",
"pylha",
"from",
"a",
"DSixTools",
"SM",
"input",
"file",
"into",
"a",
"dictionary",
"of",
"SM",
"values",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/io.py#L40-L70 | train |
DsixTools/python-smeftrunner | smeftrunner/io.py | sm_dict2lha | def sm_dict2lha(d):
"""Convert a a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file."""
blocks = OrderedDict([
('GAUGE', {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}),
('SCALAR', {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}),
('GU', {'values': matrix2lha(d['Gu'].real)}),
('IMGU', {'values': matrix2lha(d['Gu'].imag)}),
('GD', {'values': matrix2lha(d['Gd'].real)}),
('IMGD', {'values': matrix2lha(d['Gd'].imag)}),
('GE', {'values': matrix2lha(d['Ge'].real)}),
('IMGE', {'values': matrix2lha(d['Ge'].imag)}),
('THETA', {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}),
])
return {'BLOCK': blocks} | python | def sm_dict2lha(d):
"""Convert a a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file."""
blocks = OrderedDict([
('GAUGE', {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}),
('SCALAR', {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}),
('GU', {'values': matrix2lha(d['Gu'].real)}),
('IMGU', {'values': matrix2lha(d['Gu'].imag)}),
('GD', {'values': matrix2lha(d['Gd'].real)}),
('IMGD', {'values': matrix2lha(d['Gd'].imag)}),
('GE', {'values': matrix2lha(d['Ge'].real)}),
('IMGE', {'values': matrix2lha(d['Ge'].imag)}),
('THETA', {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}),
])
return {'BLOCK': blocks} | [
"def",
"sm_dict2lha",
"(",
"d",
")",
":",
"blocks",
"=",
"OrderedDict",
"(",
"[",
"(",
"'GAUGE'",
",",
"{",
"'values'",
":",
"[",
"[",
"1",
",",
"d",
"[",
"'g'",
"]",
".",
"real",
"]",
",",
"[",
"2",
",",
"d",
"[",
"'gp'",
"]",
".",
"real",
"]",
",",
"[",
"3",
",",
"d",
"[",
"'gs'",
"]",
".",
"real",
"]",
"]",
"}",
")",
",",
"(",
"'SCALAR'",
",",
"{",
"'values'",
":",
"[",
"[",
"1",
",",
"d",
"[",
"'Lambda'",
"]",
".",
"real",
"]",
",",
"[",
"2",
",",
"d",
"[",
"'m2'",
"]",
".",
"real",
"]",
"]",
"}",
")",
",",
"(",
"'GU'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Gu'",
"]",
".",
"real",
")",
"}",
")",
",",
"(",
"'IMGU'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Gu'",
"]",
".",
"imag",
")",
"}",
")",
",",
"(",
"'GD'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Gd'",
"]",
".",
"real",
")",
"}",
")",
",",
"(",
"'IMGD'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Gd'",
"]",
".",
"imag",
")",
"}",
")",
",",
"(",
"'GE'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Ge'",
"]",
".",
"real",
")",
"}",
")",
",",
"(",
"'IMGE'",
",",
"{",
"'values'",
":",
"matrix2lha",
"(",
"d",
"[",
"'Ge'",
"]",
".",
"imag",
")",
"}",
")",
",",
"(",
"'THETA'",
",",
"{",
"'values'",
":",
"[",
"[",
"1",
",",
"d",
"[",
"'Theta'",
"]",
".",
"real",
"]",
",",
"[",
"2",
",",
"d",
"[",
"'Thetap'",
"]",
".",
"real",
"]",
",",
"[",
"3",
",",
"d",
"[",
"'Thetas'",
"]",
".",
"real",
"]",
"]",
"}",
")",
",",
"]",
")",
"return",
"{",
"'BLOCK'",
":",
"blocks",
"}"
]
| Convert a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file. | [
"Convert",
"a",
"a",
"dictionary",
"of",
"SM",
"parameters",
"into",
"a",
"dictionary",
"that",
"pylha",
"can",
"convert",
"into",
"a",
"DSixTools",
"SM",
"output",
"file",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/io.py#L72-L86 | train |
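A round-trip sketch: sm_dict2lha is the inverse of sm_lha2dict further up, so a parameter dict can be serialised to the BLOCK layout and read back. The numbers are invented, and the matrix2lha/lha2matrix helpers are assumed from the same module.

import numpy as np
d = {'g': 0.65, 'gp': 0.35, 'gs': 1.2, 'Lambda': 0.13, 'm2': 8800.0,
     'Gu': np.zeros((3, 3)), 'Gd': np.zeros((3, 3)), 'Ge': np.zeros((3, 3)),
     'Theta': 0, 'Thetap': 0, 'Thetas': 0}
lha = sm_dict2lha(d)
lha['BLOCK']['GAUGE']['values']   # -> [[1, 0.65], [2, 0.35], [3, 1.2]]
d2 = sm_lha2dict(lha)             # round-trips back to the same numbers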
DsixTools/python-smeftrunner | smeftrunner/io.py | wc_lha2dict | def wc_lha2dict(lha):
"""Convert a dictionary returned by pylha from a DSixTools WC input file
into a dictionary of Wilson coefficients."""
C = OrderedDict()
# try to read all WCs with 0, 2, or 4 fermions; if not found, set to zero
for k, (block, i) in WC_dict_0f.items():
try:
C[k] = dict(lha['BLOCK'][block]['values'])[i]
except KeyError:
C[k] = 0
for k in definitions.WC_keys_2f:
try:
C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3,3)).real
except KeyError:
C[k] = np.zeros((3,3))
try: # try to add imaginary part
C[k] = C[k] + 1j*lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3,3))
except KeyError:
pass
for k in definitions.WC_keys_4f:
try:
C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3,3,3,3))
except KeyError:
C[k] = np.zeros((3,3,3,3))
try: # try to add imaginary part
C[k] = C[k] + 1j*lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3,3,3,3))
except KeyError:
pass
return C | python | def wc_lha2dict(lha):
"""Convert a dictionary returned by pylha from a DSixTools WC input file
into a dictionary of Wilson coefficients."""
C = OrderedDict()
# try to read all WCs with 0, 2, or 4 fermions; if not found, set to zero
for k, (block, i) in WC_dict_0f.items():
try:
C[k] = dict(lha['BLOCK'][block]['values'])[i]
except KeyError:
C[k] = 0
for k in definitions.WC_keys_2f:
try:
C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3,3)).real
except KeyError:
C[k] = np.zeros((3,3))
try: # try to add imaginary part
C[k] = C[k] + 1j*lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3,3))
except KeyError:
pass
for k in definitions.WC_keys_4f:
try:
C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3,3,3,3))
except KeyError:
C[k] = np.zeros((3,3,3,3))
try: # try to add imaginary part
C[k] = C[k] + 1j*lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3,3,3,3))
except KeyError:
pass
return C | [
"def",
"wc_lha2dict",
"(",
"lha",
")",
":",
"C",
"=",
"OrderedDict",
"(",
")",
"# try to read all WCs with 0, 2, or 4 fermions; if not found, set to zero",
"for",
"k",
",",
"(",
"block",
",",
"i",
")",
"in",
"WC_dict_0f",
".",
"items",
"(",
")",
":",
"try",
":",
"C",
"[",
"k",
"]",
"=",
"dict",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"block",
"]",
"[",
"'values'",
"]",
")",
"[",
"i",
"]",
"except",
"KeyError",
":",
"C",
"[",
"k",
"]",
"=",
"0",
"for",
"k",
"in",
"definitions",
".",
"WC_keys_2f",
":",
"try",
":",
"C",
"[",
"k",
"]",
"=",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'WC'",
"+",
"k",
".",
"upper",
"(",
")",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
".",
"real",
"except",
"KeyError",
":",
"C",
"[",
"k",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
")",
"try",
":",
"# try to add imaginary part",
"C",
"[",
"k",
"]",
"=",
"C",
"[",
"k",
"]",
"+",
"1j",
"*",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'IMWC'",
"+",
"k",
".",
"upper",
"(",
")",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
")",
")",
"except",
"KeyError",
":",
"pass",
"for",
"k",
"in",
"definitions",
".",
"WC_keys_4f",
":",
"try",
":",
"C",
"[",
"k",
"]",
"=",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'WC'",
"+",
"k",
".",
"upper",
"(",
")",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
",",
"3",
",",
"3",
")",
")",
"except",
"KeyError",
":",
"C",
"[",
"k",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
",",
"3",
",",
"3",
")",
")",
"try",
":",
"# try to add imaginary part",
"C",
"[",
"k",
"]",
"=",
"C",
"[",
"k",
"]",
"+",
"1j",
"*",
"lha2matrix",
"(",
"lha",
"[",
"'BLOCK'",
"]",
"[",
"'IMWC'",
"+",
"k",
".",
"upper",
"(",
")",
"]",
"[",
"'values'",
"]",
",",
"(",
"3",
",",
"3",
",",
"3",
",",
"3",
")",
")",
"except",
"KeyError",
":",
"pass",
"return",
"C"
]
| Convert a dictionary returned by pylha from a DSixTools WC input file
into a dictionary of Wilson coefficients. | [
"Convert",
"a",
"dictionary",
"returned",
"by",
"pylha",
"from",
"a",
"DSixTools",
"WC",
"input",
"file",
"into",
"a",
"dictionary",
"of",
"Wilson",
"coefficients",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/io.py#L107-L135 | train |
Kortemme-Lab/klab | klab/pymath/cartesian/rmsd.py | compute_rmsd_by_matrix | def compute_rmsd_by_matrix(dataframe_1, dataframe_2, use_assertion = False):
'''Computes the RMSD of two pandas dataframes. The dataframes are expected to be of equal dimensions and use_assertion
can be set to assert that the row indices match. '''
if use_assertion: assert([i for i in dataframe_1.index] == [i for i in dataframe_2.index]) # Note: this assertion creates garbage memory allocations
num_points = dataframe_1.shape[0]
return numpy.linalg.norm(dataframe_1 - dataframe_2) / numpy.sqrt(num_points) | python | def compute_rmsd_by_matrix(dataframe_1, dataframe_2, use_assertion = False):
'''Computes the RMSD of two pandas dataframes. The dataframes are expected to be of equal dimensions and use_assertion
can be set to assert that the row indices match. '''
if use_assertion: assert([i for i in dataframe_1.index] == [i for i in dataframe_2.index]) # Note: this assertion creates garbage memory allocations
num_points = dataframe_1.shape[0]
return numpy.linalg.norm(dataframe_1 - dataframe_2) / numpy.sqrt(num_points) | [
"def",
"compute_rmsd_by_matrix",
"(",
"dataframe_1",
",",
"dataframe_2",
",",
"use_assertion",
"=",
"False",
")",
":",
"if",
"use_assertion",
":",
"assert",
"(",
"[",
"i",
"for",
"i",
"in",
"dataframe_1",
".",
"index",
"]",
"==",
"[",
"i",
"for",
"i",
"in",
"dataframe_2",
".",
"index",
"]",
")",
"# Note: this assertion creates garbage memory allocations",
"num_points",
"=",
"dataframe_1",
".",
"shape",
"[",
"0",
"]",
"return",
"numpy",
".",
"linalg",
".",
"norm",
"(",
"dataframe_1",
"-",
"dataframe_2",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"num_points",
")"
]
| Computes the RMSD of two pandas dataframes. The dataframes are expected to be of equal dimensions and use_assertion
can be set to assert that the row indices match. | [
"Computes",
"the",
"RMSD",
"of",
"two",
"pandas",
"dataframes",
".",
"The",
"dataframes",
"are",
"expected",
"to",
"be",
"of",
"equal",
"dimensions",
"and",
"use_assertion",
"can",
"be",
"set",
"to",
"assert",
"that",
"the",
"row",
"indices",
"match",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/pymath/cartesian/rmsd.py#L3-L8 | train |
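A quick numeric check: two point clouds offset by two units along x give an RMSD of exactly 2 (Frobenius norm of the difference divided by the square root of the point count).

import pandas
a = pandas.DataFrame([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], columns=list('xyz'))
b = a + [2.0, 0.0, 0.0]               # shift every point by +2 along x
print(compute_rmsd_by_matrix(a, b))   # -> 2.0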
projectshift/shift-boiler | boiler/feature/jinja_extensions.py | jinja_extensions_feature | def jinja_extensions_feature(app):
""" Enables custom templating extensions """
# register jinja filters
app.jinja_env.globals['momentjs'] = MomentJsFilters
app.jinja_env.filters.update(MomentJsFilters().get_filters())
app.jinja_env.filters.update(DateFilters().get_filters())
app.jinja_env.filters.update(HumanizeFilters().get_filters())
# register custom jinja functions
app.jinja_env.globals.update(dict(
asset=functions.asset,
dev_proxy=functions.dev_proxy
)) | python | def jinja_extensions_feature(app):
""" Enables custom templating extensions """
# register jinja filters
app.jinja_env.globals['momentjs'] = MomentJsFilters
app.jinja_env.filters.update(MomentJsFilters().get_filters())
app.jinja_env.filters.update(DateFilters().get_filters())
app.jinja_env.filters.update(HumanizeFilters().get_filters())
# register custom jinja functions
app.jinja_env.globals.update(dict(
asset=functions.asset,
dev_proxy=functions.dev_proxy
)) | [
"def",
"jinja_extensions_feature",
"(",
"app",
")",
":",
"# register jinja filters",
"app",
".",
"jinja_env",
".",
"globals",
"[",
"'momentjs'",
"]",
"=",
"MomentJsFilters",
"app",
".",
"jinja_env",
".",
"filters",
".",
"update",
"(",
"MomentJsFilters",
"(",
")",
".",
"get_filters",
"(",
")",
")",
"app",
".",
"jinja_env",
".",
"filters",
".",
"update",
"(",
"DateFilters",
"(",
")",
".",
"get_filters",
"(",
")",
")",
"app",
".",
"jinja_env",
".",
"filters",
".",
"update",
"(",
"HumanizeFilters",
"(",
")",
".",
"get_filters",
"(",
")",
")",
"# register custom jinja functions",
"app",
".",
"jinja_env",
".",
"globals",
".",
"update",
"(",
"dict",
"(",
"asset",
"=",
"functions",
".",
"asset",
",",
"dev_proxy",
"=",
"functions",
".",
"dev_proxy",
")",
")"
]
| Enables custom templating extensions | [
"Enables",
"custom",
"templating",
"extensions"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/jinja_extensions.py#L7-L20 | train |
ethan92429/onshapepy | onshapepy/core/utils.py | log | def log(msg, level=0):
'''
Logs a message to the console, with optional level parameter
Args:
- msg (str): message to send to console
- level (int): log level; 0 for info, 1 for error (default = 0)
'''
red = '\033[91m'
endc = '\033[0m'
# configure the logging module
cfg = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'stdout': {
'format': '[%(levelname)s]: %(asctime)s - %(message)s',
'datefmt': '%x %X'
},
'stderr': {
'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc,
'datefmt': '%x %X'
}
},
'handlers': {
'stdout': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'stdout'
},
'stderr': {
'class': 'logging.StreamHandler',
'level': 'ERROR',
'formatter': 'stderr'
}
},
'loggers': {
'info': {
'handlers': ['stdout'],
'level': 'INFO',
'propagate': True
},
'error': {
'handlers': ['stderr'],
'level': 'ERROR',
'propagate': False
}
}
}
dictConfig(cfg)
lg = 'info' if level == 0 else 'error'
lvl = 20 if level == 0 else 40
logger = logging.getLogger(lg)
logger.log(lvl, msg) | python | def log(msg, level=0):
'''
Logs a message to the console, with optional level parameter
Args:
- msg (str): message to send to console
- level (int): log level; 0 for info, 1 for error (default = 0)
'''
red = '\033[91m'
endc = '\033[0m'
# configure the logging module
cfg = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'stdout': {
'format': '[%(levelname)s]: %(asctime)s - %(message)s',
'datefmt': '%x %X'
},
'stderr': {
'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc,
'datefmt': '%x %X'
}
},
'handlers': {
'stdout': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'stdout'
},
'stderr': {
'class': 'logging.StreamHandler',
'level': 'ERROR',
'formatter': 'stderr'
}
},
'loggers': {
'info': {
'handlers': ['stdout'],
'level': 'INFO',
'propagate': True
},
'error': {
'handlers': ['stderr'],
'level': 'ERROR',
'propagate': False
}
}
}
dictConfig(cfg)
lg = 'info' if level == 0 else 'error'
lvl = 20 if level == 0 else 40
logger = logging.getLogger(lg)
logger.log(lvl, msg) | [
"def",
"log",
"(",
"msg",
",",
"level",
"=",
"0",
")",
":",
"red",
"=",
"'\\033[91m'",
"endc",
"=",
"'\\033[0m'",
"# configure the logging module",
"cfg",
"=",
"{",
"'version'",
":",
"1",
",",
"'disable_existing_loggers'",
":",
"False",
",",
"'formatters'",
":",
"{",
"'stdout'",
":",
"{",
"'format'",
":",
"'[%(levelname)s]: %(asctime)s - %(message)s'",
",",
"'datefmt'",
":",
"'%x %X'",
"}",
",",
"'stderr'",
":",
"{",
"'format'",
":",
"red",
"+",
"'[%(levelname)s]: %(asctime)s - %(message)s'",
"+",
"endc",
",",
"'datefmt'",
":",
"'%x %X'",
"}",
"}",
",",
"'handlers'",
":",
"{",
"'stdout'",
":",
"{",
"'class'",
":",
"'logging.StreamHandler'",
",",
"'level'",
":",
"'DEBUG'",
",",
"'formatter'",
":",
"'stdout'",
"}",
",",
"'stderr'",
":",
"{",
"'class'",
":",
"'logging.StreamHandler'",
",",
"'level'",
":",
"'ERROR'",
",",
"'formatter'",
":",
"'stderr'",
"}",
"}",
",",
"'loggers'",
":",
"{",
"'info'",
":",
"{",
"'handlers'",
":",
"[",
"'stdout'",
"]",
",",
"'level'",
":",
"'INFO'",
",",
"'propagate'",
":",
"True",
"}",
",",
"'error'",
":",
"{",
"'handlers'",
":",
"[",
"'stderr'",
"]",
",",
"'level'",
":",
"'ERROR'",
",",
"'propagate'",
":",
"False",
"}",
"}",
"}",
"dictConfig",
"(",
"cfg",
")",
"lg",
"=",
"'info'",
"if",
"level",
"==",
"0",
"else",
"'error'",
"lvl",
"=",
"20",
"if",
"level",
"==",
"0",
"else",
"40",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"lg",
")",
"logger",
".",
"log",
"(",
"lvl",
",",
"msg",
")"
]
| Logs a message to the console, with optional level parameter
Args:
- msg (str): message to send to console
- level (int): log level; 0 for info, 1 for error (default = 0) | [
"Logs",
"a",
"message",
"to",
"the",
"console",
"with",
"optional",
"level",
"paramater"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/utils.py#L17-L75 | train |
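Hedged usage of the helper above: level 0 routes through the 'info' logger to stdout, any other level through the red-formatted 'error' logger on stderr. The messages are illustrative.

log('build started')            # stdout: [INFO]: <date time> - build started
log('build failed', level=1)    # stderr, in red: [ERROR]: <date time> - build failed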
ethan92429/onshapepy | onshapepy/assembly.py | Assembly.insert | def insert(self, part):
""" Insert a part into this assembly.
Args:
- part (onshapepy.part.Part) A Part instance that will be inserted.
Returns:
- requests.Response: Onshape response data
"""
params = {k: str(v) for k,v in part.params.items()}
res=c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)
return res | python | def insert(self, part):
""" Insert a part into this assembly.
Args:
- part (onshapepy.part.Part) A Part instance that will be inserted.
Returns:
- requests.Response: Onshape response data
"""
params = {k: str(v) for k,v in part.params.items()}
res=c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)
return res | [
"def",
"insert",
"(",
"self",
",",
"part",
")",
":",
"params",
"=",
"{",
"k",
":",
"str",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"part",
".",
"params",
".",
"items",
"(",
")",
"}",
"res",
"=",
"c",
".",
"create_assembly_instance",
"(",
"self",
".",
"uri",
".",
"as_dict",
"(",
")",
",",
"part",
".",
"uri",
".",
"as_dict",
"(",
")",
",",
"params",
")",
"return",
"res"
]
| Insert a part into this assembly.
Args:
- part (onshapepy.part.Part) A Part instance that will be inserted.
Returns:
- requests.Response: Onshape response data | [
"Insert",
"a",
"part",
"into",
"this",
"assembly",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/assembly.py#L26-L38 | train |
TUNE-Archive/freight_forwarder | freight_forwarder/utils/utils.py | parse_http_scheme | def parse_http_scheme(uri):
"""
Match on the http scheme; if no match is found, http is assumed.
"""
regex = re.compile(
r'^(?:http)s?://',
flags=re.IGNORECASE
)
match = regex.match(uri)
return match.group(0) if match else 'http://' | python | def parse_http_scheme(uri):
"""
Match on the http scheme; if no match is found, http is assumed.
"""
regex = re.compile(
r'^(?:http)s?://',
flags=re.IGNORECASE
)
match = regex.match(uri)
return match.group(0) if match else 'http://' | [
"def",
"parse_http_scheme",
"(",
"uri",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'^(?:http)s?://'",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"match",
"=",
"regex",
".",
"match",
"(",
"uri",
")",
"return",
"match",
".",
"group",
"(",
"0",
")",
"if",
"match",
"else",
"'http://'"
]
| Match on the http scheme; if no match is found, http is assumed. | [
"match",
"on",
"http",
"scheme",
"if",
"no",
"match",
"is",
"found",
"will",
"assume",
"http"
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L110-L120 | train |
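A behaviour sketch: a present scheme is returned with its original casing (the regex is case-insensitive but group(0) echoes the match), and anything without one falls back to 'http://'.

print(parse_http_scheme('HTTPS://registry:5000'))  # -> 'HTTPS://'
print(parse_http_scheme('registry:5000'))          # -> 'http://'  (fallback)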
TUNE-Archive/freight_forwarder | freight_forwarder/utils/utils.py | parse_stream | def parse_stream(response):
"""
Take the stream from the docker-py lib and display it to the user.
This also builds a stream list and returns it.
"""
stream_data = []
stream = stdout
for data in response:
if data:
try:
data = data.decode('utf-8')
except AttributeError as e:
logger.exception("Unable to parse stream, Attribute Error Raised: {0}".format(e))
stream.write(data)
continue
try:
normalized_data = normalize_keys(json.loads(data))
except ValueError:
stream.write(data)
continue
except TypeError:
stream.write(data)
continue
if 'progress' in normalized_data:
stream_data.append(normalized_data)
_display_progress(normalized_data, stream)
elif 'error' in normalized_data:
_display_error(normalized_data, stream)
elif 'status' in normalized_data:
stream_data.append(normalized_data)
_display_status(normalized_data, stream)
elif 'stream' in normalized_data:
stream_data.append(normalized_data)
_display_stream(normalized_data, stream)
else:
stream.write(data)
stream.flush()
return stream_data | python | def parse_stream(response):
"""
Take the stream from the docker-py lib and display it to the user.
This also builds a stream list and returns it.
"""
stream_data = []
stream = stdout
for data in response:
if data:
try:
data = data.decode('utf-8')
except AttributeError as e:
logger.exception("Unable to parse stream, Attribute Error Raised: {0}".format(e))
stream.write(data)
continue
try:
normalized_data = normalize_keys(json.loads(data))
except ValueError:
stream.write(data)
continue
except TypeError:
stream.write(data)
continue
if 'progress' in normalized_data:
stream_data.append(normalized_data)
_display_progress(normalized_data, stream)
elif 'error' in normalized_data:
_display_error(normalized_data, stream)
elif 'status' in normalized_data:
stream_data.append(normalized_data)
_display_status(normalized_data, stream)
elif 'stream' in normalized_data:
stream_data.append(normalized_data)
_display_stream(normalized_data, stream)
else:
stream.write(data)
stream.flush()
return stream_data | [
"def",
"parse_stream",
"(",
"response",
")",
":",
"stream_data",
"=",
"[",
"]",
"stream",
"=",
"stdout",
"for",
"data",
"in",
"response",
":",
"if",
"data",
":",
"try",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"AttributeError",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"\"Unable to parse stream, Attribute Error Raised: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"stream",
".",
"write",
"(",
"data",
")",
"continue",
"try",
":",
"normalized_data",
"=",
"normalize_keys",
"(",
"json",
".",
"loads",
"(",
"data",
")",
")",
"except",
"ValueError",
":",
"stream",
".",
"write",
"(",
"data",
")",
"continue",
"except",
"TypeError",
":",
"stream",
".",
"write",
"(",
"data",
")",
"continue",
"if",
"'progress'",
"in",
"normalized_data",
":",
"stream_data",
".",
"append",
"(",
"normalized_data",
")",
"_display_progress",
"(",
"normalized_data",
",",
"stream",
")",
"elif",
"'error'",
"in",
"normalized_data",
":",
"_display_error",
"(",
"normalized_data",
",",
"stream",
")",
"elif",
"'status'",
"in",
"normalized_data",
":",
"stream_data",
".",
"append",
"(",
"normalized_data",
")",
"_display_status",
"(",
"normalized_data",
",",
"stream",
")",
"elif",
"'stream'",
"in",
"normalized_data",
":",
"stream_data",
".",
"append",
"(",
"normalized_data",
")",
"_display_stream",
"(",
"normalized_data",
",",
"stream",
")",
"else",
":",
"stream",
".",
"write",
"(",
"data",
")",
"stream",
".",
"flush",
"(",
")",
"return",
"stream_data"
]
| take stream from docker-py lib and display it to the user.
this also builds a stream list and returns it. | [
"take",
"stream",
"from",
"docker",
"-",
"py",
"lib",
"and",
"display",
"it",
"to",
"the",
"user",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L159-L204 | train |
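A minimal sketch of feeding parse_stream a docker-py response; `client` is assumed to be a configured docker-py client created elsewhere (docker.Client in older releases, docker.APIClient in newer ones), and the image name is illustrative:

response = client.pull('alpine', tag='latest', stream=True)
events = parse_stream(response)  # echoes progress to stdout and returns the parsed events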
TUNE-Archive/freight_forwarder | freight_forwarder/utils/utils.py | normalize_keys | def normalize_keys(suspect, snake_case=True):
"""
take a dict and turn all of its string keys into snake_case
"""
if not isinstance(suspect, dict):
raise TypeError('you must pass a dict.')
for key in list(suspect):
if not isinstance(key, six.string_types):
continue
if snake_case:
s1 = first_cap_re.sub(r'\1_\2', key)
new_key = all_cap_re.sub(r'\1_\2', s1).lower() # .replace('-', '_')
else:
new_key = key.lower()
value = suspect.pop(key)
if isinstance(value, dict):
suspect[new_key] = normalize_keys(value, snake_case)
elif isinstance(value, list):
for i in range(0, len(value)):
if isinstance(value[i], dict):
normalize_keys(value[i], snake_case)
suspect[new_key] = value
else:
suspect[new_key] = value
return suspect | python | def normalize_keys(suspect, snake_case=True):
"""
take a dict and turn all of its string keys into snake_case
"""
if not isinstance(suspect, dict):
raise TypeError('you must pass a dict.')
for key in list(suspect):
if not isinstance(key, six.string_types):
continue
if snake_case:
s1 = first_cap_re.sub(r'\1_\2', key)
new_key = all_cap_re.sub(r'\1_\2', s1).lower() # .replace('-', '_')
else:
new_key = key.lower()
value = suspect.pop(key)
if isinstance(value, dict):
suspect[new_key] = normalize_keys(value, snake_case)
elif isinstance(value, list):
for i in range(0, len(value)):
if isinstance(value[i], dict):
normalize_keys(value[i], snake_case)
suspect[new_key] = value
else:
suspect[new_key] = value
return suspect | [
"def",
"normalize_keys",
"(",
"suspect",
",",
"snake_case",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"suspect",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'you must pass a dict.'",
")",
"for",
"key",
"in",
"list",
"(",
"suspect",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"six",
".",
"string_types",
")",
":",
"continue",
"if",
"snake_case",
":",
"s1",
"=",
"first_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"key",
")",
"new_key",
"=",
"all_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")",
"# .replace('-', '_')",
"else",
":",
"new_key",
"=",
"key",
".",
"lower",
"(",
")",
"value",
"=",
"suspect",
".",
"pop",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"suspect",
"[",
"new_key",
"]",
"=",
"normalize_keys",
"(",
"value",
",",
"snake_case",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"value",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
"[",
"i",
"]",
",",
"dict",
")",
":",
"normalize_keys",
"(",
"value",
"[",
"i",
"]",
",",
"snake_case",
")",
"suspect",
"[",
"new_key",
"]",
"=",
"value",
"else",
":",
"suspect",
"[",
"new_key",
"]",
"=",
"value",
"return",
"suspect"
]
| take a dict and turn all of its string keys into snake_case | [
"take",
"a",
"dict",
"and",
"turn",
"all",
"of",
"its",
"type",
"string",
"keys",
"into",
"snake_case"
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L227-L256 | train |
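Illustrative input/output, assuming the conventional camel-to-snake patterns for the module-level first_cap_re/all_cap_re regexes (their definitions are not shown in this record); note the dict is modified in place and also returned:

data = {'ContainerConfig': {'ExposedPorts': [{'HostPort': 8080}]}}
normalize_keys(data)
# -> {'container_config': {'exposed_ports': [{'host_port': 8080}]}}
normalize_keys({'FooBar': 1}, snake_case=False)
# -> {'foobar': 1} (keys are only lowercased)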
TUNE-Archive/freight_forwarder | freight_forwarder/utils/utils.py | _display_status | def _display_status(normalized_data, stream):
"""
print status message from docker-py stream.
"""
if 'Pull complete' in normalized_data['status'] or 'Download complete' in normalized_data['status']:
stream.write("\n")
if 'id' in normalized_data:
stream.write("%s - " % normalized_data['id'])
stream.write("{0}\n".format(normalized_data['status'])) | python | def _display_status(normalized_data, stream):
"""
print status message from docker-py stream.
"""
if 'Pull complete' in normalized_data['status'] or 'Download complete' in normalized_data['status']:
stream.write("\n")
if 'id' in normalized_data:
stream.write("%s - " % normalized_data['id'])
stream.write("{0}\n".format(normalized_data['status'])) | [
"def",
"_display_status",
"(",
"normalized_data",
",",
"stream",
")",
":",
"if",
"'Pull complete'",
"in",
"normalized_data",
"[",
"'status'",
"]",
"or",
"'Download complete'",
"in",
"normalized_data",
"[",
"'status'",
"]",
":",
"stream",
".",
"write",
"(",
"\"\\n\"",
")",
"if",
"'id'",
"in",
"normalized_data",
":",
"stream",
".",
"write",
"(",
"\"%s - \"",
"%",
"normalized_data",
"[",
"'id'",
"]",
")",
"stream",
".",
"write",
"(",
"\"{0}\\n\"",
".",
"format",
"(",
"normalized_data",
"[",
"'status'",
"]",
")",
")"
]
| print status message from docker-py stream. | [
"print",
"status",
"message",
"from",
"docker",
"-",
"py",
"stream",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L367-L377 | train |
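A sketch of the event shape this helper expects (the id and status values are illustrative):

import sys
_display_status({'status': 'Download complete', 'id': 'a3ed95caeb02'}, sys.stdout)
# writes a newline first (status contains 'Download complete'),
# then "a3ed95caeb02 - Download complete\n"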
TUNE-Archive/freight_forwarder | freight_forwarder/utils/utils.py | _display_stream | def _display_stream(normalized_data, stream):
"""
print stream message from docker-py stream.
"""
try:
stream.write(normalized_data['stream'])
except UnicodeEncodeError:
stream.write(normalized_data['stream'].encode("utf-8")) | python | def _display_stream(normalized_data, stream):
"""
print stream message from docker-py stream.
"""
try:
stream.write(normalized_data['stream'])
except UnicodeEncodeError:
stream.write(normalized_data['stream'].encode("utf-8")) | [
"def",
"_display_stream",
"(",
"normalized_data",
",",
"stream",
")",
":",
"try",
":",
"stream",
".",
"write",
"(",
"normalized_data",
"[",
"'stream'",
"]",
")",
"except",
"UnicodeEncodeError",
":",
"stream",
".",
"write",
"(",
"normalized_data",
"[",
"'stream'",
"]",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
]
| print stream message from docker-py stream. | [
"print",
"stream",
"message",
"from",
"docker",
"-",
"py",
"stream",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/utils/utils.py#L380-L387 | train |
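A sketch in the same vein; the UnicodeEncodeError fallback matters on Python 2, where the destination stream may not accept unicode directly:

import sys
_display_stream({'stream': u'Step 1/4 : FROM python:2.7\n'}, sys.stdout)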
fjwCode/cerium | cerium/service.py | BaseService.version | def version(self) -> str:
'''Show the version number of Android Debug Bridge.'''
output, _ = self._execute('version')
return output.splitlines()[0].split()[-1] | python | def version(self) -> str:
'''Show the version number of Android Debug Bridge.'''
output, _ = self._execute('version')
return output.splitlines()[0].split()[-1] | [
"def",
"version",
"(",
"self",
")",
"->",
"str",
":",
"output",
",",
"_",
"=",
"self",
".",
"_execute",
"(",
"'version'",
")",
"return",
"output",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]"
]
| Show the version number of Android Debug Bridge. | [
"Show",
"the",
"version",
"number",
"of",
"Android",
"Debug",
"Bridge",
"."
]
| f6e06e0dcf83a0bc924828e9d6cb81383ed2364f | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/service.py#L68-L71 | train |
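A hypothetical usage sketch; AndroidDriver is assumed to be cerium's public entry point and to inherit this service method:

from cerium import AndroidDriver  # assumed public import
driver = AndroidDriver()
print(driver.version())  # e.g. '1.0.40' (value illustrative)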
fjwCode/cerium | cerium/service.py | BaseService.get_state | def get_state(self) -> str:
'''offline | bootloader | device'''
output, error = self._execute('get-state')
if error:
raise DeviceConnectionException(error.split(':', 1)[-1].strip())
return output.strip() | python | def get_state(self) -> str:
'''offline | bootloader | device'''
output, error = self._execute('get-state')
if error:
raise DeviceConnectionException(error.split(':', 1)[-1].strip())
return output.strip() | [
"def",
"get_state",
"(",
"self",
")",
"->",
"str",
":",
"output",
",",
"error",
"=",
"self",
".",
"_execute",
"(",
"'get-state'",
")",
"if",
"error",
":",
"raise",
"DeviceConnectionException",
"(",
"error",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
")",
"return",
"output",
".",
"strip",
"(",
")"
]
| offline | bootloader | device | [
"offline",
"|",
"bootloader",
"|",
"device"
]
| f6e06e0dcf83a0bc924828e9d6cb81383ed2364f | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/service.py#L102-L107 | train |
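A sketch reusing the driver above; DeviceConnectionException is the exception named in this record, but its import path here is an assumption:

from cerium.exceptions import DeviceConnectionException  # assumed module path
try:
    state = driver.get_state()  # 'offline' | 'bootloader' | 'device'
except DeviceConnectionException:
    state = None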
cozy/python_cozy_management | cozy_management/ssl.py | acme_init | def acme_init():
'''
Init acme key
'''
acme_private_key = ACME_PRIVATE_KEY
acme_intermediate_cert = ACME_INTERMEDIATE_CERT
acme_intermediate_cert_url = ACME_INTERMEDIATE_CERT_URL
if not os.path.isfile(acme_private_key):
print 'Create {}'.format(acme_private_key)
cmd = 'openssl genrsa 4096 > {acme_private_key}'.format(
acme_private_key=acme_private_key)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
helpers.file_rights(acme_private_key, mode=0444, uid=0, gid=0)
else:
print 'Already exist: {}'.format(acme_private_key)
if not os.path.isfile(acme_intermediate_cert):
print 'Create {}'.format(acme_intermediate_cert)
cmd = 'wget -O - {acme_intermediate_cert_url} > {acme_intermediate_cert}'
cmd = cmd.format(acme_intermediate_cert_url=acme_intermediate_cert_url,
acme_intermediate_cert=acme_intermediate_cert)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
helpers.file_rights(acme_intermediate_cert, mode=0444, uid=0, gid=0)
else:
print 'Already exist: {}'.format(acme_intermediate_cert) | python | def acme_init():
'''
Init acme key
'''
acme_private_key = ACME_PRIVATE_KEY
acme_intermediate_cert = ACME_INTERMEDIATE_CERT
acme_intermediate_cert_url = ACME_INTERMEDIATE_CERT_URL
if not os.path.isfile(acme_private_key):
print 'Create {}'.format(acme_private_key)
cmd = 'openssl genrsa 4096 > {acme_private_key}'.format(
acme_private_key=acme_private_key)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
helpers.file_rights(acme_private_key, mode=0444, uid=0, gid=0)
else:
print 'Already exist: {}'.format(acme_private_key)
if not os.path.isfile(acme_intermediate_cert):
print 'Create {}'.format(acme_intermediate_cert)
cmd = 'wget -O - {acme_intermediate_cert_url} > {acme_intermediate_cert}'
cmd = cmd.format(acme_intermediate_cert_url=acme_intermediate_cert_url,
acme_intermediate_cert=acme_intermediate_cert)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
helpers.file_rights(acme_intermediate_cert, mode=0444, uid=0, gid=0)
else:
print 'Already exist: {}'.format(acme_intermediate_cert) | [
"def",
"acme_init",
"(",
")",
":",
"acme_private_key",
"=",
"ACME_PRIVATE_KEY",
"acme_intermediate_cert",
"=",
"ACME_INTERMEDIATE_CERT",
"acme_intermediate_cert_url",
"=",
"ACME_INTERMEDIATE_CERT_URL",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"acme_private_key",
")",
":",
"print",
"'Create {}'",
".",
"format",
"(",
"acme_private_key",
")",
"cmd",
"=",
"'openssl genrsa 4096 > {acme_private_key}'",
".",
"format",
"(",
"acme_private_key",
"=",
"acme_private_key",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"helpers",
".",
"file_rights",
"(",
"acme_private_key",
",",
"mode",
"=",
"0444",
",",
"uid",
"=",
"0",
",",
"gid",
"=",
"0",
")",
"else",
":",
"print",
"'Already exist: {}'",
".",
"format",
"(",
"acme_private_key",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"acme_intermediate_cert",
")",
":",
"print",
"'Create {}'",
".",
"format",
"(",
"acme_intermediate_cert",
")",
"cmd",
"=",
"'wget -O - {acme_intermediate_cert_url} > {acme_intermediate_cert}'",
"cmd",
"=",
"cmd",
".",
"format",
"(",
"acme_intermediate_cert_url",
"=",
"acme_intermediate_cert_url",
",",
"acme_intermediate_cert",
"=",
"acme_intermediate_cert",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"helpers",
".",
"file_rights",
"(",
"acme_intermediate_cert",
",",
"mode",
"=",
"0444",
",",
"uid",
"=",
"0",
",",
"gid",
"=",
"0",
")",
"else",
":",
"print",
"'Already exist: {}'",
".",
"format",
"(",
"acme_intermediate_cert",
")"
]
| Init acme key | [
"Init",
"acme",
"key"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L57-L90 | train |
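A sketch of what acme_init amounts to in shell terms (the constants' values are not shown in this record, so the placeholders below are illustrative):

#   openssl genrsa 4096 > <ACME_PRIVATE_KEY>
#   wget -O - <ACME_INTERMEDIATE_CERT_URL> > <ACME_INTERMEDIATE_CERT>
acme_init()  # idempotent: existing files are reported and left untouched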
cozy/python_cozy_management | cozy_management/ssl.py | acme_sign_certificate | def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE):
'''
Sign certificate with acme_tiny for let's encrypt
'''
private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name)
certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name)
certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH,
common_name)
signed_cert = '{certificates_path}/{common_name}-signed.crt'.format(
certificates_path=CERTIFICATES_PATH,
common_name=common_name)
generate_certificate(common_name, size)
cmd = 'openssl req -new -sha256 -key {private_key_path}'
cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}'
cmd = cmd.format(
private_key_path=private_key_path,
common_name=common_name,
certificate_request_path=certificate_request_path
)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert)
cron = "/etc/cron.monthly/acme-renew"
if not os.path.exists(cron):
with open(cron, "w") as file:
file.write("#!/bin/bash\ncozy_management renew_certificates\n")
st = os.stat(cron)
os.chmod(cron, st.st_mode | S_IXUSR) | python | def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE):
'''
Sign certificate with acme_tiny for let's encrypt
'''
private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name)
certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name)
certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH,
common_name)
signed_cert = '{certificates_path}/{common_name}-signed.crt'.format(
certificates_path=CERTIFICATES_PATH,
common_name=common_name)
generate_certificate(common_name, size)
cmd = 'openssl req -new -sha256 -key {private_key_path}'
cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}'
cmd = cmd.format(
private_key_path=private_key_path,
common_name=common_name,
certificate_request_path=certificate_request_path
)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert)
cron = "/etc/cron.monthly/acme-renew"
if not os.path.exists(cron):
with open(cron, "w") as file:
file.write("#!/bin/bash\ncozy_management renew_certificates\n")
st = os.stat(cron)
os.chmod(cron, st.st_mode | S_IXUSR) | [
"def",
"acme_sign_certificate",
"(",
"common_name",
",",
"size",
"=",
"DEFAULT_KEY_SIZE",
")",
":",
"private_key_path",
"=",
"'{}/{}.key'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"common_name",
")",
"certificate_path",
"=",
"'{}/{}.crt'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"common_name",
")",
"certificate_request_path",
"=",
"'{}/{}.csr'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"common_name",
")",
"signed_cert",
"=",
"'{certificates_path}/{common_name}-signed.crt'",
".",
"format",
"(",
"certificates_path",
"=",
"CERTIFICATES_PATH",
",",
"common_name",
"=",
"common_name",
")",
"generate_certificate",
"(",
"common_name",
",",
"size",
")",
"cmd",
"=",
"'openssl req -new -sha256 -key {private_key_path}'",
"cmd",
"+=",
"' -subj \"/CN={common_name}\" -out {certificate_request_path}'",
"cmd",
"=",
"cmd",
".",
"format",
"(",
"private_key_path",
"=",
"private_key_path",
",",
"common_name",
"=",
"common_name",
",",
"certificate_request_path",
"=",
"certificate_request_path",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"_internal_sign_certificate",
"(",
"certificate_path",
",",
"certificate_request_path",
",",
"signed_cert",
")",
"cron",
"=",
"\"/etc/cron.monthly/acme-renew\"",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cron",
")",
":",
"with",
"open",
"(",
"cron",
",",
"\"w\"",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"\"#!/bin/bash\\ncozy_management renew_certificates\\n\"",
")",
"st",
"=",
"os",
".",
"stat",
"(",
"cron",
")",
"os",
".",
"chmod",
"(",
"cron",
",",
"st",
".",
"st_mode",
"|",
"S_IXUSR",
")"
]
| Sign certificate with acme_tiny for let's encrypt | [
"Sign",
"certificate",
"with",
"acme_tiny",
"for",
"let",
"s",
"encrypt"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L120-L155 | train |
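A usage sketch; the domain is illustrative, and size falls back to DEFAULT_KEY_SIZE when omitted:

acme_sign_certificate('example.cozycloud.cc')
acme_sign_certificate('example.cozycloud.cc', size=2048)  # explicit key size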
cozy/python_cozy_management | cozy_management/ssl.py | acme_renew_certificates | def acme_renew_certificates():
'''
Renew certificates with acme_tiny for let's encrypt
'''
for csr in glob(os.path.join(CERTIFICATES_PATH, '*.csr')):
common_name = os.path.basename(csr)
common_name = os.path.splitext(common_name)[0]
certificate_path = "{}.crt".format(common_name)
certificate_path = os.path.join(CERTIFICATES_PATH, certificate_path)
with open(certificate_path) as file:
crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
file.read())
expiration = crt.get_notAfter()
expiration = _parse_asn1_generalized_date(expiration)
remaining = expiration - datetime.utcnow()
if remaining > timedelta(days=30):
print "No need to renew {} ({})".format(certificate_path, remaining)
continue
print "Renewing {} ({})".format(certificate_path, remaining)
certificate_request_path = "{}.csr".format(common_name)
certificate_request_path = os.path.join(CERTIFICATES_PATH,
certificate_request_path)
signed_cert = "{}-signed.crt".format(common_name)
signed_cert = os.path.join(CERTIFICATES_PATH,
signed_cert)
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert) | python | def acme_renew_certificates():
'''
Renew certificates with acme_tiny for let's encrypt
'''
for csr in glob(os.path.join(CERTIFICATES_PATH, '*.csr')):
common_name = os.path.basename(csr)
common_name = os.path.splitext(common_name)[0]
certificate_path = "{}.crt".format(common_name)
certificate_path = os.path.join(CERTIFICATES_PATH, certificate_path)
with open(certificate_path) as file:
crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
file.read())
expiration = crt.get_notAfter()
expiration = _parse_asn1_generalized_date(expiration)
remaining = expiration - datetime.utcnow()
if remaining > timedelta(days=30):
print "No need to renew {} ({})".format(certificate_path, remaining)
continue
print "Renewing {} ({})".format(certificate_path, remaining)
certificate_request_path = "{}.csr".format(common_name)
certificate_request_path = os.path.join(CERTIFICATES_PATH,
certificate_request_path)
signed_cert = "{}-signed.crt".format(common_name)
signed_cert = os.path.join(CERTIFICATES_PATH,
signed_cert)
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert) | [
"def",
"acme_renew_certificates",
"(",
")",
":",
"for",
"csr",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CERTIFICATES_PATH",
",",
"'*.csr'",
")",
")",
":",
"common_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"csr",
")",
"common_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"common_name",
")",
"[",
"0",
"]",
"certificate_path",
"=",
"\"{}.crt\"",
".",
"format",
"(",
"common_name",
")",
"certificate_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CERTIFICATES_PATH",
",",
"certificate_path",
")",
"with",
"open",
"(",
"certificate_path",
")",
"as",
"file",
":",
"crt",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_certificate",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"file",
".",
"read",
"(",
")",
")",
"expiration",
"=",
"crt",
".",
"get_notAfter",
"(",
")",
"expiration",
"=",
"_parse_asn1_generalized_date",
"(",
"expiration",
")",
"remaining",
"=",
"expiration",
"-",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"remaining",
">",
"timedelta",
"(",
"days",
"=",
"30",
")",
":",
"print",
"\"No need to renew {} ({})\"",
".",
"format",
"(",
"certificate_path",
",",
"remaining",
")",
"continue",
"print",
"\"Renewing {} ({})\"",
".",
"format",
"(",
"certificate_path",
",",
"remaining",
")",
"certificate_request_path",
"=",
"\"{}.csr\"",
".",
"format",
"(",
"common_name",
")",
"certificate_request_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CERTIFICATES_PATH",
",",
"certificate_request_path",
")",
"signed_cert",
"=",
"\"{}-signed.crt\"",
".",
"format",
"(",
"common_name",
")",
"signed_cert",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CERTIFICATES_PATH",
",",
"signed_cert",
")",
"_internal_sign_certificate",
"(",
"certificate_path",
",",
"certificate_request_path",
",",
"signed_cert",
")"
]
| Renew certificates with acme_tiny for let's encrypt | [
"Renew",
"certificates",
"with",
"acme_tiny",
"for",
"let",
"s",
"encrypt"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L160-L192 | train |
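A sketch of how this is meant to be invoked; per the 30-day cutoff above, certificates with more validity remaining are skipped and the rest are re-signed in place:

acme_renew_certificates()  # intended to run from the /etc/cron.monthly/acme-renew job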
cozy/python_cozy_management | cozy_management/ssl.py | get_crt_common_name | def get_crt_common_name(certificate_path=OLD_CERTIFICATE_PATH):
'''
Get CN from certificate
'''
try:
certificate_file = open(certificate_path)
crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
certificate_file.read())
return crt.get_subject().commonName
except IOError:
return None | python | def get_crt_common_name(certificate_path=OLD_CERTIFICATE_PATH):
'''
Get CN from certificate
'''
try:
certificate_file = open(certificate_path)
crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
certificate_file.read())
return crt.get_subject().commonName
except IOError:
return None | [
"def",
"get_crt_common_name",
"(",
"certificate_path",
"=",
"OLD_CERTIFICATE_PATH",
")",
":",
"try",
":",
"certificate_file",
"=",
"open",
"(",
"certificate_path",
")",
"crt",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_certificate",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"certificate_file",
".",
"read",
"(",
")",
")",
"return",
"crt",
".",
"get_subject",
"(",
")",
".",
"commonName",
"except",
"IOError",
":",
"return",
"None"
]
| Get CN from certificate | [
"Get",
"CN",
"from",
"certificate"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L246-L256 | train |
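A usage sketch; the explicit path is illustrative:

cn = get_crt_common_name()  # defaults to OLD_CERTIFICATE_PATH
cn = get_crt_common_name('/etc/cozy/certs/example.crt')
# returns None when the file cannot be opened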
cozy/python_cozy_management | cozy_management/ssl.py | normalize_cert_dir | def normalize_cert_dir():
'''
Convert old certificate layout to new one
'''
current_cn = get_crt_common_name()
if not os.path.isdir(COZY_CONFIG_PATH):
print 'Need to create {}'.format(COZY_CONFIG_PATH)
os.mkdir(COZY_CONFIG_PATH, 0755)
if not os.path.isdir(CERTIFICATES_PATH):
print 'Need to create {}'.format(CERTIFICATES_PATH)
os.mkdir(CERTIFICATES_PATH, 0755)
if not os.path.isdir(ACME_PRIVATE_PATH):
print 'Need to create {}'.format(ACME_PRIVATE_PATH)
os.mkdir(ACME_PRIVATE_PATH, 0700)
if os.path.isfile(OLD_CERTIFICATE_PATH) and \
not os.path.islink(OLD_CERTIFICATE_PATH):
target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
print 'Move {} to {}'.format(OLD_CERTIFICATE_PATH, target)
os.rename(OLD_CERTIFICATE_PATH, target)
else:
print 'Nothing to do for {}'.format(OLD_CERTIFICATE_PATH)
if os.path.isfile(OLD_PRIVATE_KEY_PATH) and \
not os.path.islink(OLD_PRIVATE_KEY_PATH):
target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
print 'Move {} to {}'.format(OLD_PRIVATE_KEY_PATH, target)
os.rename(OLD_PRIVATE_KEY_PATH, target)
else:
print 'Nothing to do for {}'.format(OLD_PRIVATE_KEY_PATH)
if current_cn:
make_links(current_cn) | python | def normalize_cert_dir():
'''
Convert old certificate layout to new one
'''
current_cn = get_crt_common_name()
if not os.path.isdir(COZY_CONFIG_PATH):
print 'Need to create {}'.format(COZY_CONFIG_PATH)
os.mkdir(COZY_CONFIG_PATH, 0755)
if not os.path.isdir(CERTIFICATES_PATH):
print 'Need to create {}'.format(CERTIFICATES_PATH)
os.mkdir(CERTIFICATES_PATH, 0755)
if not os.path.isdir(ACME_PRIVATE_PATH):
print 'Need to create {}'.format(ACME_PRIVATE_PATH)
os.mkdir(ACME_PRIVATE_PATH, 0700)
if os.path.isfile(OLD_CERTIFICATE_PATH) and \
not os.path.islink(OLD_CERTIFICATE_PATH):
target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
print 'Move {} to {}'.format(OLD_CERTIFICATE_PATH, target)
os.rename(OLD_CERTIFICATE_PATH, target)
else:
print 'Nothing to do for {}'.format(OLD_CERTIFICATE_PATH)
if os.path.isfile(OLD_PRIVATE_KEY_PATH) and \
not os.path.islink(OLD_PRIVATE_KEY_PATH):
target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
print 'Move {} to {}'.format(OLD_PRIVATE_KEY_PATH, target)
os.rename(OLD_PRIVATE_KEY_PATH, target)
else:
print 'Nothing to do for {}'.format(OLD_PRIVATE_KEY_PATH)
if current_cn:
make_links(current_cn) | [
"def",
"normalize_cert_dir",
"(",
")",
":",
"current_cn",
"=",
"get_crt_common_name",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"COZY_CONFIG_PATH",
")",
":",
"print",
"'Need to create {}'",
".",
"format",
"(",
"COZY_CONFIG_PATH",
")",
"os",
".",
"mkdir",
"(",
"COZY_CONFIG_PATH",
",",
"0755",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"CERTIFICATES_PATH",
")",
":",
"print",
"'Need to create {}'",
".",
"format",
"(",
"CERTIFICATES_PATH",
")",
"os",
".",
"mkdir",
"(",
"CERTIFICATES_PATH",
",",
"0755",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"ACME_PRIVATE_PATH",
")",
":",
"print",
"'Need to create {}'",
".",
"format",
"(",
"ACME_PRIVATE_PATH",
")",
"os",
".",
"mkdir",
"(",
"ACME_PRIVATE_PATH",
",",
"0700",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"OLD_CERTIFICATE_PATH",
")",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(",
"OLD_CERTIFICATE_PATH",
")",
":",
"target",
"=",
"'{}/{}.crt'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"current_cn",
")",
"print",
"'Move {} to {}'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"target",
")",
"os",
".",
"rename",
"(",
"OLD_CERTIFICATE_PATH",
",",
"target",
")",
"else",
":",
"print",
"'Nothing to do for {}'",
".",
"format",
"(",
"OLD_CERTIFICATE_PATH",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"OLD_PRIVATE_KEY_PATH",
")",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(",
"OLD_PRIVATE_KEY_PATH",
")",
":",
"target",
"=",
"'{}/{}.key'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"current_cn",
")",
"print",
"'Move {} to {}'",
".",
"format",
"(",
"OLD_PRIVATE_KEY_PATH",
",",
"target",
")",
"os",
".",
"rename",
"(",
"OLD_PRIVATE_KEY_PATH",
",",
"target",
")",
"else",
":",
"print",
"'Nothing to do for {}'",
".",
"format",
"(",
"OLD_PRIVATE_KEY_PATH",
")",
"if",
"current_cn",
":",
"make_links",
"(",
"current_cn",
")"
]
| Convert old certificate layout to new one | [
"Put",
"old",
"cerfificate",
"form",
"to",
"new",
"one"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L259-L294 | train |
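A sketch: the migration is a single idempotent call that moves the legacy flat files into per-CN files under CERTIFICATES_PATH and rebuilds the nginx symlinks for the detected common name:

normalize_cert_dir()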
cozy/python_cozy_management | cozy_management/ssl.py | clean_links | def clean_links():
'''
Clean symlink for nginx
'''
if os.path.isfile(CURRENT_CERTIFICATE_PATH):
print 'Delete symlink {}'.format(CURRENT_CERTIFICATE_PATH)
os.remove(CURRENT_CERTIFICATE_PATH)
if os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
print 'Delete symlink {}'.format(CURRENT_PRIVATE_KEY_PATH)
os.remove(CURRENT_PRIVATE_KEY_PATH) | python | def clean_links():
'''
Clean symlink for nginx
'''
if os.path.isfile(CURRENT_CERTIFICATE_PATH):
print 'Delete symlink {}'.format(CURRENT_CERTIFICATE_PATH)
os.remove(CURRENT_CERTIFICATE_PATH)
if os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
print 'Delete symlink {}'.format(CURRENT_PRIVATE_KEY_PATH)
os.remove(CURRENT_PRIVATE_KEY_PATH) | [
"def",
"clean_links",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"CURRENT_CERTIFICATE_PATH",
")",
":",
"print",
"'Delete symlink {}'",
".",
"format",
"(",
"CURRENT_CERTIFICATE_PATH",
")",
"os",
".",
"remove",
"(",
"CURRENT_CERTIFICATE_PATH",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"CURRENT_PRIVATE_KEY_PATH",
")",
":",
"print",
"'Delete symlink {}'",
".",
"format",
"(",
"CURRENT_PRIVATE_KEY_PATH",
")",
"os",
".",
"remove",
"(",
"CURRENT_PRIVATE_KEY_PATH",
")"
]
| Clean symlink for nginx | [
"Clean",
"symlink",
"for",
"nginx"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L297-L307 | train |