warrenspe/hconf | hconf/subparsers/cmdline.py | Cmdline.getArgumentParser
Source: https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/cmdline.py#L41-L61

```python
def getArgumentParser(self, configManager, config):
    """
    May be overridden to provide custom functionality.

    Constructs an argparse.ArgumentParser used to parse configuration options
    from the command line.

    Inputs: configManager - Our parent ConfigManager instance which is
                            constructing the Config object.
            config - The _Config object containing configuration options
                     populated thus far.

    Outputs: An argparse.ArgumentParser object initialized to parse command
             line configuration options.
    """
    argParser = argparse.ArgumentParser(self.description)
    for configName, configDict in configManager.configs.items():
        # Command-line flags use dashes where config names use underscores.
        cmdName = configName.replace("_", "-")
        argParser.add_argument(
            '--%s' % cmdName,
            default=None,
            help=configDict['description']
        )
    return argParser
```
"""
May be overidden to provide custom functionality.
Constructs an argparse.ArgumentParser used to parse configuration options from the command line.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: An argparse.ArgumentParser object intialized to parse command line configuration options.
"""
argParser = argparse.ArgumentParser(self.description)
for configName, configDict in configManager.configs.items():
cmdName = configName.replace("_", "-")
argParser.add_argument(
'--%s' % cmdName,
default=None,
help=configDict['description']
)
return argParser | [
"def",
"getArgumentParser",
"(",
"self",
",",
"configManager",
",",
"config",
")",
":",
"argParser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"self",
".",
"description",
")",
"for",
"configName",
",",
"configDict",
"in",
"configManager",
".",
"configs",
".",
"items",
"(",
")",
":",
"cmdName",
"=",
"configName",
".",
"replace",
"(",
"\"_\"",
",",
"\"-\"",
")",
"argParser",
".",
"add_argument",
"(",
"'--%s'",
"%",
"cmdName",
",",
"default",
"=",
"None",
",",
"help",
"=",
"configDict",
"[",
"'description'",
"]",
")",
"return",
"argParser"
] | May be overidden to provide custom functionality.
Constructs an argparse.ArgumentParser used to parse configuration options from the command line.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: An argparse.ArgumentParser object intialized to parse command line configuration options. | [
"May",
"be",
"overidden",
"to",
"provide",
"custom",
"functionality",
".",
"Constructs",
"an",
"argparse",
".",
"ArgumentParser",
"used",
"to",
"parse",
"configuration",
"options",
"from",
"the",
"command",
"line",
"."
] | 12074d15dc3641d3903488c95d89a507386a32d5 | https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/cmdline.py#L41-L61 | train |
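For orientation, here is a self-contained sketch of what this method produces. The `FakeConfigManager` below is a hypothetical stand-in that models only the `.configs` mapping the method actually reads:

```python
import argparse

class FakeConfigManager(object):
    # Hypothetical stand-in: only the .configs mapping is modeled here.
    configs = {
        "log_level": {"description": "Logging verbosity"},
        "max_retries": {"description": "Retry budget"},
    }

argParser = argparse.ArgumentParser("demo")
for configName, configDict in FakeConfigManager.configs.items():
    # Underscores become dashes on the command line: --log-level, --max-retries
    argParser.add_argument("--%s" % configName.replace("_", "-"),
                           default=None, help=configDict["description"])

print(vars(argParser.parse_args(["--log-level", "DEBUG"])))
# {'log_level': 'DEBUG', 'max_retries': None}
```

Note that argparse maps `--log-level` back to the destination `log_level`, which is why `Cmdline.parse` below can return `vars(...)` directly.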
warrenspe/hconf | hconf/subparsers/cmdline.py | Cmdline.parse
Source: https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/cmdline.py#L63-L74

```python
def parse(self, configManager, config):
    """
    Parses command-line arguments, given a series of configuration options.

    Inputs: configManager - Our parent ConfigManager instance which is
                            constructing the Config object.
            config - The _Config object containing configuration options
                     populated thus far.

    Outputs: A dictionary of new configuration options to add to the Config object.
    """
    argParser = self.getArgumentParser(configManager, config)
    return vars(argParser.parse_args())
```
"""
Parses commandline arguments, given a series of configuration options.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
argParser = self.getArgumentParser(configManager, config)
return vars(argParser.parse_args()) | [
"def",
"parse",
"(",
"self",
",",
"configManager",
",",
"config",
")",
":",
"argParser",
"=",
"self",
".",
"getArgumentParser",
"(",
"configManager",
",",
"config",
")",
"return",
"vars",
"(",
"argParser",
".",
"parse_args",
"(",
")",
")"
] | Parses commandline arguments, given a series of configuration options.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object. | [
"Parses",
"commandline",
"arguments",
"given",
"a",
"series",
"of",
"configuration",
"options",
"."
] | 12074d15dc3641d3903488c95d89a507386a32d5 | https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/cmdline.py#L63-L74 | train |
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.candidates
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L105-L127

```python
def candidates(text):
    """
    Given a `text` string, get candidates and context for feature
    extraction and classification.
    """
    for Pmatch in finditer(TARGET, text):
        # the punctuation mark itself
        P = Pmatch.group(1)
        # is it a boundary?
        B = bool(match(NEWLINE, Pmatch.group(5)))
        # L & R
        start = Pmatch.start()
        end = Pmatch.end()
        Lmatch = search(LTOKEN, text[max(0, start - BUFSIZE):start])
        if not Lmatch:  # this happens when a line begins with '.'
            continue
        L = word_tokenize(" " + Lmatch.group(1))[-1]
        Rmatch = search(RTOKEN, text[end:end + BUFSIZE])
        if not Rmatch:  # this happens at the end of the file, usually
            continue
        R = word_tokenize(Rmatch.group(1) + " ")[0]
        # complete observation
        yield Observation(L, P, R, B, end)
```
"""
Given a `text` string, get candidates and context for feature
extraction and classification
"""
for Pmatch in finditer(TARGET, text):
# the punctuation mark itself
P = Pmatch.group(1)
# is it a boundary?
B = bool(match(NEWLINE, Pmatch.group(5)))
# L & R
start = Pmatch.start()
end = Pmatch.end()
Lmatch = search(LTOKEN, text[max(0, start - BUFSIZE):start])
if not Lmatch: # this happens when a line begins with '.'
continue
L = word_tokenize(" " + Lmatch.group(1))[-1]
Rmatch = search(RTOKEN, text[end:end + BUFSIZE])
if not Rmatch: # this happens at the end of the file, usually
continue
R = word_tokenize(Rmatch.group(1) + " ")[0]
# complete observation
yield Observation(L, P, R, B, end) | [
"def",
"candidates",
"(",
"text",
")",
":",
"for",
"Pmatch",
"in",
"finditer",
"(",
"TARGET",
",",
"text",
")",
":",
"# the punctuation mark itself",
"P",
"=",
"Pmatch",
".",
"group",
"(",
"1",
")",
"# is it a boundary?",
"B",
"=",
"bool",
"(",
"match",
"(",
"NEWLINE",
",",
"Pmatch",
".",
"group",
"(",
"5",
")",
")",
")",
"# L & R",
"start",
"=",
"Pmatch",
".",
"start",
"(",
")",
"end",
"=",
"Pmatch",
".",
"end",
"(",
")",
"Lmatch",
"=",
"search",
"(",
"LTOKEN",
",",
"text",
"[",
"max",
"(",
"0",
",",
"start",
"-",
"BUFSIZE",
")",
":",
"start",
"]",
")",
"if",
"not",
"Lmatch",
":",
"# this happens when a line begins with '.'",
"continue",
"L",
"=",
"word_tokenize",
"(",
"\" \"",
"+",
"Lmatch",
".",
"group",
"(",
"1",
")",
")",
"[",
"-",
"1",
"]",
"Rmatch",
"=",
"search",
"(",
"RTOKEN",
",",
"text",
"[",
"end",
":",
"end",
"+",
"BUFSIZE",
"]",
")",
"if",
"not",
"Rmatch",
":",
"# this happens at the end of the file, usually",
"continue",
"R",
"=",
"word_tokenize",
"(",
"Rmatch",
".",
"group",
"(",
"1",
")",
"+",
"\" \"",
")",
"[",
"0",
"]",
"# complete observation",
"yield",
"Observation",
"(",
"L",
",",
"P",
",",
"R",
",",
"B",
",",
"end",
")"
] | Given a `text` string, get candidates and context for feature
extraction and classification | [
"Given",
"a",
"text",
"string",
"get",
"candidates",
"and",
"context",
"for",
"feature",
"extraction",
"and",
"classification"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L105-L127 | train |
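The regex constants (TARGET, NEWLINE, LTOKEN, RTOKEN) and BUFSIZE live at module level and are not shown in this excerpt. A simplified, self-contained sketch of the same windowing idea, with illustrative patterns that are assumptions rather than the module's actual ones:

```python
import re
from collections import namedtuple

Observation = namedtuple("Observation", ["L", "P", "R", "B", "end"])
BUFSIZE = 32  # assumed context-window size

def toy_candidates(text):
    # Simplified stand-in pattern; the real TARGET/NEWLINE regexes differ.
    for m in re.finditer(r"([.!?])(\s+)", text):
        P = m.group(1)
        B = "\n" in m.group(2)            # newline after punctuation => boundary
        L = text[max(0, m.start() - BUFSIZE):m.start()].split()[-1:]
        R = text[m.end():m.end() + BUFSIZE].split()[:1]
        if L and R:                        # skip line-initial '.' / end-of-buffer
            yield Observation(L[0], P, R[0], B, m.end())

for obs in toy_candidates("Dr. Smith arrived. He sat down.\nThen he left."):
    print(obs)
```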
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.extract_one
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L132-L173

```python
def extract_one(self, L, P, R):
    """
    Given left context `L`, punctuation mark `P`, and right context
    `R`, extract features. Probability distributions for any
    quantile-based features will not be modified.
    """
    yield "*bias*"
    # L feature(s)
    if match(QUOTE, L):
        L = QUOTE_TOKEN
    elif isnumberlike(L):
        L = NUMBER_TOKEN
    else:
        yield "len(L)={}".format(min(len(L), CLIP))
        if "." in L:
            yield "L:*period*"
        if not self.nocase:
            # NOTE: this inspects R's casing while emitting an L feature;
            # that looks like an upstream inconsistency worth double-checking.
            cf = case_feature(R)
            if cf:
                yield "L:{}'".format(cf)
        L = L.upper()
        if not any(char in VOWELS for char in L):
            yield "L:*no-vowel*"
    L_feat = "L='{}'".format(L)
    yield L_feat
    # P feature(s)
    yield "P='{}'".format(P)
    # R feature(s)
    if match(QUOTE, R):
        R = QUOTE_TOKEN
    elif isnumberlike(R):
        R = NUMBER_TOKEN
    else:
        if not self.nocase:
            cf = case_feature(R)
            if cf:
                yield "R:{}'".format(cf)
        R = R.upper()
    R_feat = "R='{}'".format(R)
    yield R_feat
    # the combined L,R feature
    yield "{},{}".format(L_feat, R_feat)
```
"""
Given left context `L`, punctuation mark `P`, and right context
R`, extract features. Probability distributions for any
quantile-based features will not be modified.
"""
yield "*bias*"
# L feature(s)
if match(QUOTE, L):
L = QUOTE_TOKEN
elif isnumberlike(L):
L = NUMBER_TOKEN
else:
yield "len(L)={}".format(min(len(L), CLIP))
if "." in L:
yield "L:*period*"
if not self.nocase:
cf = case_feature(R)
if cf:
yield "L:{}'".format(cf)
L = L.upper()
if not any(char in VOWELS for char in L):
yield "L:*no-vowel*"
L_feat = "L='{}'".format(L)
yield L_feat
# P feature(s)
yield "P='{}'".format(P)
# R feature(s)
if match(QUOTE, R):
R = QUOTE_TOKEN
elif isnumberlike(R):
R = NUMBER_TOKEN
else:
if not self.nocase:
cf = case_feature(R)
if cf:
yield "R:{}'".format(cf)
R = R.upper()
R_feat = "R='{}'".format(R)
yield R_feat
# the combined L,R feature
yield "{},{}".format(L_feat, R_feat) | [
"def",
"extract_one",
"(",
"self",
",",
"L",
",",
"P",
",",
"R",
")",
":",
"yield",
"\"*bias*\"",
"# L feature(s)",
"if",
"match",
"(",
"QUOTE",
",",
"L",
")",
":",
"L",
"=",
"QUOTE_TOKEN",
"elif",
"isnumberlike",
"(",
"L",
")",
":",
"L",
"=",
"NUMBER_TOKEN",
"else",
":",
"yield",
"\"len(L)={}\"",
".",
"format",
"(",
"min",
"(",
"len",
"(",
"L",
")",
",",
"CLIP",
")",
")",
"if",
"\".\"",
"in",
"L",
":",
"yield",
"\"L:*period*\"",
"if",
"not",
"self",
".",
"nocase",
":",
"cf",
"=",
"case_feature",
"(",
"R",
")",
"if",
"cf",
":",
"yield",
"\"L:{}'\"",
".",
"format",
"(",
"cf",
")",
"L",
"=",
"L",
".",
"upper",
"(",
")",
"if",
"not",
"any",
"(",
"char",
"in",
"VOWELS",
"for",
"char",
"in",
"L",
")",
":",
"yield",
"\"L:*no-vowel*\"",
"L_feat",
"=",
"\"L='{}'\"",
".",
"format",
"(",
"L",
")",
"yield",
"L_feat",
"# P feature(s)",
"yield",
"\"P='{}'\"",
".",
"format",
"(",
"P",
")",
"# R feature(s)",
"if",
"match",
"(",
"QUOTE",
",",
"R",
")",
":",
"R",
"=",
"QUOTE_TOKEN",
"elif",
"isnumberlike",
"(",
"R",
")",
":",
"R",
"=",
"NUMBER_TOKEN",
"else",
":",
"if",
"not",
"self",
".",
"nocase",
":",
"cf",
"=",
"case_feature",
"(",
"R",
")",
"if",
"cf",
":",
"yield",
"\"R:{}'\"",
".",
"format",
"(",
"cf",
")",
"R",
"=",
"R",
".",
"upper",
"(",
")",
"R_feat",
"=",
"\"R='{}'\"",
".",
"format",
"(",
"R",
")",
"yield",
"R_feat",
"# the combined L,R feature",
"yield",
"\"{},{}\"",
".",
"format",
"(",
"L_feat",
",",
"R_feat",
")"
] | Given left context `L`, punctuation mark `P`, and right context
R`, extract features. Probability distributions for any
quantile-based features will not be modified. | [
"Given",
"left",
"context",
"L",
"punctuation",
"mark",
"P",
"and",
"right",
"context",
"R",
"extract",
"features",
".",
"Probability",
"distributions",
"for",
"any",
"quantile",
"-",
"based",
"features",
"will",
"not",
"be",
"modified",
"."
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L132-L173 | train |
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.fit
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L177-L187

```python
def fit(self, text, epochs=EPOCHS):
    """
    Given a string `text`, use it to train the segmentation classifier
    for `epochs` iterations.
    """
    logger.debug("Extracting features and classifications.")
    Phi = []
    Y = []
    for (L, P, R, gold, _) in Detector.candidates(text):
        Phi.append(self.extract_one(L, P, R))
        Y.append(gold)
    self.classifier.fit(Y, Phi, epochs)
    logger.debug("Fitting complete.")
```
""" Given a string `text`, use it to train the segmentation classifier for `epochs` iterations.
"""
logger.debug("Extracting features and classifications.")
Phi = []
Y = []
for (L, P, R, gold, _) in Detector.candidates(text):
Phi.append(self.extract_one(L, P, R))
Y.append(gold)
self.classifier.fit(Y, Phi, epochs)
logger.debug("Fitting complete.") | [
"def",
"fit",
"(",
"self",
",",
"text",
",",
"epochs",
"=",
"EPOCHS",
")",
":",
"logger",
".",
"debug",
"(",
"\"Extracting features and classifications.\"",
")",
"Phi",
"=",
"[",
"]",
"Y",
"=",
"[",
"]",
"for",
"(",
"L",
",",
"P",
",",
"R",
",",
"gold",
",",
"_",
")",
"in",
"Detector",
".",
"candidates",
"(",
"text",
")",
":",
"Phi",
".",
"append",
"(",
"self",
".",
"extract_one",
"(",
"L",
",",
"P",
",",
"R",
")",
")",
"Y",
".",
"append",
"(",
"gold",
")",
"self",
".",
"classifier",
".",
"fit",
"(",
"Y",
",",
"Phi",
",",
"epochs",
")",
"logger",
".",
"debug",
"(",
"\"Fitting complete.\"",
")"
] | Given a string `text`, use it to train the segmentation classifier for `epochs` iterations. | [
"Given",
"a",
"string",
"text",
"use",
"it",
"to",
"train",
"the",
"segmentation",
"classifier",
"for",
"epochs",
"iterations",
"."
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L177-L187 | train |
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.predict
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L189-L196

```python
def predict(self, L, P, R):
    """
    Given a left context `L`, punctuation mark `P`, and right context
    `R`, return True iff this observation is hypothesized to be a
    sentence boundary.
    """
    phi = self.extract_one(L, P, R)
    return self.classifier.predict(phi)
```
"""
Given an left context `L`, punctuation mark `P`, and right context
`R`, return True iff this observation is hypothesized to be a
sentence boundary.
"""
phi = self.extract_one(L, P, R)
return self.classifier.predict(phi) | [
"def",
"predict",
"(",
"self",
",",
"L",
",",
"P",
",",
"R",
")",
":",
"phi",
"=",
"self",
".",
"extract_one",
"(",
"L",
",",
"P",
",",
"R",
")",
"return",
"self",
".",
"classifier",
".",
"predict",
"(",
"phi",
")"
] | Given an left context `L`, punctuation mark `P`, and right context
`R`, return True iff this observation is hypothesized to be a
sentence boundary. | [
"Given",
"an",
"left",
"context",
"L",
"punctuation",
"mark",
"P",
"and",
"right",
"context",
"R",
"return",
"True",
"iff",
"this",
"observation",
"is",
"hypothesized",
"to",
"be",
"a",
"sentence",
"boundary",
"."
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L189-L196 | train |
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.segments
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L198-L212

```python
def segments(self, text):
    """
    Given a string of `text`, return a generator yielding each
    hypothesized sentence string.
    """
    start = 0
    for (L, P, R, B, end) in Detector.candidates(text):
        # if there's already a newline there, we have nothing to do
        if B:
            continue
        if self.predict(L, P, R):
            yield text[start:end].rstrip()
            start = end
        # otherwise, there's probably not a sentence boundary here
    yield text[start:].rstrip()
```
"""
Given a string of `text`, return a generator yielding each
hypothesized sentence string
"""
start = 0
for (L, P, R, B, end) in Detector.candidates(text):
# if there's already a newline there, we have nothing to do
if B:
continue
if self.predict(L, P, R):
yield text[start:end].rstrip()
start = end
# otherwise, there's probably not a sentence boundary here
yield text[start:].rstrip() | [
"def",
"segments",
"(",
"self",
",",
"text",
")",
":",
"start",
"=",
"0",
"for",
"(",
"L",
",",
"P",
",",
"R",
",",
"B",
",",
"end",
")",
"in",
"Detector",
".",
"candidates",
"(",
"text",
")",
":",
"# if there's already a newline there, we have nothing to do",
"if",
"B",
":",
"continue",
"if",
"self",
".",
"predict",
"(",
"L",
",",
"P",
",",
"R",
")",
":",
"yield",
"text",
"[",
"start",
":",
"end",
"]",
".",
"rstrip",
"(",
")",
"start",
"=",
"end",
"# otherwise, there's probably not a sentence boundary here",
"yield",
"text",
"[",
"start",
":",
"]",
".",
"rstrip",
"(",
")"
] | Given a string of `text`, return a generator yielding each
hypothesized sentence string | [
"Given",
"a",
"string",
"of",
"text",
"return",
"a",
"generator",
"yielding",
"each",
"hypothesized",
"sentence",
"string"
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L198-L212 | train |
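A hypothetical end-to-end use of fit/segments. The Detector constructor arguments are assumed, and the training text is taken to be one sentence per line so that newlines serve as gold boundaries:

```python
# Assumed usage sketch -- the constructor signature is not shown in these excerpts.
detector = Detector()
with open("train.txt") as f:          # one sentence per line
    detector.fit(f.read(), epochs=20)

raw = "It cost $3.5 million. Nobody blinked."
for sentence in detector.segments(raw):
    print(sentence)
# Ideally:
# It cost $3.5 million.
# Nobody blinked.
```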
totalgood/pugnlp | src/pugnlp/detector_morse.py | Detector.evaluate
Source: https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L214-L227

```python
def evaluate(self, text):
    """
    Given a string of `text`, compute the confusion matrix for the
    classification task.
    """
    cx = BinaryConfusion()
    for (L, P, R, gold, _) in Detector.candidates(text):
        guess = self.predict(L, P, R)
        cx.update(gold, guess)
        if not gold and guess:
            logger.debug("False pos.: L='{}', R='{}'.".format(L, R))
        elif gold and not guess:
            logger.debug("False neg.: L='{}', R='{}'.".format(L, R))
    return cx
```
"""
Given a string of `text`, compute confusion matrix for the
classification task.
"""
cx = BinaryConfusion()
for (L, P, R, gold, _) in Detector.candidates(text):
guess = self.predict(L, P, R)
cx.update(gold, guess)
if not gold and guess:
logger.debug("False pos.: L='{}', R='{}'.".format(L, R))
elif gold and not guess:
logger.debug("False neg.: L='{}', R='{}'.".format(L, R))
return cx | [
"def",
"evaluate",
"(",
"self",
",",
"text",
")",
":",
"cx",
"=",
"BinaryConfusion",
"(",
")",
"for",
"(",
"L",
",",
"P",
",",
"R",
",",
"gold",
",",
"_",
")",
"in",
"Detector",
".",
"candidates",
"(",
"text",
")",
":",
"guess",
"=",
"self",
".",
"predict",
"(",
"L",
",",
"P",
",",
"R",
")",
"cx",
".",
"update",
"(",
"gold",
",",
"guess",
")",
"if",
"not",
"gold",
"and",
"guess",
":",
"logger",
".",
"debug",
"(",
"\"False pos.: L='{}', R='{}'.\"",
".",
"format",
"(",
"L",
",",
"R",
")",
")",
"elif",
"gold",
"and",
"not",
"guess",
":",
"logger",
".",
"debug",
"(",
"\"False neg.: L='{}', R='{}'.\"",
".",
"format",
"(",
"L",
",",
"R",
")",
")",
"return",
"cx"
] | Given a string of `text`, compute confusion matrix for the
classification task. | [
"Given",
"a",
"string",
"of",
"text",
"compute",
"confusion",
"matrix",
"for",
"the",
"classification",
"task",
"."
] | c43445b14afddfdeadc5f3076675c9e8fc1ee67c | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/detector_morse.py#L214-L227 | train |
DarkEnergySurvey/ugali | ugali/observation/mask.py | scale
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L1007-L1025

```python
def scale(mask, mag_scale, outfile=None):
    """
    Scale the completeness depth of a mask such that mag_new = mag + mag_scale.
    Input is a full HEALPix map.
    Optionally write out the scaled mask as a sparse HEALPix map.
    """
    msg = "'mask.scale': ADW 2018-05-05"
    # NOTE: this instantiates a DeprecationWarning but never raises or warns
    # with it, so it is effectively a no-op deprecation marker.
    DeprecationWarning(msg)
    mask_new = hp.UNSEEN * np.ones(len(mask))
    mask_new[mask == 0.] = 0.
    mask_new[mask > 0.] = mask[mask > 0.] + mag_scale
    if outfile is not None:
        pix = np.nonzero(mask_new > 0.)[0]
        data_dict = {'MAGLIM': mask_new[pix]}
        nside = hp.npix2nside(len(mask_new))
        ugali.utils.skymap.writeSparseHealpixMap(pix, data_dict, nside, outfile)
    return mask_new
```
"""
Scale the completeness depth of a mask such that mag_new = mag + mag_scale.
Input is a full HEALPix map.
Optionally write out the scaled mask as an sparse HEALPix map.
"""
msg = "'mask.scale': ADW 2018-05-05"
DeprecationWarning(msg)
mask_new = hp.UNSEEN * np.ones(len(mask))
mask_new[mask == 0.] = 0.
mask_new[mask > 0.] = mask[mask > 0.] + mag_scale
if outfile is not None:
pix = np.nonzero(mask_new > 0.)[0]
data_dict = {'MAGLIM': mask_new[pix]}
nside = hp.npix2nside(len(mask_new))
ugali.utils.skymap.writeSparseHealpixMap(pix, data_dict, nside, outfile)
return mask_new | [
"def",
"scale",
"(",
"mask",
",",
"mag_scale",
",",
"outfile",
"=",
"None",
")",
":",
"msg",
"=",
"\"'mask.scale': ADW 2018-05-05\"",
"DeprecationWarning",
"(",
"msg",
")",
"mask_new",
"=",
"hp",
".",
"UNSEEN",
"*",
"np",
".",
"ones",
"(",
"len",
"(",
"mask",
")",
")",
"mask_new",
"[",
"mask",
"==",
"0.",
"]",
"=",
"0.",
"mask_new",
"[",
"mask",
">",
"0.",
"]",
"=",
"mask",
"[",
"mask",
">",
"0.",
"]",
"+",
"mag_scale",
"if",
"outfile",
"is",
"not",
"None",
":",
"pix",
"=",
"np",
".",
"nonzero",
"(",
"mask_new",
">",
"0.",
")",
"[",
"0",
"]",
"data_dict",
"=",
"{",
"'MAGLIM'",
":",
"mask_new",
"[",
"pix",
"]",
"}",
"nside",
"=",
"hp",
".",
"npix2nside",
"(",
"len",
"(",
"mask_new",
")",
")",
"ugali",
".",
"utils",
".",
"skymap",
".",
"writeSparseHealpixMap",
"(",
"pix",
",",
"data_dict",
",",
"nside",
",",
"outfile",
")",
"return",
"mask_new"
] | Scale the completeness depth of a mask such that mag_new = mag + mag_scale.
Input is a full HEALPix map.
Optionally write out the scaled mask as an sparse HEALPix map. | [
"Scale",
"the",
"completeness",
"depth",
"of",
"a",
"mask",
"such",
"that",
"mag_new",
"=",
"mag",
"+",
"mag_scale",
".",
"Input",
"is",
"a",
"full",
"HEALPix",
"map",
".",
"Optionally",
"write",
"out",
"the",
"scaled",
"mask",
"as",
"an",
"sparse",
"HEALPix",
"map",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L1007-L1025 | train |
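The array logic is plain numpy; here is a self-contained sketch with healpy's sentinel value inlined (assumption: `hp.UNSEEN == -1.6375e+30`, its documented value):

```python
import numpy as np

UNSEEN = -1.6375e30  # healpy's sentinel for unobserved pixels

mask = np.array([0.0, 22.5, 23.1, 0.0, 24.0])  # toy depth map (mag)
mag_scale = 0.5

mask_new = UNSEEN * np.ones(len(mask))
mask_new[mask == 0.0] = 0.0                    # keep empty pixels empty
mask_new[mask > 0.0] = mask[mask > 0.0] + mag_scale
print(mask_new)  # -> [ 0.  23.  23.6  0.  24.5]
```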
DarkEnergySurvey/ugali | ugali/observation/mask.py | Mask.mask_roi_unique
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L55-L67

```python
def mask_roi_unique(self):
    """
    Assemble a set of unique magnitude tuples for the ROI.
    """
    # There is no good inherent way in numpy to do this...
    # http://stackoverflow.com/q/16970982/
    # Also possible and simple:
    # return np.unique(zip(self.mask_1.mask_roi_sparse, self.mask_2.mask_roi_sparse))
    A = np.vstack([self.mask_1.mask_roi_sparse, self.mask_2.mask_roi_sparse]).T
    B = A[np.lexsort(A.T[::-1])]
    return B[np.concatenate(([True], np.any(B[1:] != B[:-1], axis=1)))]
```
"""
Assemble a set of unique magnitude tuples for the ROI
"""
# There is no good inherent way in numpy to do this...
# http://stackoverflow.com/q/16970982/
# Also possible and simple:
#return np.unique(zip(self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse))
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = A[np.lexsort(A.T[::-1])]
return B[np.concatenate(([True],np.any(B[1:]!=B[:-1],axis=1)))] | [
"def",
"mask_roi_unique",
"(",
"self",
")",
":",
"# There is no good inherent way in numpy to do this...",
"# http://stackoverflow.com/q/16970982/",
"# Also possible and simple:",
"#return np.unique(zip(self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse))",
"A",
"=",
"np",
".",
"vstack",
"(",
"[",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
",",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
"]",
")",
".",
"T",
"B",
"=",
"A",
"[",
"np",
".",
"lexsort",
"(",
"A",
".",
"T",
"[",
":",
":",
"-",
"1",
"]",
")",
"]",
"return",
"B",
"[",
"np",
".",
"concatenate",
"(",
"(",
"[",
"True",
"]",
",",
"np",
".",
"any",
"(",
"B",
"[",
"1",
":",
"]",
"!=",
"B",
"[",
":",
"-",
"1",
"]",
",",
"axis",
"=",
"1",
")",
")",
")",
"]"
] | Assemble a set of unique magnitude tuples for the ROI | [
"Assemble",
"a",
"set",
"of",
"unique",
"magnitude",
"tuples",
"for",
"the",
"ROI"
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L55-L67 | train |
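The lexsort idiom above is a generic unique-rows trick for 2-D arrays; a standalone demonstration:

```python
import numpy as np

A = np.array([[2, 1], [1, 3], [2, 1], [1, 3], [0, 5]])
B = A[np.lexsort(A.T[::-1])]   # sort rows lexicographically (first column primary)
unique_rows = B[np.concatenate(([True], np.any(B[1:] != B[:-1], axis=1)))]
print(unique_rows)
# [[0 5]
#  [1 3]
#  [2 1]]
# Modern numpy can do the same thing with np.unique(A, axis=0).
```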
DarkEnergySurvey/ugali | ugali/observation/mask.py | Mask.mask_roi_digi
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L70-L87

```python
def mask_roi_digi(self):
    """
    Get the index of the unique magnitude tuple for each pixel in the ROI.
    """
    # http://stackoverflow.com/q/24205045/#24206440
    A = np.vstack([self.mask_1.mask_roi_sparse, self.mask_2.mask_roi_sparse]).T
    B = self.mask_roi_unique

    AA = np.ascontiguousarray(A)
    BB = np.ascontiguousarray(B)
    dt = np.dtype((np.void, AA.dtype.itemsize * AA.shape[1]))
    a = AA.view(dt).ravel()
    b = BB.view(dt).ravel()

    idx = np.argsort(b)
    indices = np.searchsorted(b[idx], a)
    return idx[indices]
```
"""
Get the index of the unique magnitude tuple for each pixel in the ROI.
"""
# http://stackoverflow.com/q/24205045/#24206440
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = self.mask_roi_unique
AA = np.ascontiguousarray(A)
BB = np.ascontiguousarray(B)
dt = np.dtype((np.void, AA.dtype.itemsize * AA.shape[1]))
a = AA.view(dt).ravel()
b = BB.view(dt).ravel()
idx = np.argsort(b)
indices = np.searchsorted(b[idx],a)
return idx[indices] | [
"def",
"mask_roi_digi",
"(",
"self",
")",
":",
"# http://stackoverflow.com/q/24205045/#24206440",
"A",
"=",
"np",
".",
"vstack",
"(",
"[",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
",",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
"]",
")",
".",
"T",
"B",
"=",
"self",
".",
"mask_roi_unique",
"AA",
"=",
"np",
".",
"ascontiguousarray",
"(",
"A",
")",
"BB",
"=",
"np",
".",
"ascontiguousarray",
"(",
"B",
")",
"dt",
"=",
"np",
".",
"dtype",
"(",
"(",
"np",
".",
"void",
",",
"AA",
".",
"dtype",
".",
"itemsize",
"*",
"AA",
".",
"shape",
"[",
"1",
"]",
")",
")",
"a",
"=",
"AA",
".",
"view",
"(",
"dt",
")",
".",
"ravel",
"(",
")",
"b",
"=",
"BB",
".",
"view",
"(",
"dt",
")",
".",
"ravel",
"(",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
"b",
")",
"indices",
"=",
"np",
".",
"searchsorted",
"(",
"b",
"[",
"idx",
"]",
",",
"a",
")",
"return",
"idx",
"[",
"indices",
"]"
] | Get the index of the unique magnitude tuple for each pixel in the ROI. | [
"Get",
"the",
"index",
"of",
"the",
"unique",
"magnitude",
"tuple",
"for",
"each",
"pixel",
"in",
"the",
"ROI",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L70-L87 | train |
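The void-dtype view turns each row into a single opaque element so `searchsorted` can match whole rows at once; a standalone demonstration:

```python
import numpy as np

A = np.array([[1, 3], [2, 1], [1, 3]])        # rows to look up
B = np.array([[0, 5], [1, 3], [2, 1]])        # unique rows (any order)

AA, BB = np.ascontiguousarray(A), np.ascontiguousarray(B)
dt = np.dtype((np.void, AA.dtype.itemsize * AA.shape[1]))
a, b = AA.view(dt).ravel(), BB.view(dt).ravel()  # one void element per row

idx = np.argsort(b)                            # sort the lookup table
indices = np.searchsorted(b[idx], a)           # position of each row of A
print(idx[indices])                            # -> [1 2 1]: A[i] == B[idx[indices][i]]
```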
DarkEnergySurvey/ugali | ugali/observation/mask.py | Mask._fracRoiSparse
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L97-L113

```python
def _fracRoiSparse(self):
    """
    Calculate an approximate pixel coverage fraction from the two masks.

    We have no way to know a priori how much the coverage of the
    two masks overlaps in a given pixel. For example, masks that each
    have frac = 0.5 could have a combined frac = [0.0 to 0.5].
    The limits will be:
      max: min(frac1, frac2)
      min: max((frac1 + frac2) - 1, 0.0)

    Sometimes we are lucky and our fracdet is actually already
    calculated for the two masks combined, so that the max
    condition is satisfied. That is what we will assume...
    """
    self.frac_roi_sparse = np.min([self.mask_1.frac_roi_sparse, self.mask_2.frac_roi_sparse], axis=0)
    return self.frac_roi_sparse
```
"""
Calculate an approximate pixel coverage fraction from the two masks.
We have no way to know a priori how much the coverage of the
two masks overlap in a give pixel. For example, masks that each
have frac = 0.5 could have a combined frac = [0.0 to 0.5].
The limits will be:
max: min(frac1,frac2)
min: max((frac1+frac2)-1, 0.0)
Sometimes we are lucky and our fracdet is actually already
calculated for the two masks combined, so that the max
condition is satisfied. That is what we will assume...
"""
self.frac_roi_sparse = np.min([self.mask_1.frac_roi_sparse,self.mask_2.frac_roi_sparse],axis=0)
return self.frac_roi_sparse | [
"def",
"_fracRoiSparse",
"(",
"self",
")",
":",
"self",
".",
"frac_roi_sparse",
"=",
"np",
".",
"min",
"(",
"[",
"self",
".",
"mask_1",
".",
"frac_roi_sparse",
",",
"self",
".",
"mask_2",
".",
"frac_roi_sparse",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"self",
".",
"frac_roi_sparse"
] | Calculate an approximate pixel coverage fraction from the two masks.
We have no way to know a priori how much the coverage of the
two masks overlap in a give pixel. For example, masks that each
have frac = 0.5 could have a combined frac = [0.0 to 0.5].
The limits will be:
max: min(frac1,frac2)
min: max((frac1+frac2)-1, 0.0)
Sometimes we are lucky and our fracdet is actually already
calculated for the two masks combined, so that the max
condition is satisfied. That is what we will assume... | [
"Calculate",
"an",
"approximate",
"pixel",
"coverage",
"fraction",
"from",
"the",
"two",
"masks",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L97-L113 | train |
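A quick numeric check of the overlap bounds stated in the docstring:

```python
import numpy as np

frac1 = np.array([0.5, 0.9, 1.0])
frac2 = np.array([0.5, 0.8, 0.6])

upper = np.minimum(frac1, frac2)                 # best case: full overlap
lower = np.maximum(frac1 + frac2 - 1.0, 0.0)     # worst case: minimal overlap
print(lower, upper)  # [0.  0.7 0.6] [0.5 0.8 0.6]
# _fracRoiSparse optimistically adopts the upper bound, min(frac1, frac2).
```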
DarkEnergySurvey/ugali | ugali/observation/mask.py | Mask._pruneMMD
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L135-L162

```python
def _pruneMMD(self, minimum_solid_angle):
    """
    Remove regions of magnitude-magnitude space where the unmasked solid
    angle is statistically insufficient to estimate the background.

    INPUTS:
        minimum_solid_angle: minimum solid angle (deg^2)
    """
    logger.info('Pruning mask based on minimum solid angle of %.2f deg^2' % (minimum_solid_angle))

    solid_angle_mmd = self.solid_angle_mmd * (self.solid_angle_mmd > minimum_solid_angle)
    if solid_angle_mmd.sum() == 0:
        msg = "Pruned mask contains no solid angle."
        logger.error(msg)
        raise Exception(msg)

    self.solid_angle_mmd = solid_angle_mmd

    # Compute which magnitudes the clipping corresponds to
    index_mag_1, index_mag_2 = np.nonzero(self.solid_angle_mmd)
    self.mag_1_clip = self.roi.bins_mag[1:][np.max(index_mag_1)]
    self.mag_2_clip = self.roi.bins_mag[1:][np.max(index_mag_2)]

    logger.info('Clipping mask 1 at %.2f mag' % (self.mag_1_clip))
    logger.info('Clipping mask 2 at %.2f mag' % (self.mag_2_clip))
    self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
    self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip)
```
"""
Remove regions of magnitude-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
INPUTS:
solid_angle[1]: minimum solid angle (deg^2)
"""
logger.info('Pruning mask based on minimum solid angle of %.2f deg^2'%(minimum_solid_angle))
solid_angle_mmd = self.solid_angle_mmd*(self.solid_angle_mmd > minimum_solid_angle)
if solid_angle_mmd.sum() == 0:
msg = "Pruned mask contains no solid angle."
logger.error(msg)
raise Exception(msg)
self.solid_angle_mmd = solid_angle_mmd
# Compute which magnitudes the clipping correspond to
index_mag_1, index_mag_2 = np.nonzero(self.solid_angle_mmd)
self.mag_1_clip = self.roi.bins_mag[1:][np.max(index_mag_1)]
self.mag_2_clip = self.roi.bins_mag[1:][np.max(index_mag_2)]
logger.info('Clipping mask 1 at %.2f mag'%(self.mag_1_clip) )
logger.info('Clipping mask 2 at %.2f mag'%(self.mag_2_clip) )
self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip) | [
"def",
"_pruneMMD",
"(",
"self",
",",
"minimum_solid_angle",
")",
":",
"logger",
".",
"info",
"(",
"'Pruning mask based on minimum solid angle of %.2f deg^2'",
"%",
"(",
"minimum_solid_angle",
")",
")",
"solid_angle_mmd",
"=",
"self",
".",
"solid_angle_mmd",
"*",
"(",
"self",
".",
"solid_angle_mmd",
">",
"minimum_solid_angle",
")",
"if",
"solid_angle_mmd",
".",
"sum",
"(",
")",
"==",
"0",
":",
"msg",
"=",
"\"Pruned mask contains no solid angle.\"",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"Exception",
"(",
"msg",
")",
"self",
".",
"solid_angle_mmd",
"=",
"solid_angle_mmd",
"# Compute which magnitudes the clipping correspond to",
"index_mag_1",
",",
"index_mag_2",
"=",
"np",
".",
"nonzero",
"(",
"self",
".",
"solid_angle_mmd",
")",
"self",
".",
"mag_1_clip",
"=",
"self",
".",
"roi",
".",
"bins_mag",
"[",
"1",
":",
"]",
"[",
"np",
".",
"max",
"(",
"index_mag_1",
")",
"]",
"self",
".",
"mag_2_clip",
"=",
"self",
".",
"roi",
".",
"bins_mag",
"[",
"1",
":",
"]",
"[",
"np",
".",
"max",
"(",
"index_mag_2",
")",
"]",
"logger",
".",
"info",
"(",
"'Clipping mask 1 at %.2f mag'",
"%",
"(",
"self",
".",
"mag_1_clip",
")",
")",
"logger",
".",
"info",
"(",
"'Clipping mask 2 at %.2f mag'",
"%",
"(",
"self",
".",
"mag_2_clip",
")",
")",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
",",
"0.",
",",
"self",
".",
"mag_1_clip",
")",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
",",
"0.",
",",
"self",
".",
"mag_2_clip",
")"
] | Remove regions of magnitude-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
INPUTS:
solid_angle[1]: minimum solid angle (deg^2) | [
"Remove",
"regions",
"of",
"magnitude",
"-",
"magnitude",
"space",
"where",
"the",
"unmasked",
"solid",
"angle",
"is",
"statistically",
"insufficient",
"to",
"estimate",
"the",
"background",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L135-L162 | train |
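The pruning step is a threshold-and-locate pass over a 2-D solid-angle histogram; a toy version of the same arithmetic:

```python
import numpy as np

# Toy solid-angle histogram over (mag_1 bins x mag_2 bins), in deg^2.
solid_angle_mmd = np.array([[0.30, 0.20, 0.01],
                            [0.25, 0.02, 0.00],
                            [0.01, 0.00, 0.00]])
minimum_solid_angle = 0.1

pruned = solid_angle_mmd * (solid_angle_mmd > minimum_solid_angle)
i1, i2 = np.nonzero(pruned)
print(pruned)
print("clip mask 1 at bin", i1.max(), "and mask 2 at bin", i2.max())
# -> clip mask 1 at bin 1 and mask 2 at bin 1
```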
DarkEnergySurvey/ugali | ugali/observation/mask.py | Mask._pruneCMD
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L257-L294

```python
def _pruneCMD(self, minimum_solid_angle):
    """
    Remove regions of color-magnitude space where the unmasked solid angle
    is statistically insufficient to estimate the background.

    ADW: Why are we clipping at the bin center instead of edge?

    INPUTS:
        minimum_solid_angle: minimum solid angle (deg^2)
    """
    logger.info('Pruning mask based on minimum solid angle of %.2f deg^2' % (minimum_solid_angle))

    self.solid_angle_cmd *= self.solid_angle_cmd > minimum_solid_angle
    if self.solid_angle_cmd.sum() == 0:
        msg = "Pruned mask contains no solid angle."
        logger.error(msg)
        raise Exception(msg)

    # Compute which magnitudes the clipping corresponds to
    index_mag, index_color = np.nonzero(self.solid_angle_cmd)
    mag = self.roi.centers_mag[index_mag]
    color = self.roi.centers_color[index_color]
    if self.config.params['catalog']['band_1_detection']:
        mag_1 = mag
        mag_2 = mag_1 - color
        self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_mag)
        self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_color)
    else:
        mag_2 = mag
        mag_1 = color + mag_2
        self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_color)
        self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_mag)

    logger.info('Clipping mask 1 at %.2f mag' % (self.mag_1_clip))
    logger.info('Clipping mask 2 at %.2f mag' % (self.mag_2_clip))
    self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
    self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip)
```
"""
Remove regions of color-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
ADW: Why are we clipping at the bin center instead of edge?
INPUTS:
solid_angle[1]: minimum solid angle (deg^2)
"""
logger.info('Pruning mask based on minimum solid angle of %.2f deg^2'%(minimum_solid_angle))
self.solid_angle_cmd *= self.solid_angle_cmd > minimum_solid_angle
if self.solid_angle_cmd.sum() == 0:
msg = "Pruned mask contains no solid angle."
logger.error(msg)
raise Exception(msg)
# Compute which magnitudes the clipping correspond to
index_mag, index_color = np.nonzero(self.solid_angle_cmd)
mag = self.roi.centers_mag[index_mag]
color = self.roi.centers_color[index_color]
if self.config.params['catalog']['band_1_detection']:
mag_1 = mag
mag_2 = mag_1 - color
self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_mag)
self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_color)
else:
mag_2 = mag
mag_1 = color + mag_2
self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_color)
self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_mag)
logger.info('Clipping mask 1 at %.2f mag'%(self.mag_1_clip) )
logger.info('Clipping mask 2 at %.2f mag'%(self.mag_2_clip) )
self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip) | [
"def",
"_pruneCMD",
"(",
"self",
",",
"minimum_solid_angle",
")",
":",
"logger",
".",
"info",
"(",
"'Pruning mask based on minimum solid angle of %.2f deg^2'",
"%",
"(",
"minimum_solid_angle",
")",
")",
"self",
".",
"solid_angle_cmd",
"*=",
"self",
".",
"solid_angle_cmd",
">",
"minimum_solid_angle",
"if",
"self",
".",
"solid_angle_cmd",
".",
"sum",
"(",
")",
"==",
"0",
":",
"msg",
"=",
"\"Pruned mask contains no solid angle.\"",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"Exception",
"(",
"msg",
")",
"# Compute which magnitudes the clipping correspond to",
"index_mag",
",",
"index_color",
"=",
"np",
".",
"nonzero",
"(",
"self",
".",
"solid_angle_cmd",
")",
"mag",
"=",
"self",
".",
"roi",
".",
"centers_mag",
"[",
"index_mag",
"]",
"color",
"=",
"self",
".",
"roi",
".",
"centers_color",
"[",
"index_color",
"]",
"if",
"self",
".",
"config",
".",
"params",
"[",
"'catalog'",
"]",
"[",
"'band_1_detection'",
"]",
":",
"mag_1",
"=",
"mag",
"mag_2",
"=",
"mag_1",
"-",
"color",
"self",
".",
"mag_1_clip",
"=",
"np",
".",
"max",
"(",
"mag_1",
")",
"+",
"(",
"0.5",
"*",
"self",
".",
"roi",
".",
"delta_mag",
")",
"self",
".",
"mag_2_clip",
"=",
"np",
".",
"max",
"(",
"mag_2",
")",
"+",
"(",
"0.5",
"*",
"self",
".",
"roi",
".",
"delta_color",
")",
"else",
":",
"mag_2",
"=",
"mag",
"mag_1",
"=",
"color",
"+",
"mag_2",
"self",
".",
"mag_1_clip",
"=",
"np",
".",
"max",
"(",
"mag_1",
")",
"+",
"(",
"0.5",
"*",
"self",
".",
"roi",
".",
"delta_color",
")",
"self",
".",
"mag_2_clip",
"=",
"np",
".",
"max",
"(",
"mag_2",
")",
"+",
"(",
"0.5",
"*",
"self",
".",
"roi",
".",
"delta_mag",
")",
"logger",
".",
"info",
"(",
"'Clipping mask 1 at %.2f mag'",
"%",
"(",
"self",
".",
"mag_1_clip",
")",
")",
"logger",
".",
"info",
"(",
"'Clipping mask 2 at %.2f mag'",
"%",
"(",
"self",
".",
"mag_2_clip",
")",
")",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"mask_1",
".",
"mask_roi_sparse",
",",
"0.",
",",
"self",
".",
"mag_1_clip",
")",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
"=",
"np",
".",
"clip",
"(",
"self",
".",
"mask_2",
".",
"mask_roi_sparse",
",",
"0.",
",",
"self",
".",
"mag_2_clip",
")"
] | Remove regions of color-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
ADW: Why are we clipping at the bin center instead of edge?
INPUTS:
solid_angle[1]: minimum solid angle (deg^2) | [
"Remove",
"regions",
"of",
"color",
"-",
"magnitude",
"space",
"where",
"the",
"unmasked",
"solid",
"angle",
"is",
"statistically",
"insufficient",
"to",
"estimate",
"the",
"background",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L257-L294 | train |
DarkEnergySurvey/ugali | ugali/observation/mask.py | MaskBand.plot
Source: https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L850-L865

```python
def plot(self):
    """
    Plot the magnitude depth.
    """
    msg = "'%s.plot': ADW 2018-05-05" % self.__class__.__name__
    # NOTE: instantiated but never raised or warned; effectively a no-op marker.
    DeprecationWarning(msg)

    import ugali.utils.plotting

    mask = hp.UNSEEN * np.ones(hp.nside2npix(self.nside))
    mask[self.roi.pixels] = self.mask_roi_sparse
    mask[mask == 0.] = hp.UNSEEN
    ugali.utils.plotting.zoomedHealpixMap('Completeness Depth',
                                          mask,
                                          self.roi.lon, self.roi.lat,
                                          self.roi.config.params['coords']['roi_radius'])
```
"""
Plot the magnitude depth.
"""
msg = "'%s.plot': ADW 2018-05-05"%self.__class__.__name__
DeprecationWarning(msg)
import ugali.utils.plotting
mask = hp.UNSEEN * np.ones(hp.nside2npix(self.nside))
mask[self.roi.pixels] = self.mask_roi_sparse
mask[mask == 0.] = hp.UNSEEN
ugali.utils.plotting.zoomedHealpixMap('Completeness Depth',
mask,
self.roi.lon, self.roi.lat,
self.roi.config.params['coords']['roi_radius']) | [
"def",
"plot",
"(",
"self",
")",
":",
"msg",
"=",
"\"'%s.plot': ADW 2018-05-05\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
"DeprecationWarning",
"(",
"msg",
")",
"import",
"ugali",
".",
"utils",
".",
"plotting",
"mask",
"=",
"hp",
".",
"UNSEEN",
"*",
"np",
".",
"ones",
"(",
"hp",
".",
"nside2npix",
"(",
"self",
".",
"nside",
")",
")",
"mask",
"[",
"self",
".",
"roi",
".",
"pixels",
"]",
"=",
"self",
".",
"mask_roi_sparse",
"mask",
"[",
"mask",
"==",
"0.",
"]",
"=",
"hp",
".",
"UNSEEN",
"ugali",
".",
"utils",
".",
"plotting",
".",
"zoomedHealpixMap",
"(",
"'Completeness Depth'",
",",
"mask",
",",
"self",
".",
"roi",
".",
"lon",
",",
"self",
".",
"roi",
".",
"lat",
",",
"self",
".",
"roi",
".",
"config",
".",
"params",
"[",
"'coords'",
"]",
"[",
"'roi_radius'",
"]",
")"
] | Plot the magnitude depth. | [
"Plot",
"the",
"magnitude",
"depth",
"."
] | 21e890b4117fc810afb6fb058e8055d564f03382 | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L850-L865 | train |
stevearc/dynamo3 | dynamo3/connection.py | build_expected
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L19-L32

```python
def build_expected(dynamizer, expected):
    """ Build the Expected parameters from a dict """
    ret = {}
    for k, v in six.iteritems(expected):
        if is_null(v):
            ret[k] = {
                'Exists': False,
            }
        else:
            ret[k] = {
                'Exists': True,
                'Value': dynamizer.encode(v),
            }
    return ret
```
""" Build the Expected parameters from a dict """
ret = {}
for k, v in six.iteritems(expected):
if is_null(v):
ret[k] = {
'Exists': False,
}
else:
ret[k] = {
'Exists': True,
'Value': dynamizer.encode(v),
}
return ret | [
"def",
"build_expected",
"(",
"dynamizer",
",",
"expected",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"expected",
")",
":",
"if",
"is_null",
"(",
"v",
")",
":",
"ret",
"[",
"k",
"]",
"=",
"{",
"'Exists'",
":",
"False",
",",
"}",
"else",
":",
"ret",
"[",
"k",
"]",
"=",
"{",
"'Exists'",
":",
"True",
",",
"'Value'",
":",
"dynamizer",
".",
"encode",
"(",
"v",
")",
",",
"}",
"return",
"ret"
] | Build the Expected parameters from a dict | [
"Build",
"the",
"Expected",
"parameters",
"from",
"a",
"dict"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L19-L32 | train |
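Assuming `build_expected` is importable from `dynamo3.connection` (it is module-level in the file above), a usage sketch with a stub dynamizer standing in for the real one:

```python
from dynamo3.connection import build_expected

class StubDynamizer(object):
    def encode(self, value):
        # The real Dynamizer picks the proper DynamoDB wire type; a string
        # attribute is assumed here purely for illustration.
        return {'S': str(value)}

print(build_expected(StubDynamizer(), {'name': 'foo', 'deleted': None}))
# -> {'name': {'Exists': True, 'Value': {'S': 'foo'}}, 'deleted': {'Exists': False}}
```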
stevearc/dynamo3 | dynamo3/connection.py | build_expression_values
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L35-L42

```python
def build_expression_values(dynamizer, expr_values, kwargs):
    """ Build ExpressionAttributeValues from a value or kwargs """
    if expr_values:
        values = expr_values
        return dynamizer.encode_keys(values)
    elif kwargs:
        values = dict(((':' + k, v) for k, v in six.iteritems(kwargs)))
        return dynamizer.encode_keys(values)
    # implicitly returns None when neither expr_values nor kwargs is provided
```
""" Build ExpresionAttributeValues from a value or kwargs """
if expr_values:
values = expr_values
return dynamizer.encode_keys(values)
elif kwargs:
values = dict(((':' + k, v) for k, v in six.iteritems(kwargs)))
return dynamizer.encode_keys(values) | [
"def",
"build_expression_values",
"(",
"dynamizer",
",",
"expr_values",
",",
"kwargs",
")",
":",
"if",
"expr_values",
":",
"values",
"=",
"expr_values",
"return",
"dynamizer",
".",
"encode_keys",
"(",
"values",
")",
"elif",
"kwargs",
":",
"values",
"=",
"dict",
"(",
"(",
"(",
"':'",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
")",
")",
"return",
"dynamizer",
".",
"encode_keys",
"(",
"values",
")"
] | Build ExpresionAttributeValues from a value or kwargs | [
"Build",
"ExpresionAttributeValues",
"from",
"a",
"value",
"or",
"kwargs"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L35-L42 | train |
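A usage sketch showing the `':name'` placeholder prefixing; the identity `encode_keys` below is a stand-in for the real Dynamizer:

```python
from dynamo3.connection import build_expression_values

class StubDynamizer(object):
    def encode_keys(self, keys):
        # The real Dynamizer encodes each value; identity is enough for a demo.
        return dict(keys)

dyn = StubDynamizer()
# kwargs are turned into ':name'-style placeholders:
print(build_expression_values(dyn, None, {'limit': 5}))   # {':limit': 5}
# An explicit dict is passed through as-is:
print(build_expression_values(dyn, {':limit': 5}, {}))    # {':limit': 5}
```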
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.connect_to_host
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L121-L156

```python
@classmethod  # decorator assumed: the `cls` first argument implies a classmethod
def connect_to_host(cls, host='localhost', port=8000, is_secure=False,
                    session=None, access_key=None, secret_key=None,
                    **kwargs):
    """
    Connect to a specific host.

    This method has been deprecated in favor of :meth:`~.connect`

    Parameters
    ----------
    host : str, optional
        Address of the host (default 'localhost')
    port : int, optional
        Connect to the host on this port (default 8000)
    is_secure : bool, optional
        Enforce https connection (default False)
    session : :class:`~botocore.session.Session`, optional
        The Session object to use for the connection
    access_key : str, optional
        If session is None, set this access key when creating the session
    secret_key : str, optional
        If session is None, set this secret key when creating the session
    **kwargs : dict
        Keyword arguments to pass to the constructor

    """
    warnings.warn("connect_to_host is deprecated and will be removed. "
                  "Use connect instead.")
    if session is None:
        session = botocore.session.get_session()
        if access_key is not None:
            session.set_credentials(access_key, secret_key)
    url = "http://%s:%d" % (host, port)
    client = session.create_client('dynamodb', 'local', endpoint_url=url,
                                   use_ssl=is_secure)
    return cls(client, **kwargs)
```
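A hypothetical session against DynamoDB Local; `list_tables` is assumed to exist on the connection and to return an iterable of table names:

```python
from dynamo3 import DynamoDBConnection

# Assumed local setup: DynamoDB Local listening on port 8000.
conn = DynamoDBConnection.connect_to_host(host='localhost', port=8000,
                                          access_key='anything',
                                          secret_key='anything')
print(list(conn.list_tables()))  # assumption: list_tables() is available
```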
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.call
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L196-L254

```python
def call(self, command, **kwargs):
    """
    Make a request to DynamoDB using the raw botocore API

    Parameters
    ----------
    command : str
        The name of the Dynamo command to execute
    **kwargs : dict
        The parameters to pass up in the request

    Raises
    ------
    exc : :class:`~.DynamoDBError`

    Returns
    -------
    data : dict

    """
    for hook in self._hooks['precall']:
        hook(self, command, kwargs)
    op = getattr(self.client, command)
    attempt = 0
    while True:
        try:
            data = op(**kwargs)
            break
        except ClientError as e:
            exc = translate_exception(e, kwargs)
            attempt += 1
            if isinstance(exc, ThroughputException):
                if attempt > self.request_retries:
                    exc.re_raise()
                self.exponential_sleep(attempt)
            else:
                exc.re_raise()
    for hook in self._hooks['postcall']:
        hook(self, command, kwargs, data)
    if 'ConsumedCapacity' in data:
        is_read = command in READ_COMMANDS
        consumed = data['ConsumedCapacity']
        if isinstance(consumed, list):
            data['consumed_capacity'] = [
                ConsumedCapacity.from_response(cap, is_read)
                for cap in consumed
            ]
        else:
            capacity = ConsumedCapacity.from_response(consumed, is_read)
            data['consumed_capacity'] = capacity
    if 'consumed_capacity' in data:
        if isinstance(data['consumed_capacity'], list):
            all_caps = data['consumed_capacity']
        else:
            all_caps = [data['consumed_capacity']]
        for hook in self._hooks['capacity']:
            for cap in all_caps:
                hook(self, command, kwargs, data, cap)
    return data
```
"""
Make a request to DynamoDB using the raw botocore API
Parameters
----------
command : str
The name of the Dynamo command to execute
**kwargs : dict
The parameters to pass up in the request
Raises
------
exc : :class:`~.DynamoDBError`
Returns
-------
data : dict
"""
for hook in self._hooks['precall']:
hook(self, command, kwargs)
op = getattr(self.client, command)
attempt = 0
while True:
try:
data = op(**kwargs)
break
except ClientError as e:
exc = translate_exception(e, kwargs)
attempt += 1
if isinstance(exc, ThroughputException):
if attempt > self.request_retries:
exc.re_raise()
self.exponential_sleep(attempt)
else:
exc.re_raise()
for hook in self._hooks['postcall']:
hook(self, command, kwargs, data)
if 'ConsumedCapacity' in data:
is_read = command in READ_COMMANDS
consumed = data['ConsumedCapacity']
if isinstance(consumed, list):
data['consumed_capacity'] = [
ConsumedCapacity.from_response(cap, is_read)
for cap in consumed
]
else:
capacity = ConsumedCapacity.from_response(consumed, is_read)
data['consumed_capacity'] = capacity
if 'consumed_capacity' in data:
if isinstance(data['consumed_capacity'], list):
all_caps = data['consumed_capacity']
else:
all_caps = [data['consumed_capacity']]
for hook in self._hooks['capacity']:
for cap in all_caps:
hook(self, command, kwargs, data, cap)
return data | [
"def",
"call",
"(",
"self",
",",
"command",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"hook",
"in",
"self",
".",
"_hooks",
"[",
"'precall'",
"]",
":",
"hook",
"(",
"self",
",",
"command",
",",
"kwargs",
")",
"op",
"=",
"getattr",
"(",
"self",
".",
"client",
",",
"command",
")",
"attempt",
"=",
"0",
"while",
"True",
":",
"try",
":",
"data",
"=",
"op",
"(",
"*",
"*",
"kwargs",
")",
"break",
"except",
"ClientError",
"as",
"e",
":",
"exc",
"=",
"translate_exception",
"(",
"e",
",",
"kwargs",
")",
"attempt",
"+=",
"1",
"if",
"isinstance",
"(",
"exc",
",",
"ThroughputException",
")",
":",
"if",
"attempt",
">",
"self",
".",
"request_retries",
":",
"exc",
".",
"re_raise",
"(",
")",
"self",
".",
"exponential_sleep",
"(",
"attempt",
")",
"else",
":",
"exc",
".",
"re_raise",
"(",
")",
"for",
"hook",
"in",
"self",
".",
"_hooks",
"[",
"'postcall'",
"]",
":",
"hook",
"(",
"self",
",",
"command",
",",
"kwargs",
",",
"data",
")",
"if",
"'ConsumedCapacity'",
"in",
"data",
":",
"is_read",
"=",
"command",
"in",
"READ_COMMANDS",
"consumed",
"=",
"data",
"[",
"'ConsumedCapacity'",
"]",
"if",
"isinstance",
"(",
"consumed",
",",
"list",
")",
":",
"data",
"[",
"'consumed_capacity'",
"]",
"=",
"[",
"ConsumedCapacity",
".",
"from_response",
"(",
"cap",
",",
"is_read",
")",
"for",
"cap",
"in",
"consumed",
"]",
"else",
":",
"capacity",
"=",
"ConsumedCapacity",
".",
"from_response",
"(",
"consumed",
",",
"is_read",
")",
"data",
"[",
"'consumed_capacity'",
"]",
"=",
"capacity",
"if",
"'consumed_capacity'",
"in",
"data",
":",
"if",
"isinstance",
"(",
"data",
"[",
"'consumed_capacity'",
"]",
",",
"list",
")",
":",
"all_caps",
"=",
"data",
"[",
"'consumed_capacity'",
"]",
"else",
":",
"all_caps",
"=",
"[",
"data",
"[",
"'consumed_capacity'",
"]",
"]",
"for",
"hook",
"in",
"self",
".",
"_hooks",
"[",
"'capacity'",
"]",
":",
"for",
"cap",
"in",
"all_caps",
":",
"hook",
"(",
"self",
",",
"command",
",",
"kwargs",
",",
"data",
",",
"cap",
")",
"return",
"data"
] | Make a request to DynamoDB using the raw botocore API
Parameters
----------
command : str
The name of the Dynamo command to execute
**kwargs : dict
The parameters to pass up in the request
Raises
------
exc : :class:`~.DynamoDBError`
Returns
-------
data : dict | [
"Make",
"a",
"request",
"to",
"DynamoDB",
"using",
"the",
"raw",
"botocore",
"API"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L196-L254 | train |
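`exponential_sleep` is referenced but not shown in this excerpt; a common shape for that backoff helper, offered as an illustrative sketch rather than dynamo3's actual implementation:

```python
import random
import time

def exponential_sleep(attempt, base=0.5, cap=30.0):
    # Sleep roughly base * 2**attempt seconds with jitter, capped.
    # Illustrative assumption; the real helper's constants may differ.
    delay = min(cap, base * (2 ** attempt))
    time.sleep(delay * random.uniform(0.5, 1.0))

for attempt in range(1, 4):
    exponential_sleep(attempt)  # ~0.5-1s, then ~1-2s, then ~2-4s
```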
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.subscribe
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L261-L276

```python
def subscribe(self, event, hook):
    """
    Subscribe a callback to an event

    Parameters
    ----------
    event : str
        Available events are 'precall', 'postcall', and 'capacity'.
        precall is called with: (connection, command, query_kwargs)
        postcall is called with: (connection, command, query_kwargs, response)
        capacity is called with: (connection, command, query_kwargs, response, capacity)
    hook : callable

    """
    if hook not in self._hooks[event]:
        self._hooks[event].append(hook)
```
"""
Subscribe a callback to an event
Parameters
----------
event : str
Available events are 'precall', 'postcall', and 'capacity'.
precall is called with: (connection, command, query_kwargs)
postcall is called with: (connection, command, query_kwargs, response)
capacity is called with: (connection, command, query_kwargs, response, capacity)
hook : callable
"""
if hook not in self._hooks[event]:
self._hooks[event].append(hook) | [
"def",
"subscribe",
"(",
"self",
",",
"event",
",",
"hook",
")",
":",
"if",
"hook",
"not",
"in",
"self",
".",
"_hooks",
"[",
"event",
"]",
":",
"self",
".",
"_hooks",
"[",
"event",
"]",
".",
"append",
"(",
"hook",
")"
] | Subscribe a callback to an event
Parameters
----------
event : str
Available events are 'precall', 'postcall', and 'capacity'.
precall is called with: (connection, command, query_kwargs)
postcall is called with: (connection, command, query_kwargs, response)
capacity is called with: (connection, command, query_kwargs, response, capacity)
hook : callable | [
"Subscribe",
"a",
"callback",
"to",
"an",
"event"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L261-L276 | train |
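A usage sketch for the hook API, following the signatures given in the docstring above (the connection construction is assumed):

```python
from dynamo3 import DynamoDBConnection

conn = DynamoDBConnection.connect_to_host(port=8000)  # assumed local setup

def log_capacity(connection, command, kwargs, response, capacity):
    # Signature matches the 'capacity' event documented above.
    print("%s consumed %r" % (command, capacity))

conn.subscribe('capacity', log_capacity)
# ... issue reads/writes; each response's ConsumedCapacity triggers the hook ...
conn.unsubscribe('capacity', log_capacity)
```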
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.unsubscribe
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L278-L281

```python
def unsubscribe(self, event, hook):
    """ Unsubscribe a hook from an event """
    if hook in self._hooks[event]:
        self._hooks[event].remove(hook)
```
""" Unsubscribe a hook from an event """
if hook in self._hooks[event]:
self._hooks[event].remove(hook) | [
"def",
"unsubscribe",
"(",
"self",
",",
"event",
",",
"hook",
")",
":",
"if",
"hook",
"in",
"self",
".",
"_hooks",
"[",
"event",
"]",
":",
"self",
".",
"_hooks",
"[",
"event",
"]",
".",
"remove",
"(",
"hook",
")"
] | Unsubscribe a hook from an event | [
"Unsubscribe",
"a",
"hook",
"from",
"an",
"event"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L278-L281 | train |
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.add_rate_limit
Source: https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L283-L287

```python
def add_rate_limit(self, limiter):
    """ Add a RateLimit to the connection """
    if limiter not in self.rate_limiters:
        self.subscribe('capacity', limiter.on_capacity)
        self.rate_limiters.append(limiter)
```
""" Add a RateLimit to the connection """
if limiter not in self.rate_limiters:
self.subscribe('capacity', limiter.on_capacity)
self.rate_limiters.append(limiter) | [
"def",
"add_rate_limit",
"(",
"self",
",",
"limiter",
")",
":",
"if",
"limiter",
"not",
"in",
"self",
".",
"rate_limiters",
":",
"self",
".",
"subscribe",
"(",
"'capacity'",
",",
"limiter",
".",
"on_capacity",
")",
"self",
".",
"rate_limiters",
".",
"append",
"(",
"limiter",
")"
] | Add a RateLimit to the connection | [
"Add",
"a",
"RateLimit",
"to",
"the",
"connection"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L283-L287 | train |
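Because `add_rate_limit` only wires `limiter.on_capacity` to the 'capacity' event, any object with that method works. A sketch with a hypothetical throttler (the library's real `RateLimit` class is not shown in these records):

```python
import time

from dynamo3 import DynamoDBConnection

class SleepyLimiter(object):
    """Hypothetical limiter: anything exposing on_capacity qualifies."""
    def on_capacity(self, connection, command, query_kwargs, response, capacity):
        time.sleep(0.05)  # crude back-off whenever capacity is reported

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup
limiter = SleepyLimiter()
conn.add_rate_limit(limiter)     # also flips _default_capacity to INDEXES
# ... issue reads and writes ...
conn.remove_rate_limit(limiter)  # see the next record for the inverse
```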
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.remove_rate_limit | def remove_rate_limit(self, limiter):
""" Remove a RateLimit from the connection """
if limiter in self.rate_limiters:
self.unsubscribe('capacity', limiter.on_capacity)
self.rate_limiters.remove(limiter) | python | def remove_rate_limit(self, limiter):
""" Remove a RateLimit from the connection """
if limiter in self.rate_limiters:
self.unsubscribe('capacity', limiter.on_capacity)
self.rate_limiters.remove(limiter) | [
"def",
"remove_rate_limit",
"(",
"self",
",",
"limiter",
")",
":",
"if",
"limiter",
"in",
"self",
".",
"rate_limiters",
":",
"self",
".",
"unsubscribe",
"(",
"'capacity'",
",",
"limiter",
".",
"on_capacity",
")",
"self",
".",
"rate_limiters",
".",
"remove",
"(",
"limiter",
")"
] | Remove a RateLimit from the connection | [
"Remove",
"a",
"RateLimit",
"from",
"the",
"connection"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L289-L293 | train |
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection._default_capacity | def _default_capacity(self, value):
""" Get the value for ReturnConsumedCapacity from provided value """
if value is not None:
return value
if self.default_return_capacity or self.rate_limiters:
return INDEXES
return NONE | python | def _default_capacity(self, value):
""" Get the value for ReturnConsumedCapacity from provided value """
if value is not None:
return value
if self.default_return_capacity or self.rate_limiters:
return INDEXES
return NONE | [
"def",
"_default_capacity",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"value",
"if",
"self",
".",
"default_return_capacity",
"or",
"self",
".",
"rate_limiters",
":",
"return",
"INDEXES",
"return",
"NONE"
] | Get the value for ReturnConsumedCapacity from provided value | [
"Get",
"the",
"value",
"for",
"ReturnConsumedCapacity",
"from",
"provided",
"value"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L312-L318 | train |
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection._count | def _count(self, method, limit, keywords):
""" Do a scan or query and aggregate the results into a Count """
# The limit will be mutated, so copy it and leave the original intact
limit = limit.copy()
has_more = True
count = None
while has_more:
limit.set_request_args(keywords)
response = self.call(method, **keywords)
limit.post_fetch(response)
count += Count.from_response(response)
last_evaluated_key = response.get('LastEvaluatedKey')
has_more = last_evaluated_key is not None and not limit.complete
if has_more:
keywords['ExclusiveStartKey'] = last_evaluated_key
return count | python | def _count(self, method, limit, keywords):
""" Do a scan or query and aggregate the results into a Count """
# The limit will be mutated, so copy it and leave the original intact
limit = limit.copy()
has_more = True
count = None
while has_more:
limit.set_request_args(keywords)
response = self.call(method, **keywords)
limit.post_fetch(response)
count += Count.from_response(response)
last_evaluated_key = response.get('LastEvaluatedKey')
has_more = last_evaluated_key is not None and not limit.complete
if has_more:
keywords['ExclusiveStartKey'] = last_evaluated_key
return count | [
"def",
"_count",
"(",
"self",
",",
"method",
",",
"limit",
",",
"keywords",
")",
":",
"# The limit will be mutated, so copy it and leave the original intact",
"limit",
"=",
"limit",
".",
"copy",
"(",
")",
"has_more",
"=",
"True",
"count",
"=",
"None",
"while",
"has_more",
":",
"limit",
".",
"set_request_args",
"(",
"keywords",
")",
"response",
"=",
"self",
".",
"call",
"(",
"method",
",",
"*",
"*",
"keywords",
")",
"limit",
".",
"post_fetch",
"(",
"response",
")",
"count",
"+=",
"Count",
".",
"from_response",
"(",
"response",
")",
"last_evaluated_key",
"=",
"response",
".",
"get",
"(",
"'LastEvaluatedKey'",
")",
"has_more",
"=",
"last_evaluated_key",
"is",
"not",
"None",
"and",
"not",
"limit",
".",
"complete",
"if",
"has_more",
":",
"keywords",
"[",
"'ExclusiveStartKey'",
"]",
"=",
"last_evaluated_key",
"return",
"count"
] | Do a scan or query and aggregate the results into a Count | [
"Do",
"a",
"scan",
"or",
"query",
"and",
"aggregate",
"the",
"results",
"into",
"a",
"Count"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L320-L335 | train |
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.describe_table | def describe_table(self, tablename):
"""
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
"""
try:
response = self.call(
'describe_table', TableName=tablename)['Table']
return Table.from_response(response)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return None
else: # pragma: no cover
raise | python | def describe_table(self, tablename):
"""
Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table`
"""
try:
response = self.call(
'describe_table', TableName=tablename)['Table']
return Table.from_response(response)
except DynamoDBError as e:
if e.kwargs['Code'] == 'ResourceNotFoundException':
return None
else: # pragma: no cover
raise | [
"def",
"describe_table",
"(",
"self",
",",
"tablename",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"call",
"(",
"'describe_table'",
",",
"TableName",
"=",
"tablename",
")",
"[",
"'Table'",
"]",
"return",
"Table",
".",
"from_response",
"(",
"response",
")",
"except",
"DynamoDBError",
"as",
"e",
":",
"if",
"e",
".",
"kwargs",
"[",
"'Code'",
"]",
"==",
"'ResourceNotFoundException'",
":",
"return",
"None",
"else",
":",
"# pragma: no cover",
"raise"
] | Get the details about a table
Parameters
----------
tablename : str
Name of the table
Returns
-------
table : :class:`~dynamo3.fields.Table` | [
"Get",
"the",
"details",
"about",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L354-L376 | train |
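Since `describe_table` swallows `ResourceNotFoundException` and returns `None`, it doubles as an existence check. The connection setup and table name below are hypothetical:

```python
from dynamo3 import DynamoDBConnection

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup
table = conn.describe_table('mytable')
if table is None:
    print('mytable does not exist')
else:
    print(table)  # a dynamo3.fields.Table built from the response
```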
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.put_item | def put_item(self, tablename, item, expected=None, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Store an item, overwriting existing data
This uses the older version of the DynamoDB API.
See also: :meth:`~.put_item2`.
Parameters
----------
tablename : str
Name of the table to write
item : dict
Item data
expected : dict, optional
DEPRECATED (use **kwargs instead).
If present, will check the values in Dynamo before performing the
write. If values do not match, will raise an exception. (Using None
as a value checks that the field does not exist).
returns : {NONE, ALL_OLD}, optional
If ALL_OLD, will return any data that was overwritten (default
NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the **kwargs conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the PUT. Same format as the kwargs for
:meth:`~.scan`.
"""
keywords = {}
if kwargs:
keywords['Expected'] = encode_query_kwargs(self.dynamizer, kwargs)
if len(keywords['Expected']) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
elif expected is not None:
keywords['Expected'] = build_expected(self.dynamizer, expected)
keywords['ReturnConsumedCapacity'] = \
self._default_capacity(return_capacity)
item = self.dynamizer.encode_keys(item)
ret = self.call('put_item', TableName=tablename, Item=item,
ReturnValues=returns, **keywords)
if ret:
return Result(self.dynamizer, ret, 'Attributes') | python | def put_item(self, tablename, item, expected=None, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Store an item, overwriting existing data
This uses the older version of the DynamoDB API.
See also: :meth:`~.put_item2`.
Parameters
----------
tablename : str
Name of the table to write
item : dict
Item data
expected : dict, optional
DEPRECATED (use **kwargs instead).
If present, will check the values in Dynamo before performing the
write. If values do not match, will raise an exception. (Using None
as a value checks that the field does not exist).
returns : {NONE, ALL_OLD}, optional
If ALL_OLD, will return any data that was overwritten (default
NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the **kwargs conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the PUT. Same format as the kwargs for
:meth:`~.scan`.
"""
keywords = {}
if kwargs:
keywords['Expected'] = encode_query_kwargs(self.dynamizer, kwargs)
if len(keywords['Expected']) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
elif expected is not None:
keywords['Expected'] = build_expected(self.dynamizer, expected)
keywords['ReturnConsumedCapacity'] = \
self._default_capacity(return_capacity)
item = self.dynamizer.encode_keys(item)
ret = self.call('put_item', TableName=tablename, Item=item,
ReturnValues=returns, **keywords)
if ret:
return Result(self.dynamizer, ret, 'Attributes') | [
"def",
"put_item",
"(",
"self",
",",
"tablename",
",",
"item",
",",
"expected",
"=",
"None",
",",
"returns",
"=",
"NONE",
",",
"return_capacity",
"=",
"None",
",",
"expect_or",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"keywords",
"=",
"{",
"}",
"if",
"kwargs",
":",
"keywords",
"[",
"'Expected'",
"]",
"=",
"encode_query_kwargs",
"(",
"self",
".",
"dynamizer",
",",
"kwargs",
")",
"if",
"len",
"(",
"keywords",
"[",
"'Expected'",
"]",
")",
">",
"1",
":",
"keywords",
"[",
"'ConditionalOperator'",
"]",
"=",
"'OR'",
"if",
"expect_or",
"else",
"'AND'",
"elif",
"expected",
"is",
"not",
"None",
":",
"keywords",
"[",
"'Expected'",
"]",
"=",
"build_expected",
"(",
"self",
".",
"dynamizer",
",",
"expected",
")",
"keywords",
"[",
"'ReturnConsumedCapacity'",
"]",
"=",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
"item",
"=",
"self",
".",
"dynamizer",
".",
"encode_keys",
"(",
"item",
")",
"ret",
"=",
"self",
".",
"call",
"(",
"'put_item'",
",",
"TableName",
"=",
"tablename",
",",
"Item",
"=",
"item",
",",
"ReturnValues",
"=",
"returns",
",",
"*",
"*",
"keywords",
")",
"if",
"ret",
":",
"return",
"Result",
"(",
"self",
".",
"dynamizer",
",",
"ret",
",",
"'Attributes'",
")"
] | Store an item, overwriting existing data
This uses the older version of the DynamoDB API.
See also: :meth:`~.put_item2`.
Parameters
----------
tablename : str
Name of the table to write
item : dict
Item data
expected : dict, optional
DEPRECATED (use **kwargs instead).
If present, will check the values in Dynamo before performing the
write. If values do not match, will raise an exception. (Using None
as a value checks that the field does not exist).
returns : {NONE, ALL_OLD}, optional
If ALL_OLD, will return any data that was overwritten (default
NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the **kwargs conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the PUT. Same format as the kwargs for
:meth:`~.scan`. | [
"Store",
"an",
"item",
"overwriting",
"existing",
"data"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L466-L513 | train |
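A sketch of an unconditional and a conditional `put_item`. The Django-style kwarg (`foo__eq=...`) follows the `query` record later in this dump; treating `ALL_OLD` as the literal string constant is an assumption.

```python
from dynamo3 import DynamoDBConnection

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup

# Unconditional overwrite.
conn.put_item('mytable', {'id': 'a', 'foo': 'bar'})

# Conditional overwrite: succeeds only while the stored 'foo' is still
# 'bar', and returns the attributes that were overwritten.
old = conn.put_item('mytable', {'id': 'a', 'foo': 'baz'},
                    returns='ALL_OLD',  # assumed value of the ALL_OLD constant
                    foo__eq='bar')
```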
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.delete_item2 | def delete_item2(self, tablename, key, expr_values=None, alias=None,
condition=None, returns=NONE, return_capacity=None,
return_item_collection_metrics=NONE, **kwargs):
"""
Delete an item from a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
"""
keywords = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ReturnValues': returns,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ReturnItemCollectionMetrics': return_item_collection_metrics,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if alias:
keywords['ExpressionAttributeNames'] = alias
if condition:
keywords['ConditionExpression'] = condition
result = self.call('delete_item', **keywords)
if result:
return Result(self.dynamizer, result, 'Attributes') | python | def delete_item2(self, tablename, key, expr_values=None, alias=None,
condition=None, returns=NONE, return_capacity=None,
return_item_collection_metrics=NONE, **kwargs):
"""
Delete an item from a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys).
"""
keywords = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ReturnValues': returns,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ReturnItemCollectionMetrics': return_item_collection_metrics,
}
values = build_expression_values(self.dynamizer, expr_values, kwargs)
if values:
keywords['ExpressionAttributeValues'] = values
if alias:
keywords['ExpressionAttributeNames'] = alias
if condition:
keywords['ConditionExpression'] = condition
result = self.call('delete_item', **keywords)
if result:
return Result(self.dynamizer, result, 'Attributes') | [
"def",
"delete_item2",
"(",
"self",
",",
"tablename",
",",
"key",
",",
"expr_values",
"=",
"None",
",",
"alias",
"=",
"None",
",",
"condition",
"=",
"None",
",",
"returns",
"=",
"NONE",
",",
"return_capacity",
"=",
"None",
",",
"return_item_collection_metrics",
"=",
"NONE",
",",
"*",
"*",
"kwargs",
")",
":",
"keywords",
"=",
"{",
"'TableName'",
":",
"tablename",
",",
"'Key'",
":",
"self",
".",
"dynamizer",
".",
"encode_keys",
"(",
"key",
")",
",",
"'ReturnValues'",
":",
"returns",
",",
"'ReturnConsumedCapacity'",
":",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
",",
"'ReturnItemCollectionMetrics'",
":",
"return_item_collection_metrics",
",",
"}",
"values",
"=",
"build_expression_values",
"(",
"self",
".",
"dynamizer",
",",
"expr_values",
",",
"kwargs",
")",
"if",
"values",
":",
"keywords",
"[",
"'ExpressionAttributeValues'",
"]",
"=",
"values",
"if",
"alias",
":",
"keywords",
"[",
"'ExpressionAttributeNames'",
"]",
"=",
"alias",
"if",
"condition",
":",
"keywords",
"[",
"'ConditionExpression'",
"]",
"=",
"condition",
"result",
"=",
"self",
".",
"call",
"(",
"'delete_item'",
",",
"*",
"*",
"keywords",
")",
"if",
"result",
":",
"return",
"Result",
"(",
"self",
".",
"dynamizer",
",",
"result",
",",
"'Attributes'",
")"
] | Delete an item from a table
For many parameters you will want to reference the DynamoDB API:
http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DeleteItem.html
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
expr_values : dict, optional
See docs for ExpressionAttributeValues. See also: kwargs
alias : dict, optional
See docs for ExpressionAttributeNames
condition : str, optional
See docs for ConditionExpression
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
**kwargs : dict, optional
If expr_values is not provided, the kwargs dict will be used as the
ExpressionAttributeValues (a ':' will be automatically prepended to
all keys). | [
"Delete",
"an",
"item",
"from",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L695-L749 | train |
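In `delete_item2` the extra kwargs become `ExpressionAttributeValues` with ':' prepended, so a conditional delete needs no hand-built values dict. Table and attribute names are hypothetical:

```python
from dynamo3 import DynamoDBConnection

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup

# Delete only while the stored version is 3; the kwarg v=3 is encoded as
# the expression value ':v' that the condition references.
conn.delete_item2('mytable', {'id': 'a'}, condition='version = :v', v=3)
```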
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.batch_write | def batch_write(self, tablename, return_capacity=None,
return_item_collection_metrics=NONE):
"""
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
"""
return_capacity = self._default_capacity(return_capacity)
return BatchWriter(self, tablename, return_capacity=return_capacity,
return_item_collection_metrics=return_item_collection_metrics) | python | def batch_write(self, tablename, return_capacity=None,
return_item_collection_metrics=NONE):
"""
Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'})
"""
return_capacity = self._default_capacity(return_capacity)
return BatchWriter(self, tablename, return_capacity=return_capacity,
return_item_collection_metrics=return_item_collection_metrics) | [
"def",
"batch_write",
"(",
"self",
",",
"tablename",
",",
"return_capacity",
"=",
"None",
",",
"return_item_collection_metrics",
"=",
"NONE",
")",
":",
"return_capacity",
"=",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
"return",
"BatchWriter",
"(",
"self",
",",
"tablename",
",",
"return_capacity",
"=",
"return_capacity",
",",
"return_item_collection_metrics",
"=",
"return_item_collection_metrics",
")"
] | Perform a batch write on a table
Parameters
----------
tablename : str
Name of the table to write to
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
return_item_collection_metrics : {NONE, SIZE}, optional
SIZE will return statistics about item collections that were
modified.
Examples
--------
.. code-block:: python
with connection.batch_write('mytable') as batch:
batch.put({'id': 'id1', 'foo': 'bar'})
batch.delete({'id': 'oldid'}) | [
"Perform",
"a",
"batch",
"write",
"on",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L751-L779 | train |
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.batch_get | def batch_get(self, tablename, keys, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
keys = [self.dynamizer.encode_keys(k) for k in keys]
return_capacity = self._default_capacity(return_capacity)
ret = GetResultSet(self, tablename, keys,
consistent=consistent, attributes=attributes,
alias=alias, return_capacity=return_capacity)
return ret | python | def batch_get(self, tablename, keys, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
keys = [self.dynamizer.encode_keys(k) for k in keys]
return_capacity = self._default_capacity(return_capacity)
ret = GetResultSet(self, tablename, keys,
consistent=consistent, attributes=attributes,
alias=alias, return_capacity=return_capacity)
return ret | [
"def",
"batch_get",
"(",
"self",
",",
"tablename",
",",
"keys",
",",
"attributes",
"=",
"None",
",",
"alias",
"=",
"None",
",",
"consistent",
"=",
"False",
",",
"return_capacity",
"=",
"None",
")",
":",
"keys",
"=",
"[",
"self",
".",
"dynamizer",
".",
"encode_keys",
"(",
"k",
")",
"for",
"k",
"in",
"keys",
"]",
"return_capacity",
"=",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
"ret",
"=",
"GetResultSet",
"(",
"self",
",",
"tablename",
",",
"keys",
",",
"consistent",
"=",
"consistent",
",",
"attributes",
"=",
"attributes",
",",
"alias",
"=",
"alias",
",",
"return_capacity",
"=",
"return_capacity",
")",
"return",
"ret"
] | Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE) | [
"Perform",
"a",
"batch",
"get",
"of",
"many",
"items",
"in",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L781-L811 | train |
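`batch_get` hands back a `GetResultSet`; assuming that result set is iterable like the others in this module, fetching a few items looks like:

```python
from dynamo3 import DynamoDBConnection

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup
keys = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]
for item in conn.batch_get('mytable', keys, attributes=['id', 'foo']):
    print(item)  # decoded item dicts, fetched lazily in batches
```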
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.update_item | def update_item(self, tablename, key, updates, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Update a single item in a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.update_item2`.
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
updates : list
List of :class:`~dynamo3.batch.ItemUpdate`
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the updates conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the update. Same format as the kwargs for
:meth:`~.scan`.
Notes
-----
There are two ways to specify the expected values of fields. The
simplest is via the list of updates. Each updated field may specify a
constraint on the current value of that field. You may pass additional
constraints in via the **kwargs the same way you would for put_item.
This is necessary if you have constraints on fields that are not being
updated.
"""
key = self.dynamizer.encode_keys(key)
attr_updates = {}
expected = {}
keywords = {
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
for update in updates:
attr_updates.update(update.attrs(self.dynamizer))
expected.update(update.expected(self.dynamizer))
# Pull the 'expected' constraints from the kwargs
for k, v in six.iteritems(encode_query_kwargs(self.dynamizer, kwargs)):
if k in expected:
raise ValueError("Cannot have more than one condition on a single field")
expected[k] = v
if expected:
keywords['Expected'] = expected
if len(expected) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
result = self.call('update_item', TableName=tablename, Key=key,
AttributeUpdates=attr_updates,
ReturnValues=returns,
**keywords)
if result:
return Result(self.dynamizer, result, 'Attributes') | python | def update_item(self, tablename, key, updates, returns=NONE,
return_capacity=None, expect_or=False, **kwargs):
"""
Update a single item in a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.update_item2`.
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
updates : list
List of :class:`~dynamo3.batch.ItemUpdate`
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the updates conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the update. Same format as the kwargs for
:meth:`~.scan`.
Notes
-----
There are two ways to specify the expected values of fields. The
simplest is via the list of updates. Each updated field may specify a
constraint on the current value of that field. You may pass additional
constraints in via the **kwargs the same way you would for put_item.
This is necessary if you have constraints on fields that are not being
updated.
"""
key = self.dynamizer.encode_keys(key)
attr_updates = {}
expected = {}
keywords = {
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
for update in updates:
attr_updates.update(update.attrs(self.dynamizer))
expected.update(update.expected(self.dynamizer))
# Pull the 'expected' constraints from the kwargs
for k, v in six.iteritems(encode_query_kwargs(self.dynamizer, kwargs)):
if k in expected:
raise ValueError("Cannot have more than one condition on a single field")
expected[k] = v
if expected:
keywords['Expected'] = expected
if len(expected) > 1:
keywords['ConditionalOperator'] = 'OR' if expect_or else 'AND'
result = self.call('update_item', TableName=tablename, Key=key,
AttributeUpdates=attr_updates,
ReturnValues=returns,
**keywords)
if result:
return Result(self.dynamizer, result, 'Attributes') | [
"def",
"update_item",
"(",
"self",
",",
"tablename",
",",
"key",
",",
"updates",
",",
"returns",
"=",
"NONE",
",",
"return_capacity",
"=",
"None",
",",
"expect_or",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"self",
".",
"dynamizer",
".",
"encode_keys",
"(",
"key",
")",
"attr_updates",
"=",
"{",
"}",
"expected",
"=",
"{",
"}",
"keywords",
"=",
"{",
"'ReturnConsumedCapacity'",
":",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
",",
"}",
"for",
"update",
"in",
"updates",
":",
"attr_updates",
".",
"update",
"(",
"update",
".",
"attrs",
"(",
"self",
".",
"dynamizer",
")",
")",
"expected",
".",
"update",
"(",
"update",
".",
"expected",
"(",
"self",
".",
"dynamizer",
")",
")",
"# Pull the 'expected' constraints from the kwargs",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"encode_query_kwargs",
"(",
"self",
".",
"dynamizer",
",",
"kwargs",
")",
")",
":",
"if",
"k",
"in",
"expected",
":",
"raise",
"ValueError",
"(",
"\"Cannot have more than one condition on a single field\"",
")",
"expected",
"[",
"k",
"]",
"=",
"v",
"if",
"expected",
":",
"keywords",
"[",
"'Expected'",
"]",
"=",
"expected",
"if",
"len",
"(",
"expected",
")",
">",
"1",
":",
"keywords",
"[",
"'ConditionalOperator'",
"]",
"=",
"'OR'",
"if",
"expect_or",
"else",
"'AND'",
"result",
"=",
"self",
".",
"call",
"(",
"'update_item'",
",",
"TableName",
"=",
"tablename",
",",
"Key",
"=",
"key",
",",
"AttributeUpdates",
"=",
"attr_updates",
",",
"ReturnValues",
"=",
"returns",
",",
"*",
"*",
"keywords",
")",
"if",
"result",
":",
"return",
"Result",
"(",
"self",
".",
"dynamizer",
",",
"result",
",",
"'Attributes'",
")"
] | Update a single item in a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.update_item2`.
Parameters
----------
tablename : str
Name of the table to update
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
updates : list
List of :class:`~dynamo3.batch.ItemUpdate`
returns : {NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW}, optional
Return either the old or new values, either all attributes or just
the ones that changed. (default NONE)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
expect_or : bool, optional
If True, the updates conditionals will be OR'd together. If False,
they will be AND'd. (default False).
**kwargs : dict, optional
Conditional filter on the update. Same format as the kwargs for
:meth:`~.scan`.
Notes
-----
There are two ways to specify the expected values of fields. The
simplest is via the list of updates. Each updated field may specify a
constraint on the current value of that field. You may pass additional
constraints in via the **kwargs the same way you would for put_item.
This is necessary if you have constraints on fields that are not being
updated. | [
"Update",
"a",
"single",
"item",
"in",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L813-L880 | train |
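`update_item` consumes a list of `ItemUpdate` objects from `dynamo3.batch` (the class the docstring references). The `ItemUpdate.put`/`ItemUpdate.add` convenience constructors used below are assumptions about that helper's API:

```python
from dynamo3 import DynamoDBConnection
from dynamo3.batch import ItemUpdate  # import path taken from the docstring

conn = DynamoDBConnection.connect('us-east-1')  # hypothetical setup
updates = [
    ItemUpdate.put('foo', 'bar'),  # assumed constructor: overwrite 'foo'
    ItemUpdate.add('counter', 1),  # assumed constructor: increment 'counter'
]
new_item = conn.update_item('mytable', {'id': 'a'}, updates,
                            returns='ALL_NEW')  # assumed constant value
```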
stevearc/dynamo3 | dynamo3/connection.py | DynamoDBConnection.query | def query(self, tablename, attributes=None, consistent=False, count=False,
index=None, limit=None, desc=False, return_capacity=None,
filter=None, filter_or=False, exclusive_start_key=None, **kwargs):
"""
Perform an index query on a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.query2`.
Parameters
----------
tablename : str
Name of the table to query
attributes : list
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
count : bool, optional
If True, return a count of matched items instead of the items
themselves (default False)
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
desc : bool, optional
If True, return items in descending order (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : dict, optional
Query arguments. Same format as **kwargs, but these arguments
filter the results on the server before they are returned. They
will NOT use an index, as that is what the **kwargs are for.
filter_or : bool, optional
If True, multiple filter args will be OR'd together. If False, they
will be AND'd together. (default False)
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
Query arguments (examples below)
Examples
--------
You may pass in constraints using the Django-style '__' syntax. For
example:
.. code-block:: python
connection.query('mytable', foo__eq=5)
connection.query('mytable', foo__eq=5, bar__lt=22)
connection.query('mytable', foo__eq=5, bar__between=(1, 10))
"""
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ConsistentRead': consistent,
'ScanIndexForward': not desc,
'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs),
}
if attributes is not None:
keywords['AttributesToGet'] = attributes
if index is not None:
keywords['IndexName'] = index
if filter is not None:
if len(filter) > 1:
keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND'
keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer,
filter)
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if not isinstance(limit, Limit):
limit = Limit(limit)
if count:
keywords['Select'] = COUNT
return self._count('query', limit, keywords)
else:
return ResultSet(self, limit, 'query', **keywords) | python | def query(self, tablename, attributes=None, consistent=False, count=False,
index=None, limit=None, desc=False, return_capacity=None,
filter=None, filter_or=False, exclusive_start_key=None, **kwargs):
"""
Perform an index query on a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.query2`.
Parameters
----------
tablename : str
Name of the table to query
attributes : list
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
count : bool, optional
If True, return a count of matched items instead of the items
themselves (default False)
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
desc : bool, optional
If True, return items in descending order (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : dict, optional
Query arguments. Same format as **kwargs, but these arguments
filter the results on the server before they are returned. They
will NOT use an index, as that is what the **kwargs are for.
filter_or : bool, optional
If True, multiple filter args will be OR'd together. If False, they
will be AND'd together. (default False)
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
Query arguments (examples below)
Examples
--------
You may pass in constraints using the Django-style '__' syntax. For
example:
.. code-block:: python
connection.query('mytable', foo__eq=5)
connection.query('mytable', foo__eq=5, bar__lt=22)
connection.query('mytable', foo__eq=5, bar__between=(1, 10))
"""
keywords = {
'TableName': tablename,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
'ConsistentRead': consistent,
'ScanIndexForward': not desc,
'KeyConditions': encode_query_kwargs(self.dynamizer, kwargs),
}
if attributes is not None:
keywords['AttributesToGet'] = attributes
if index is not None:
keywords['IndexName'] = index
if filter is not None:
if len(filter) > 1:
keywords['ConditionalOperator'] = 'OR' if filter_or else 'AND'
keywords['QueryFilter'] = encode_query_kwargs(self.dynamizer,
filter)
if exclusive_start_key is not None:
keywords['ExclusiveStartKey'] = \
self.dynamizer.maybe_encode_keys(exclusive_start_key)
if not isinstance(limit, Limit):
limit = Limit(limit)
if count:
keywords['Select'] = COUNT
return self._count('query', limit, keywords)
else:
return ResultSet(self, limit, 'query', **keywords) | [
"def",
"query",
"(",
"self",
",",
"tablename",
",",
"attributes",
"=",
"None",
",",
"consistent",
"=",
"False",
",",
"count",
"=",
"False",
",",
"index",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"desc",
"=",
"False",
",",
"return_capacity",
"=",
"None",
",",
"filter",
"=",
"None",
",",
"filter_or",
"=",
"False",
",",
"exclusive_start_key",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"keywords",
"=",
"{",
"'TableName'",
":",
"tablename",
",",
"'ReturnConsumedCapacity'",
":",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
",",
"'ConsistentRead'",
":",
"consistent",
",",
"'ScanIndexForward'",
":",
"not",
"desc",
",",
"'KeyConditions'",
":",
"encode_query_kwargs",
"(",
"self",
".",
"dynamizer",
",",
"kwargs",
")",
",",
"}",
"if",
"attributes",
"is",
"not",
"None",
":",
"keywords",
"[",
"'AttributesToGet'",
"]",
"=",
"attributes",
"if",
"index",
"is",
"not",
"None",
":",
"keywords",
"[",
"'IndexName'",
"]",
"=",
"index",
"if",
"filter",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"filter",
")",
">",
"1",
":",
"keywords",
"[",
"'ConditionalOperator'",
"]",
"=",
"'OR'",
"if",
"filter_or",
"else",
"'AND'",
"keywords",
"[",
"'QueryFilter'",
"]",
"=",
"encode_query_kwargs",
"(",
"self",
".",
"dynamizer",
",",
"filter",
")",
"if",
"exclusive_start_key",
"is",
"not",
"None",
":",
"keywords",
"[",
"'ExclusiveStartKey'",
"]",
"=",
"self",
".",
"dynamizer",
".",
"maybe_encode_keys",
"(",
"exclusive_start_key",
")",
"if",
"not",
"isinstance",
"(",
"limit",
",",
"Limit",
")",
":",
"limit",
"=",
"Limit",
"(",
"limit",
")",
"if",
"count",
":",
"keywords",
"[",
"'Select'",
"]",
"=",
"COUNT",
"return",
"self",
".",
"_count",
"(",
"'query'",
",",
"limit",
",",
"keywords",
")",
"else",
":",
"return",
"ResultSet",
"(",
"self",
",",
"limit",
",",
"'query'",
",",
"*",
"*",
"keywords",
")"
] | Perform an index query on a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.query2`.
Parameters
----------
tablename : str
Name of the table to query
attributes : list
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
count : bool, optional
If True, return a count of matched items instead of the items
themselves (default False)
index : str, optional
The name of the index to query
limit : int, optional
Maximum number of items to return
desc : bool, optional
If True, return items in descending order (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
filter : dict, optional
Query arguments. Same format as **kwargs, but these arguments
filter the results on the server before they are returned. They
will NOT use an index, as that is what the **kwargs are for.
filter_or : bool, optional
If True, multiple filter args will be OR'd together. If False, they
will be AND'd together. (default False)
exclusive_start_key : dict, optional
The ExclusiveStartKey to resume a previous query
**kwargs : dict, optional
Query arguments (examples below)
Examples
--------
You may pass in constraints using the Django-style '__' syntax. For
example:
.. code-block:: python
connection.query('mytable', foo__eq=5)
connection.query('mytable', foo__eq=5, bar__lt=22)
connection.query('mytable', foo__eq=5, bar__between=(1, 10)) | [
"Perform",
"an",
"index",
"query",
"on",
"a",
"table"
] | f897c40ece28586272dbcab8f0d99a14a1831dda | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L1099-L1178 | train |
etal/biofrills | biofrills/logoutils.py | read_logodata | def read_logodata(handle):
"""Get weblogo data for a sequence alignment.
Returns a list of tuples: (posn, letter_counts, entropy, weight)
"""
seqs = weblogolib.read_seq_data(handle,
alphabet=unambiguous_protein_alphabet)
ldata = weblogolib.LogoData.from_seqs(seqs)
letters = ldata.alphabet.letters()
counts = ldata.counts.array
logodata = []
for i, coldata, entropy, weight in zip(range(len(counts)), counts,
ldata.entropy, ldata.weight):
cnts = dict((let, int(cnt))
for let, cnt in zip(letters, coldata))
logodata.append((i + 1, cnts, entropy, weight))
return logodata | python | def read_logodata(handle):
"""Get weblogo data for a sequence alignment.
Returns a list of tuples: (posn, letter_counts, entropy, weight)
"""
seqs = weblogolib.read_seq_data(handle,
alphabet=unambiguous_protein_alphabet)
ldata = weblogolib.LogoData.from_seqs(seqs)
letters = ldata.alphabet.letters()
counts = ldata.counts.array
logodata = []
for i, coldata, entropy, weight in zip(range(len(counts)), counts,
ldata.entropy, ldata.weight):
cnts = dict((let, int(cnt))
for let, cnt in zip(letters, coldata))
logodata.append((i + 1, cnts, entropy, weight))
return logodata | [
"def",
"read_logodata",
"(",
"handle",
")",
":",
"seqs",
"=",
"weblogolib",
".",
"read_seq_data",
"(",
"handle",
",",
"alphabet",
"=",
"unambiguous_protein_alphabet",
")",
"ldata",
"=",
"weblogolib",
".",
"LogoData",
".",
"from_seqs",
"(",
"seqs",
")",
"letters",
"=",
"ldata",
".",
"alphabet",
".",
"letters",
"(",
")",
"counts",
"=",
"ldata",
".",
"counts",
".",
"array",
"logodata",
"=",
"[",
"]",
"for",
"i",
",",
"coldata",
",",
"entropy",
",",
"weight",
"in",
"zip",
"(",
"range",
"(",
"len",
"(",
"counts",
")",
")",
",",
"counts",
",",
"ldata",
".",
"entropy",
",",
"ldata",
".",
"weight",
")",
":",
"cnts",
"=",
"dict",
"(",
"(",
"let",
",",
"int",
"(",
"cnt",
")",
")",
"for",
"let",
",",
"cnt",
"in",
"zip",
"(",
"letters",
",",
"coldata",
")",
")",
"logodata",
".",
"append",
"(",
"(",
"i",
"+",
"1",
",",
"cnts",
",",
"entropy",
",",
"weight",
")",
")",
"return",
"logodata"
] | Get weblogo data for a sequence alignment.
Returns a list of tuples: (posn, letter_counts, entropy, weight) | [
"Get",
"weblogo",
"data",
"for",
"a",
"sequence",
"alignment",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/logoutils.py#L9-L25 | train |
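A usage sketch for `read_logodata`; the file name is hypothetical and the tuple layout comes straight from the docstring:

```python
from biofrills.logoutils import read_logodata

with open('alignment.fasta') as handle:  # hypothetical alignment file
    for posn, counts, entropy, weight in read_logodata(handle):
        print('%d: entropy=%.3f weight=%.3f %r'
              % (posn, entropy, weight, counts))
```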
etal/biofrills | biofrills/logoutils.py | aln2logodata | def aln2logodata(aln):
"""Get weblogo data for an alignment object.
Returns a list of tuples: (posn, letter_counts, entropy, weight)
"""
handle = StringIO(aln.format('fasta'))
logodata = read_logodata(handle)
handle.close()
return logodata | python | def aln2logodata(aln):
"""Get weblogo data for an alignment object.
Returns a list of tuples: (posn, letter_counts, entropy, weight)
"""
handle = StringIO(aln.format('fasta'))
logodata = read_logodata(handle)
handle.close()
return logodata | [
"def",
"aln2logodata",
"(",
"aln",
")",
":",
"handle",
"=",
"StringIO",
"(",
"aln",
".",
"format",
"(",
"'fasta'",
")",
")",
"logodata",
"=",
"read_logodata",
"(",
"handle",
")",
"handle",
".",
"close",
"(",
")",
"return",
"logodata"
] | Get weblogo data for an alignment object.
Returns a list of tuples: (posn, letter_counts, entropy, weight) | [
"Get",
"weblogo",
"data",
"for",
"an",
"alignment",
"object",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/logoutils.py#L28-L36 | train |
etal/biofrills | biofrills/logoutils.py | letter_scales | def letter_scales(counts):
"""Convert letter counts to frequencies, sorted increasing."""
try:
scale = 1.0 / sum(counts.values())
except ZeroDivisionError:
# This logo is all gaps, nothing can be done
return []
freqs = [(aa, cnt*scale) for aa, cnt in counts.iteritems() if cnt]
freqs.sort(key=lambda pair: pair[1])
return freqs | python | def letter_scales(counts):
"""Convert letter counts to frequencies, sorted increasing."""
try:
scale = 1.0 / sum(counts.values())
except ZeroDivisionError:
# This logo is all gaps, nothing can be done
return []
freqs = [(aa, cnt*scale) for aa, cnt in counts.iteritems() if cnt]
freqs.sort(key=lambda pair: pair[1])
return freqs | [
"def",
"letter_scales",
"(",
"counts",
")",
":",
"try",
":",
"scale",
"=",
"1.0",
"/",
"sum",
"(",
"counts",
".",
"values",
"(",
")",
")",
"except",
"ZeroDivisionError",
":",
"# This logo is all gaps, nothing can be done",
"return",
"[",
"]",
"freqs",
"=",
"[",
"(",
"aa",
",",
"cnt",
"*",
"scale",
")",
"for",
"aa",
",",
"cnt",
"in",
"counts",
".",
"iteritems",
"(",
")",
"if",
"cnt",
"]",
"freqs",
".",
"sort",
"(",
"key",
"=",
"lambda",
"pair",
":",
"pair",
"[",
"1",
"]",
")",
"return",
"freqs"
] | Convert letter counts to frequencies, sorted increasing. | [
"Convert",
"letter",
"counts",
"to",
"frequencies",
"sorted",
"increasing",
"."
] | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/logoutils.py#L46-L55 | train |
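A worked example of `letter_scales` (Python 2, since the function relies on `dict.iteritems`): zero counts are dropped, the rest are normalised and sorted by increasing frequency.

```python
from biofrills.logoutils import letter_scales

counts = {'A': 3, 'C': 1, 'G': 0}
print(letter_scales(counts))
# -> [('C', 0.25), ('A', 0.75)]; 'G' is dropped because its count is 0
```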
johnnoone/json-spec | src/jsonspec/operations/__init__.py | replace | def replace(doc, pointer, value):
"""Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
"""
return Target(doc).replace(pointer, value).document | python | def replace(doc, pointer, value):
"""Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
"""
return Target(doc).replace(pointer, value).document | [
"def",
"replace",
"(",
"doc",
",",
"pointer",
",",
"value",
")",
":",
"return",
"Target",
"(",
"doc",
")",
".",
"replace",
"(",
"pointer",
",",
"value",
")",
".",
"document"
] | Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value. | [
"Replace",
"element",
"from",
"sequence",
"member",
"from",
"mapping",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/operations/__init__.py#L49-L64 | train |
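A usage sketch for `replace`, with the path given as a standard JSON Pointer:

```python
from jsonspec.operations import replace

doc = {'foo': {'bar': 'baz'}}
result = replace(doc, '/foo/bar', 'qux')
print(result)  # -> {'foo': {'bar': 'qux'}}
```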
heronotears/lazyxml | lazyxml/parser.py | Parser.set_options | def set_options(self, **kw):
r"""Set Parser options.
.. seealso::
``kw`` arguments have the same meaning as in :func:`lazyxml.loads`
"""
for k, v in kw.iteritems():
if k in self.__options:
self.__options[k] = v | python | def set_options(self, **kw):
r"""Set Parser options.
.. seealso::
``kw`` arguments have the same meaning as in :func:`lazyxml.loads`
"""
for k, v in kw.iteritems():
if k in self.__options:
self.__options[k] = v | [
"def",
"set_options",
"(",
"self",
",",
"*",
"*",
"kw",
")",
":",
"for",
"k",
",",
"v",
"in",
"kw",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"__options",
":",
"self",
".",
"__options",
"[",
"k",
"]",
"=",
"v"
] | r"""Set Parser options.
.. seealso::
``kw`` arguments have the same meaning as in :func:`lazyxml.loads` | [
"r",
"Set",
"Parser",
"options",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L39-L47 | train |
heronotears/lazyxml | lazyxml/parser.py | Parser.xml2object | def xml2object(self, content):
r"""Convert xml content to python object.
:param content: xml content
:rtype: dict
.. versionadded:: 1.2
"""
content = self.xml_filter(content)
element = ET.fromstring(content)
tree = self.parse(element) if self.__options['strip_attr'] else self.parse_full(element)
if not self.__options['strip_root']:
node = self.get_node(element)
if not self.__options['strip_attr']:
tree['attrs'] = node['attr']
return {node['tag']: tree}
return tree | python | def xml2object(self, content):
r"""Convert xml content to python object.
:param content: xml content
:rtype: dict
.. versionadded:: 1.2
"""
content = self.xml_filter(content)
element = ET.fromstring(content)
tree = self.parse(element) if self.__options['strip_attr'] else self.parse_full(element)
if not self.__options['strip_root']:
node = self.get_node(element)
if not self.__options['strip_attr']:
tree['attrs'] = node['attr']
return {node['tag']: tree}
return tree | [
"def",
"xml2object",
"(",
"self",
",",
"content",
")",
":",
"content",
"=",
"self",
".",
"xml_filter",
"(",
"content",
")",
"element",
"=",
"ET",
".",
"fromstring",
"(",
"content",
")",
"tree",
"=",
"self",
".",
"parse",
"(",
"element",
")",
"if",
"self",
".",
"__options",
"[",
"'strip_attr'",
"]",
"else",
"self",
".",
"parse_full",
"(",
"element",
")",
"if",
"not",
"self",
".",
"__options",
"[",
"'strip_root'",
"]",
":",
"node",
"=",
"self",
".",
"get_node",
"(",
"element",
")",
"if",
"not",
"self",
".",
"__options",
"[",
"'strip_attr'",
"]",
":",
"tree",
"[",
"'attrs'",
"]",
"=",
"node",
"[",
"'attr'",
"]",
"return",
"{",
"node",
"[",
"'tag'",
"]",
":",
"tree",
"}",
"return",
"tree"
] | r"""Convert xml content to python object.
:param content: xml content
:rtype: dict
.. versionadded:: 1.2 | [
"r",
"Convert",
"xml",
"content",
"to",
"python",
"object",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L64-L80 | train |
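A sketch of `xml2object`, assuming a no-argument `Parser` constructor and that leaf values come back as the element text (neither is shown in these records):

```python
from lazyxml.parser import Parser  # assumed import path

parser = Parser()  # hypothetical construction
parser.set_options(strip_root=False, strip_attr=True)
data = parser.xml2object('<root><a>1</a><b>2</b></root>')
# Under these options the root tag is kept: {'root': {'a': '1', 'b': '2'}}
```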
heronotears/lazyxml | lazyxml/parser.py | Parser.xml_filter | def xml_filter(self, content):
r"""Filter and preprocess xml content
:param content: xml content
:rtype: str
"""
content = utils.strip_whitespace(content, True) if self.__options['strip'] else content.strip()
if not self.__options['encoding']:
encoding = self.guess_xml_encoding(content) or self.__encoding
self.set_options(encoding=encoding)
if self.__options['encoding'].lower() != self.__encoding:
# 编码转换去除xml头
content = self.strip_xml_header(content.decode(self.__options['encoding'], errors=self.__options['errors']))
if self.__options['unescape']:
content = utils.html_entity_decode(content)
return content | python | def xml_filter(self, content):
r"""Filter and preprocess xml content
:param content: xml content
:rtype: str
"""
content = utils.strip_whitespace(content, True) if self.__options['strip'] else content.strip()
if not self.__options['encoding']:
encoding = self.guess_xml_encoding(content) or self.__encoding
self.set_options(encoding=encoding)
if self.__options['encoding'].lower() != self.__encoding:
# 编码转换去除xml头
content = self.strip_xml_header(content.decode(self.__options['encoding'], errors=self.__options['errors']))
if self.__options['unescape']:
content = utils.html_entity_decode(content)
return content | [
"def",
"xml_filter",
"(",
"self",
",",
"content",
")",
":",
"content",
"=",
"utils",
".",
"strip_whitespace",
"(",
"content",
",",
"True",
")",
"if",
"self",
".",
"__options",
"[",
"'strip'",
"]",
"else",
"content",
".",
"strip",
"(",
")",
"if",
"not",
"self",
".",
"__options",
"[",
"'encoding'",
"]",
":",
"encoding",
"=",
"self",
".",
"guess_xml_encoding",
"(",
"content",
")",
"or",
"self",
".",
"__encoding",
"self",
".",
"set_options",
"(",
"encoding",
"=",
"encoding",
")",
"if",
"self",
".",
"__options",
"[",
"'encoding'",
"]",
".",
"lower",
"(",
")",
"!=",
"self",
".",
"__encoding",
":",
"# 编码转换去除xml头",
"content",
"=",
"self",
".",
"strip_xml_header",
"(",
"content",
".",
"decode",
"(",
"self",
".",
"__options",
"[",
"'encoding'",
"]",
",",
"errors",
"=",
"self",
".",
"__options",
"[",
"'errors'",
"]",
")",
")",
"if",
"self",
".",
"__options",
"[",
"'unescape'",
"]",
":",
"content",
"=",
"utils",
".",
"html_entity_decode",
"(",
"content",
")",
"return",
"content"
] | r"""Filter and preprocess xml content
:param content: xml content
:rtype: str | [
"r",
"Filter",
"and",
"preprocess",
"xml",
"content"
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L82-L100 | train |
heronotears/lazyxml | lazyxml/parser.py | Parser.guess_xml_encoding | def guess_xml_encoding(self, content):
r"""Guess encoding from xml header declaration.
:param content: xml content
:rtype: str or None
"""
matchobj = self.__regex['xml_encoding'].match(content)
return matchobj and matchobj.group(1).lower() | python | def guess_xml_encoding(self, content):
r"""Guess encoding from xml header declaration.
:param content: xml content
:rtype: str or None
"""
matchobj = self.__regex['xml_encoding'].match(content)
return matchobj and matchobj.group(1).lower() | [
"def",
"guess_xml_encoding",
"(",
"self",
",",
"content",
")",
":",
"matchobj",
"=",
"self",
".",
"__regex",
"[",
"'xml_encoding'",
"]",
".",
"match",
"(",
"content",
")",
"return",
"matchobj",
"and",
"matchobj",
".",
"group",
"(",
"1",
")",
".",
"lower",
"(",
")"
] | r"""Guess encoding from xml header declaration.
:param content: xml content
:rtype: str or None | [
"r",
"Guess",
"encoding",
"from",
"xml",
"header",
"declaration",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L102-L109 | train |
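`guess_xml_encoding` matches the encoding attribute of the XML declaration and lower-cases it; without a declaration there is nothing to match:

```python
from lazyxml.parser import Parser  # assumed import path

parser = Parser()  # hypothetical construction
print(parser.guess_xml_encoding('<?xml version="1.0" encoding="GBK"?><root/>'))
# -> 'gbk'
print(parser.guess_xml_encoding('<root/>'))
# -> None
```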
heronotears/lazyxml | lazyxml/parser.py | Parser.parse | def parse(self, element):
r"""Parse xml element.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
"""
values = {}
for child in element:
node = self.get_node(child)
subs = self.parse(child)
value = subs or node['value']
if node['tag'] not in values:
values[node['tag']] = value
else:
if not isinstance(values[node['tag']], list):
values[node['tag']] = [values.pop(node['tag'])]
values[node['tag']].append(value)
return values | python | def parse(self, element):
r"""Parse xml element.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
"""
values = {}
for child in element:
node = self.get_node(child)
subs = self.parse(child)
value = subs or node['value']
if node['tag'] not in values:
values[node['tag']] = value
else:
if not isinstance(values[node['tag']], list):
values[node['tag']] = [values.pop(node['tag'])]
values[node['tag']].append(value)
return values | [
"def",
"parse",
"(",
"self",
",",
"element",
")",
":",
"values",
"=",
"{",
"}",
"for",
"child",
"in",
"element",
":",
"node",
"=",
"self",
".",
"get_node",
"(",
"child",
")",
"subs",
"=",
"self",
".",
"parse",
"(",
"child",
")",
"value",
"=",
"subs",
"or",
"node",
"[",
"'value'",
"]",
"if",
"node",
"[",
"'tag'",
"]",
"not",
"in",
"values",
":",
"values",
"[",
"node",
"[",
"'tag'",
"]",
"]",
"=",
"value",
"else",
":",
"if",
"not",
"isinstance",
"(",
"values",
"[",
"node",
"[",
"'tag'",
"]",
"]",
",",
"list",
")",
":",
"values",
"[",
"node",
"[",
"'tag'",
"]",
"]",
"=",
"[",
"values",
".",
"pop",
"(",
"node",
"[",
"'tag'",
"]",
")",
"]",
"values",
"[",
"node",
"[",
"'tag'",
"]",
"]",
".",
"append",
"(",
"value",
")",
"return",
"values"
] | r"""Parse xml element.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict | [
"r",
"Parse",
"xml",
"element",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L119-L136 | train |
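
A usage sketch of the recursion above, rewritten over xml.etree so it runs without the lazyxml internals; repeated sibling tags collapse into a list exactly as in the record:

import xml.etree.ElementTree as ET

def element_to_dict(element):
    values = {}
    for child in element:
        sub = element_to_dict(child)
        value = sub or (child.text or '').strip()
        if child.tag not in values:
            values[child.tag] = value
        else:
            if not isinstance(values[child.tag], list):
                values[child.tag] = [values.pop(child.tag)]  # promote scalar to list
            values[child.tag].append(value)
    return values

root = ET.fromstring('<root><a>1</a><a>2</a><b><c>x</c></b></root>')
print(element_to_dict(root))  # {'a': ['1', '2'], 'b': {'c': 'x'}}
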
heronotears/lazyxml | lazyxml/parser.py | Parser.parse_full | def parse_full(self, element):
r"""Parse xml element include the node attributes.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
.. versionadded:: 1.2.1
"""
values = collections.defaultdict(dict)
for child in element:
node = self.get_node(child)
subs = self.parse_full(child)
value = subs or {'values': node['value']}
value['attrs'] = node['attr']
if node['tag'] not in values['values']:
values['values'][node['tag']] = value
else:
if not isinstance(values['values'][node['tag']], list):
values['values'][node['tag']] = [values['values'].pop(node['tag'])]
values['values'][node['tag']].append(value)
return values | python | def parse_full(self, element):
r"""Parse xml element include the node attributes.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
.. versionadded:: 1.2.1
"""
values = collections.defaultdict(dict)
for child in element:
node = self.get_node(child)
subs = self.parse_full(child)
value = subs or {'values': node['value']}
value['attrs'] = node['attr']
if node['tag'] not in values['values']:
values['values'][node['tag']] = value
else:
if not isinstance(values['values'][node['tag']], list):
values['values'][node['tag']] = [values['values'].pop(node['tag'])]
values['values'][node['tag']].append(value)
return values | [
"def",
"parse_full",
"(",
"self",
",",
"element",
")",
":",
"values",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"child",
"in",
"element",
":",
"node",
"=",
"self",
".",
"get_node",
"(",
"child",
")",
"subs",
"=",
"self",
".",
"parse_full",
"(",
"child",
")",
"value",
"=",
"subs",
"or",
"{",
"'values'",
":",
"node",
"[",
"'value'",
"]",
"}",
"value",
"[",
"'attrs'",
"]",
"=",
"node",
"[",
"'attr'",
"]",
"if",
"node",
"[",
"'tag'",
"]",
"not",
"in",
"values",
"[",
"'values'",
"]",
":",
"values",
"[",
"'values'",
"]",
"[",
"node",
"[",
"'tag'",
"]",
"]",
"=",
"value",
"else",
":",
"if",
"not",
"isinstance",
"(",
"values",
"[",
"'values'",
"]",
"[",
"node",
"[",
"'tag'",
"]",
"]",
",",
"list",
")",
":",
"values",
"[",
"'values'",
"]",
"[",
"node",
"[",
"'tag'",
"]",
"]",
"=",
"[",
"values",
"[",
"'values'",
"]",
".",
"pop",
"(",
"node",
"[",
"'tag'",
"]",
")",
"]",
"values",
"[",
"'values'",
"]",
"[",
"node",
"[",
"'tag'",
"]",
"]",
".",
"append",
"(",
"value",
")",
"return",
"values"
] | r"""Parse xml element include the node attributes.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
.. versionadded:: 1.2.1 | [
"r",
"Parse",
"xml",
"element",
"include",
"the",
"node",
"attributes",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L138-L158 | train |
heronotears/lazyxml | lazyxml/parser.py | Parser.get_node | def get_node(self, element):
r"""Get node info.
Parse element and get the element tag info, including tag name, value, attributes and namespace.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
"""
ns, tag = self.split_namespace(element.tag)
return {'tag': tag, 'value': (element.text or '').strip(), 'attr': element.attrib, 'namespace': ns} | python | def get_node(self, element):
r"""Get node info.
Parse element and get the element tag info, including tag name, value, attributes and namespace.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict
"""
ns, tag = self.split_namespace(element.tag)
return {'tag': tag, 'value': (element.text or '').strip(), 'attr': element.attrib, 'namespace': ns} | [
"def",
"get_node",
"(",
"self",
",",
"element",
")",
":",
"ns",
",",
"tag",
"=",
"self",
".",
"split_namespace",
"(",
"element",
".",
"tag",
")",
"return",
"{",
"'tag'",
":",
"tag",
",",
"'value'",
":",
"(",
"element",
".",
"text",
"or",
"''",
")",
".",
"strip",
"(",
")",
",",
"'attr'",
":",
"element",
".",
"attrib",
",",
"'namespace'",
":",
"ns",
"}"
] | r"""Get node info.
Parse element and get the element tag info, including tag name, value, attributes and namespace.
:param element: an :class:`~xml.etree.ElementTree.Element` instance
:rtype: dict | [
"r",
"Get",
"node",
"info",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L160-L169 | train |
heronotears/lazyxml | lazyxml/parser.py | Parser.split_namespace | def split_namespace(self, tag):
r"""Split tag namespace.
:param tag: tag name
:return: a pair of (namespace, tag)
:rtype: tuple
"""
matchobj = self.__regex['xml_ns'].search(tag)
return matchobj.groups() if matchobj else ('', tag) | python | def split_namespace(self, tag):
r"""Split tag namespace.
:param tag: tag name
:return: a pair of (namespace, tag)
:rtype: tuple
"""
matchobj = self.__regex['xml_ns'].search(tag)
return matchobj.groups() if matchobj else ('', tag) | [
"def",
"split_namespace",
"(",
"self",
",",
"tag",
")",
":",
"matchobj",
"=",
"self",
".",
"__regex",
"[",
"'xml_ns'",
"]",
".",
"search",
"(",
"tag",
")",
"return",
"matchobj",
".",
"groups",
"(",
")",
"if",
"matchobj",
"else",
"(",
"''",
",",
"tag",
")"
] | r"""Split tag namespace.
:param tag: tag name
:return: a pair of (namespace, tag)
:rtype: tuple | [
"r",
"Split",
"tag",
"namespace",
"."
] | e3f1ebd3f34cfa03d022ddec90e17d60c1c81953 | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/parser.py#L171-L179 | train |
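
As with the encoding pattern, the 'xml_ns' regex is module-internal and not shown here; an assumed stand-in matching ElementTree's '{uri}tag' form:

import re

XML_NS = re.compile(r'^\{(.+?)\}(.+)$')  # assumed pattern for '{uri}tag'

def split_namespace_sketch(tag):
    m = XML_NS.search(tag)
    return m.groups() if m else ('', tag)

print(split_namespace_sketch('{http://example.com/ns}item'))  # ('http://example.com/ns', 'item')
print(split_namespace_sketch('item'))                         # ('', 'item')
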
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/loading.py | instantiate_probes | def instantiate_probes(probes, instruments):
"""
Creates instances of the input probes;
Args:
probes: probes is a nested dictionary with
(key, sub_dict ) = (name of the probe, {'probe_name': value_probe, 'instrument_name': value_inst}),
where value_probe is a valid name of a probe in the instrument named value_inst
for example probes = {'detector signal': {'probe_name': "AI0", 'instrument_name': "my_DAQ"}}
Returns:
a dictionary with (key,sub_dict) = (name of probe, reference to probe) for all of the probes
passed to the function that were successfully imported and initialized. Otherwise, probes are omitted
in the output dictionary.
"""
probe_instances = {}
for name, sub_dict in probes.items():
assert isinstance(sub_dict, dict)
assert "probe_name" in sub_dict
assert "instrument_name" in sub_dict
probe_name = sub_dict['probe_name']
instrument_name = sub_dict['instrument_name']
if "probe_info" in sub_dict:
probe_info = sub_dict['probe_info']
else:
probe_info = ''
assert instrument_name in instruments, "{:s} not in {:s}".format(instrument_name, list(instruments.keys()))
assert probe_name in instruments[instrument_name]._PROBES
probe_instances.update({name: Probe(instruments[instrument_name], probe_name, name, probe_info)})
return probe_instances | python | def instantiate_probes(probes, instruments):
"""
Creates instances of the input probes;
Args:
probes: probes is a nested dictionary with
(key, sub_dict ) = (name of the probe, {'probe_name': value_probe, 'instrument_name': value_inst}),
where value_probe is a valid name of a probe in the instrument named value_inst
for example probes = {'detector signal': {'probe_name': "AI0", 'instrument_name': "my_DAQ"}}
Returns:
a dictionary with (key,sub_dict) = (name of probe, reference to probe) for all of the probes
passed to the function that were successfully imported and initialized. Otherwise, probes are omitted
in the output dictionary.
"""
probe_instances = {}
for name, sub_dict in probes.items():
assert isinstance(sub_dict, dict)
assert "probe_name" in sub_dict
assert "instrument_name" in sub_dict
probe_name = sub_dict['probe_name']
instrument_name = sub_dict['instrument_name']
if "probe_info" in sub_dict:
probe_info = sub_dict['probe_info']
else:
probe_info = ''
assert instrument_name in instruments, "{:s} not in {:s}".format(instrument_name, list(instruments.keys()))
assert probe_name in instruments[instrument_name]._PROBES
probe_instances.update({name: Probe(instruments[instrument_name], probe_name, name, probe_info)})
return probe_instances | [
"def",
"instantiate_probes",
"(",
"probes",
",",
"instruments",
")",
":",
"probe_instances",
"=",
"{",
"}",
"for",
"name",
",",
"sub_dict",
"in",
"probes",
".",
"items",
"(",
")",
":",
"assert",
"isinstance",
"(",
"sub_dict",
",",
"dict",
")",
"assert",
"\"probe_name\"",
"in",
"sub_dict",
"assert",
"\"instrument_name\"",
"in",
"sub_dict",
"probe_name",
"=",
"sub_dict",
"[",
"'probe_name'",
"]",
"instrument_name",
"=",
"sub_dict",
"[",
"'instrument_name'",
"]",
"if",
"\"probe_info\"",
"in",
"sub_dict",
":",
"probe_info",
"=",
"sub_dict",
"[",
"'probe_info'",
"]",
"else",
":",
"probe_info",
"=",
"''",
"assert",
"instrument_name",
"in",
"instruments",
",",
"\"{:s} not in {:s}\"",
".",
"format",
"(",
"instrument_name",
",",
"list",
"(",
"instruments",
".",
"keys",
"(",
")",
")",
")",
"assert",
"probe_name",
"in",
"instruments",
"[",
"instrument_name",
"]",
".",
"_PROBES",
"probe_instances",
".",
"update",
"(",
"{",
"name",
":",
"Probe",
"(",
"instruments",
"[",
"instrument_name",
"]",
",",
"probe_name",
",",
"name",
",",
"probe_info",
")",
"}",
")",
"return",
"probe_instances"
] | Creates instances of the input probes;
Args:
probes: probes is a nested dictionary with
(key, sub_dict ) = (name of the probe, {'probe_name': value_probe, 'instrument_name': value_inst}),
where value_probe is a valid name of a probe in the instrument named value_inst
for example probes = {'detector signal': {'probe_name': "AI0", 'instrument_name': "my_DAQ"}}
Returns:
a dictionary with (key,sub_dict) = (name of probe, reference to probe) for all of the probes
passed to the function that were successfully imported and initialized. Otherwise, probes are omitted
in the output dictionary. | [
"Creates",
"instances",
"of",
"the",
"probes",
"inputed",
";"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/loading.py#L22-L58 | train |
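
The assertions above reduce to a shape check on the nested dict; a dependency-free sketch with plain dicts standing in for Instrument objects (the real code then wraps each validated pair in a Probe):

def validate_probe_specs(probes, instruments):
    for name, spec in probes.items():
        assert isinstance(spec, dict)
        assert 'probe_name' in spec and 'instrument_name' in spec
        inst = instruments[spec['instrument_name']]        # KeyError if unknown instrument
        assert spec['probe_name'] in inst['_PROBES'], name

validate_probe_specs(
    {'detector signal': {'probe_name': 'AI0', 'instrument_name': 'my_DAQ'}},
    {'my_DAQ': {'_PROBES': {'AI0': 'analog input 0'}}},
)
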
emre/lightsteem | lightsteem/broadcast/key_objects.py | PublicKey.compressed | def compressed(self):
""" Derive compressed public key """
order = ecdsa.SECP256k1.generator.order()
p = ecdsa.VerifyingKey.from_string(
compat_bytes(self), curve=ecdsa.SECP256k1).pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
# y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
compat_bytes(chr(2 + (p.y() & 1)), 'ascii') + x_str).decode(
'ascii')
return (compressed) | python | def compressed(self):
""" Derive compressed public key """
order = ecdsa.SECP256k1.generator.order()
p = ecdsa.VerifyingKey.from_string(
compat_bytes(self), curve=ecdsa.SECP256k1).pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
# y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
compat_bytes(chr(2 + (p.y() & 1)), 'ascii') + x_str).decode(
'ascii')
return (compressed) | [
"def",
"compressed",
"(",
"self",
")",
":",
"order",
"=",
"ecdsa",
".",
"SECP256k1",
".",
"generator",
".",
"order",
"(",
")",
"p",
"=",
"ecdsa",
".",
"VerifyingKey",
".",
"from_string",
"(",
"compat_bytes",
"(",
"self",
")",
",",
"curve",
"=",
"ecdsa",
".",
"SECP256k1",
")",
".",
"pubkey",
".",
"point",
"x_str",
"=",
"ecdsa",
".",
"util",
".",
"number_to_string",
"(",
"p",
".",
"x",
"(",
")",
",",
"order",
")",
"# y_str = ecdsa.util.number_to_string(p.y(), order)",
"compressed",
"=",
"hexlify",
"(",
"compat_bytes",
"(",
"chr",
"(",
"2",
"+",
"(",
"p",
".",
"y",
"(",
")",
"&",
"1",
")",
")",
",",
"'ascii'",
")",
"+",
"x_str",
")",
".",
"decode",
"(",
"'ascii'",
")",
"return",
"(",
"compressed",
")"
] | Derive compressed public key | [
"Derive",
"compressed",
"public",
"key"
] | 0fc29a517c20d881cbdbb15b43add4bcf3af242e | https://github.com/emre/lightsteem/blob/0fc29a517c20d881cbdbb15b43add4bcf3af242e/lightsteem/broadcast/key_objects.py#L106-L116 | train |
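
A standalone sketch of the SEC1 compression done above: the prefix byte encodes the parity of y (0x02 even, 0x03 odd), followed by the 32-byte big-endian x coordinate. It uses the same ecdsa calls as the record, on a freshly generated key:

import ecdsa
from binascii import hexlify

sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
point = sk.verifying_key.pubkey.point
order = ecdsa.SECP256k1.generator.order()

x_str = ecdsa.util.number_to_string(point.x(), order)
prefix = bytes([2 + (point.y() & 1)])                  # parity of y picks 0x02/0x03
compressed = hexlify(prefix + x_str).decode('ascii')
print(len(compressed), compressed)                     # 66 hex chars = 33 bytes
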
emre/lightsteem | lightsteem/broadcast/key_objects.py | PublicKey.unCompressed | def unCompressed(self):
""" Derive uncompressed key """
public_key = repr(self._pk)
prefix = public_key[0:2]
if prefix == "04":
return public_key
assert prefix == "02" or prefix == "03"
x = int(public_key[2:], 16)
y = self._derive_y_from_x(x, (prefix == "02"))
key = '04' + '%064x' % x + '%064x' % y
return key | python | def unCompressed(self):
""" Derive uncompressed key """
public_key = repr(self._pk)
prefix = public_key[0:2]
if prefix == "04":
return public_key
assert prefix == "02" or prefix == "03"
x = int(public_key[2:], 16)
y = self._derive_y_from_x(x, (prefix == "02"))
key = '04' + '%064x' % x + '%064x' % y
return key | [
"def",
"unCompressed",
"(",
"self",
")",
":",
"public_key",
"=",
"repr",
"(",
"self",
".",
"_pk",
")",
"prefix",
"=",
"public_key",
"[",
"0",
":",
"2",
"]",
"if",
"prefix",
"==",
"\"04\"",
":",
"return",
"public_key",
"assert",
"prefix",
"==",
"\"02\"",
"or",
"prefix",
"==",
"\"03\"",
"x",
"=",
"int",
"(",
"public_key",
"[",
"2",
":",
"]",
",",
"16",
")",
"y",
"=",
"self",
".",
"_derive_y_from_x",
"(",
"x",
",",
"(",
"prefix",
"==",
"\"02\"",
")",
")",
"key",
"=",
"'04'",
"+",
"'%064x'",
"%",
"x",
"+",
"'%064x'",
"%",
"y",
"return",
"key"
] | Derive uncompressed key | [
"Derive",
"uncompressed",
"key"
] | 0fc29a517c20d881cbdbb15b43add4bcf3af242e | https://github.com/emre/lightsteem/blob/0fc29a517c20d881cbdbb15b43add4bcf3af242e/lightsteem/broadcast/key_objects.py#L118-L128 | train |
emre/lightsteem | lightsteem/broadcast/key_objects.py | PrivateKey.compressedpubkey | def compressedpubkey(self):
""" Derive uncompressed public key """
secret = unhexlify(repr(self._wif))
order = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).curve.generator.order()
p = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).verifying_key.pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
chr(2 + (p.y() & 1)).encode('ascii') + x_str
).decode('ascii')
uncompressed = hexlify(
chr(4).encode('ascii') + x_str + y_str).decode(
'ascii')
return [compressed, uncompressed] | python | def compressedpubkey(self):
""" Derive uncompressed public key """
secret = unhexlify(repr(self._wif))
order = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).curve.generator.order()
p = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).verifying_key.pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
chr(2 + (p.y() & 1)).encode('ascii') + x_str
).decode('ascii')
uncompressed = hexlify(
chr(4).encode('ascii') + x_str + y_str).decode(
'ascii')
return [compressed, uncompressed] | [
"def",
"compressedpubkey",
"(",
"self",
")",
":",
"secret",
"=",
"unhexlify",
"(",
"repr",
"(",
"self",
".",
"_wif",
")",
")",
"order",
"=",
"ecdsa",
".",
"SigningKey",
".",
"from_string",
"(",
"secret",
",",
"curve",
"=",
"ecdsa",
".",
"SECP256k1",
")",
".",
"curve",
".",
"generator",
".",
"order",
"(",
")",
"p",
"=",
"ecdsa",
".",
"SigningKey",
".",
"from_string",
"(",
"secret",
",",
"curve",
"=",
"ecdsa",
".",
"SECP256k1",
")",
".",
"verifying_key",
".",
"pubkey",
".",
"point",
"x_str",
"=",
"ecdsa",
".",
"util",
".",
"number_to_string",
"(",
"p",
".",
"x",
"(",
")",
",",
"order",
")",
"y_str",
"=",
"ecdsa",
".",
"util",
".",
"number_to_string",
"(",
"p",
".",
"y",
"(",
")",
",",
"order",
")",
"compressed",
"=",
"hexlify",
"(",
"chr",
"(",
"2",
"+",
"(",
"p",
".",
"y",
"(",
")",
"&",
"1",
")",
")",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"x_str",
")",
".",
"decode",
"(",
"'ascii'",
")",
"uncompressed",
"=",
"hexlify",
"(",
"chr",
"(",
"4",
")",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"x_str",
"+",
"y_str",
")",
".",
"decode",
"(",
"'ascii'",
")",
"return",
"[",
"compressed",
",",
"uncompressed",
"]"
] | Derive the compressed and uncompressed public keys | [
"Derive",
"uncompressed",
"public",
"key"
] | 0fc29a517c20d881cbdbb15b43add4bcf3af242e | https://github.com/emre/lightsteem/blob/0fc29a517c20d881cbdbb15b43add4bcf3af242e/lightsteem/broadcast/key_objects.py#L174-L189 | train |
emre/lightsteem | lightsteem/broadcast/key_objects.py | PasswordKey.get_private | def get_private(self):
""" Derive private key from the brain key and the current sequence
number
"""
a = compat_bytes(self.account + self.role + self.password, 'utf8')
s = hashlib.sha256(a).digest()
return PrivateKey(hexlify(s).decode('ascii')) | python | def get_private(self):
""" Derive private key from the brain key and the current sequence
number
"""
a = compat_bytes(self.account + self.role + self.password, 'utf8')
s = hashlib.sha256(a).digest()
return PrivateKey(hexlify(s).decode('ascii')) | [
"def",
"get_private",
"(",
"self",
")",
":",
"a",
"=",
"compat_bytes",
"(",
"self",
".",
"account",
"+",
"self",
".",
"role",
"+",
"self",
".",
"password",
",",
"'utf8'",
")",
"s",
"=",
"hashlib",
".",
"sha256",
"(",
"a",
")",
".",
"digest",
"(",
")",
"return",
"PrivateKey",
"(",
"hexlify",
"(",
"s",
")",
".",
"decode",
"(",
"'ascii'",
")",
")"
] | Derive the private key from the account name, role and password | [
"Derive",
"private",
"key",
"from",
"the",
"brain",
"key",
"and",
"the",
"current",
"sequence",
"number"
] | 0fc29a517c20d881cbdbb15b43add4bcf3af242e | https://github.com/emre/lightsteem/blob/0fc29a517c20d881cbdbb15b43add4bcf3af242e/lightsteem/broadcast/key_objects.py#L224-L230 | train |
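
The derivation above in isolation: the private key is simply sha256(account + role + password). A stdlib sketch, with the WIF/PrivateKey encoding left out:

import hashlib

def derive_privkey_hex(account: str, role: str, password: str) -> str:
    seed = (account + role + password).encode('utf8')
    return hashlib.sha256(seed).hexdigest()   # this hex is fed to PrivateKey in the real code

print(derive_privkey_hex('alice', 'posting', 'correct horse battery staple'))
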
lreis2415/PyGeoC | pygeoc/hydro.py | D8Util.downstream_index | def downstream_index(dir_value, i, j, alg='taudem'):
"""find downslope coordinate for D8 direction."""
assert alg.lower() in FlowModelConst.d8_deltas
delta = FlowModelConst.d8_deltas.get(alg.lower())
drow, dcol = delta[int(dir_value)]
return i + drow, j + dcol | python | def downstream_index(dir_value, i, j, alg='taudem'):
"""find downslope coordinate for D8 direction."""
assert alg.lower() in FlowModelConst.d8_deltas
delta = FlowModelConst.d8_deltas.get(alg.lower())
drow, dcol = delta[int(dir_value)]
return i + drow, j + dcol | [
"def",
"downstream_index",
"(",
"dir_value",
",",
"i",
",",
"j",
",",
"alg",
"=",
"'taudem'",
")",
":",
"assert",
"alg",
".",
"lower",
"(",
")",
"in",
"FlowModelConst",
".",
"d8_deltas",
"delta",
"=",
"FlowModelConst",
".",
"d8_deltas",
".",
"get",
"(",
"alg",
".",
"lower",
"(",
")",
")",
"drow",
",",
"dcol",
"=",
"delta",
"[",
"int",
"(",
"dir_value",
")",
"]",
"return",
"i",
"+",
"drow",
",",
"j",
"+",
"dcol"
] | find downslope coordinate for D8 direction. | [
"find",
"downslope",
"coordinate",
"for",
"D8",
"direction",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/hydro.py#L130-L135 | train |
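
FlowModelConst.d8_deltas is defined elsewhere in pygeoc and not shown in this record; the table below is an assumption following the usual TauDEM D8 convention (1 = east, counter-clockwise through 8 = southeast), with row indices growing downward:

TAUDEM_D8_DELTAS = {
    1: (0, 1),  2: (-1, 1),  3: (-1, 0),  4: (-1, -1),
    5: (0, -1), 6: (1, -1),  7: (1, 0),   8: (1, 1),
}

def downstream_index_sketch(dir_value, i, j):
    drow, dcol = TAUDEM_D8_DELTAS[int(dir_value)]
    return i + drow, j + dcol

print(downstream_index_sketch(3, 10, 10))  # (9, 10): one cell north
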
heinzK1X/pylibconfig2 | pylibconfig2/parsing.py | convert_group | def convert_group(tokens):
"""Converts parseResult from to ConfGroup type."""
tok = tokens.asList()
dic = dict(tok)
if not (len(dic) == len(tok)):
raise ParseFatalException("Names in group must be unique: %s" % tokens)
return ConfGroup(dic) | python | def convert_group(tokens):
"""Converts parseResult from to ConfGroup type."""
tok = tokens.asList()
dic = dict(tok)
if not (len(dic) == len(tok)):
raise ParseFatalException("Names in group must be unique: %s" % tokens)
return ConfGroup(dic) | [
"def",
"convert_group",
"(",
"tokens",
")",
":",
"tok",
"=",
"tokens",
".",
"asList",
"(",
")",
"dic",
"=",
"dict",
"(",
"tok",
")",
"if",
"not",
"(",
"len",
"(",
"dic",
")",
"==",
"len",
"(",
"tok",
")",
")",
":",
"raise",
"ParseFatalException",
"(",
"\"Names in group must be unique: %s\"",
"%",
"tokens",
")",
"return",
"ConfGroup",
"(",
"dic",
")"
] | Converts parseResult to ConfGroup type. | [
"Converts",
"parseResult",
"from",
"to",
"ConfGroup",
"type",
"."
] | f3a851ac780da28a42264c24aac51b54fbd63f81 | https://github.com/heinzK1X/pylibconfig2/blob/f3a851ac780da28a42264c24aac51b54fbd63f81/pylibconfig2/parsing.py#L64-L70 | train |
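
The uniqueness check above leans on dict() silently collapsing duplicate keys: if the dict comes out shorter than the pair list, some name occurred twice. The same idea without pyparsing:

def pairs_to_unique_dict(pairs):
    d = dict(pairs)
    if len(d) != len(pairs):
        raise ValueError('Names in group must be unique: %s' % (pairs,))
    return d

print(pairs_to_unique_dict([('a', 1), ('b', 2)]))   # {'a': 1, 'b': 2}
# pairs_to_unique_dict([('a', 1), ('a', 2)])        # raises ValueError
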
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py | LoadDialog.load_elements | def load_elements(self, filename):
"""
loads the elements from file filename
"""
input_data = load_b26_file(filename)
if isinstance(input_data, dict) and self.elements_type in input_data:
return input_data[self.elements_type]
else:
return {} | python | def load_elements(self, filename):
"""
loads the elements from file filename
"""
input_data = load_b26_file(filename)
if isinstance(input_data, dict) and self.elements_type in input_data:
return input_data[self.elements_type]
else:
return {} | [
"def",
"load_elements",
"(",
"self",
",",
"filename",
")",
":",
"input_data",
"=",
"load_b26_file",
"(",
"filename",
")",
"if",
"isinstance",
"(",
"input_data",
",",
"dict",
")",
"and",
"self",
".",
"elements_type",
"in",
"input_data",
":",
"return",
"input_data",
"[",
"self",
".",
"elements_type",
"]",
"else",
":",
"return",
"{",
"}"
] | loads the elements from file filename | [
"loads",
"the",
"elements",
"from",
"file",
"filename"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py#L178-L186 | train |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py | LoadDialog.add_script_sequence | def add_script_sequence(self):
"""
creates a script sequence from the selected script iterator type and the selected scripts, and sends it to the tree
self.tree_loaded
"""
def empty_tree(tree_model):
# empties the tree model and returns the names of the root's children
def add_children_to_list(item, somelist):
if item.hasChildren():
for rownum in range(0, item.rowCount()):
somelist.append(str(item.child(rownum, 0).text()))
output_list = []
root = tree_model.invisibleRootItem()
add_children_to_list(root, output_list)
tree_model.clear()
return output_list
name = str(self.txt_script_sequence_name.text())
new_script_list = empty_tree(self.tree_script_sequence_model)
new_script_dict = {}
for script in new_script_list:
if script in self.elements_old:
new_script_dict.update({script: self.elements_old[script]})
elif script in self.elements_from_file:
new_script_dict.update({script: self.elements_from_file[script]})
new_script_parameter_dict = {}
for index, script in enumerate(new_script_list):
new_script_parameter_dict.update({script: index})
# QtGui.QTextEdit.toPlainText()
# get the module of the current dialogue
package = get_python_package(inspect.getmodule(self).__file__)
assert package is not None # check that we actually find a module
# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')
new_script_dict = {name: {'class': 'ScriptIterator', 'package': package, 'scripts': new_script_dict,
'info': str(self.txt_info.toPlainText()),
'settings': {'script_order': new_script_parameter_dict,
'iterator_type': str(self.cmb_looping_variable.currentText())}}}
self.selected_element_name = name
self.fill_tree(self.tree_loaded, new_script_dict)
self.elements_from_file.update(new_script_dict) | python | def add_script_sequence(self):
"""
creates a script sequence from the selected script iterator type and the selected scripts, and sends it to the tree
self.tree_loaded
"""
def empty_tree(tree_model):
# empties the tree model and returns the names of the root's children
def add_children_to_list(item, somelist):
if item.hasChildren():
for rownum in range(0, item.rowCount()):
somelist.append(str(item.child(rownum, 0).text()))
output_list = []
root = tree_model.invisibleRootItem()
add_children_to_list(root, output_list)
tree_model.clear()
return output_list
name = str(self.txt_script_sequence_name.text())
new_script_list = empty_tree(self.tree_script_sequence_model)
new_script_dict = {}
for script in new_script_list:
if script in self.elements_old:
new_script_dict.update({script: self.elements_old[script]})
elif script in self.elements_from_file:
new_script_dict.update({script: self.elements_from_file[script]})
new_script_parameter_dict = {}
for index, script in enumerate(new_script_list):
new_script_parameter_dict.update({script: index})
# QtGui.QTextEdit.toPlainText()
# get the module of the current dialogue
package = get_python_package(inspect.getmodule(self).__file__)
assert package is not None # check that we actually find a module
# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')
new_script_dict = {name: {'class': 'ScriptIterator', 'package': package, 'scripts': new_script_dict,
'info': str(self.txt_info.toPlainText()),
'settings': {'script_order': new_script_parameter_dict,
'iterator_type': str(self.cmb_looping_variable.currentText())}}}
self.selected_element_name = name
self.fill_tree(self.tree_loaded, new_script_dict)
self.elements_from_file.update(new_script_dict) | [
"def",
"add_script_sequence",
"(",
"self",
")",
":",
"def",
"empty_tree",
"(",
"tree_model",
")",
":",
"# COMMENT_ME",
"def",
"add_children_to_list",
"(",
"item",
",",
"somelist",
")",
":",
"if",
"item",
".",
"hasChildren",
"(",
")",
":",
"for",
"rownum",
"in",
"range",
"(",
"0",
",",
"item",
".",
"rowCount",
"(",
")",
")",
":",
"somelist",
".",
"append",
"(",
"str",
"(",
"item",
".",
"child",
"(",
"rownum",
",",
"0",
")",
".",
"text",
"(",
")",
")",
")",
"output_list",
"=",
"[",
"]",
"root",
"=",
"tree_model",
".",
"invisibleRootItem",
"(",
")",
"add_children_to_list",
"(",
"root",
",",
"output_list",
")",
"tree_model",
".",
"clear",
"(",
")",
"return",
"output_list",
"name",
"=",
"str",
"(",
"self",
".",
"txt_script_sequence_name",
".",
"text",
"(",
")",
")",
"new_script_list",
"=",
"empty_tree",
"(",
"self",
".",
"tree_script_sequence_model",
")",
"new_script_dict",
"=",
"{",
"}",
"for",
"script",
"in",
"new_script_list",
":",
"if",
"script",
"in",
"self",
".",
"elements_old",
":",
"new_script_dict",
".",
"update",
"(",
"{",
"script",
":",
"self",
".",
"elements_old",
"[",
"script",
"]",
"}",
")",
"elif",
"script",
"in",
"self",
".",
"elements_from_file",
":",
"new_script_dict",
".",
"update",
"(",
"{",
"script",
":",
"self",
".",
"elements_from_file",
"[",
"script",
"]",
"}",
")",
"new_script_parameter_dict",
"=",
"{",
"}",
"for",
"index",
",",
"script",
"in",
"enumerate",
"(",
"new_script_list",
")",
":",
"new_script_parameter_dict",
".",
"update",
"(",
"{",
"script",
":",
"index",
"}",
")",
"# QtGui.QTextEdit.toPlainText()",
"# get the module of the current dialogue",
"package",
"=",
"get_python_package",
"(",
"inspect",
".",
"getmodule",
"(",
"self",
")",
".",
"__file__",
")",
"assert",
"package",
"is",
"not",
"None",
"# check that we actually find a module",
"# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')",
"new_script_dict",
"=",
"{",
"name",
":",
"{",
"'class'",
":",
"'ScriptIterator'",
",",
"'package'",
":",
"package",
",",
"'scripts'",
":",
"new_script_dict",
",",
"'info'",
":",
"str",
"(",
"self",
".",
"txt_info",
".",
"toPlainText",
"(",
")",
")",
",",
"'settings'",
":",
"{",
"'script_order'",
":",
"new_script_parameter_dict",
",",
"'iterator_type'",
":",
"str",
"(",
"self",
".",
"cmb_looping_variable",
".",
"currentText",
"(",
")",
")",
"}",
"}",
"}",
"self",
".",
"selected_element_name",
"=",
"name",
"self",
".",
"fill_tree",
"(",
"self",
".",
"tree_loaded",
",",
"new_script_dict",
")",
"self",
".",
"elements_from_file",
".",
"update",
"(",
"new_script_dict",
")"
] | creates a script sequence from the selected script iterator type and the selected scripts, and sends it to the tree
self.tree_loaded | [
"creates",
"a",
"script",
"sequence",
"based",
"on",
"the",
"script",
"iterator",
"type",
"selected",
"and",
"the",
"selected",
"scripts",
"and",
"sends",
"it",
"to",
"the",
"tree",
"self",
".",
"tree_loaded"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py#L247-L295 | train |
moonso/ped_parser | ped_parser/parser.py | cli | def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
outfile, logfile, loglevel):
"""Cli for testing the ped parser."""
from pprint import pprint as pp
my_parser = FamilyParser(family_file, family_type)
if to_json:
if outfile:
outfile.write(my_parser.to_json())
else:
print(my_parser.to_json())
elif to_madeline:
for line in my_parser.to_madeline():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_ped:
for line in my_parser.to_ped():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_dict:
pp(my_parser.to_dict()) | python | def cli(family_file, family_type, to_json, to_madeline, to_ped, to_dict,
outfile, logfile, loglevel):
"""Cli for testing the ped parser."""
from pprint import pprint as pp
my_parser = FamilyParser(family_file, family_type)
if to_json:
if outfile:
outfile.write(my_parser.to_json())
else:
print(my_parser.to_json())
elif to_madeline:
for line in my_parser.to_madeline():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_ped:
for line in my_parser.to_ped():
if outfile:
outfile.write(line + '\n')
else:
print(line)
elif to_dict:
pp(my_parser.to_dict()) | [
"def",
"cli",
"(",
"family_file",
",",
"family_type",
",",
"to_json",
",",
"to_madeline",
",",
"to_ped",
",",
"to_dict",
",",
"outfile",
",",
"logfile",
",",
"loglevel",
")",
":",
"from",
"pprint",
"import",
"pprint",
"as",
"pp",
"my_parser",
"=",
"FamilyParser",
"(",
"family_file",
",",
"family_type",
")",
"if",
"to_json",
":",
"if",
"outfile",
":",
"outfile",
".",
"write",
"(",
"my_parser",
".",
"to_json",
"(",
")",
")",
"else",
":",
"print",
"(",
"my_parser",
".",
"to_json",
"(",
")",
")",
"elif",
"to_madeline",
":",
"for",
"line",
"in",
"my_parser",
".",
"to_madeline",
"(",
")",
":",
"if",
"outfile",
":",
"outfile",
".",
"write",
"(",
"line",
"+",
"'\\n'",
")",
"else",
":",
"print",
"(",
"line",
")",
"elif",
"to_ped",
":",
"for",
"line",
"in",
"my_parser",
".",
"to_ped",
"(",
")",
":",
"if",
"outfile",
":",
"outfile",
".",
"write",
"(",
"line",
"+",
"'\\n'",
")",
"else",
":",
"print",
"(",
"line",
")",
"elif",
"to_dict",
":",
"pp",
"(",
"my_parser",
".",
"to_dict",
"(",
")",
")"
] | Cli for testing the ped parser. | [
"Cli",
"for",
"testing",
"the",
"ped",
"parser",
"."
] | a7393e47139532782ea3c821aabea33d46f94323 | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L641-L668 | train |
moonso/ped_parser | ped_parser/parser.py | FamilyParser.check_line_length | def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formatted. Raise a WrongLineFormat if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATTED PED LINE!',
ped_line = '\t'.join(splitted_line))
return | python | def check_line_length(self, splitted_line, expected_length):
"""
Check if the line is correctly formatted. Raise a WrongLineFormat if it is not.
"""
if len(splitted_line) != expected_length:
raise WrongLineFormat(
message='WRONG FORMATTED PED LINE!',
ped_line = '\t'.join(splitted_line))
return | [
"def",
"check_line_length",
"(",
"self",
",",
"splitted_line",
",",
"expected_length",
")",
":",
"if",
"len",
"(",
"splitted_line",
")",
"!=",
"expected_length",
":",
"raise",
"WrongLineFormat",
"(",
"message",
"=",
"'WRONG FORMATED PED LINE!'",
",",
"ped_line",
"=",
"'\\t'",
".",
"join",
"(",
"splitted_line",
")",
")",
"return"
] | Check if the line is correctly formatted. Raise a WrongLineFormat if it is not. | [
"Check",
"if",
"the",
"line",
"is",
"correctly",
"formated",
".",
"Throw",
"a",
"SyntaxError",
"if",
"it",
"is",
"not",
"."
] | a7393e47139532782ea3c821aabea33d46f94323 | https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/parser.py#L195-L203 | train |
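
The same length check applied to a standard 6-column PED line (family, individual, father, mother, sex, phenotype); a plain SyntaxError stands in for the project's WrongLineFormat exception:

def check_ped_line(splitted_line, expected_length=6):
    if len(splitted_line) != expected_length:
        raise SyntaxError('WRONG FORMATTED PED LINE: %s' % '\t'.join(splitted_line))

check_ped_line('fam1 ind1 0 0 1 2'.split())   # 6 columns: passes silently
# check_ped_line('fam1 ind1 0 0 1'.split())   # 5 columns: raises SyntaxError
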
PeerAssets/pypeerassets | pypeerassets/voting.py | deck_vote_tag | def deck_vote_tag(deck: Deck) -> str:
'''deck vote tag address'''
if deck.id is None:
raise Exception("deck.id is required")
deck_vote_tag_privkey = sha256(unhexlify(deck.id) + b"vote_init").hexdigest()
deck_vote_tag_address = Kutil(network=deck.network, privkey=bytearray.fromhex(deck_vote_tag_privkey))
return deck_vote_tag_address.address | python | def deck_vote_tag(deck: Deck) -> str:
'''deck vote tag address'''
if deck.id is None:
raise Exception("deck.id is required")
deck_vote_tag_privkey = sha256(unhexlify(deck.id) + b"vote_init").hexdigest()
deck_vote_tag_address = Kutil(network=deck.network, privkey=bytearray.fromhex(deck_vote_tag_privkey))
return deck_vote_tag_address.address | [
"def",
"deck_vote_tag",
"(",
"deck",
":",
"Deck",
")",
"->",
"str",
":",
"if",
"deck",
".",
"id",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"deck.id is required\"",
")",
"deck_vote_tag_privkey",
"=",
"sha256",
"(",
"unhexlify",
"(",
"deck",
".",
"id",
")",
"+",
"b\"vote_init\"",
")",
".",
"hexdigest",
"(",
")",
"deck_vote_tag_address",
"=",
"Kutil",
"(",
"network",
"=",
"deck",
".",
"network",
",",
"privkey",
"=",
"bytearray",
".",
"fromhex",
"(",
"deck_vote_tag_privkey",
")",
")",
"return",
"deck_vote_tag_address",
".",
"address"
] | deck vote tag address | [
"deck",
"vote",
"tag",
"address"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L14-L22 | train |
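
A dependency-free sketch of the tagging scheme above: the tag's private key is sha256(deck_txid_bytes + b"vote_init"); turning that key into an address is delegated to Kutil in the real code:

from binascii import unhexlify
from hashlib import sha256

def deck_vote_tag_privkey(deck_id_hex: str) -> str:
    return sha256(unhexlify(deck_id_hex) + b"vote_init").hexdigest()

print(deck_vote_tag_privkey('aa' * 32))  # deterministic per deck id
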
PeerAssets/pypeerassets | pypeerassets/voting.py | parse_vote_info | def parse_vote_info(protobuf: bytes) -> dict:
'''decode vote init tx op_return protobuf message and validate it.'''
vote = pavoteproto.Vote()
vote.ParseFromString(protobuf)
assert vote.version > 0, {"error": "Vote info incomplete, version can't be 0."}
assert vote.start_block < vote.end_block, {"error": "vote can't end in the past."}
return {
"version": vote.version,
"description": vote.description,
"count_mode": vote.MODE.Name(vote.count_mode),
"choices": vote.choices,
"start_block": vote.start_block,
"end_block": vote.end_block,
"vote_metainfo": vote.vote_metainfo
} | python | def parse_vote_info(protobuf: bytes) -> dict:
'''decode vote init tx op_return protobuf message and validate it.'''
vote = pavoteproto.Vote()
vote.ParseFromString(protobuf)
assert vote.version > 0, {"error": "Vote info incomplete, version can't be 0."}
assert vote.start_block < vote.end_block, {"error": "vote can't end in the past."}
return {
"version": vote.version,
"description": vote.description,
"count_mode": vote.MODE.Name(vote.count_mode),
"choices": vote.choices,
"start_block": vote.start_block,
"end_block": vote.end_block,
"vote_metainfo": vote.vote_metainfo
} | [
"def",
"parse_vote_info",
"(",
"protobuf",
":",
"bytes",
")",
"->",
"dict",
":",
"vote",
"=",
"pavoteproto",
".",
"Vote",
"(",
")",
"vote",
".",
"ParseFromString",
"(",
"protobuf",
")",
"assert",
"vote",
".",
"version",
">",
"0",
",",
"{",
"\"error\"",
":",
"\"Vote info incomplete, version can't be 0.\"",
"}",
"assert",
"vote",
".",
"start_block",
"<",
"vote",
".",
"end_block",
",",
"{",
"\"error\"",
":",
"\"vote can't end in the past.\"",
"}",
"return",
"{",
"\"version\"",
":",
"vote",
".",
"version",
",",
"\"description\"",
":",
"vote",
".",
"description",
",",
"\"count_mode\"",
":",
"vote",
".",
"MODE",
".",
"Name",
"(",
"vote",
".",
"count_mode",
")",
",",
"\"choices\"",
":",
"vote",
".",
"choices",
",",
"\"start_block\"",
":",
"vote",
".",
"start_block",
",",
"\"end_block\"",
":",
"vote",
".",
"end_block",
",",
"\"vote_metainfo\"",
":",
"vote",
".",
"vote_metainfo",
"}"
] | decode vote init tx op_return protobuf message and validate it. | [
"decode",
"vote",
"init",
"tx",
"op_return",
"protobuf",
"message",
"and",
"validate",
"it",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L101-L118 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | vote_init | def vote_init(vote: Vote, inputs: dict, change_address: str) -> bytes:
'''initialize vote transaction, must be signed by the deck_issuer privkey'''
network_params = net_query(vote.deck.network)
deck_vote_tag_address = deck_vote_tag(vote.deck)
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(deck_vote_tag_address)},
{"redeem": 0, "outputScript": transactions.op_return_script(vote.to_protobuf)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs['utxos'], outputs) | python | def vote_init(vote: Vote, inputs: dict, change_address: str) -> bytes:
'''initialize vote transaction, must be signed by the deck_issuer privkey'''
network_params = net_query(vote.deck.network)
deck_vote_tag_address = deck_vote_tag(vote.deck)
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(deck_vote_tag_address)},
{"redeem": 0, "outputScript": transactions.op_return_script(vote.to_protobuf)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs['utxos'], outputs) | [
"def",
"vote_init",
"(",
"vote",
":",
"Vote",
",",
"inputs",
":",
"dict",
",",
"change_address",
":",
"str",
")",
"->",
"bytes",
":",
"network_params",
"=",
"net_query",
"(",
"vote",
".",
"deck",
".",
"network",
")",
"deck_vote_tag_address",
"=",
"deck_vote_tag",
"(",
"vote",
".",
"deck",
")",
"tx_fee",
"=",
"network_params",
".",
"min_tx_fee",
"# settle for min tx fee for now",
"for",
"utxo",
"in",
"inputs",
"[",
"'utxos'",
"]",
":",
"utxo",
"[",
"'txid'",
"]",
"=",
"unhexlify",
"(",
"utxo",
"[",
"'txid'",
"]",
")",
"utxo",
"[",
"'scriptSig'",
"]",
"=",
"unhexlify",
"(",
"utxo",
"[",
"'scriptSig'",
"]",
")",
"outputs",
"=",
"[",
"{",
"\"redeem\"",
":",
"0.01",
",",
"\"outputScript\"",
":",
"transactions",
".",
"monosig_script",
"(",
"deck_vote_tag_address",
")",
"}",
",",
"{",
"\"redeem\"",
":",
"0",
",",
"\"outputScript\"",
":",
"transactions",
".",
"op_return_script",
"(",
"vote",
".",
"to_protobuf",
")",
"}",
",",
"{",
"\"redeem\"",
":",
"float",
"(",
"inputs",
"[",
"'total'",
"]",
")",
"-",
"float",
"(",
"tx_fee",
")",
"-",
"float",
"(",
"0.01",
")",
",",
"\"outputScript\"",
":",
"transactions",
".",
"monosig_script",
"(",
"change_address",
")",
"}",
"]",
"return",
"transactions",
".",
"make_raw_transaction",
"(",
"inputs",
"[",
"'utxos'",
"]",
",",
"outputs",
")"
] | initialize vote transaction, must be signed by the deck_issuer privkey | [
"initialize",
"vote",
"transaction",
"must",
"be",
"signed",
"by",
"the",
"deck_issuer",
"privkey"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L121-L140 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | find_vote_inits | def find_vote_inits(provider: Provider, deck: Deck) -> Iterable[Vote]:
'''find vote_inits on this deck'''
vote_ints = provider.listtransactions(deck_vote_tag(deck))
for txid in vote_ints:
try:
raw_vote = provider.getrawtransaction(txid)
vote = parse_vote_info(read_tx_opreturn(raw_vote))
vote["vote_id"] = txid
vote["sender"] = find_tx_sender(provider, raw_vote)
vote["deck"] = deck
yield Vote(**vote)
except AssertionError:
pass | python | def find_vote_inits(provider: Provider, deck: Deck) -> Iterable[Vote]:
'''find vote_inits on this deck'''
vote_ints = provider.listtransactions(deck_vote_tag(deck))
for txid in vote_ints:
try:
raw_vote = provider.getrawtransaction(txid)
vote = parse_vote_info(read_tx_opreturn(raw_vote))
vote["vote_id"] = txid
vote["sender"] = find_tx_sender(provider, raw_vote)
vote["deck"] = deck
yield Vote(**vote)
except AssertionError:
pass | [
"def",
"find_vote_inits",
"(",
"provider",
":",
"Provider",
",",
"deck",
":",
"Deck",
")",
"->",
"Iterable",
"[",
"Vote",
"]",
":",
"vote_ints",
"=",
"provider",
".",
"listtransactions",
"(",
"deck_vote_tag",
"(",
"deck",
")",
")",
"for",
"txid",
"in",
"vote_ints",
":",
"try",
":",
"raw_vote",
"=",
"provider",
".",
"getrawtransaction",
"(",
"txid",
")",
"vote",
"=",
"parse_vote_info",
"(",
"read_tx_opreturn",
"(",
"raw_vote",
")",
")",
"vote",
"[",
"\"vote_id\"",
"]",
"=",
"txid",
"vote",
"[",
"\"sender\"",
"]",
"=",
"find_tx_sender",
"(",
"provider",
",",
"raw_vote",
")",
"vote",
"[",
"\"deck\"",
"]",
"=",
"deck",
"yield",
"Vote",
"(",
"*",
"*",
"vote",
")",
"except",
"AssertionError",
":",
"pass"
] | find vote_inits on this deck | [
"find",
"vote_inits",
"on",
"this",
"deck"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L143-L157 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | vote_cast | def vote_cast(vote: Vote, choice_index: int, inputs: dict,
change_address: str) -> bytes:
'''vote cast transaction'''
network_params = net_query(vote.deck.network)
vote_cast_addr = vote.vote_choice_address[choice_index]
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(vote_cast_addr)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs['utxos'], outputs) | python | def vote_cast(vote: Vote, choice_index: int, inputs: dict,
change_address: str) -> bytes:
'''vote cast transaction'''
network_params = net_query(vote.deck.network)
vote_cast_addr = vote.vote_choice_address[choice_index]
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(vote_cast_addr)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs['utxos'], outputs) | [
"def",
"vote_cast",
"(",
"vote",
":",
"Vote",
",",
"choice_index",
":",
"int",
",",
"inputs",
":",
"dict",
",",
"change_address",
":",
"str",
")",
"->",
"bytes",
":",
"network_params",
"=",
"net_query",
"(",
"vote",
".",
"deck",
".",
"network",
")",
"vote_cast_addr",
"=",
"vote",
".",
"vote_choice_address",
"[",
"choice_index",
"]",
"tx_fee",
"=",
"network_params",
".",
"min_tx_fee",
"# settle for min tx fee for now",
"for",
"utxo",
"in",
"inputs",
"[",
"'utxos'",
"]",
":",
"utxo",
"[",
"'txid'",
"]",
"=",
"unhexlify",
"(",
"utxo",
"[",
"'txid'",
"]",
")",
"utxo",
"[",
"'scriptSig'",
"]",
"=",
"unhexlify",
"(",
"utxo",
"[",
"'scriptSig'",
"]",
")",
"outputs",
"=",
"[",
"{",
"\"redeem\"",
":",
"0.01",
",",
"\"outputScript\"",
":",
"transactions",
".",
"monosig_script",
"(",
"vote_cast_addr",
")",
"}",
",",
"{",
"\"redeem\"",
":",
"float",
"(",
"inputs",
"[",
"'total'",
"]",
")",
"-",
"float",
"(",
"tx_fee",
")",
"-",
"float",
"(",
"0.01",
")",
",",
"\"outputScript\"",
":",
"transactions",
".",
"monosig_script",
"(",
"change_address",
")",
"}",
"]",
"return",
"transactions",
".",
"make_raw_transaction",
"(",
"inputs",
"[",
"'utxos'",
"]",
",",
"outputs",
")"
] | vote cast transaction | [
"vote",
"cast",
"transaction"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L160-L179 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | find_vote_casts | def find_vote_casts(provider: Provider, vote: Vote, choice_index: int) -> Iterable[VoteCast]:
'''find and verify vote_casts on this vote_choice_address'''
vote_casts = provider.listtransactions(vote.vote_choice_address[choice_index])
for tx in vote_casts:
raw_tx = provider.getrawtransaction(tx, 1)
sender = find_tx_sender(provider, raw_tx)
confirmations = raw_tx["confirmations"]
blocknum = provider.getblock(raw_tx["blockhash"])["height"]
yield VoteCast(vote, sender, blocknum, confirmations, raw_tx["blocktime"]) | python | def find_vote_casts(provider: Provider, vote: Vote, choice_index: int) -> Iterable[VoteCast]:
'''find and verify vote_casts on this vote_choice_address'''
vote_casts = provider.listtransactions(vote.vote_choice_address[choice_index])
for tx in vote_casts:
raw_tx = provider.getrawtransaction(tx, 1)
sender = find_tx_sender(provider, raw_tx)
confirmations = raw_tx["confirmations"]
blocknum = provider.getblock(raw_tx["blockhash"])["height"]
yield VoteCast(vote, sender, blocknum, confirmations, raw_tx["blocktime"]) | [
"def",
"find_vote_casts",
"(",
"provider",
":",
"Provider",
",",
"vote",
":",
"Vote",
",",
"choice_index",
":",
"int",
")",
"->",
"Iterable",
"[",
"VoteCast",
"]",
":",
"vote_casts",
"=",
"provider",
".",
"listtransactions",
"(",
"vote",
".",
"vote_choice_address",
"[",
"choice_index",
"]",
")",
"for",
"tx",
"in",
"vote_casts",
":",
"raw_tx",
"=",
"provider",
".",
"getrawtransaction",
"(",
"tx",
",",
"1",
")",
"sender",
"=",
"find_tx_sender",
"(",
"provider",
",",
"raw_tx",
")",
"confirmations",
"=",
"raw_tx",
"[",
"\"confirmations\"",
"]",
"blocknum",
"=",
"provider",
".",
"getblock",
"(",
"raw_tx",
"[",
"\"blockhash\"",
"]",
")",
"[",
"\"height\"",
"]",
"yield",
"VoteCast",
"(",
"vote",
",",
"sender",
",",
"blocknum",
",",
"confirmations",
",",
"raw_tx",
"[",
"\"blocktime\"",
"]",
")"
] | find and verify vote_casts on this vote_choice_address | [
"find",
"and",
"verify",
"vote_casts",
"on",
"this",
"vote_choice_address"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L207-L217 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | Vote.to_protobuf | def to_protobuf(self) -> str:
'''encode vote into protobuf'''
vote = pavoteproto.Vote()
vote.version = self.version
vote.description = self.description
vote.count_mode = vote.MODE.Value(self.count_mode)
vote.start_block = self.start_block
vote.end_block = self.end_block
vote.choices.extend(self.choices)
if not isinstance(self.vote_metainfo, bytes):
vote.vote_metainfo = self.vote_metainfo.encode()
else:
vote.vote_metainfo = self.vote_metainfo
proto = vote.SerializeToString()
if len(proto) > 80:
warnings.warn('\nMetainfo size exceeds maximum of 80 bytes allowed by OP_RETURN.')
return proto | python | def to_protobuf(self) -> str:
'''encode vote into protobuf'''
vote = pavoteproto.Vote()
vote.version = self.version
vote.description = self.description
vote.count_mode = vote.MODE.Value(self.count_mode)
vote.start_block = self.start_block
vote.end_block = self.end_block
vote.choices.extend(self.choices)
if not isinstance(self.vote_metainfo, bytes):
vote.vote_metainfo = self.vote_metainfo.encode()
else:
vote.vote_metainfo = self.vote_metainfo
proto = vote.SerializeToString()
if len(proto) > 80:
warnings.warn('\nMetainfo size exceeds maximum of 80 bytes allowed by OP_RETURN.')
return proto | [
"def",
"to_protobuf",
"(",
"self",
")",
"->",
"str",
":",
"vote",
"=",
"pavoteproto",
".",
"Vote",
"(",
")",
"vote",
".",
"version",
"=",
"self",
".",
"version",
"vote",
".",
"description",
"=",
"self",
".",
"description",
"vote",
".",
"count_mode",
"=",
"vote",
".",
"MODE",
".",
"Value",
"(",
"self",
".",
"count_mode",
")",
"vote",
".",
"start_block",
"=",
"self",
".",
"start_block",
"vote",
".",
"end_block",
"=",
"self",
".",
"end_block",
"vote",
".",
"choices",
".",
"extend",
"(",
"self",
".",
"choices",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"vote_metainfo",
",",
"bytes",
")",
":",
"vote",
".",
"vote_metainfo",
"=",
"self",
".",
"vote_metainfo",
".",
"encode",
"(",
")",
"else",
":",
"vote",
".",
"vote_metainfo",
"=",
"self",
".",
"vote_metainfo",
"proto",
"=",
"vote",
".",
"SerializeToString",
"(",
")",
"if",
"len",
"(",
"proto",
")",
">",
"80",
":",
"warnings",
".",
"warn",
"(",
"'\\nMetainfo size exceeds maximum of 80 bytes allowed by OP_RETURN.'",
")",
"return",
"proto"
] | encode vote into protobuf | [
"encode",
"vote",
"into",
"protobuf"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L44-L65 | train |
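
The 80-byte guard matters because the serialized message has to fit into a single OP_RETURN output; the same check on arbitrary payloads:

import warnings

def check_op_return_size(payload: bytes, limit: int = 80) -> bytes:
    if len(payload) > limit:
        warnings.warn('Metainfo size exceeds maximum of %d bytes allowed by OP_RETURN.' % limit)
    return payload

check_op_return_size(b'x' * 79)  # fits
check_op_return_size(b'x' * 81)  # emits the warning
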
PeerAssets/pypeerassets | pypeerassets/voting.py | Vote.to_dict | def to_dict(self) -> dict:
'''vote info as dict'''
return {
"version": self.version,
"description": self.description,
"count_mode": self.count_mode,
"start_block": self.start_block,
"end_block": self.end_block,
"choices": self.choices,
"vote_metainfo": self.vote_metainfo
} | python | def to_dict(self) -> dict:
'''vote info as dict'''
return {
"version": self.version,
"description": self.description,
"count_mode": self.count_mode,
"start_block": self.start_block,
"end_block": self.end_block,
"choices": self.choices,
"vote_metainfo": self.vote_metainfo
} | [
"def",
"to_dict",
"(",
"self",
")",
"->",
"dict",
":",
"return",
"{",
"\"version\"",
":",
"self",
".",
"version",
",",
"\"description\"",
":",
"self",
".",
"description",
",",
"\"count_mode\"",
":",
"self",
".",
"count_mode",
",",
"\"start_block\"",
":",
"self",
".",
"start_block",
",",
"\"end_block\"",
":",
"self",
".",
"end_block",
",",
"\"choices\"",
":",
"self",
".",
"choices",
",",
"\"vote_metainfo\"",
":",
"self",
".",
"vote_metainfo",
"}"
] | vote info as dict | [
"vote",
"info",
"as",
"dict"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L68-L79 | train |
PeerAssets/pypeerassets | pypeerassets/voting.py | Vote.vote_choice_address | def vote_choice_address(self) -> List[str]:
'''calculate the addresses on which the vote is cast.'''
if self.vote_id is None:
raise Exception("vote_id is required")
addresses = []
vote_init_txid = unhexlify(self.vote_id)
for choice in self.choices:
vote_cast_privkey = sha256(vote_init_txid + bytes(
list(self.choices).index(choice))
).hexdigest()
addresses.append(Kutil(network=self.deck.network,
privkey=bytearray.fromhex(vote_cast_privkey)).address)
return addresses | python | def vote_choice_address(self) -> List[str]:
'''calculate the addresses on which the vote is cast.'''
if self.vote_id is None:
raise Exception("vote_id is required")
addresses = []
vote_init_txid = unhexlify(self.vote_id)
for choice in self.choices:
vote_cast_privkey = sha256(vote_init_txid + bytes(
list(self.choices).index(choice))
).hexdigest()
addresses.append(Kutil(network=self.deck.network,
privkey=bytearray.fromhex(vote_cast_privkey)).address)
return addresses | [
"def",
"vote_choice_address",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"self",
".",
"vote_id",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"vote_id is required\"",
")",
"addresses",
"=",
"[",
"]",
"vote_init_txid",
"=",
"unhexlify",
"(",
"self",
".",
"vote_id",
")",
"for",
"choice",
"in",
"self",
".",
"choices",
":",
"vote_cast_privkey",
"=",
"sha256",
"(",
"vote_init_txid",
"+",
"bytes",
"(",
"list",
"(",
"self",
".",
"choices",
")",
".",
"index",
"(",
"choice",
")",
")",
")",
".",
"hexdigest",
"(",
")",
"addresses",
".",
"append",
"(",
"Kutil",
"(",
"network",
"=",
"self",
".",
"deck",
".",
"network",
",",
"privkey",
"=",
"bytearray",
".",
"fromhex",
"(",
"vote_cast_privkey",
")",
")",
".",
"address",
")",
"return",
"addresses"
] | calculate the addresses on which the vote is cast. | [
"calculate",
"the",
"addresses",
"on",
"which",
"the",
"vote",
"is",
"casted",
"."
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L82-L98 | train |
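
The per-choice derivation in isolation: each choice key is sha256(vote_init_txid + bytes(index)). Note that under Python 3, bytes(index) builds an index-length zero buffer (b'', b'\x00', b'\x00\x00', ...); the sketch reproduces that behavior verbatim rather than second-guessing the original:

from binascii import unhexlify
from hashlib import sha256

def choice_privkeys(vote_id_hex, choices):
    txid = unhexlify(vote_id_hex)
    return [sha256(txid + bytes(i)).hexdigest() for i, _ in enumerate(choices)]

print(choice_privkeys('bb' * 32, ['yes', 'no', 'abstain']))  # three distinct keys
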
PeerAssets/pypeerassets | pypeerassets/voting.py | VoteCast.is_valid | def is_valid(self) -> bool:
'''check if VoteCast is valid'''
if not (self.blocknum >= self.vote.start_block and
self.blocknum <= self.vote.end_block):
return False
if not self.confirmations >= 6:
return False
return True | python | def is_valid(self) -> bool:
'''check if VoteCast is valid'''
if not (self.blocknum >= self.vote.start_block and
self.blocknum <= self.vote.end_block):
return False
if not self.confirmations >= 6:
return False
return True | [
"def",
"is_valid",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"not",
"(",
"self",
".",
"blocknum",
">=",
"self",
".",
"vote",
".",
"start_block",
"and",
"self",
".",
"blocknum",
"<=",
"self",
".",
"vote",
".",
"end_block",
")",
":",
"return",
"False",
"if",
"not",
"self",
".",
"confirmations",
">=",
"6",
":",
"return",
"False",
"return",
"True"
] | check if VoteCast is valid | [
"check",
"if",
"VoteCast",
"is",
"valid"
] | 8927b4a686887f44fe2cd9de777e2c827c948987 | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/voting.py#L194-L204 | train |
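
The validity rule above reduced to a predicate: a cast counts only when it landed inside the vote's block window and has at least six confirmations:

def vote_cast_is_valid(blocknum, start_block, end_block, confirmations):
    return start_block <= blocknum <= end_block and confirmations >= 6

print(vote_cast_is_valid(105, 100, 200, 7))  # True
print(vote_cast_is_valid(105, 100, 200, 3))  # False: too few confirmations
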
nmdp-bioinformatics/SeqAnn | seqann/models/reference_data.py | ReferenceData.search_refdata | def search_refdata(self, seq, locus):
"""
This checks to see if a sequence already exists in the reference data. If it does, then it'll return the known annotation.
:return: The Annotation associated with the input sequence
:rtype: :ref:`ann`
Example:
>>> from Bio.Seq import Seq
>>> from seqann.models.reference_data import ReferenceData
>>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> refdata = ReferenceData()
>>> matched_annotation = refdata.search_refdata(sequence, locus)
"""
# TODO: ONLY MAKE ONE CONNECTION
# TODO: add try statement
# TODO: take password from environment variable
if self.server_avail:
hla, loc = locus.split('-')
p1 = "SELECT ent.name "
p2 = "FROM bioentry ent,biosequence seq,biodatabase dbb "
p3 = "WHERE dbb.biodatabase_id = ent.biodatabase_id AND seq.bioentry_id = ent.bioentry_id "
p4 = " AND dbb.name = \"" + self.dbversion + "_" + loc + "\""
p5 = " AND seq.seq = \"" + str(seq.seq) + "\""
select_stm = p1 + p2 + p3 + p4 + p5
# TODO: add try statement
conn = pymysql.connect(host=biosqlhost, port=biosqlport,
user=biosqluser, passwd=biosqlpass,
db=biosqldb)
cur = conn.cursor()
cur.execute(select_stm)
typ = ''
for row in cur:
typ = row[0]
cur.close()
conn.close()
if typ:
if self.verbose:
self.logger.info("Exact typing found in BioSQL database")
seqrecord = self.seqrecord(typ, loc)
return self.seqannotation(seqrecord, typ, loc)
else:
return
else:
if str(seq.seq) in self.seqref:
if self.verbose:
self.logger.info("Exact typing found in dat file")
seqrec = self.hlaref[self.seqref[str(seq.seq)]]
return self.seqannotation(seqrec,
self.seqref[str(seq.seq)],
locus)
else:
return | python | def search_refdata(self, seq, locus):
"""
This checks to see if a sequence already exists in the reference data. If it does, then it'll return the known annotation.
:return: The Annotation associated with the input sequence
:rtype: :ref:`ann`
Example:
>>> from Bio.Seq import Seq
>>> from seqann.models.reference_data import ReferenceData
>>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> refdata = ReferenceData()
>>> matched_annotation = refdata.search_refdata(sequence, locus)
"""
# TODO: ONLY MAKE ONE CONNECTION
# TODO: add try statement
# TODO: take password from environment variable
if self.server_avail:
hla, loc = locus.split('-')
p1 = "SELECT ent.name "
p2 = "FROM bioentry ent,biosequence seq,biodatabase dbb "
p3 = "WHERE dbb.biodatabase_id = ent.biodatabase_id AND seq.bioentry_id = ent.bioentry_id "
p4 = " AND dbb.name = \"" + self.dbversion + "_" + loc + "\""
p5 = " AND seq.seq = \"" + str(seq.seq) + "\""
select_stm = p1 + p2 + p3 + p4 + p5
# TODO: add try statement
conn = pymysql.connect(host=biosqlhost, port=biosqlport,
user=biosqluser, passwd=biosqlpass,
db=biosqldb)
cur = conn.cursor()
cur.execute(select_stm)
typ = ''
for row in cur:
typ = row[0]
cur.close()
conn.close()
if typ:
if self.verbose:
self.logger.info("Exact typing found in BioSQL database")
seqrecord = self.seqrecord(typ, loc)
return self.seqannotation(seqrecord, typ, loc)
else:
return
else:
if str(seq.seq) in self.seqref:
if self.verbose:
self.logger.info("Exact typing found in dat file")
seqrec = self.hlaref[self.seqref[str(seq.seq)]]
return self.seqannotation(seqrec,
self.seqref[str(seq.seq)],
locus)
else:
return | [
"def",
"search_refdata",
"(",
"self",
",",
"seq",
",",
"locus",
")",
":",
"# TODO: ONLY MAKE ONE CONNECTION",
"# TODO: add try statement",
"# TODO: take password from environment variable",
"if",
"self",
".",
"server_avail",
":",
"hla",
",",
"loc",
"=",
"locus",
".",
"split",
"(",
"'-'",
")",
"p1",
"=",
"\"SELECT ent.name \"",
"p2",
"=",
"\"FROM bioentry ent,biosequence seq,biodatabase dbb \"",
"p3",
"=",
"\"WHERE dbb.biodatabase_id = ent.biodatabase_id AND seq.bioentry_id = ent.bioentry_id \"",
"p4",
"=",
"\" AND dbb.name = \\\"\"",
"+",
"self",
".",
"dbversion",
"+",
"\"_\"",
"+",
"loc",
"+",
"\"\\\"\"",
"p5",
"=",
"\" AND seq.seq = \\\"\"",
"+",
"str",
"(",
"seq",
".",
"seq",
")",
"+",
"\"\\\"\"",
"select_stm",
"=",
"p1",
"+",
"p2",
"+",
"p3",
"+",
"p4",
"+",
"p5",
"# TODO: add try statement",
"conn",
"=",
"pymysql",
".",
"connect",
"(",
"host",
"=",
"biosqlhost",
",",
"port",
"=",
"biosqlport",
",",
"user",
"=",
"biosqluser",
",",
"passwd",
"=",
"biosqlpass",
",",
"db",
"=",
"biosqldb",
")",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"select_stm",
")",
"typ",
"=",
"''",
"for",
"row",
"in",
"cur",
":",
"typ",
"=",
"row",
"[",
"0",
"]",
"cur",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")",
"if",
"typ",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Exact typing found in BioSQL database\"",
")",
"seqrecord",
"=",
"self",
".",
"seqrecord",
"(",
"typ",
",",
"loc",
")",
"return",
"self",
".",
"seqannotation",
"(",
"seqrecord",
",",
"typ",
",",
"loc",
")",
"else",
":",
"return",
"else",
":",
"if",
"str",
"(",
"seq",
".",
"seq",
")",
"in",
"self",
".",
"seqref",
":",
"if",
"self",
".",
"verbose",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Exact typing found in dat file\"",
")",
"seqrec",
"=",
"self",
".",
"hlaref",
"[",
"self",
".",
"seqref",
"[",
"str",
"(",
"seq",
".",
"seq",
")",
"]",
"]",
"return",
"self",
".",
"seqannotation",
"(",
"seqrec",
",",
"self",
".",
"seqref",
"[",
"str",
"(",
"seq",
".",
"seq",
")",
"]",
",",
"locus",
")",
"else",
":",
"return"
] | This checks to see if a sequence already exists in the reference data. If it does, then it'll return the known annotation.
:return: The Annotation associated with the input sequence
:rtype: :ref:`ann`
Example:
>>> from Bio.Seq import Seq
>>> from seqann.models.reference_data import ReferenceData
>>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
>>> refdata = ReferenceData()
>>> matched_annotation = refdata.search_refdata(sequence, locus) | [
"This",
"checks",
"to",
"see",
"if",
"a",
"sequence",
"already",
"exists",
"in",
"the",
"reference",
"data",
".",
"If",
"it",
"does",
"then",
"it",
"ll",
"return",
"the",
"known",
"annotation",
"."
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/models/reference_data.py#L621-L679 | train |
LISE-B26/pylabcontrol | build/lib/pylabcontrol/src/core/parameter.py | Parameter.update | def update(self, *args):
"""
updates the values of the parameter, just as a regular dictionary
"""
for d in args:
for (key, value) in d.items():
self.__setitem__(key, value) | python | def update(self, *args):
"""
updates the values of the parameter, just as a regular dictionary
"""
for d in args:
for (key, value) in d.items():
self.__setitem__(key, value) | [
"def",
"update",
"(",
"self",
",",
"*",
"args",
")",
":",
"for",
"d",
"in",
"args",
":",
"for",
"(",
"key",
",",
"value",
")",
"in",
"d",
".",
"items",
"(",
")",
":",
"self",
".",
"__setitem__",
"(",
"key",
",",
"value",
")"
] | updates the values of the parameter, just as a regular dictionary | [
"updates",
"the",
"values",
"of",
"the",
"parameter",
"just",
"as",
"a",
"regular",
"dictionary"
] | 67482e5157fcd1c40705e5c2cacfb93564703ed0 | https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/parameter.py#L110-L116 | train |
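Because `update` routes every key through `__setitem__`, it behaves like `dict.update` with the class's validation applied; a minimal sketch, assuming the `Parameter` constructor accepts a plain dict (the constructor is not shown in this entry):

p = Parameter({'frequency': 1.0e6, 'amplitude': 0.5})  # assumed constructor form
p.update({'frequency': 2.0e6}, {'amplitude': 0.7})     # each pair applied via __setitem__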
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.error | def error(msg, log_file=None):
"""Print, output error message and raise RuntimeError."""
UtilClass.print_msg(msg + os.linesep)
if log_file is not None:
UtilClass.writelog(log_file, msg, 'append')
raise RuntimeError(msg) | python | def error(msg, log_file=None):
"""Print, output error message and raise RuntimeError."""
UtilClass.print_msg(msg + os.linesep)
if log_file is not None:
UtilClass.writelog(log_file, msg, 'append')
raise RuntimeError(msg) | [
"def",
"error",
"(",
"msg",
",",
"log_file",
"=",
"None",
")",
":",
"UtilClass",
".",
"print_msg",
"(",
"msg",
"+",
"os",
".",
"linesep",
")",
"if",
"log_file",
"is",
"not",
"None",
":",
"UtilClass",
".",
"writelog",
"(",
"log_file",
",",
"msg",
",",
"'append'",
")",
"raise",
"RuntimeError",
"(",
"msg",
")"
] | Print, output error message and raise RuntimeError. | [
"Print",
"output",
"error",
"message",
"and",
"raise",
"RuntimeError",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L96-L101 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.log | def log(lines, log_file=None):
"""Output log message."""
err = False
for line in lines:
print(line)
if log_file is not None:
UtilClass.writelog(log_file, line, 'append')
if 'BAD TERMINATION' in line.upper():
err = True
break
if err:
TauDEM.error('Error occurred when calling TauDEM function, please check!', log_file) | python | def log(lines, log_file=None):
"""Output log message."""
err = False
for line in lines:
print(line)
if log_file is not None:
UtilClass.writelog(log_file, line, 'append')
if 'BAD TERMINATION' in line.upper():
err = True
break
if err:
TauDEM.error('Error occurred when calling TauDEM function, please check!', log_file) | [
"def",
"log",
"(",
"lines",
",",
"log_file",
"=",
"None",
")",
":",
"err",
"=",
"False",
"for",
"line",
"in",
"lines",
":",
"print",
"(",
"line",
")",
"if",
"log_file",
"is",
"not",
"None",
":",
"UtilClass",
".",
"writelog",
"(",
"log_file",
",",
"line",
",",
"'append'",
")",
"if",
"'BAD TERMINATION'",
"in",
"line",
".",
"upper",
"(",
")",
":",
"err",
"=",
"True",
"break",
"if",
"err",
":",
"TauDEM",
".",
"error",
"(",
"'Error occurred when calling TauDEM function, please check!'",
",",
"log_file",
")"
] | Output log message. | [
"Output",
"log",
"message",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L104-L115 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.write_time_log | def write_time_log(logfile, time):
"""Write time log."""
if os.path.exists(logfile):
log_status = open(logfile, 'a', encoding='utf-8')
else:
log_status = open(logfile, 'w', encoding='utf-8')
log_status.write('Function Name\tRead Time\tCompute Time\tWrite Time\tTotal Time\t\n')
log_status.write('%s\t%.5f\t%.5f\t%.5f\t%.5f\t\n' % (time['name'], time['readt'],
time['computet'],
time['writet'],
time['totalt']))
log_status.flush()
log_status.close() | python | def write_time_log(logfile, time):
"""Write time log."""
if os.path.exists(logfile):
log_status = open(logfile, 'a', encoding='utf-8')
else:
log_status = open(logfile, 'w', encoding='utf-8')
log_status.write('Function Name\tRead Time\tCompute Time\tWrite Time\tTotal Time\t\n')
log_status.write('%s\t%.5f\t%.5f\t%.5f\t%.5f\t\n' % (time['name'], time['readt'],
time['computet'],
time['writet'],
time['totalt']))
log_status.flush()
log_status.close() | [
"def",
"write_time_log",
"(",
"logfile",
",",
"time",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"logfile",
")",
":",
"log_status",
"=",
"open",
"(",
"logfile",
",",
"'a'",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"log_status",
"=",
"open",
"(",
"logfile",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"log_status",
".",
"write",
"(",
"'Function Name\\tRead Time\\tCompute Time\\tWrite Time\\tTotal Time\\t\\n'",
")",
"log_status",
".",
"write",
"(",
"'%s\\t%.5f\\t%.5f\\t%.5f\\t%.5f\\t\\n'",
"%",
"(",
"time",
"[",
"'name'",
"]",
",",
"time",
"[",
"'readt'",
"]",
",",
"time",
"[",
"'computet'",
"]",
",",
"time",
"[",
"'writet'",
"]",
",",
"time",
"[",
"'totalt'",
"]",
")",
")",
"log_status",
".",
"flush",
"(",
")",
"log_status",
".",
"close",
"(",
")"
] | Write time log. | [
"Write",
"time",
"log",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L118-L130 | train |
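`write_time_log` expects a dict carrying the keys `name`, `readt`, `computet`, `writet`, and `totalt`; a minimal sketch with illustrative timings:

timing = {'name': 'pitremove', 'readt': 0.12, 'computet': 3.40,
          'writet': 0.25, 'totalt': 3.77}  # illustrative values only
TauDEM.write_time_log('taudem_runtime.log', timing)  # appends if the log already exists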
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.check_infile_and_wp | def check_infile_and_wp(curinf, curwp):
"""Check the existence of the given file and directory path.
1. Raise a RuntimeError if both do not exist.
2. If the ``curwp`` is None, then set the base folder of ``curinf`` to it.
"""
if not os.path.exists(curinf):
if curwp is None:
TauDEM.error('You must specify either the workspace or '
'the full path of the input file!')
curinf = curwp + os.sep + curinf
curinf = os.path.abspath(curinf)
if not os.path.exists(curinf):
TauDEM.error('Input files parameter %s does not exist!' % curinf)
else:
curinf = os.path.abspath(curinf)
if curwp is None:
curwp = os.path.dirname(curinf)
return curinf, curwp | python | def check_infile_and_wp(curinf, curwp):
"""Check the existence of the given file and directory path.
1. Raise a RuntimeError if both do not exist.
2. If the ``curwp`` is None, then set the base folder of ``curinf`` to it.
"""
if not os.path.exists(curinf):
if curwp is None:
TauDEM.error('You must specify either the workspace or '
'the full path of the input file!')
curinf = curwp + os.sep + curinf
curinf = os.path.abspath(curinf)
if not os.path.exists(curinf):
TauDEM.error('Input files parameter %s does not exist!' % curinf)
else:
curinf = os.path.abspath(curinf)
if curwp is None:
curwp = os.path.dirname(curinf)
return curinf, curwp | [
"def",
"check_infile_and_wp",
"(",
"curinf",
",",
"curwp",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"curinf",
")",
":",
"if",
"curwp",
"is",
"None",
":",
"TauDEM",
".",
"error",
"(",
"'You must specify one of the workspace and the '",
"'full path of input file!'",
")",
"curinf",
"=",
"curwp",
"+",
"os",
".",
"sep",
"+",
"curinf",
"curinf",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"curinf",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"curinf",
")",
":",
"TauDEM",
".",
"error",
"(",
"'Input files parameter %s is not existed!'",
"%",
"curinf",
")",
"else",
":",
"curinf",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"curinf",
")",
"if",
"curwp",
"is",
"None",
":",
"curwp",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"curinf",
")",
"return",
"curinf",
",",
"curwp"
] | Check the existence of the given file and directory path.
1. Raise a RuntimeError if both do not exist.
2. If the ``curwp`` is None, then set the base folder of ``curinf`` to it. | [
"Check",
"the",
"existence",
"of",
"the",
"given",
"file",
"and",
"directory",
"path",
".",
"1",
".",
"Raise",
"Runtime",
"exception",
"of",
"both",
"not",
"existed",
".",
"2",
".",
"If",
"the",
"curwp",
"is",
"None",
"the",
"set",
"the",
"base",
"folder",
"of",
"curinf",
"to",
"it",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L163-L180 | train |
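The helper accepts either an absolute input path or a file name relative to a workspace; a short sketch with placeholder paths that are assumed to exist (otherwise a RuntimeError is raised):

# Absolute path given: the workspace is inferred from the file's directory.
dem, ws = TauDEM.check_infile_and_wp('/data/demo/dem.tif', None)
# Relative name given: it is resolved against the supplied workspace.
dem, ws = TauDEM.check_infile_and_wp('dem.tif', '/data/demo')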
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.pitremove | def pitremove(np, dem, filleddem, workingdir=None, mpiexedir=None, exedir=None, log_file=None,
runtime_file=None, hostfile=None):
"""Run pit remove using the flooding approach """
fname = TauDEM.func_name('pitremove')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-z': dem}, workingdir,
None,
{'-fel': filleddem},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def pitremove(np, dem, filleddem, workingdir=None, mpiexedir=None, exedir=None, log_file=None,
runtime_file=None, hostfile=None):
"""Run pit remove using the flooding approach """
fname = TauDEM.func_name('pitremove')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-z': dem}, workingdir,
None,
{'-fel': filleddem},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"pitremove",
"(",
"np",
",",
"dem",
",",
"filleddem",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'pitremove'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-z'",
":",
"dem",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-fel'",
":",
"filleddem",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run pit remove using the flooding approach | [
"Run",
"pit",
"remove",
"using",
"the",
"flooding",
"approach"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L377-L386 | train |
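The wrappers that follow (d8flowdir, dinfflowdir, aread8, and so on) share this calling pattern: process count first, then input and output rasters, then optional workspace, MPI, and logging settings. One hedged sketch with placeholder file names therefore covers the family:

TauDEM.pitremove(4,              # number of MPI processes
                 'dem.tif',      # input DEM
                 'dem_fel.tif',  # pit-filled output raster
                 workingdir='/data/demo',
                 log_file='taudem.log')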
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.d8flowdir | def d8flowdir(np, filleddem, flowdir, slope, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D8 flow direction"""
fname = TauDEM.func_name('d8flowdir')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': filleddem}, workingdir,
None,
{'-p': flowdir, '-sd8': slope},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def d8flowdir(np, filleddem, flowdir, slope, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D8 flow direction"""
fname = TauDEM.func_name('d8flowdir')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': filleddem}, workingdir,
None,
{'-p': flowdir, '-sd8': slope},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"d8flowdir",
"(",
"np",
",",
"filleddem",
",",
"flowdir",
",",
"slope",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'d8flowdir'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"filleddem",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-p'",
":",
"flowdir",
",",
"'-sd8'",
":",
"slope",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run D8 flow direction | [
"Run",
"D8",
"flow",
"direction"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L389-L398 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.dinfflowdir | def dinfflowdir(np, filleddem, flowangle, slope, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Dinf flow direction"""
fname = TauDEM.func_name('dinfflowdir')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': filleddem}, workingdir,
None,
{'-ang': flowangle, '-slp': slope},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def dinfflowdir(np, filleddem, flowangle, slope, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Dinf flow direction"""
fname = TauDEM.func_name('dinfflowdir')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': filleddem}, workingdir,
None,
{'-ang': flowangle, '-slp': slope},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"dinfflowdir",
"(",
"np",
",",
"filleddem",
",",
"flowangle",
",",
"slope",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'dinfflowdir'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"filleddem",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-ang'",
":",
"flowangle",
",",
"'-slp'",
":",
"slope",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run Dinf flow direction | [
"Run",
"Dinf",
"flow",
"direction"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L401-L410 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.aread8 | def aread8(np, flowdir, acc, outlet=None, streamskeleton=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to D8 flow direction"""
# -nc means do not consider edge contamination
if not edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('aread8')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-o': outlet, '-wg': streamskeleton}, workingdir,
in_params,
{'-ad8': acc},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def aread8(np, flowdir, acc, outlet=None, streamskeleton=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to D8 flow direction"""
# -nc means do not consider edge contamination
if not edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('aread8')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-o': outlet, '-wg': streamskeleton}, workingdir,
in_params,
{'-ad8': acc},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"aread8",
"(",
"np",
",",
"flowdir",
",",
"acc",
",",
"outlet",
"=",
"None",
",",
"streamskeleton",
"=",
"None",
",",
"edgecontaimination",
"=",
"False",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"# -nc means do not consider edge contaimination",
"if",
"not",
"edgecontaimination",
":",
"in_params",
"=",
"{",
"'-nc'",
":",
"None",
"}",
"else",
":",
"in_params",
"=",
"None",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'aread8'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-p'",
":",
"flowdir",
",",
"'-o'",
":",
"outlet",
",",
"'-wg'",
":",
"streamskeleton",
"}",
",",
"workingdir",
",",
"in_params",
",",
"{",
"'-ad8'",
":",
"acc",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run Accumulate area according to D8 flow direction | [
"Run",
"Accumulate",
"area",
"according",
"to",
"D8",
"flow",
"direction"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L413-L428 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.areadinf | def areadinf(np, angfile, sca, outlet=None, wg=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to Dinf flow direction"""
# -nc means do not consider edge contamination
if not edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('areadinf')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ang': angfile, '-o': outlet, '-wg': wg}, workingdir,
in_params,
{'-sca': sca},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def areadinf(np, angfile, sca, outlet=None, wg=None, edgecontaimination=False,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run Accumulate area according to Dinf flow direction"""
# -nc means do not consider edge contamination
if not edgecontaimination:
in_params = {'-nc': None}
else:
in_params = None
fname = TauDEM.func_name('areadinf')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ang': angfile, '-o': outlet, '-wg': wg}, workingdir,
in_params,
{'-sca': sca},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"areadinf",
"(",
"np",
",",
"angfile",
",",
"sca",
",",
"outlet",
"=",
"None",
",",
"wg",
"=",
"None",
",",
"edgecontaimination",
"=",
"False",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"# -nc means do not consider edge contaimination",
"if",
"edgecontaimination",
":",
"in_params",
"=",
"{",
"'-nc'",
":",
"None",
"}",
"else",
":",
"in_params",
"=",
"None",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'areadinf'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-ang'",
":",
"angfile",
",",
"'-o'",
":",
"outlet",
",",
"'-wg'",
":",
"wg",
"}",
",",
"workingdir",
",",
"in_params",
",",
"{",
"'-sca'",
":",
"sca",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run Accumulate area according to Dinf flow direction | [
"Run",
"Accumulate",
"area",
"according",
"to",
"Dinf",
"flow",
"direction"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L431-L446 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.connectdown | def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Reads an ad8 contributing area file,
identifies the location of the largest ad8 value as the outlet of the largest watershed"""
# If watershed is not specified, use acc to generate a mask layer.
if wtsd is None or not os.path.isfile(wtsd):
p, workingdir = TauDEM.check_infile_and_wp(p, workingdir)
wtsd = workingdir + os.sep + 'wtsd_default.tif'
RasterUtilClass.get_mask_from_raster(p, wtsd, True)
fname = TauDEM.func_name('connectdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': p, '-ad8': acc, '-w': wtsd},
workingdir,
None,
{'-o': outlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Reads an ad8 contributing area file,
identifies the location of the largest ad8 value as the outlet of the largest watershed"""
# If watershed is not specified, use acc to generate a mask layer.
if wtsd is None or not os.path.isfile(wtsd):
p, workingdir = TauDEM.check_infile_and_wp(p, workingdir)
wtsd = workingdir + os.sep + 'wtsd_default.tif'
RasterUtilClass.get_mask_from_raster(p, wtsd, True)
fname = TauDEM.func_name('connectdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': p, '-ad8': acc, '-w': wtsd},
workingdir,
None,
{'-o': outlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"connectdown",
"(",
"np",
",",
"p",
",",
"acc",
",",
"outlet",
",",
"wtsd",
"=",
"None",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"# If watershed is not specified, use acc to generate a mask layer.",
"if",
"wtsd",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"wtsd",
")",
":",
"p",
",",
"workingdir",
"=",
"TauDEM",
".",
"check_infile_and_wp",
"(",
"p",
",",
"workingdir",
")",
"wtsd",
"=",
"workingdir",
"+",
"os",
".",
"sep",
"+",
"'wtsd_default.tif'",
"RasterUtilClass",
".",
"get_mask_from_raster",
"(",
"p",
",",
"wtsd",
",",
"True",
")",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'connectdown'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-p'",
":",
"p",
",",
"'-ad8'",
":",
"acc",
",",
"'-w'",
":",
"wtsd",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-o'",
":",
"outlet",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Reads an ad8 contributing area file,
identifies the location of the largest ad8 value as the outlet of the largest watershed | [
"Reads",
"an",
"ad8",
"contributing",
"area",
"file",
"identifies",
"the",
"location",
"of",
"the",
"largest",
"ad8",
"value",
"as",
"the",
"outlet",
"of",
"the",
"largest",
"watershed"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L449-L465 | train |
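When no watershed grid is passed, `connectdown` first derives a default mask (`wtsd_default.tif`) from the flow-direction raster before calling the executable; a sketch with placeholder names:

# wtsd is omitted, so a mask is generated from 'flowdir.tif' in the workspace.
TauDEM.connectdown(4, 'flowdir.tif', 'acc.tif', 'outlet.shp',
                   wtsd=None, workingdir='/data/demo')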
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.threshold | def threshold(np, acc, stream_raster, threshold=100., workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run threshold for stream raster"""
fname = TauDEM.func_name('threshold')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ssa': acc}, workingdir,
{'-thresh': threshold},
{'-src': stream_raster},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def threshold(np, acc, stream_raster, threshold=100., workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run threshold for stream raster"""
fname = TauDEM.func_name('threshold')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ssa': acc}, workingdir,
{'-thresh': threshold},
{'-src': stream_raster},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"threshold",
"(",
"np",
",",
"acc",
",",
"stream_raster",
",",
"threshold",
"=",
"100.",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'threshold'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-ssa'",
":",
"acc",
"}",
",",
"workingdir",
",",
"{",
"'-thresh'",
":",
"threshold",
"}",
",",
"{",
"'-src'",
":",
"stream_raster",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run threshold for stream raster | [
"Run",
"threshold",
"for",
"stream",
"raster"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L480-L489 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.moveoutletstostrm | def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet,
workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run move the given outlets to stream"""
fname = TauDEM.func_name('moveoutletstostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-src': streamRaster, '-o': outlet},
workingdir,
None,
{'-om': modifiedOutlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet,
workingdir=None, mpiexedir=None,
exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run move the given outlets to stream"""
fname = TauDEM.func_name('moveoutletstostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': flowdir, '-src': streamRaster, '-o': outlet},
workingdir,
None,
{'-om': modifiedOutlet},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"moveoutletstostrm",
"(",
"np",
",",
"flowdir",
",",
"streamRaster",
",",
"outlet",
",",
"modifiedOutlet",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'moveoutletstostrm'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-p'",
":",
"flowdir",
",",
"'-src'",
":",
"streamRaster",
",",
"'-o'",
":",
"outlet",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-om'",
":",
"modifiedOutlet",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run MoveOutletsToStrm to move the given outlets to the stream | [
"Run",
"move",
"the",
"given",
"outlets",
"to",
"stream"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L507-L518 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.convertdistmethod | def convertdistmethod(method_str):
"""Convert distance method to h, v, p, and s."""
if StringClass.string_match(method_str, 'Horizontal'):
return 'h'
elif StringClass.string_match(method_str, 'Vertical'):
return 'v'
elif StringClass.string_match(method_str, 'Pythagoras'):
return 'p'
elif StringClass.string_match(method_str, 'Surface'):
return 's'
elif method_str.lower() in ['h', 'v', 'p', 's']:
return method_str.lower()
else:
return 's' | python | def convertdistmethod(method_str):
"""Convert distance method to h, v, p, and s."""
if StringClass.string_match(method_str, 'Horizontal'):
return 'h'
elif StringClass.string_match(method_str, 'Vertical'):
return 'v'
elif StringClass.string_match(method_str, 'Pythagoras'):
return 'p'
elif StringClass.string_match(method_str, 'Surface'):
return 's'
elif method_str.lower() in ['h', 'v', 'p', 's']:
return method_str.lower()
else:
return 's' | [
"def",
"convertdistmethod",
"(",
"method_str",
")",
":",
"if",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Horizontal'",
")",
":",
"return",
"'h'",
"elif",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Vertical'",
")",
":",
"return",
"'v'",
"elif",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Pythagoras'",
")",
":",
"return",
"'p'",
"elif",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Surface'",
")",
":",
"return",
"'s'",
"elif",
"method_str",
".",
"lower",
"(",
")",
"in",
"[",
"'h'",
",",
"'v'",
",",
"'p'",
",",
"'s'",
"]",
":",
"return",
"method_str",
".",
"lower",
"(",
")",
"else",
":",
"return",
"'s'"
] | Convert distance method to h, v, p, and s. | [
"Convert",
"distance",
"method",
"to",
"h",
"v",
"p",
"and",
"s",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L521-L534 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.convertstatsmethod | def convertstatsmethod(method_str):
"""Convert statistics method to ave, min, and max."""
if StringClass.string_match(method_str, 'Average'):
return 'ave'
elif StringClass.string_match(method_str, 'Maximum'):
return 'max'
elif StringClass.string_match(method_str, 'Minimum'):
return 'min'
elif method_str.lower() in ['ave', 'max', 'min']:
return method_str.lower()
else:
return 'ave' | python | def convertstatsmethod(method_str):
"""Convert statistics method to ave, min, and max."""
if StringClass.string_match(method_str, 'Average'):
return 'ave'
elif StringClass.string_match(method_str, 'Maximum'):
return 'max'
elif StringClass.string_match(method_str, 'Minimum'):
return 'min'
elif method_str.lower() in ['ave', 'max', 'min']:
return method_str.lower()
else:
return 'ave' | [
"def",
"convertstatsmethod",
"(",
"method_str",
")",
":",
"if",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Average'",
")",
":",
"return",
"'ave'",
"elif",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Maximum'",
")",
":",
"return",
"'max'",
"elif",
"StringClass",
".",
"string_match",
"(",
"method_str",
",",
"'Minimum'",
")",
":",
"return",
"'min'",
"elif",
"method_str",
".",
"lower",
"(",
")",
"in",
"[",
"'ave'",
",",
"'max'",
",",
"'min'",
"]",
":",
"return",
"method_str",
".",
"lower",
"(",
")",
"else",
":",
"return",
"'ave'"
] | Convert statistics method to ave, min, and max. | [
"Convert",
"statistics",
"method",
"to",
"ave",
"min",
"and",
"max",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L537-L548 | train |
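Both converters above accept a full method name or its short form and fall back to a default when the input is unrecognized; a few illustrative calls:

TauDEM.convertdistmethod('Pythagoras')   # -> 'p'
TauDEM.convertdistmethod('unknown')      # -> 's' (Surface is the default)
TauDEM.convertstatsmethod('Maximum')     # -> 'max'
TauDEM.convertstatsmethod('other')       # -> 'ave' (Average is the default)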
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.d8hdisttostrm | def d8hdisttostrm(np, p, src, dist, thresh, workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run D8 horizontal distance down to stream.
"""
fname = TauDEM.func_name('d8hdisttostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': p, '-src': src},
workingdir,
{'-thresh': thresh},
{'-dist': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def d8hdisttostrm(np, p, src, dist, thresh, workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Run D8 horizontal distance down to stream.
"""
fname = TauDEM.func_name('d8hdisttostrm')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-p': p, '-src': src},
workingdir,
{'-thresh': thresh},
{'-dist': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"d8hdisttostrm",
"(",
"np",
",",
"p",
",",
"src",
",",
"dist",
",",
"thresh",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'d8hdisttostrm'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-p'",
":",
"p",
",",
"'-src'",
":",
"src",
"}",
",",
"workingdir",
",",
"{",
"'-thresh'",
":",
"thresh",
"}",
",",
"{",
"'-dist'",
":",
"dist",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run D8 horizontal distance down to stream. | [
"Run",
"D8",
"horizontal",
"distance",
"down",
"to",
"stream",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L551-L562 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.d8distdowntostream | def d8distdowntostream(np, p, fel, src, dist, distancemethod, thresh, workingdir=None,
mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D8 distance down to stream by different method for distance.
This function is extended from d8hdisttostrm by Liangjun.
Please clone `TauDEM by lreis2415`_ and compile for this program.
.. _TauDEM by lreis2415:
https://github.com/lreis2415/TauDEM
"""
fname = TauDEM.func_name('d8distdowntostream')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-p': p, '-src': src},
workingdir,
{'-thresh': thresh, '-m': TauDEM.convertdistmethod(distancemethod)},
{'-dist': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def d8distdowntostream(np, p, fel, src, dist, distancemethod, thresh, workingdir=None,
mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D8 distance down to stream by different method for distance.
This function is extended from d8hdisttostrm by Liangjun.
Please clone `TauDEM by lreis2415`_ and compile for this program.
.. _TauDEM by lreis2415:
https://github.com/lreis2415/TauDEM
"""
fname = TauDEM.func_name('d8distdowntostream')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-p': p, '-src': src},
workingdir,
{'-thresh': thresh, '-m': TauDEM.convertdistmethod(distancemethod)},
{'-dist': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"d8distdowntostream",
"(",
"np",
",",
"p",
",",
"fel",
",",
"src",
",",
"dist",
",",
"distancemethod",
",",
"thresh",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'d8distdowntostream'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"fel",
",",
"'-p'",
":",
"p",
",",
"'-src'",
":",
"src",
"}",
",",
"workingdir",
",",
"{",
"'-thresh'",
":",
"thresh",
",",
"'-m'",
":",
"TauDEM",
".",
"convertdistmethod",
"(",
"distancemethod",
")",
"}",
",",
"{",
"'-dist'",
":",
"dist",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run D8 distance down to stream using different methods for distance.
This function is extended from d8hdisttostrm by Liangjun.
Please clone `TauDEM by lreis2415`_ and compile for this program.
.. _TauDEM by lreis2415:
https://github.com/lreis2415/TauDEM | [
"Run",
"D8",
"distance",
"down",
"to",
"stream",
"by",
"different",
"method",
"for",
"distance",
".",
"This",
"function",
"is",
"extended",
"from",
"d8hdisttostrm",
"by",
"Liangjun",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L565-L583 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.dinfdistdown | def dinfdistdown(np, ang, fel, slp, src, statsm, distm, edgecontamination, wg, dist,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D-inf distance down to stream"""
in_params = {'-m': '%s %s' % (TauDEM.convertstatsmethod(statsm),
TauDEM.convertdistmethod(distm))}
if StringClass.string_match(edgecontamination, 'false') or edgecontamination is False:
in_params['-nc'] = None
fname = TauDEM.func_name('dinfdistdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-slp': slp, '-ang': ang, '-src': src, '-wg': wg},
workingdir,
in_params,
{'-dd': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def dinfdistdown(np, ang, fel, slp, src, statsm, distm, edgecontamination, wg, dist,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D-inf distance down to stream"""
in_params = {'-m': '%s %s' % (TauDEM.convertstatsmethod(statsm),
TauDEM.convertdistmethod(distm))}
if StringClass.string_match(edgecontamination, 'false') or edgecontamination is False:
in_params['-nc'] = None
fname = TauDEM.func_name('dinfdistdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-slp': slp, '-ang': ang, '-src': src, '-wg': wg},
workingdir,
in_params,
{'-dd': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"dinfdistdown",
"(",
"np",
",",
"ang",
",",
"fel",
",",
"slp",
",",
"src",
",",
"statsm",
",",
"distm",
",",
"edgecontamination",
",",
"wg",
",",
"dist",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"in_params",
"=",
"{",
"'-m'",
":",
"'%s %s'",
"%",
"(",
"TauDEM",
".",
"convertstatsmethod",
"(",
"statsm",
")",
",",
"TauDEM",
".",
"convertdistmethod",
"(",
"distm",
")",
")",
"}",
"if",
"StringClass",
".",
"string_match",
"(",
"edgecontamination",
",",
"'false'",
")",
"or",
"edgecontamination",
"is",
"False",
":",
"in_params",
"[",
"'-nc'",
"]",
"=",
"None",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'dinfdistdown'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"fel",
",",
"'-slp'",
":",
"slp",
",",
"'-ang'",
":",
"ang",
",",
"'-src'",
":",
"src",
",",
"'-wg'",
":",
"wg",
"}",
",",
"workingdir",
",",
"in_params",
",",
"{",
"'-dd'",
":",
"dist",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run D-inf distance down to stream | [
"Run",
"D",
"-",
"inf",
"distance",
"down",
"to",
"stream"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L586-L601 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.peukerdouglas | def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run peuker-douglas function"""
fname = TauDEM.func_name('peukerdouglas')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel}, workingdir,
None,
{'-ss': streamSkeleton},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run peuker-douglas function"""
fname = TauDEM.func_name('peukerdouglas')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel}, workingdir,
None,
{'-ss': streamSkeleton},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"peukerdouglas",
"(",
"np",
",",
"fel",
",",
"streamSkeleton",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'peukerdouglas'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"fel",
"}",
",",
"workingdir",
",",
"None",
",",
"{",
"'-ss'",
":",
"streamSkeleton",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Run peuker-douglas function | [
"Run",
"peuker",
"-",
"douglas",
"function"
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L604-L613 | train |
lreis2415/PyGeoC | pygeoc/TauDEM.py | TauDEM.dropanalysis | def dropanalysis(np, fel, p, ad8, ssa, outlet, minthresh, maxthresh, numthresh,
logspace, drp, workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Drop analysis for optimal threshold for extracting stream."""
parstr = '%f %f %f' % (minthresh, maxthresh, numthresh)
if logspace == 'false':
parstr += ' 1'
else:
parstr += ' 0'
fname = TauDEM.func_name('dropanalysis')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-p': p, '-ad8': ad8, '-ssa': ssa, '-o': outlet},
workingdir,
{'-par': parstr},
{'-drp': drp},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | python | def dropanalysis(np, fel, p, ad8, ssa, outlet, minthresh, maxthresh, numthresh,
logspace, drp, workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Drop analysis for optimal threshold for extracting stream."""
parstr = '%f %f %f' % (minthresh, maxthresh, numthresh)
if logspace == 'false':
parstr += ' 1'
else:
parstr += ' 0'
fname = TauDEM.func_name('dropanalysis')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-p': p, '-ad8': ad8, '-ssa': ssa, '-o': outlet},
workingdir,
{'-par': parstr},
{'-drp': drp},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | [
"def",
"dropanalysis",
"(",
"np",
",",
"fel",
",",
"p",
",",
"ad8",
",",
"ssa",
",",
"outlet",
",",
"minthresh",
",",
"maxthresh",
",",
"numthresh",
",",
"logspace",
",",
"drp",
",",
"workingdir",
"=",
"None",
",",
"mpiexedir",
"=",
"None",
",",
"exedir",
"=",
"None",
",",
"log_file",
"=",
"None",
",",
"runtime_file",
"=",
"None",
",",
"hostfile",
"=",
"None",
")",
":",
"parstr",
"=",
"'%f %f %f'",
"%",
"(",
"minthresh",
",",
"maxthresh",
",",
"numthresh",
")",
"if",
"logspace",
"==",
"'false'",
":",
"parstr",
"+=",
"' 1'",
"else",
":",
"parstr",
"+=",
"' 0'",
"fname",
"=",
"TauDEM",
".",
"func_name",
"(",
"'dropanalysis'",
")",
"return",
"TauDEM",
".",
"run",
"(",
"FileClass",
".",
"get_executable_fullpath",
"(",
"fname",
",",
"exedir",
")",
",",
"{",
"'-fel'",
":",
"fel",
",",
"'-p'",
":",
"p",
",",
"'-ad8'",
":",
"ad8",
",",
"'-ssa'",
":",
"ssa",
",",
"'-o'",
":",
"outlet",
"}",
",",
"workingdir",
",",
"{",
"'-par'",
":",
"parstr",
"}",
",",
"{",
"'-drp'",
":",
"drp",
"}",
",",
"{",
"'mpipath'",
":",
"mpiexedir",
",",
"'hostfile'",
":",
"hostfile",
",",
"'n'",
":",
"np",
"}",
",",
"{",
"'logfile'",
":",
"log_file",
",",
"'runtimefile'",
":",
"runtime_file",
"}",
")"
] | Drop analysis for optimal threshold for extracting stream. | [
"Drop",
"analysis",
"for",
"optimal",
"threshold",
"for",
"extracting",
"stream",
"."
] | 9a92d1a229bb74298e3c57f27c97079980b5f729 | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/TauDEM.py#L616-L632 | train |
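The `-par` string assembled by `dropanalysis` packs the threshold range, threshold count, and spacing flag into one value; with hypothetical inputs the composition looks like this:

minthresh, maxthresh, numthresh = 100, 1000, 10
parstr = '%f %f %f' % (minthresh, maxthresh, numthresh)
parstr += ' 1'  # appended when logspace == 'false', otherwise ' 0'
# parstr == '100.000000 1000.000000 10.000000 1'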
johnnoone/json-spec | src/jsonspec/reference/bases.py | Registry.resolve | def resolve(self, pointer):
"""Resolve from documents.
:param pointer: the document pointer to resolve
:type pointer: DocumentPointer
"""
dp = DocumentPointer(pointer)
obj, fetcher = self.prototype(dp)
for token in dp.pointer:
obj = token.extract(obj, bypass_ref=True)
reference = ref(obj)
if reference:
obj = fetcher.resolve(reference)
return obj | python | def resolve(self, pointer):
"""Resolve from documents.
:param pointer: the document pointer to resolve
:type pointer: DocumentPointer
"""
dp = DocumentPointer(pointer)
obj, fetcher = self.prototype(dp)
for token in dp.pointer:
obj = token.extract(obj, bypass_ref=True)
reference = ref(obj)
if reference:
obj = fetcher.resolve(reference)
return obj | [
"def",
"resolve",
"(",
"self",
",",
"pointer",
")",
":",
"dp",
"=",
"DocumentPointer",
"(",
"pointer",
")",
"obj",
",",
"fetcher",
"=",
"self",
".",
"prototype",
"(",
"dp",
")",
"for",
"token",
"in",
"dp",
".",
"pointer",
":",
"obj",
"=",
"token",
".",
"extract",
"(",
"obj",
",",
"bypass_ref",
"=",
"True",
")",
"reference",
"=",
"ref",
"(",
"obj",
")",
"if",
"reference",
":",
"obj",
"=",
"fetcher",
".",
"resolve",
"(",
"reference",
")",
"return",
"obj"
] | Resolve from documents.
:param pointer: the document pointer to resolve
:type pointer: DocumentPointer | [
"Resolve",
"from",
"documents",
"."
] | f91981724cea0c366bd42a6670eb07bbe31c0e0c | https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/reference/bases.py#L37-L52 | train |
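A hedged usage sketch for `resolve`: assuming a `Registry` holding a document registered as `schema.json` (the constructor and registration API are not shown in this entry), a fragment is fetched with a `document#/json/pointer` string:

registry = Registry()  # assumed construction; documents would be registered elsewhere
value = registry.resolve('schema.json#/definitions/address')  # walks tokens, follows $ref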
nmdp-bioinformatics/SeqAnn | seqann/align.py | count_diffs | def count_diffs(align, feats, inseq, locus, cutoff,
verbose=False, verbosity=0):
"""
count_diffs - Counts the number of mismatches, gaps, and insertions and then determines if those are within an acceptable range.
:param align: The alignment
:type align: ``List``
:param feats: Dictionary of the features
:type feats: ``dict``
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param inseq: The input sequence
:type inseq: ``str``
:param cutoff: The alignment cutoff
:type cutoff: ``float``
:param verbose: Flag for running in verbose mode.
:type verbose: ``bool``
:param verbosity: Numerical value to indicate how verbose the output will be in verbose mode.
:type verbosity: ``int``
:rtype: ``List``
"""
nfeats = len(feats.keys())
mm = 0
insr = 0
dels = 0
gaps = 0
match = 0
lastb = ''
l = len(align[0]) if len(align[0]) > len(align[1]) else len(align[1])
# Counting gaps, mismatches and insertions
for i in range(0, l):
if align[0][i] == "-" or align[1][i] == "-":
if align[0][i] == "-":
insr += 1
if lastb != '-':
gaps += 1
lastb = "-"
if align[1][i] == "-":
dels += 1
if lastb != '-':
gaps += 1
lastb = "-"
else:
lastb = ''
if align[0][i] != align[1][i]:
mm += 1
else:
match += 1
gper = gaps / nfeats
delper = dels / l
iper = insr / l
mmper = mm / l
mper = match / l
mper2 = match / len(inseq)
logger = logging.getLogger("Logger." + __name__)
if verbose and verbosity > 0:
logger.info("Features algined = " + ",".join(list(feats.keys())))
logger.info('{:<22}{:<6d}'.format("Number of feats: ", nfeats))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of gaps: ", gaps, gper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of deletions: ", dels, delper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of insertions: ", insr, iper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of mismatches: ", mm, mmper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of matches: ", match, mper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of matches: ", match, mper2))
indel = iper + delper
# ** HARD CODED LOGIC ** #
if len(inseq) > 6000 and mmper < .10 and mper2 > .80:
if verbose:
logger.info("Alignment coverage high enough to complete annotation 11")
return insr, dels
else:
# TODO: These numbers need to be fine tuned
indel_mm = indel + mper2
if (indel > 0.5 or mmper > 0.05) and mper2 < cutoff and indel_mm != 1:
if verbose:
logger.info("Alignment coverage NOT high enough to return annotation")
return Annotation(complete_annotation=False)
else:
if verbose:
logger.info("Alignment coverage high enough to complete annotation")
return insr, dels | python | def count_diffs(align, feats, inseq, locus, cutoff,
verbose=False, verbosity=0):
"""
count_diffs - Counts the number of mismatches, gaps, and insertions and then determines if those are within an acceptable range.
:param align: The alignment
:type align: ``List``
:param feats: Dictionary of the features
:type feats: ``dict``
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param inseq: The input sequence
:type inseq: ``str``
:param cutoff: The alignment cutoff
:type cutoff: ``float``
:param verbose: Flag for running in verbose mode.
:type verbose: ``bool``
:param verbosity: Numerical value to indicate how verbose the output will be in verbose mode.
:type verbosity: ``int``
:rtype: ``List``
"""
nfeats = len(feats.keys())
mm = 0
insr = 0
dels = 0
gaps = 0
match = 0
lastb = ''
l = len(align[0]) if len(align[0]) > len(align[1]) else len(align[1])
# Counting gaps, mismatches and insertions
for i in range(0, l):
if align[0][i] == "-" or align[1][i] == "-":
if align[0][i] == "-":
insr += 1
if lastb != '-':
gaps += 1
lastb = "-"
if align[1][i] == "-":
dels += 1
if lastb != '-':
gaps += 1
lastb = "-"
else:
lastb = ''
if align[0][i] != align[1][i]:
mm += 1
else:
match += 1
gper = gaps / nfeats
delper = dels / l
iper = insr / l
mmper = mm / l
mper = match / l
mper2 = match / len(inseq)
logger = logging.getLogger("Logger." + __name__)
if verbose and verbosity > 0:
logger.info("Features algined = " + ",".join(list(feats.keys())))
logger.info('{:<22}{:<6d}'.format("Number of feats: ", nfeats))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of gaps: ", gaps, gper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of deletions: ", dels, delper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of insertions: ", insr, iper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of mismatches: ", mm, mmper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of matches: ", match, mper))
logger.info('{:<22}{:<6d}{:<1.2f}'.format("Number of matches: ", match, mper2))
indel = iper + delper
# ** HARD CODED LOGIC ** #
if len(inseq) > 6000 and mmper < .10 and mper2 > .80:
if verbose:
logger.info("Alignment coverage high enough to complete annotation 11")
return insr, dels
else:
# TODO: These numbers need to be fine tuned
indel_mm = indel + mper2
if (indel > 0.5 or mmper > 0.05) and mper2 < cutoff and indel_mm != 1:
if verbose:
logger.info("Alignment coverage NOT high enough to return annotation")
return Annotation(complete_annotation=False)
else:
if verbose:
logger.info("Alignment coverage high enough to complete annotation")
return insr, dels | [
"def",
"count_diffs",
"(",
"align",
",",
"feats",
",",
"inseq",
",",
"locus",
",",
"cutoff",
",",
"verbose",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
":",
"nfeats",
"=",
"len",
"(",
"feats",
".",
"keys",
"(",
")",
")",
"mm",
"=",
"0",
"insr",
"=",
"0",
"dels",
"=",
"0",
"gaps",
"=",
"0",
"match",
"=",
"0",
"lastb",
"=",
"''",
"l",
"=",
"len",
"(",
"align",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"align",
"[",
"0",
"]",
")",
">",
"len",
"(",
"align",
"[",
"1",
"]",
")",
"else",
"len",
"(",
"align",
"[",
"1",
"]",
")",
"# Counting gaps, mismatches and insertions",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l",
")",
":",
"if",
"align",
"[",
"0",
"]",
"[",
"i",
"]",
"==",
"\"-\"",
"or",
"align",
"[",
"1",
"]",
"[",
"i",
"]",
"==",
"\"-\"",
":",
"if",
"align",
"[",
"0",
"]",
"[",
"i",
"]",
"==",
"\"-\"",
":",
"insr",
"+=",
"1",
"if",
"lastb",
"!=",
"'-'",
":",
"gaps",
"+=",
"1",
"lastb",
"=",
"\"-\"",
"if",
"align",
"[",
"1",
"]",
"[",
"i",
"]",
"==",
"\"-\"",
":",
"dels",
"+=",
"1",
"if",
"lastb",
"!=",
"'-'",
":",
"gaps",
"+=",
"1",
"lastb",
"=",
"\"-\"",
"else",
":",
"lastb",
"=",
"''",
"if",
"align",
"[",
"0",
"]",
"[",
"i",
"]",
"!=",
"align",
"[",
"1",
"]",
"[",
"i",
"]",
":",
"mm",
"+=",
"1",
"else",
":",
"match",
"+=",
"1",
"gper",
"=",
"gaps",
"/",
"nfeats",
"delper",
"=",
"dels",
"/",
"l",
"iper",
"=",
"insr",
"/",
"l",
"mmper",
"=",
"mm",
"/",
"l",
"mper",
"=",
"match",
"/",
"l",
"mper2",
"=",
"match",
"/",
"len",
"(",
"inseq",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"Logger.\"",
"+",
"__name__",
")",
"if",
"verbose",
"and",
"verbosity",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"Features algined = \"",
"+",
"\",\"",
".",
"join",
"(",
"list",
"(",
"feats",
".",
"keys",
"(",
")",
")",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}'",
".",
"format",
"(",
"\"Number of feats: \"",
",",
"nfeats",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of gaps: \"",
",",
"gaps",
",",
"gper",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of deletions: \"",
",",
"dels",
",",
"delper",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of insertions: \"",
",",
"insr",
",",
"iper",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of mismatches: \"",
",",
"mm",
",",
"mmper",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of matches: \"",
",",
"match",
",",
"mper",
")",
")",
"logger",
".",
"info",
"(",
"'{:<22}{:<6d}{:<1.2f}'",
".",
"format",
"(",
"\"Number of matches: \"",
",",
"match",
",",
"mper2",
")",
")",
"indel",
"=",
"iper",
"+",
"delper",
"# ** HARD CODED LOGIC ** #",
"if",
"len",
"(",
"inseq",
")",
">",
"6000",
"and",
"mmper",
"<",
".10",
"and",
"mper2",
">",
".80",
":",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Alignment coverage high enough to complete annotation 11\"",
")",
"return",
"insr",
",",
"dels",
"else",
":",
"# TODO: These numbers need to be fine tuned",
"indel_mm",
"=",
"indel",
"+",
"mper2",
"if",
"(",
"indel",
">",
"0.5",
"or",
"mmper",
">",
"0.05",
")",
"and",
"mper2",
"<",
"cutoff",
"and",
"indel_mm",
"!=",
"1",
":",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Alignment coverage NOT high enough to return annotation\"",
")",
"return",
"Annotation",
"(",
"complete_annotation",
"=",
"False",
")",
"else",
":",
"if",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Alignment coverage high enough to complete annotation\"",
")",
"return",
"insr",
",",
"dels"
] | count_diffs - Counts the number of mismatches, gaps, and insertions and then determines if those are within an acceptable range.
:param align: The alignment
:type align: ``List``
:param feats: Dictionary of the features
:type feats: ``dict``
:param locus: The gene locus associated with the sequence.
:type locus: ``str``
:param inseq: The input sequence
:type inseq: ``str``
:param cutoff: The alignment cutoff
:type cutoff: ``float``
:param verbose: Flag for running in verbose mode.
:type verbose: ``bool``
:param verbosity: Numerical value to indicate how verbose the output will be in verbose mode.
:type verbosity: ``int``
:rtype: ``Tuple`` of (insertions, deletions), or ``Annotation`` when coverage fails | [
"count_diffs",
"-",
"Counts",
"the",
"number",
"of",
"mismatches",
"gaps",
"and",
"insertions",
"and",
"then",
"determines",
"if",
"those",
"are",
"within",
"an",
"acceptable",
"range",
"."
] | 5ce91559b0a4fbe4fb7758e034eb258202632463 | https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/align.py#L394-L480 | train |
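A hedged usage sketch of count_diffs (not from the source repository): the import path, locus name, and toy alignment are illustrative assumptions. On success the function returns an (insertions, deletions) tuple; when coverage is too low it returns Annotation(complete_annotation=False). Note that mper2 above measures matches against the input sequence length, which is why "Number of matches" is logged twice with different ratios.

from seqann.align import count_diffs  # assumed import path

# Toy pairwise alignment: one insertion in the query, no mismatches.
align = ("ACGT-ACGT", "ACGTTACGT")
feats = {"exon_1": None, "intron_1": None}  # only the keys feed nfeats
result = count_diffs(align, feats, inseq="ACGTTACGT",
                     locus="HLA-A", cutoff=0.9)
if isinstance(result, tuple):
    insr, dels = result                # accepted: (1, 0) for this toy input
else:
    print(result.complete_annotation)  # False when coverage fails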
ployground/ploy | ploy/__init__.py | Controller.cmd_stop | def cmd_stop(self, argv, help):
"""Stops the instance"""
parser = argparse.ArgumentParser(
prog="%s stop" % self.progname,
description=help,
)
instances = self.get_instances(command='stop')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
args = parser.parse_args(argv)
instance = instances[args.instance[0]]
instance.stop() | python | def cmd_stop(self, argv, help):
"""Stops the instance"""
parser = argparse.ArgumentParser(
prog="%s stop" % self.progname,
description=help,
)
instances = self.get_instances(command='stop')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
args = parser.parse_args(argv)
instance = instances[args.instance[0]]
instance.stop() | [
"def",
"cmd_stop",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s stop\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"get_instances",
"(",
"command",
"=",
"'stop'",
")",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"instance",
"=",
"instances",
"[",
"args",
".",
"instance",
"[",
"0",
"]",
"]",
"instance",
".",
"stop",
"(",
")"
] | Stops the instance | [
"Stops",
"the",
"instance"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L219-L232 | train |
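A hedged sketch of how these cmd_* handlers are driven: the (argv, help) calling convention is visible in cmd_list below, where subcommands run as func(args.listopts, func.__doc__). Controller construction and the instance name here are illustrative assumptions; in the real console script the config file is located first.

from ploy import Controller

ctrl = Controller()                  # assumed default construction
ctrl.configfile = 'etc/ploy.conf'    # hypothetical config path
# Equivalent of running `ploy stop web01` on the command line:
ctrl.cmd_stop(['web01'], Controller.cmd_stop.__doc__)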
ployground/ploy | ploy/__init__.py | Controller.cmd_terminate | def cmd_terminate(self, argv, help):
"""Terminates the instance"""
from ploy.common import yesno
parser = argparse.ArgumentParser(
prog="%s terminate" % self.progname,
description=help,
)
instances = self.get_instances(command='terminate')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
args = parser.parse_args(argv)
instance = instances[args.instance[0]]
if not yesno("Are you sure you want to terminate '%s'?" % instance.config_id):
return
instance.hooks.before_terminate(instance)
instance.terminate()
instance.hooks.after_terminate(instance) | python | def cmd_terminate(self, argv, help):
"""Terminates the instance"""
from ploy.common import yesno
parser = argparse.ArgumentParser(
prog="%s terminate" % self.progname,
description=help,
)
instances = self.get_instances(command='terminate')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
args = parser.parse_args(argv)
instance = instances[args.instance[0]]
if not yesno("Are you sure you want to terminate '%s'?" % instance.config_id):
return
instance.hooks.before_terminate(instance)
instance.terminate()
instance.hooks.after_terminate(instance) | [
"def",
"cmd_terminate",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"from",
"ploy",
".",
"common",
"import",
"yesno",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s terminate\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"get_instances",
"(",
"command",
"=",
"'terminate'",
")",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"instance",
"=",
"instances",
"[",
"args",
".",
"instance",
"[",
"0",
"]",
"]",
"if",
"not",
"yesno",
"(",
"\"Are you sure you want to terminate '%s'?\"",
"%",
"instance",
".",
"config_id",
")",
":",
"return",
"instance",
".",
"hooks",
".",
"before_terminate",
"(",
"instance",
")",
"instance",
".",
"terminate",
"(",
")",
"instance",
".",
"hooks",
".",
"after_terminate",
"(",
"instance",
")"
] | Terminates the instance | [
"Terminates",
"the",
"instance"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L234-L252 | train |
ployground/ploy | ploy/__init__.py | Controller.cmd_start | def cmd_start(self, argv, help):
"""Starts the instance"""
parser = argparse.ArgumentParser(
prog="%s start" % self.progname,
description=help,
)
instances = self.get_instances(command='start')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("-o", "--override", nargs="*", type=str,
dest="overrides", metavar="OVERRIDE",
help="Option to override in instance config for startup script (name=value).")
args = parser.parse_args(argv)
overrides = self._parse_overrides(args)
overrides['instances'] = self.instances
instance = instances[args.instance[0]]
instance.hooks.before_start(instance)
result = instance.start(overrides)
instance.hooks.after_start(instance)
if result is None:
return
instance.status() | python | def cmd_start(self, argv, help):
"""Starts the instance"""
parser = argparse.ArgumentParser(
prog="%s start" % self.progname,
description=help,
)
instances = self.get_instances(command='start')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("-o", "--override", nargs="*", type=str,
dest="overrides", metavar="OVERRIDE",
help="Option to override in instance config for startup script (name=value).")
args = parser.parse_args(argv)
overrides = self._parse_overrides(args)
overrides['instances'] = self.instances
instance = instances[args.instance[0]]
instance.hooks.before_start(instance)
result = instance.start(overrides)
instance.hooks.after_start(instance)
if result is None:
return
instance.status() | [
"def",
"cmd_start",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s start\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"get_instances",
"(",
"command",
"=",
"'start'",
")",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--override\"",
",",
"nargs",
"=",
"\"*\"",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"\"overrides\"",
",",
"metavar",
"=",
"\"OVERRIDE\"",
",",
"help",
"=",
"\"Option to override in instance config for startup script (name=value).\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"overrides",
"=",
"self",
".",
"_parse_overrides",
"(",
"args",
")",
"overrides",
"[",
"'instances'",
"]",
"=",
"self",
".",
"instances",
"instance",
"=",
"instances",
"[",
"args",
".",
"instance",
"[",
"0",
"]",
"]",
"instance",
".",
"hooks",
".",
"before_start",
"(",
"instance",
")",
"result",
"=",
"instance",
".",
"start",
"(",
"overrides",
")",
"instance",
".",
"hooks",
".",
"after_start",
"(",
"instance",
")",
"if",
"result",
"is",
"None",
":",
"return",
"instance",
".",
"status",
"(",
")"
] | Starts the instance | [
"Starts",
"the",
"instance"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L270-L293 | train |
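The -o flag takes name=value pairs per its help text; a hedged example overriding one option for the startup script (the option name is illustrative, and ctrl is the Controller from the sketch above). _parse_overrides presumably splits each pair on '=', and the code above then injects the live instances mapping before calling instance.start(overrides).

# Equivalent of `ploy start web01 -o startup_script=bootstrap.sh`:
ctrl.cmd_start(['web01', '-o', 'startup_script=bootstrap.sh'],
               Controller.cmd_start.__doc__)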
ployground/ploy | ploy/__init__.py | Controller.cmd_annotate | def cmd_annotate(self, argv, help):
"""Prints annotated config"""
parser = argparse.ArgumentParser(
prog="%s annotate" % self.progname,
description=help,
)
parser.parse_args(argv)
list(self.instances.values()) # trigger instance augmentation
for global_section in sorted(self.config):
for sectionname in sorted(self.config[global_section]):
print("[%s:%s]" % (global_section, sectionname))
section = self.config[global_section][sectionname]
for option, value in sorted(section._dict.items()):
print("%s = %s" % (option, value.value))
print(" %s" % value.src)
print() | python | def cmd_annotate(self, argv, help):
"""Prints annotated config"""
parser = argparse.ArgumentParser(
prog="%s annotate" % self.progname,
description=help,
)
parser.parse_args(argv)
list(self.instances.values()) # trigger instance augmentation
for global_section in sorted(self.config):
for sectionname in sorted(self.config[global_section]):
print("[%s:%s]" % (global_section, sectionname))
section = self.config[global_section][sectionname]
for option, value in sorted(section._dict.items()):
print("%s = %s" % (option, value.value))
print(" %s" % value.src)
print() | [
"def",
"cmd_annotate",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s annotate\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"list",
"(",
"self",
".",
"instances",
".",
"values",
"(",
")",
")",
"# trigger instance augmentation",
"for",
"global_section",
"in",
"sorted",
"(",
"self",
".",
"config",
")",
":",
"for",
"sectionname",
"in",
"sorted",
"(",
"self",
".",
"config",
"[",
"global_section",
"]",
")",
":",
"print",
"(",
"\"[%s:%s]\"",
"%",
"(",
"global_section",
",",
"sectionname",
")",
")",
"section",
"=",
"self",
".",
"config",
"[",
"global_section",
"]",
"[",
"sectionname",
"]",
"for",
"option",
",",
"value",
"in",
"sorted",
"(",
"section",
".",
"_dict",
".",
"items",
"(",
")",
")",
":",
"print",
"(",
"\"%s = %s\"",
"%",
"(",
"option",
",",
"value",
".",
"value",
")",
")",
"print",
"(",
"\" %s\"",
"%",
"value",
".",
"src",
")",
"print",
"(",
")"
] | Prints annotated config | [
"Prints",
"annotated",
"config"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L295-L310 | train |
ployground/ploy | ploy/__init__.py | Controller.cmd_debug | def cmd_debug(self, argv, help):
"""Prints some debug info for this script"""
parser = argparse.ArgumentParser(
prog="%s debug" % self.progname,
description=help,
)
instances = self.instances
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Print more info and output the startup script")
parser.add_argument("-c", "--console-output", dest="console_output",
action="store_true", help="Prints the console output of the instance if available")
parser.add_argument("-i", "--interactive", dest="interactive",
action="store_true", help="Creates a connection and drops you into an interactive Python session")
parser.add_argument("-r", "--raw", dest="raw",
action="store_true", help="Outputs the raw possibly compressed startup script")
parser.add_argument("-o", "--override", nargs="*", type=str,
dest="overrides", metavar="OVERRIDE",
help="Option to override instance config for startup script (name=value).")
args = parser.parse_args(argv)
overrides = self._parse_overrides(args)
overrides['instances'] = self.instances
instance = instances[args.instance[0]]
if hasattr(instance, 'startup_script'):
startup_script = instance.startup_script(overrides=overrides, debug=True)
max_size = getattr(instance, 'max_startup_script_size', 16 * 1024)
log.info("Length of startup script: %s/%s", len(startup_script['raw']), max_size)
if args.verbose:
if 'startup_script' in instance.config:
if startup_script['original'] == startup_script['raw']:
log.info("Startup script:")
elif args.raw:
log.info("Compressed startup script:")
else:
log.info("Uncompressed startup script:")
else:
log.info("No startup script specified")
if args.raw:
print(startup_script['raw'], end='')
elif args.verbose:
print(startup_script['original'], end='')
if args.console_output:
if hasattr(instance, 'get_console_output'):
print(instance.get_console_output())
else:
log.error("The instance doesn't support console output.")
if args.interactive: # pragma: no cover
import readline
from pprint import pprint
local = dict(
ctrl=self,
instances=self.instances,
instance=instance,
pprint=pprint)
readline.parse_and_bind('tab: complete')
try:
import rlcompleter
readline.set_completer(rlcompleter.Completer(local).complete)
except ImportError:
pass
__import__("code").interact(local=local) | python | def cmd_debug(self, argv, help):
"""Prints some debug info for this script"""
parser = argparse.ArgumentParser(
prog="%s debug" % self.progname,
description=help,
)
instances = self.instances
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Print more info and output the startup script")
parser.add_argument("-c", "--console-output", dest="console_output",
action="store_true", help="Prints the console output of the instance if available")
parser.add_argument("-i", "--interactive", dest="interactive",
action="store_true", help="Creates a connection and drops you into an interactive Python session")
parser.add_argument("-r", "--raw", dest="raw",
action="store_true", help="Outputs the raw possibly compressed startup script")
parser.add_argument("-o", "--override", nargs="*", type=str,
dest="overrides", metavar="OVERRIDE",
help="Option to override instance config for startup script (name=value).")
args = parser.parse_args(argv)
overrides = self._parse_overrides(args)
overrides['instances'] = self.instances
instance = instances[args.instance[0]]
if hasattr(instance, 'startup_script'):
startup_script = instance.startup_script(overrides=overrides, debug=True)
max_size = getattr(instance, 'max_startup_script_size', 16 * 1024)
log.info("Length of startup script: %s/%s", len(startup_script['raw']), max_size)
if args.verbose:
if 'startup_script' in instance.config:
if startup_script['original'] == startup_script['raw']:
log.info("Startup script:")
elif args.raw:
log.info("Compressed startup script:")
else:
log.info("Uncompressed startup script:")
else:
log.info("No startup script specified")
if args.raw:
print(startup_script['raw'], end='')
elif args.verbose:
print(startup_script['original'], end='')
if args.console_output:
if hasattr(instance, 'get_console_output'):
print(instance.get_console_output())
else:
log.error("The instance doesn't support console output.")
if args.interactive: # pragma: no cover
import readline
from pprint import pprint
local = dict(
ctrl=self,
instances=self.instances,
instance=instance,
pprint=pprint)
readline.parse_and_bind('tab: complete')
try:
import rlcompleter
readline.set_completer(rlcompleter.Completer(local).complete)
except ImportError:
pass
__import__("code").interact(local=local) | [
"def",
"cmd_debug",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s debug\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"instances",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"dest",
"=",
"\"verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print more info and output the startup script\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--console-output\"",
",",
"dest",
"=",
"\"console_output\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Prints the console output of the instance if available\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--interactive\"",
",",
"dest",
"=",
"\"interactive\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Creates a connection and drops you into an interactive Python session\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--raw\"",
",",
"dest",
"=",
"\"raw\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Outputs the raw possibly compressed startup script\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--override\"",
",",
"nargs",
"=",
"\"*\"",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"\"overrides\"",
",",
"metavar",
"=",
"\"OVERRIDE\"",
",",
"help",
"=",
"\"Option to override instance config for startup script (name=value).\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"overrides",
"=",
"self",
".",
"_parse_overrides",
"(",
"args",
")",
"overrides",
"[",
"'instances'",
"]",
"=",
"self",
".",
"instances",
"instance",
"=",
"instances",
"[",
"args",
".",
"instance",
"[",
"0",
"]",
"]",
"if",
"hasattr",
"(",
"instance",
",",
"'startup_script'",
")",
":",
"startup_script",
"=",
"instance",
".",
"startup_script",
"(",
"overrides",
"=",
"overrides",
",",
"debug",
"=",
"True",
")",
"max_size",
"=",
"getattr",
"(",
"instance",
",",
"'max_startup_script_size'",
",",
"16",
"*",
"1024",
")",
"log",
".",
"info",
"(",
"\"Length of startup script: %s/%s\"",
",",
"len",
"(",
"startup_script",
"[",
"'raw'",
"]",
")",
",",
"max_size",
")",
"if",
"args",
".",
"verbose",
":",
"if",
"'startup_script'",
"in",
"instance",
".",
"config",
":",
"if",
"startup_script",
"[",
"'original'",
"]",
"==",
"startup_script",
"[",
"'raw'",
"]",
":",
"log",
".",
"info",
"(",
"\"Startup script:\"",
")",
"elif",
"args",
".",
"raw",
":",
"log",
".",
"info",
"(",
"\"Compressed startup script:\"",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Uncompressed startup script:\"",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"No startup script specified\"",
")",
"if",
"args",
".",
"raw",
":",
"print",
"(",
"startup_script",
"[",
"'raw'",
"]",
",",
"end",
"=",
"''",
")",
"elif",
"args",
".",
"verbose",
":",
"print",
"(",
"startup_script",
"[",
"'original'",
"]",
",",
"end",
"=",
"''",
")",
"if",
"args",
".",
"console_output",
":",
"if",
"hasattr",
"(",
"instance",
",",
"'get_console_output'",
")",
":",
"print",
"(",
"instance",
".",
"get_console_output",
"(",
")",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"The instance doesn't support console output.\"",
")",
"if",
"args",
".",
"interactive",
":",
"# pragma: no cover",
"import",
"readline",
"from",
"pprint",
"import",
"pprint",
"local",
"=",
"dict",
"(",
"ctrl",
"=",
"self",
",",
"instances",
"=",
"self",
".",
"instances",
",",
"instance",
"=",
"instance",
",",
"pprint",
"=",
"pprint",
")",
"readline",
".",
"parse_and_bind",
"(",
"'tab: complete'",
")",
"try",
":",
"import",
"rlcompleter",
"readline",
".",
"set_completer",
"(",
"rlcompleter",
".",
"Completer",
"(",
"local",
")",
".",
"complete",
")",
"except",
"ImportError",
":",
"pass",
"__import__",
"(",
"\"code\"",
")",
".",
"interact",
"(",
"local",
"=",
"local",
")"
] | Prints some debug info for this script | [
"Prints",
"some",
"debug",
"info",
"for",
"this",
"script"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L312-L375 | train |
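A hedged invocation sketch for the debug command, reusing ctrl from above: -v prints the uncompressed startup script plus its length against max_startup_script_size, -r prints the raw, possibly compressed form, and -c dumps console output when the instance backend supports it.

# Length check, uncompressed script dump, then console output if available:
ctrl.cmd_debug(['web01', '-v', '-c'], Controller.cmd_debug.__doc__)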
ployground/ploy | ploy/__init__.py | Controller.cmd_list | def cmd_list(self, argv, help):
"""Return a list of various things"""
parser = argparse.ArgumentParser(
prog="%s list" % self.progname,
description=help,
)
parser.add_argument("list", nargs=1,
metavar="listname",
help="Name of list to show.",
choices=sorted(self.list_cmds))
parser.add_argument("listopts",
metavar="...",
nargs=argparse.REMAINDER,
help="list command options")
args = parser.parse_args(argv)
for name, func in sorted(self.list_cmds[args.list[0]]):
func(args.listopts, func.__doc__) | python | def cmd_list(self, argv, help):
"""Return a list of various things"""
parser = argparse.ArgumentParser(
prog="%s list" % self.progname,
description=help,
)
parser.add_argument("list", nargs=1,
metavar="listname",
help="Name of list to show.",
choices=sorted(self.list_cmds))
parser.add_argument("listopts",
metavar="...",
nargs=argparse.REMAINDER,
help="list command options")
args = parser.parse_args(argv)
for name, func in sorted(self.list_cmds[args.list[0]]):
func(args.listopts, func.__doc__) | [
"def",
"cmd_list",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s list\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"list\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"listname\"",
",",
"help",
"=",
"\"Name of list to show.\"",
",",
"choices",
"=",
"sorted",
"(",
"self",
".",
"list_cmds",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"listopts\"",
",",
"metavar",
"=",
"\"...\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"\"list command options\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"for",
"name",
",",
"func",
"in",
"sorted",
"(",
"self",
".",
"list_cmds",
"[",
"args",
".",
"list",
"[",
"0",
"]",
"]",
")",
":",
"func",
"(",
"args",
".",
"listopts",
",",
"func",
".",
"__doc__",
")"
] | Return a list of various things | [
"Return",
"a",
"list",
"of",
"various",
"things"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L377-L393 | train |
ployground/ploy | ploy/__init__.py | Controller.cmd_ssh | def cmd_ssh(self, argv, help):
"""Log into the instance with ssh using the automatically generated known hosts"""
parser = argparse.ArgumentParser(
prog="%s ssh" % self.progname,
description=help,
)
instances = self.get_instances(command='init_ssh_key')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("...", nargs=argparse.REMAINDER,
help="ssh options")
iargs = enumerate(argv)
sid_index = None
user = None
for i, arg in iargs:
if not arg.startswith('-'):
sid_index = i
break
if arg[1] in '1246AaCfgKkMNnqsTtVvXxYy':
continue
elif arg[1] in 'bcDeFiLlmOopRSw':
value = iargs.next()
if arg[1] == 'l':
user = value[1]
continue
# fake parsing for nice error messages
if sid_index is None:
parser.parse_args([])
else:
sid = argv[sid_index]
if '@' in sid:
user, sid = sid.split('@', 1)
parser.parse_args([sid])
instance = instances[sid]
if user is None:
user = instance.config.get('user')
try:
ssh_info = instance.init_ssh_key(user=user)
except (instance.paramiko.SSHException, socket.error) as e:
log.error("Couldn't validate fingerprint for ssh connection.")
log.error(unicode(e))
log.error("Is the instance finished starting up?")
sys.exit(1)
client = ssh_info['client']
client.get_transport().sock.close()
client.close()
argv[sid_index:sid_index + 1] = instance.ssh_args_from_info(ssh_info)
argv[0:0] = ['ssh']
os.execvp('ssh', argv) | python | def cmd_ssh(self, argv, help):
"""Log into the instance with ssh using the automatically generated known hosts"""
parser = argparse.ArgumentParser(
prog="%s ssh" % self.progname,
description=help,
)
instances = self.get_instances(command='init_ssh_key')
parser.add_argument("instance", nargs=1,
metavar="instance",
help="Name of the instance from the config.",
choices=sorted(instances))
parser.add_argument("...", nargs=argparse.REMAINDER,
help="ssh options")
iargs = enumerate(argv)
sid_index = None
user = None
for i, arg in iargs:
if not arg.startswith('-'):
sid_index = i
break
if arg[1] in '1246AaCfgKkMNnqsTtVvXxYy':
continue
elif arg[1] in 'bcDeFiLlmOopRSw':
value = iargs.next()
if arg[1] == 'l':
user = value[1]
continue
# fake parsing for nice error messages
if sid_index is None:
parser.parse_args([])
else:
sid = argv[sid_index]
if '@' in sid:
user, sid = sid.split('@', 1)
parser.parse_args([sid])
instance = instances[sid]
if user is None:
user = instance.config.get('user')
try:
ssh_info = instance.init_ssh_key(user=user)
except (instance.paramiko.SSHException, socket.error) as e:
log.error("Couldn't validate fingerprint for ssh connection.")
log.error(unicode(e))
log.error("Is the instance finished starting up?")
sys.exit(1)
client = ssh_info['client']
client.get_transport().sock.close()
client.close()
argv[sid_index:sid_index + 1] = instance.ssh_args_from_info(ssh_info)
argv[0:0] = ['ssh']
os.execvp('ssh', argv) | [
"def",
"cmd_ssh",
"(",
"self",
",",
"argv",
",",
"help",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"%s ssh\"",
"%",
"self",
".",
"progname",
",",
"description",
"=",
"help",
",",
")",
"instances",
"=",
"self",
".",
"get_instances",
"(",
"command",
"=",
"'init_ssh_key'",
")",
"parser",
".",
"add_argument",
"(",
"\"instance\"",
",",
"nargs",
"=",
"1",
",",
"metavar",
"=",
"\"instance\"",
",",
"help",
"=",
"\"Name of the instance from the config.\"",
",",
"choices",
"=",
"sorted",
"(",
"instances",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"...\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"\"ssh options\"",
")",
"iargs",
"=",
"enumerate",
"(",
"argv",
")",
"sid_index",
"=",
"None",
"user",
"=",
"None",
"for",
"i",
",",
"arg",
"in",
"iargs",
":",
"if",
"not",
"arg",
".",
"startswith",
"(",
"'-'",
")",
":",
"sid_index",
"=",
"i",
"break",
"if",
"arg",
"[",
"1",
"]",
"in",
"'1246AaCfgKkMNnqsTtVvXxYy'",
":",
"continue",
"elif",
"arg",
"[",
"1",
"]",
"in",
"'bcDeFiLlmOopRSw'",
":",
"value",
"=",
"iargs",
".",
"next",
"(",
")",
"if",
"arg",
"[",
"1",
"]",
"==",
"'l'",
":",
"user",
"=",
"value",
"[",
"1",
"]",
"continue",
"# fake parsing for nice error messages",
"if",
"sid_index",
"is",
"None",
":",
"parser",
".",
"parse_args",
"(",
"[",
"]",
")",
"else",
":",
"sid",
"=",
"argv",
"[",
"sid_index",
"]",
"if",
"'@'",
"in",
"sid",
":",
"user",
",",
"sid",
"=",
"sid",
".",
"split",
"(",
"'@'",
",",
"1",
")",
"parser",
".",
"parse_args",
"(",
"[",
"sid",
"]",
")",
"instance",
"=",
"instances",
"[",
"sid",
"]",
"if",
"user",
"is",
"None",
":",
"user",
"=",
"instance",
".",
"config",
".",
"get",
"(",
"'user'",
")",
"try",
":",
"ssh_info",
"=",
"instance",
".",
"init_ssh_key",
"(",
"user",
"=",
"user",
")",
"except",
"(",
"instance",
".",
"paramiko",
".",
"SSHException",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Couldn't validate fingerprint for ssh connection.\"",
")",
"log",
".",
"error",
"(",
"unicode",
"(",
"e",
")",
")",
"log",
".",
"error",
"(",
"\"Is the instance finished starting up?\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"client",
"=",
"ssh_info",
"[",
"'client'",
"]",
"client",
".",
"get_transport",
"(",
")",
".",
"sock",
".",
"close",
"(",
")",
"client",
".",
"close",
"(",
")",
"argv",
"[",
"sid_index",
":",
"sid_index",
"+",
"1",
"]",
"=",
"instance",
".",
"ssh_args_from_info",
"(",
"ssh_info",
")",
"argv",
"[",
"0",
":",
"0",
"]",
"=",
"[",
"'ssh'",
"]",
"os",
".",
"execvp",
"(",
"'ssh'",
",",
"argv",
")"
] | Log into the instance with ssh using the automatically generated known hosts | [
"Log",
"into",
"the",
"instance",
"with",
"ssh",
"using",
"the",
"automatically",
"generated",
"known",
"hosts"
] | 9295b5597c09c434f170afbfd245d73f09affc39 | https://github.com/ployground/ploy/blob/9295b5597c09c434f170afbfd245d73f09affc39/ploy/__init__.py#L395-L445 | train |
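The flag loop mirrors OpenSSH's option set: single-letter flags without arguments are skipped, flags in 'bcDeFiLlmOopRSw' consume the next argv entry (with -l capturing the user), and the first bare argument becomes the instance id, optionally with a user@ prefix. Note iargs.next() and unicode() are Python 2 spellings; Python 3 would need next(iargs) and str(). A hedged invocation, reusing ctrl from above:

# Equivalent of `ploy ssh -l admin web01 uptime`; everything after the
# instance id is handed to the real ssh binary via os.execvp.
ctrl.cmd_ssh(['-l', 'admin', 'web01', 'uptime'], Controller.cmd_ssh.__doc__)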
mivade/tornadose | tornadose/handlers.py | BaseHandler.initialize | def initialize(self, store):
"""Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store.
"""
assert isinstance(store, stores.BaseStore)
self.messages = Queue()
self.store = store
self.store.register(self) | python | def initialize(self, store):
"""Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store.
"""
assert isinstance(store, stores.BaseStore)
self.messages = Queue()
self.store = store
self.store.register(self) | [
"def",
"initialize",
"(",
"self",
",",
"store",
")",
":",
"assert",
"isinstance",
"(",
"store",
",",
"stores",
".",
"BaseStore",
")",
"self",
".",
"messages",
"=",
"Queue",
"(",
")",
"self",
".",
"store",
"=",
"store",
"self",
".",
"store",
".",
"register",
"(",
"self",
")"
] | Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store. | [
"Common",
"initialization",
"of",
"handlers",
"happens",
"here",
".",
"If",
"additional",
"initialization",
"is",
"required",
"this",
"method",
"must",
"either",
"be",
"called",
"with",
"super",
"or",
"the",
"child",
"class",
"must",
"assign",
"the",
"store",
"attribute",
"and",
"register",
"itself",
"with",
"the",
"store",
"."
] | d220e0e3040d24c98997eee7a8a236602b4c5159 | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L22-L32 | train |
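A hedged sketch of the subclassing contract the docstring spells out: either call the base initialize via super or replicate the store assignment and registration by hand. The subclass name and extra argument are hypothetical.

from tornadose.handlers import BaseHandler

class FilteredHandler(BaseHandler):   # hypothetical subclass
    def initialize(self, store, topic):
        super().initialize(store)     # sets self.messages/self.store, registers
        self.topic = topic            # extra per-handler state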
mivade/tornadose | tornadose/handlers.py | EventSource.prepare | def prepare(self):
"""Log access."""
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) | python | def prepare(self):
"""Log access."""
request_time = 1000.0 * self.request.request_time()
access_log.info(
"%d %s %.2fms", self.get_status(),
self._request_summary(), request_time) | [
"def",
"prepare",
"(",
"self",
")",
":",
"request_time",
"=",
"1000.0",
"*",
"self",
".",
"request",
".",
"request_time",
"(",
")",
"access_log",
".",
"info",
"(",
"\"%d %s %.2fms\"",
",",
"self",
".",
"get_status",
"(",
")",
",",
"self",
".",
"_request_summary",
"(",
")",
",",
"request_time",
")"
] | Log access. | [
"Log",
"access",
"."
] | d220e0e3040d24c98997eee7a8a236602b4c5159 | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L69-L74 | train |
mivade/tornadose | tornadose/handlers.py | EventSource.publish | async def publish(self, message):
"""Pushes data to a listener."""
try:
self.write('data: {}\n\n'.format(message))
await self.flush()
except StreamClosedError:
self.finished = True | python | async def publish(self, message):
"""Pushes data to a listener."""
try:
self.write('data: {}\n\n'.format(message))
await self.flush()
except StreamClosedError:
self.finished = True | [
"async",
"def",
"publish",
"(",
"self",
",",
"message",
")",
":",
"try",
":",
"self",
".",
"write",
"(",
"'data: {}\\n\\n'",
".",
"format",
"(",
"message",
")",
")",
"await",
"self",
".",
"flush",
"(",
")",
"except",
"StreamClosedError",
":",
"self",
".",
"finished",
"=",
"True"
] | Pushes data to a listener. | [
"Pushes",
"data",
"to",
"a",
"listener",
"."
] | d220e0e3040d24c98997eee7a8a236602b4c5159 | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L76-L82 | train |
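The write above is the minimal server-sent-events framing: each message becomes a data: line terminated by a blank line, flushed immediately so the client sees it without buffering. A trivial check of the framing:

# What one publish('hello') call writes to the open response:
frame = 'data: {}\n\n'.format('hello')
assert frame == 'data: hello\n\n'    # one SSE event, blank-line terminated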
mivade/tornadose | tornadose/handlers.py | WebSocketSubscriber.open | async def open(self):
"""Register with the publisher."""
self.store.register(self)
while not self.finished:
message = await self.messages.get()
await self.publish(message) | python | async def open(self):
"""Register with the publisher."""
self.store.register(self)
while not self.finished:
message = await self.messages.get()
await self.publish(message) | [
"async",
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"store",
".",
"register",
"(",
"self",
")",
"while",
"not",
"self",
".",
"finished",
":",
"message",
"=",
"await",
"self",
".",
"messages",
".",
"get",
"(",
")",
"await",
"self",
".",
"publish",
"(",
"message",
")"
] | Register with the publisher. | [
"Register",
"with",
"the",
"publisher",
"."
] | d220e0e3040d24c98997eee7a8a236602b4c5159 | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L102-L107 | train |
mivade/tornadose | tornadose/handlers.py | WebSocketSubscriber.publish | async def publish(self, message):
"""Push a new message to the client. The data will be
available as a JSON object with the key ``data``.
"""
try:
self.write_message(dict(data=message))
except WebSocketClosedError:
self._close() | python | async def publish(self, message):
"""Push a new message to the client. The data will be
available as a JSON object with the key ``data``.
"""
try:
self.write_message(dict(data=message))
except WebSocketClosedError:
self._close() | [
"async",
"def",
"publish",
"(",
"self",
",",
"message",
")",
":",
"try",
":",
"self",
".",
"write_message",
"(",
"dict",
"(",
"data",
"=",
"message",
")",
")",
"except",
"WebSocketClosedError",
":",
"self",
".",
"_close",
"(",
")"
] | Push a new message to the client. The data will be
available as a JSON object with the key ``data``. | [
"Push",
"a",
"new",
"message",
"to",
"the",
"client",
".",
"The",
"data",
"will",
"be",
"available",
"as",
"a",
"JSON",
"object",
"with",
"the",
"key",
"data",
"."
] | d220e0e3040d24c98997eee7a8a236602b4c5159 | https://github.com/mivade/tornadose/blob/d220e0e3040d24c98997eee7a8a236602b4c5159/tornadose/handlers.py#L116-L124 | train |
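Pulling the handlers together, a hedged application sketch: DataStore and its submit() method are assumed from tornadose.stores (any BaseStore subclass should do), and the routes and port are illustrative. Both handler classes receive the shared store through initialize.

from tornado.ioloop import IOLoop
from tornado.web import Application
from tornadose.handlers import EventSource, WebSocketSubscriber
from tornadose.stores import DataStore   # assumed concrete store

store = DataStore()
app = Application([
    (r'/events', EventSource, {'store': store}),          # SSE clients
    (r'/socket', WebSocketSubscriber, {'store': store}),  # {'data': ...} frames
])
app.listen(8888)
IOLoop.current().call_later(1, store.submit, 'hello')     # push one message
IOLoop.current().start()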