repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
| partition
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|
openspending/os-package-registry | os_package_registry/package_registry.py | PackageRegistry.get_stats | def get_stats(self):
"""
Get some stats on the packages in the registry
"""
try:
query = {
# We only care about the aggregations, so don't return the hits
'size': 0,
'aggs': {
'num_packages': {
'value_count': {
'field': 'id',
},
},
'num_records': {
'sum': {
'field': 'package.count_of_rows',
},
},
'num_countries': {
'cardinality': {
'field': 'package.countryCode.keyword',
},
},
},
}
aggregations = self.es.search(index=self.index_name,
body=query)['aggregations']
return {
key: int(value['value'])
for key, value in aggregations.items()
}
except NotFoundError:
return {} | python | def get_stats(self):
"""
Get some stats on the packages in the registry
"""
try:
query = {
# We only care about the aggregations, so don't return the hits
'size': 0,
'aggs': {
'num_packages': {
'value_count': {
'field': 'id',
},
},
'num_records': {
'sum': {
'field': 'package.count_of_rows',
},
},
'num_countries': {
'cardinality': {
'field': 'package.countryCode.keyword',
},
},
},
}
aggregations = self.es.search(index=self.index_name,
body=query)['aggregations']
return {
key: int(value['value'])
for key, value in aggregations.items()
}
except NotFoundError:
return {} | [
"def",
"get_stats",
"(",
"self",
")",
":",
"try",
":",
"query",
"=",
"{",
"# We only care about the aggregations, so don't return the hits",
"'size'",
":",
"0",
",",
"'aggs'",
":",
"{",
"'num_packages'",
":",
"{",
"'value_count'",
":",
"{",
"'field'",
":",
"'id'",
",",
"}",
",",
"}",
",",
"'num_records'",
":",
"{",
"'sum'",
":",
"{",
"'field'",
":",
"'package.count_of_rows'",
",",
"}",
",",
"}",
",",
"'num_countries'",
":",
"{",
"'cardinality'",
":",
"{",
"'field'",
":",
"'package.countryCode.keyword'",
",",
"}",
",",
"}",
",",
"}",
",",
"}",
"aggregations",
"=",
"self",
".",
"es",
".",
"search",
"(",
"index",
"=",
"self",
".",
"index_name",
",",
"body",
"=",
"query",
")",
"[",
"'aggregations'",
"]",
"return",
"{",
"key",
":",
"int",
"(",
"value",
"[",
"'value'",
"]",
")",
"for",
"key",
",",
"value",
"in",
"aggregations",
".",
"items",
"(",
")",
"}",
"except",
"NotFoundError",
":",
"return",
"{",
"}"
]
| Get some stats on the packages in the registry | [
"Get",
"some",
"stats",
"on",
"the",
"packages",
"in",
"the",
"registry"
]
| 02f3628340417ed7d943a6cc6c25ea0469de22cd | https://github.com/openspending/os-package-registry/blob/02f3628340417ed7d943a6cc6c25ea0469de22cd/os_package_registry/package_registry.py#L256-L290 | train |
tjcsl/cslbot | cslbot/commands/fml.py | cmd | def cmd(send, msg, args):
"""Gets a random FML post.
Syntax: {command}
"""
req = get("http://api.fmylife.com/view/random", params={'language': 'en', 'key': args['config']['api']['fmlkey']})
doc = fromstring(req.content)
send(doc.xpath('//text')[0].text) | python | def cmd(send, msg, args):
"""Gets a random FML post.
Syntax: {command}
"""
req = get("http://api.fmylife.com/view/random", params={'language': 'en', 'key': args['config']['api']['fmlkey']})
doc = fromstring(req.content)
send(doc.xpath('//text')[0].text) | [
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"req",
"=",
"get",
"(",
"\"http://api.fmylife.com/view/random\"",
",",
"params",
"=",
"{",
"'language'",
":",
"'en'",
",",
"'key'",
":",
"args",
"[",
"'config'",
"]",
"[",
"'api'",
"]",
"[",
"'fmlkey'",
"]",
"}",
")",
"doc",
"=",
"fromstring",
"(",
"req",
".",
"content",
")",
"send",
"(",
"doc",
".",
"xpath",
"(",
"'//text'",
")",
"[",
"0",
"]",
".",
"text",
")"
]
| Gets a random FML post.
Syntax: {command} | [
"Gets",
"a",
"random",
"FML",
"post",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/fml.py#L26-L34 | train |
tjcsl/cslbot | cslbot/commands/score.py | cmd | def cmd(send, msg, args):
"""Gets scores.
Syntax: {command} <--high|--low|nick>
"""
if not args['config']['feature'].getboolean('hooks'):
send("Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).")
return
session = args['db']
parser = arguments.ArgParser(args['config'])
group = parser.add_mutually_exclusive_group()
group.add_argument('--high', action='store_true')
group.add_argument('--low', action='store_true')
group.add_argument('nick', nargs='?', action=arguments.NickParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if cmdargs.high:
data = session.query(Scores).order_by(Scores.score.desc()).limit(3).all()
send('High Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.low:
data = session.query(Scores).order_by(Scores.score).limit(3).all()
send('Low Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.nick:
name = cmdargs.nick.lower()
if name == 'c':
send("We all know you love C better than anything else, so why rub it in?")
return
score = session.query(Scores).filter(Scores.nick == name).scalar()
if score is not None:
plural = '' if abs(score.score) == 1 else 's'
if name == args['botnick'].lower():
emote = ':)' if score.score > 0 else ':(' if score.score < 0 else ':|'
output = 'has %s point%s! %s' % (score.score, plural, emote)
send(output, 'action')
else:
send("%s has %i point%s!" % (name, score.score, plural))
else:
send("Nobody cares about %s" % name)
else:
if session.query(Scores).count() == 0:
send("Nobody cares about anything =(")
else:
query = session.query(Scores).order_by(func.random()).first()
plural = '' if abs(query.score) == 1 else 's'
send("%s has %i point%s!" % (query.nick, query.score, plural)) | python | def cmd(send, msg, args):
"""Gets scores.
Syntax: {command} <--high|--low|nick>
"""
if not args['config']['feature'].getboolean('hooks'):
send("Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).")
return
session = args['db']
parser = arguments.ArgParser(args['config'])
group = parser.add_mutually_exclusive_group()
group.add_argument('--high', action='store_true')
group.add_argument('--low', action='store_true')
group.add_argument('nick', nargs='?', action=arguments.NickParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if cmdargs.high:
data = session.query(Scores).order_by(Scores.score.desc()).limit(3).all()
send('High Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.low:
data = session.query(Scores).order_by(Scores.score).limit(3).all()
send('Low Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.nick:
name = cmdargs.nick.lower()
if name == 'c':
send("We all know you love C better than anything else, so why rub it in?")
return
score = session.query(Scores).filter(Scores.nick == name).scalar()
if score is not None:
plural = '' if abs(score.score) == 1 else 's'
if name == args['botnick'].lower():
emote = ':)' if score.score > 0 else ':(' if score.score < 0 else ':|'
output = 'has %s point%s! %s' % (score.score, plural, emote)
send(output, 'action')
else:
send("%s has %i point%s!" % (name, score.score, plural))
else:
send("Nobody cares about %s" % name)
else:
if session.query(Scores).count() == 0:
send("Nobody cares about anything =(")
else:
query = session.query(Scores).order_by(func.random()).first()
plural = '' if abs(query.score) == 1 else 's'
send("%s has %i point%s!" % (query.nick, query.score, plural)) | [
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"if",
"not",
"args",
"[",
"'config'",
"]",
"[",
"'feature'",
"]",
".",
"getboolean",
"(",
"'hooks'",
")",
":",
"send",
"(",
"\"Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).\"",
")",
"return",
"session",
"=",
"args",
"[",
"'db'",
"]",
"parser",
"=",
"arguments",
".",
"ArgParser",
"(",
"args",
"[",
"'config'",
"]",
")",
"group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"group",
".",
"add_argument",
"(",
"'--high'",
",",
"action",
"=",
"'store_true'",
")",
"group",
".",
"add_argument",
"(",
"'--low'",
",",
"action",
"=",
"'store_true'",
")",
"group",
".",
"add_argument",
"(",
"'nick'",
",",
"nargs",
"=",
"'?'",
",",
"action",
"=",
"arguments",
".",
"NickParser",
")",
"try",
":",
"cmdargs",
"=",
"parser",
".",
"parse_args",
"(",
"msg",
")",
"except",
"arguments",
".",
"ArgumentException",
"as",
"e",
":",
"send",
"(",
"str",
"(",
"e",
")",
")",
"return",
"if",
"cmdargs",
".",
"high",
":",
"data",
"=",
"session",
".",
"query",
"(",
"Scores",
")",
".",
"order_by",
"(",
"Scores",
".",
"score",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"3",
")",
".",
"all",
"(",
")",
"send",
"(",
"'High Scores:'",
")",
"for",
"x",
"in",
"data",
":",
"send",
"(",
"\"%s: %s\"",
"%",
"(",
"x",
".",
"nick",
",",
"x",
".",
"score",
")",
")",
"elif",
"cmdargs",
".",
"low",
":",
"data",
"=",
"session",
".",
"query",
"(",
"Scores",
")",
".",
"order_by",
"(",
"Scores",
".",
"score",
")",
".",
"limit",
"(",
"3",
")",
".",
"all",
"(",
")",
"send",
"(",
"'Low Scores:'",
")",
"for",
"x",
"in",
"data",
":",
"send",
"(",
"\"%s: %s\"",
"%",
"(",
"x",
".",
"nick",
",",
"x",
".",
"score",
")",
")",
"elif",
"cmdargs",
".",
"nick",
":",
"name",
"=",
"cmdargs",
".",
"nick",
".",
"lower",
"(",
")",
"if",
"name",
"==",
"'c'",
":",
"send",
"(",
"\"We all know you love C better than anything else, so why rub it in?\"",
")",
"return",
"score",
"=",
"session",
".",
"query",
"(",
"Scores",
")",
".",
"filter",
"(",
"Scores",
".",
"nick",
"==",
"name",
")",
".",
"scalar",
"(",
")",
"if",
"score",
"is",
"not",
"None",
":",
"plural",
"=",
"''",
"if",
"abs",
"(",
"score",
".",
"score",
")",
"==",
"1",
"else",
"'s'",
"if",
"name",
"==",
"args",
"[",
"'botnick'",
"]",
".",
"lower",
"(",
")",
":",
"emote",
"=",
"':)'",
"if",
"score",
".",
"score",
">",
"0",
"else",
"':('",
"if",
"score",
".",
"score",
"<",
"0",
"else",
"':|'",
"output",
"=",
"'has %s point%s! %s'",
"%",
"(",
"score",
".",
"score",
",",
"plural",
",",
"emote",
")",
"send",
"(",
"output",
",",
"'action'",
")",
"else",
":",
"send",
"(",
"\"%s has %i point%s!\"",
"%",
"(",
"name",
",",
"score",
".",
"score",
",",
"plural",
")",
")",
"else",
":",
"send",
"(",
"\"Nobody cares about %s\"",
"%",
"name",
")",
"else",
":",
"if",
"session",
".",
"query",
"(",
"Scores",
")",
".",
"count",
"(",
")",
"==",
"0",
":",
"send",
"(",
"\"Nobody cares about anything =(\"",
")",
"else",
":",
"query",
"=",
"session",
".",
"query",
"(",
"Scores",
")",
".",
"order_by",
"(",
"func",
".",
"random",
"(",
")",
")",
".",
"first",
"(",
")",
"plural",
"=",
"''",
"if",
"abs",
"(",
"query",
".",
"score",
")",
"==",
"1",
"else",
"'s'",
"send",
"(",
"\"%s has %i point%s!\"",
"%",
"(",
"query",
".",
"nick",
",",
"query",
".",
"score",
",",
"plural",
")",
")"
]
| Gets scores.
Syntax: {command} <--high|--low|nick> | [
"Gets",
"scores",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/score.py#L26-L78 | train |
acutesoftware/virtual-AI-simulator | vais/envirosim.py | Params.add_affect | def add_affect(self, name, src, dest, val, condition = None ):
"""
adds how param 'src' affects param 'dest' to the list
"""
self.affects.append(ParamAffects(name, src, dest, val, condition)) | python | def add_affect(self, name, src, dest, val, condition = None ):
"""
adds how param 'src' affects param 'dest' to the list
"""
self.affects.append(ParamAffects(name, src, dest, val, condition)) | [
"def",
"add_affect",
"(",
"self",
",",
"name",
",",
"src",
",",
"dest",
",",
"val",
",",
"condition",
"=",
"None",
")",
":",
"self",
".",
"affects",
".",
"append",
"(",
"ParamAffects",
"(",
"name",
",",
"src",
",",
"dest",
",",
"val",
",",
"condition",
")",
")"
]
| adds how param 'src' affects param 'dest' to the list | [
"adds",
"how",
"param",
"src",
"affects",
"param",
"dest",
"to",
"the",
"list"
]
| 57de679a5b1a58c38fefe6aea58af1f3a7e79c58 | https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/envirosim.py#L63-L67 | train |
acutesoftware/virtual-AI-simulator | vais/envirosim.py | Params.get_by_name | def get_by_name(self, nme):
"""
searches list of all parameters and returns the first
param that matches on name
"""
for p in self.params:
if p.name == nme:
return p
return None | python | def get_by_name(self, nme):
"""
searches list of all parameters and returns the first
param that matches on name
"""
for p in self.params:
if p.name == nme:
return p
return None | [
"def",
"get_by_name",
"(",
"self",
",",
"nme",
")",
":",
"for",
"p",
"in",
"self",
".",
"params",
":",
"if",
"p",
".",
"name",
"==",
"nme",
":",
"return",
"p",
"return",
"None"
]
| searches list of all parameters and returns the first
param that matches on name | [
"searches",
"list",
"of",
"all",
"parameters",
"and",
"returns",
"the",
"first",
"param",
"that",
"matches",
"on",
"name"
]
| 57de679a5b1a58c38fefe6aea58af1f3a7e79c58 | https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/envirosim.py#L69-L77 | train |
acutesoftware/virtual-AI-simulator | vais/envirosim.py | Params.get_affects_for_param | def get_affects_for_param(self, nme):
"""
searches all affects and returns a list
that affect the param named 'nme'
"""
res = []
for a in self.affects:
if a.name == nme:
res.append(a)
return res | python | def get_affects_for_param(self, nme):
"""
searches all affects and returns a list
that affect the param named 'nme'
"""
res = []
for a in self.affects:
if a.name == nme:
res.append(a)
return res | [
"def",
"get_affects_for_param",
"(",
"self",
",",
"nme",
")",
":",
"res",
"=",
"[",
"]",
"for",
"a",
"in",
"self",
".",
"affects",
":",
"if",
"a",
".",
"name",
"==",
"nme",
":",
"res",
".",
"append",
"(",
"a",
")",
"return",
"res"
]
| searches all affects and returns a list
that affect the param named 'nme' | [
"searches",
"all",
"affects",
"and",
"returns",
"a",
"list",
"that",
"affect",
"the",
"param",
"named",
"nme"
]
| 57de679a5b1a58c38fefe6aea58af1f3a7e79c58 | https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/envirosim.py#L79-L88 | train |
mswart/pyopenmensa | feed.py | buildPrices | def buildPrices(data, roles=None, regex=default_price_regex,
default=None, additional={}):
''' Create a dictionary with price information. Multiple ways are
supported.
:rtype: :obj:`dict`: keys are role as str, values are the prices as
cent count'''
if isinstance(data, dict):
data = [(item[0], convertPrice(item[1])) for item in data.items()]
return dict([v for v in data if v[1] is not None])
elif isinstance(data, (str, float, int)) and not isinstance(data, bool):
if default is None:
raise ValueError('You have to call setAdditionalCharges '
'before it is possible to pass a string as price')
basePrice = convertPrice(data)
if basePrice is None:
return {}
prices = {default: basePrice}
for role in additional:
extraCharge = convertPrice(additional[role])
if extraCharge is None:
continue
prices[role] = basePrice + extraCharge
return prices
elif roles:
prices = {}
priceRoles = iter(roles)
for priceData in data:
price = convertPrice(priceData)
if price is None:
continue
prices[next(priceRoles)] = price
return prices
else:
raise TypeError('This type is for prices not supported!') | python | def buildPrices(data, roles=None, regex=default_price_regex,
default=None, additional={}):
''' Create a dictionary with price information. Multiple ways are
supported.
:rtype: :obj:`dict`: keys are role as str, values are the prices as
cent count'''
if isinstance(data, dict):
data = [(item[0], convertPrice(item[1])) for item in data.items()]
return dict([v for v in data if v[1] is not None])
elif isinstance(data, (str, float, int)) and not isinstance(data, bool):
if default is None:
raise ValueError('You have to call setAdditionalCharges '
'before it is possible to pass a string as price')
basePrice = convertPrice(data)
if basePrice is None:
return {}
prices = {default: basePrice}
for role in additional:
extraCharge = convertPrice(additional[role])
if extraCharge is None:
continue
prices[role] = basePrice + extraCharge
return prices
elif roles:
prices = {}
priceRoles = iter(roles)
for priceData in data:
price = convertPrice(priceData)
if price is None:
continue
prices[next(priceRoles)] = price
return prices
else:
raise TypeError('This type is for prices not supported!') | [
"def",
"buildPrices",
"(",
"data",
",",
"roles",
"=",
"None",
",",
"regex",
"=",
"default_price_regex",
",",
"default",
"=",
"None",
",",
"additional",
"=",
"{",
"}",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"data",
"=",
"[",
"(",
"item",
"[",
"0",
"]",
",",
"convertPrice",
"(",
"item",
"[",
"1",
"]",
")",
")",
"for",
"item",
"in",
"data",
".",
"items",
"(",
")",
"]",
"return",
"dict",
"(",
"[",
"v",
"for",
"v",
"in",
"data",
"if",
"v",
"[",
"1",
"]",
"is",
"not",
"None",
"]",
")",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"str",
",",
"float",
",",
"int",
")",
")",
"and",
"not",
"isinstance",
"(",
"data",
",",
"bool",
")",
":",
"if",
"default",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'You have to call setAdditionalCharges '",
"'before it is possible to pass a string as price'",
")",
"basePrice",
"=",
"convertPrice",
"(",
"data",
")",
"if",
"basePrice",
"is",
"None",
":",
"return",
"{",
"}",
"prices",
"=",
"{",
"default",
":",
"basePrice",
"}",
"for",
"role",
"in",
"additional",
":",
"extraCharge",
"=",
"convertPrice",
"(",
"additional",
"[",
"role",
"]",
")",
"if",
"extraCharge",
"is",
"None",
":",
"continue",
"prices",
"[",
"role",
"]",
"=",
"basePrice",
"+",
"extraCharge",
"return",
"prices",
"elif",
"roles",
":",
"prices",
"=",
"{",
"}",
"priceRoles",
"=",
"iter",
"(",
"roles",
")",
"for",
"priceData",
"in",
"data",
":",
"price",
"=",
"convertPrice",
"(",
"priceData",
")",
"if",
"price",
"is",
"None",
":",
"continue",
"prices",
"[",
"next",
"(",
"priceRoles",
")",
"]",
"=",
"price",
"return",
"prices",
"else",
":",
"raise",
"TypeError",
"(",
"'This type is for prices not supported!'",
")"
]
| Create a dictionary with price information. Multiple ways are
supported.
:rtype: :obj:`dict`: keys are role as str, values are the prices as
cent count | [
"Create",
"a",
"dictionary",
"with",
"price",
"information",
".",
"Multiple",
"ways",
"are",
"supported",
"."
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L146-L180 | train |
mswart/pyopenmensa | feed.py | buildLegend | def buildLegend(legend=None, text=None, regex=None, key=lambda v: v):
''' Helper method to build or extend a legend from a text. The given regex
will be used to find legend inside the text.
:param dict legend: Initial legend data
:param str text: Text from which should legend information extracted.
None means do no extraction.
:param str regex: Regex to find legend part inside the given text. The
regex should have a named group `name` (key) and a named group
`value` (value).
:param callable key: function to map the key to a legend key
:rtype: dict'''
if legend is None:
legend = {}
if text is not None:
for match in re.finditer(regex or default_legend_regex,
text, re.UNICODE):
legend[key(match.group('name'))] = match.group('value').strip()
return legend | python | def buildLegend(legend=None, text=None, regex=None, key=lambda v: v):
''' Helper method to build or extend a legend from a text. The given regex
will be used to find legend inside the text.
:param dict legend: Initial legend data
:param str text: Text from which should legend information extracted.
None means do no extraction.
:param str regex: Regex to find legend part inside the given text. The
regex should have a named group `name` (key) and a named group
`value` (value).
:param callable key: function to map the key to a legend key
:rtype: dict'''
if legend is None:
legend = {}
if text is not None:
for match in re.finditer(regex or default_legend_regex,
text, re.UNICODE):
legend[key(match.group('name'))] = match.group('value').strip()
return legend | [
"def",
"buildLegend",
"(",
"legend",
"=",
"None",
",",
"text",
"=",
"None",
",",
"regex",
"=",
"None",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
")",
":",
"if",
"legend",
"is",
"None",
":",
"legend",
"=",
"{",
"}",
"if",
"text",
"is",
"not",
"None",
":",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"regex",
"or",
"default_legend_regex",
",",
"text",
",",
"re",
".",
"UNICODE",
")",
":",
"legend",
"[",
"key",
"(",
"match",
".",
"group",
"(",
"'name'",
")",
")",
"]",
"=",
"match",
".",
"group",
"(",
"'value'",
")",
".",
"strip",
"(",
")",
"return",
"legend"
]
| Helper method to build or extend a legend from a text. The given regex
will be used to find legend inside the text.
:param dict legend: Initial legend data
:param str text: Text from which should legend information extracted.
None means do no extraction.
:param str regex: Regex to find legend part inside the given text. The
regex should have a named group `name` (key) and a named group
`value` (value).
:param callable key: function to map the key to a legend key
:rtype: dict | [
"Helper",
"method",
"to",
"build",
"or",
"extend",
"a",
"legend",
"from",
"a",
"text",
".",
"The",
"given",
"regex",
"will",
"be",
"used",
"to",
"find",
"legend",
"inside",
"the",
"text",
"."
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L194-L212 | train |
mswart/pyopenmensa | feed.py | Feed.toTag | def toTag(self, output):
''' This methods returns all data of this feed as feed xml tag
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument
'''
feed = output.createElement('feed')
feed.setAttribute('name', self.name)
feed.setAttribute('priority', str(self.priority))
# schedule
schedule = output.createElement('schedule')
schedule.setAttribute('dayOfMonth', self.dayOfMonth)
schedule.setAttribute('dayOfWeek', self.dayOfWeek)
schedule.setAttribute('hour', self.hour)
schedule.setAttribute('minute', self.minute)
if self.retry:
schedule.setAttribute('retry', self.retry)
feed.appendChild(schedule)
# url
url = output.createElement('url')
url.appendChild(output.createTextNode(self.url))
feed.appendChild(url)
# source
if self.source:
source = output.createElement('source')
source.appendChild(output.createTextNode(self.source))
feed.appendChild(source)
return feed | python | def toTag(self, output):
''' This methods returns all data of this feed as feed xml tag
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument
'''
feed = output.createElement('feed')
feed.setAttribute('name', self.name)
feed.setAttribute('priority', str(self.priority))
# schedule
schedule = output.createElement('schedule')
schedule.setAttribute('dayOfMonth', self.dayOfMonth)
schedule.setAttribute('dayOfWeek', self.dayOfWeek)
schedule.setAttribute('hour', self.hour)
schedule.setAttribute('minute', self.minute)
if self.retry:
schedule.setAttribute('retry', self.retry)
feed.appendChild(schedule)
# url
url = output.createElement('url')
url.appendChild(output.createTextNode(self.url))
feed.appendChild(url)
# source
if self.source:
source = output.createElement('source')
source.appendChild(output.createTextNode(self.source))
feed.appendChild(source)
return feed | [
"def",
"toTag",
"(",
"self",
",",
"output",
")",
":",
"feed",
"=",
"output",
".",
"createElement",
"(",
"'feed'",
")",
"feed",
".",
"setAttribute",
"(",
"'name'",
",",
"self",
".",
"name",
")",
"feed",
".",
"setAttribute",
"(",
"'priority'",
",",
"str",
"(",
"self",
".",
"priority",
")",
")",
"# schedule",
"schedule",
"=",
"output",
".",
"createElement",
"(",
"'schedule'",
")",
"schedule",
".",
"setAttribute",
"(",
"'dayOfMonth'",
",",
"self",
".",
"dayOfMonth",
")",
"schedule",
".",
"setAttribute",
"(",
"'dayOfWeek'",
",",
"self",
".",
"dayOfWeek",
")",
"schedule",
".",
"setAttribute",
"(",
"'hour'",
",",
"self",
".",
"hour",
")",
"schedule",
".",
"setAttribute",
"(",
"'minute'",
",",
"self",
".",
"minute",
")",
"if",
"self",
".",
"retry",
":",
"schedule",
".",
"setAttribute",
"(",
"'retry'",
",",
"self",
".",
"retry",
")",
"feed",
".",
"appendChild",
"(",
"schedule",
")",
"# url",
"url",
"=",
"output",
".",
"createElement",
"(",
"'url'",
")",
"url",
".",
"appendChild",
"(",
"output",
".",
"createTextNode",
"(",
"self",
".",
"url",
")",
")",
"feed",
".",
"appendChild",
"(",
"url",
")",
"# source",
"if",
"self",
".",
"source",
":",
"source",
"=",
"output",
".",
"createElement",
"(",
"'source'",
")",
"source",
".",
"appendChild",
"(",
"output",
".",
"createTextNode",
"(",
"self",
".",
"source",
")",
")",
"feed",
".",
"appendChild",
"(",
"source",
")",
"return",
"feed"
]
| This methods returns all data of this feed as feed xml tag
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument | [
"This",
"methods",
"returns",
"all",
"data",
"of",
"this",
"feed",
"as",
"feed",
"xml",
"tag"
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L251-L281 | train |
mswart/pyopenmensa | feed.py | BaseBuilder.hasMealsFor | def hasMealsFor(self, date):
""" Checks whether for this day are information stored.
:param date: Date of the day
:type date: datetime.date
:rtype: bool"""
date = self._handleDate(date)
if date not in self._days or self._days[date] is False:
return False
return len(self._days[date]) > 0 | python | def hasMealsFor(self, date):
""" Checks whether for this day are information stored.
:param date: Date of the day
:type date: datetime.date
:rtype: bool"""
date = self._handleDate(date)
if date not in self._days or self._days[date] is False:
return False
return len(self._days[date]) > 0 | [
"def",
"hasMealsFor",
"(",
"self",
",",
"date",
")",
":",
"date",
"=",
"self",
".",
"_handleDate",
"(",
"date",
")",
"if",
"date",
"not",
"in",
"self",
".",
"_days",
"or",
"self",
".",
"_days",
"[",
"date",
"]",
"is",
"False",
":",
"return",
"False",
"return",
"len",
"(",
"self",
".",
"_days",
"[",
"date",
"]",
")",
">",
"0"
]
| Checks whether for this day are information stored.
:param date: Date of the day
:type date: datetime.date
:rtype: bool | [
"Checks",
"whether",
"for",
"this",
"day",
"are",
"information",
"stored",
"."
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L449-L458 | train |
mswart/pyopenmensa | feed.py | BaseBuilder.toXMLFeed | def toXMLFeed(self):
""" Convert this cateen information into string
which is a valid OpenMensa v2 xml feed
:rtype: str"""
feed = self.toXML()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>\n'
return xml_header + feed.toprettyxml(indent=' ') | python | def toXMLFeed(self):
""" Convert this cateen information into string
which is a valid OpenMensa v2 xml feed
:rtype: str"""
feed = self.toXML()
xml_header = '<?xml version="1.0" encoding="UTF-8"?>\n'
return xml_header + feed.toprettyxml(indent=' ') | [
"def",
"toXMLFeed",
"(",
"self",
")",
":",
"feed",
"=",
"self",
".",
"toXML",
"(",
")",
"xml_header",
"=",
"'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'",
"return",
"xml_header",
"+",
"feed",
".",
"toprettyxml",
"(",
"indent",
"=",
"' '",
")"
]
| Convert this cateen information into string
which is a valid OpenMensa v2 xml feed
:rtype: str | [
"Convert",
"this",
"cateen",
"information",
"into",
"string",
"which",
"is",
"a",
"valid",
"OpenMensa",
"v2",
"xml",
"feed"
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L486-L494 | train |
mswart/pyopenmensa | feed.py | BaseBuilder.toTag | def toTag(self, output):
''' This methods adds all data of this canteen as canteen xml tag
to the given xml Document.
:meth:`toXMLFeed` uses this method to create the XML Feed. So there is
normally no need to call it directly.
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument
'''
# create canteen tag, which represents our data
canteen = output.createElement('canteen')
if self._name is not None:
canteen.appendChild(self._buildStringTag('name', self._name, output))
if self._address is not None:
canteen.appendChild(self._buildStringTag('address', self._address, output))
if self._city is not None:
canteen.appendChild(self._buildStringTag('city', self._city, output))
if self._phone is not None:
canteen.appendChild(self._buildStringTag('phone', self._phone, output))
if self._email is not None:
canteen.appendChild(self._buildStringTag('email', self._email, output))
if self._location is not None:
canteen.appendChild(self._buildLocationTag(self._location, output))
if self._availability is not None:
canteen.appendChild(self._buildStringTag('availability', self._availability, output))
# iterate above all feeds:
for feed in sorted(self.feeds, key=lambda v: v.priority):
canteen.appendChild(feed.toTag(output))
# iterate above all days (sorted):
for date in sorted(self._days.keys()):
day = output.createElement('day')
day.setAttribute('date', str(date))
if self._days[date] is False: # canteen closed
closed = output.createElement('closed')
day.appendChild(closed)
canteen.appendChild(day)
continue
# canteen is open
for categoryname in self._days[date]:
day.appendChild(self._buildCategoryTag(
categoryname, self._days[date][categoryname], output))
canteen.appendChild(day)
return canteen | python | def toTag(self, output):
''' This methods adds all data of this canteen as canteen xml tag
to the given xml Document.
:meth:`toXMLFeed` uses this method to create the XML Feed. So there is
normally no need to call it directly.
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument
'''
# create canteen tag, which represents our data
canteen = output.createElement('canteen')
if self._name is not None:
canteen.appendChild(self._buildStringTag('name', self._name, output))
if self._address is not None:
canteen.appendChild(self._buildStringTag('address', self._address, output))
if self._city is not None:
canteen.appendChild(self._buildStringTag('city', self._city, output))
if self._phone is not None:
canteen.appendChild(self._buildStringTag('phone', self._phone, output))
if self._email is not None:
canteen.appendChild(self._buildStringTag('email', self._email, output))
if self._location is not None:
canteen.appendChild(self._buildLocationTag(self._location, output))
if self._availability is not None:
canteen.appendChild(self._buildStringTag('availability', self._availability, output))
# iterate above all feeds:
for feed in sorted(self.feeds, key=lambda v: v.priority):
canteen.appendChild(feed.toTag(output))
# iterate above all days (sorted):
for date in sorted(self._days.keys()):
day = output.createElement('day')
day.setAttribute('date', str(date))
if self._days[date] is False: # canteen closed
closed = output.createElement('closed')
day.appendChild(closed)
canteen.appendChild(day)
continue
# canteen is open
for categoryname in self._days[date]:
day.appendChild(self._buildCategoryTag(
categoryname, self._days[date][categoryname], output))
canteen.appendChild(day)
return canteen | [
"def",
"toTag",
"(",
"self",
",",
"output",
")",
":",
"# create canteen tag, which represents our data",
"canteen",
"=",
"output",
".",
"createElement",
"(",
"'canteen'",
")",
"if",
"self",
".",
"_name",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'name'",
",",
"self",
".",
"_name",
",",
"output",
")",
")",
"if",
"self",
".",
"_address",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'address'",
",",
"self",
".",
"_address",
",",
"output",
")",
")",
"if",
"self",
".",
"_city",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'city'",
",",
"self",
".",
"_city",
",",
"output",
")",
")",
"if",
"self",
".",
"_phone",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'phone'",
",",
"self",
".",
"_phone",
",",
"output",
")",
")",
"if",
"self",
".",
"_email",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'email'",
",",
"self",
".",
"_email",
",",
"output",
")",
")",
"if",
"self",
".",
"_location",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildLocationTag",
"(",
"self",
".",
"_location",
",",
"output",
")",
")",
"if",
"self",
".",
"_availability",
"is",
"not",
"None",
":",
"canteen",
".",
"appendChild",
"(",
"self",
".",
"_buildStringTag",
"(",
"'availability'",
",",
"self",
".",
"_availability",
",",
"output",
")",
")",
"# iterate above all feeds:",
"for",
"feed",
"in",
"sorted",
"(",
"self",
".",
"feeds",
",",
"key",
"=",
"lambda",
"v",
":",
"v",
".",
"priority",
")",
":",
"canteen",
".",
"appendChild",
"(",
"feed",
".",
"toTag",
"(",
"output",
")",
")",
"# iterate above all days (sorted):",
"for",
"date",
"in",
"sorted",
"(",
"self",
".",
"_days",
".",
"keys",
"(",
")",
")",
":",
"day",
"=",
"output",
".",
"createElement",
"(",
"'day'",
")",
"day",
".",
"setAttribute",
"(",
"'date'",
",",
"str",
"(",
"date",
")",
")",
"if",
"self",
".",
"_days",
"[",
"date",
"]",
"is",
"False",
":",
"# canteen closed",
"closed",
"=",
"output",
".",
"createElement",
"(",
"'closed'",
")",
"day",
".",
"appendChild",
"(",
"closed",
")",
"canteen",
".",
"appendChild",
"(",
"day",
")",
"continue",
"# canteen is open",
"for",
"categoryname",
"in",
"self",
".",
"_days",
"[",
"date",
"]",
":",
"day",
".",
"appendChild",
"(",
"self",
".",
"_buildCategoryTag",
"(",
"categoryname",
",",
"self",
".",
"_days",
"[",
"date",
"]",
"[",
"categoryname",
"]",
",",
"output",
")",
")",
"canteen",
".",
"appendChild",
"(",
"day",
")",
"return",
"canteen"
]
| This methods adds all data of this canteen as canteen xml tag
to the given xml Document.
:meth:`toXMLFeed` uses this method to create the XML Feed. So there is
normally no need to call it directly.
:param output: XML Document to which the data should be added
:type output: xml.dom.DOMImplementation.createDocument | [
"This",
"methods",
"adds",
"all",
"data",
"of",
"this",
"canteen",
"as",
"canteen",
"xml",
"tag",
"to",
"the",
"given",
"xml",
"Document",
"."
]
| c651da6ace33e2278349636daaa709d043dee6ff | https://github.com/mswart/pyopenmensa/blob/c651da6ace33e2278349636daaa709d043dee6ff/feed.py#L512-L555 | train |
shapiromatron/bmds | bmds/reporter.py | Reporter.add_session | def add_session(
self,
session,
input_dataset=True,
summary_table=True,
recommendation_details=True,
recommended_model=True,
all_models=False,
):
"""
Add an existing session to a Word report.
Parameters
----------
session : bmds.Session
BMDS session to be included in reporting
input_dataset : bool
Include input dataset data table
summary_table : bool
Include model summary table
recommendation_details : bool
Include model recommendation details table
recommended_model : bool
Include the recommended model output and dose-response plot, if
one exists
all_models : bool
Include all models output and dose-response plots
Returns
-------
None.
"""
self.doc.add_paragraph(session.dataset._get_dataset_name(), self.styles.header_1)
self.doc.add_paragraph("BMDS version: {}".format(session.version_pretty))
if input_dataset:
self._add_dataset(session)
self.doc.add_paragraph()
if summary_table:
self._add_session_summary_table(session)
self.doc.add_paragraph()
if recommendation_details:
self._add_recommendation_details_table(session)
self.doc.add_paragraph()
if recommended_model and all_models:
self._add_recommended_model(session)
self._add_all_models(session, except_recommended=True)
self.doc.add_paragraph()
elif recommended_model:
self._add_recommended_model(session)
self.doc.add_paragraph()
elif all_models:
self._add_all_models(session, except_recommended=False)
self.doc.add_paragraph()
self.doc.add_page_break() | python | def add_session(
self,
session,
input_dataset=True,
summary_table=True,
recommendation_details=True,
recommended_model=True,
all_models=False,
):
"""
Add an existing session to a Word report.
Parameters
----------
session : bmds.Session
BMDS session to be included in reporting
input_dataset : bool
Include input dataset data table
summary_table : bool
Include model summary table
recommendation_details : bool
Include model recommendation details table
recommended_model : bool
Include the recommended model output and dose-response plot, if
one exists
all_models : bool
Include all models output and dose-response plots
Returns
-------
None.
"""
self.doc.add_paragraph(session.dataset._get_dataset_name(), self.styles.header_1)
self.doc.add_paragraph("BMDS version: {}".format(session.version_pretty))
if input_dataset:
self._add_dataset(session)
self.doc.add_paragraph()
if summary_table:
self._add_session_summary_table(session)
self.doc.add_paragraph()
if recommendation_details:
self._add_recommendation_details_table(session)
self.doc.add_paragraph()
if recommended_model and all_models:
self._add_recommended_model(session)
self._add_all_models(session, except_recommended=True)
self.doc.add_paragraph()
elif recommended_model:
self._add_recommended_model(session)
self.doc.add_paragraph()
elif all_models:
self._add_all_models(session, except_recommended=False)
self.doc.add_paragraph()
self.doc.add_page_break() | [
"def",
"add_session",
"(",
"self",
",",
"session",
",",
"input_dataset",
"=",
"True",
",",
"summary_table",
"=",
"True",
",",
"recommendation_details",
"=",
"True",
",",
"recommended_model",
"=",
"True",
",",
"all_models",
"=",
"False",
",",
")",
":",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
"session",
".",
"dataset",
".",
"_get_dataset_name",
"(",
")",
",",
"self",
".",
"styles",
".",
"header_1",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
"\"BMDS version: {}\"",
".",
"format",
"(",
"session",
".",
"version_pretty",
")",
")",
"if",
"input_dataset",
":",
"self",
".",
"_add_dataset",
"(",
"session",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"if",
"summary_table",
":",
"self",
".",
"_add_session_summary_table",
"(",
"session",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"if",
"recommendation_details",
":",
"self",
".",
"_add_recommendation_details_table",
"(",
"session",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"if",
"recommended_model",
"and",
"all_models",
":",
"self",
".",
"_add_recommended_model",
"(",
"session",
")",
"self",
".",
"_add_all_models",
"(",
"session",
",",
"except_recommended",
"=",
"True",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"elif",
"recommended_model",
":",
"self",
".",
"_add_recommended_model",
"(",
"session",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"elif",
"all_models",
":",
"self",
".",
"_add_all_models",
"(",
"session",
",",
"except_recommended",
"=",
"False",
")",
"self",
".",
"doc",
".",
"add_paragraph",
"(",
")",
"self",
".",
"doc",
".",
"add_page_break",
"(",
")"
]
| Add an existing session to a Word report.
Parameters
----------
session : bmds.Session
BMDS session to be included in reporting
input_dataset : bool
Include input dataset data table
summary_table : bool
Include model summary table
recommendation_details : bool
Include model recommendation details table
recommended_model : bool
Include the recommended model output and dose-response plot, if
one exists
all_models : bool
Include all models output and dose-response plots
Returns
-------
None. | [
"Add",
"an",
"existing",
"session",
"to",
"a",
"Word",
"report",
"."
]
| 395c6ce84ad82876fd9fa4a89a3497fb61616de0 | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/reporter.py#L98-L157 | train |
shapiromatron/bmds | bmds/reporter.py | Reporter.save | def save(self, filename):
"""
Save document to a file.
Parameters
----------
filename : str
The output string filename
"""
self.doc.save(os.path.expanduser(filename)) | python | def save(self, filename):
"""
Save document to a file.
Parameters
----------
filename : str
The output string filename
"""
self.doc.save(os.path.expanduser(filename)) | [
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"doc",
".",
"save",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
")"
]
| Save document to a file.
Parameters
----------
filename : str
The output string filename | [
"Save",
"document",
"to",
"a",
"file",
"."
]
| 395c6ce84ad82876fd9fa4a89a3497fb61616de0 | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/reporter.py#L159-L169 | train |
shapiromatron/bmds | bmds/reporter.py | Reporter._get_session_for_table | def _get_session_for_table(self, base_session):
"""
Only present session for modeling when doses were dropped if it's succesful;
otherwise show the original modeling session.
"""
if base_session.recommended_model is None and base_session.doses_dropped > 0:
return base_session.doses_dropped_sessions[0]
return base_session | python | def _get_session_for_table(self, base_session):
"""
Only present session for modeling when doses were dropped if it's succesful;
otherwise show the original modeling session.
"""
if base_session.recommended_model is None and base_session.doses_dropped > 0:
return base_session.doses_dropped_sessions[0]
return base_session | [
"def",
"_get_session_for_table",
"(",
"self",
",",
"base_session",
")",
":",
"if",
"base_session",
".",
"recommended_model",
"is",
"None",
"and",
"base_session",
".",
"doses_dropped",
">",
"0",
":",
"return",
"base_session",
".",
"doses_dropped_sessions",
"[",
"0",
"]",
"return",
"base_session"
]
| Only present session for modeling when doses were dropped if it's succesful;
otherwise show the original modeling session. | [
"Only",
"present",
"session",
"for",
"modeling",
"when",
"doses",
"were",
"dropped",
"if",
"it",
"s",
"succesful",
";",
"otherwise",
"show",
"the",
"original",
"modeling",
"session",
"."
]
| 395c6ce84ad82876fd9fa4a89a3497fb61616de0 | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/reporter.py#L312-L319 | train |
jmbeach/KEP.py | src/keppy/device.py | Device.set_driver_simulated | def set_driver_simulated(self):
"""Sets the device driver type to simulated"""
self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
if self._is_sixteen_bit:
self._device_dict["servermain.DEVICE_MODEL"] = 0
else:
self._device_dict["servermain.DEVICE_MODEL"] = 1
self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1 | python | def set_driver_simulated(self):
"""Sets the device driver type to simulated"""
self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator"
if self._is_sixteen_bit:
self._device_dict["servermain.DEVICE_MODEL"] = 0
else:
self._device_dict["servermain.DEVICE_MODEL"] = 1
self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1 | [
"def",
"set_driver_simulated",
"(",
"self",
")",
":",
"self",
".",
"_device_dict",
"[",
"\"servermain.MULTIPLE_TYPES_DEVICE_DRIVER\"",
"]",
"=",
"\"Simulator\"",
"if",
"self",
".",
"_is_sixteen_bit",
":",
"self",
".",
"_device_dict",
"[",
"\"servermain.DEVICE_MODEL\"",
"]",
"=",
"0",
"else",
":",
"self",
".",
"_device_dict",
"[",
"\"servermain.DEVICE_MODEL\"",
"]",
"=",
"1",
"self",
".",
"_device_dict",
"[",
"\"servermain.DEVICE_ID_OCTAL\"",
"]",
"=",
"1"
]
| Sets the device driver type to simulated | [
"Sets",
"the",
"device",
"driver",
"type",
"to",
"simulated"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L15-L22 | train |
jmbeach/KEP.py | src/keppy/device.py | Device.parse_tag_groups | def parse_tag_groups(self):
"""Gets an array of TagGroup objects in the Kepware device"""
tag_groups = []
if 'tag_groups' not in self._device_dict:
return tag_groups
to_remove = []
for tag_group in self._device_dict['tag_groups']:
if tag_group['common.ALLTYPES_NAME'] in self._ignore_list:
to_remove.append(tag_group)
continue
tag_groups.append(TagGroup(tag_group))
for removable in to_remove:
self._device_dict['tag_groups'].remove(removable)
return tag_groups | python | def parse_tag_groups(self):
"""Gets an array of TagGroup objects in the Kepware device"""
tag_groups = []
if 'tag_groups' not in self._device_dict:
return tag_groups
to_remove = []
for tag_group in self._device_dict['tag_groups']:
if tag_group['common.ALLTYPES_NAME'] in self._ignore_list:
to_remove.append(tag_group)
continue
tag_groups.append(TagGroup(tag_group))
for removable in to_remove:
self._device_dict['tag_groups'].remove(removable)
return tag_groups | [
"def",
"parse_tag_groups",
"(",
"self",
")",
":",
"tag_groups",
"=",
"[",
"]",
"if",
"'tag_groups'",
"not",
"in",
"self",
".",
"_device_dict",
":",
"return",
"tag_groups",
"to_remove",
"=",
"[",
"]",
"for",
"tag_group",
"in",
"self",
".",
"_device_dict",
"[",
"'tag_groups'",
"]",
":",
"if",
"tag_group",
"[",
"'common.ALLTYPES_NAME'",
"]",
"in",
"self",
".",
"_ignore_list",
":",
"to_remove",
".",
"append",
"(",
"tag_group",
")",
"continue",
"tag_groups",
".",
"append",
"(",
"TagGroup",
"(",
"tag_group",
")",
")",
"for",
"removable",
"in",
"to_remove",
":",
"self",
".",
"_device_dict",
"[",
"'tag_groups'",
"]",
".",
"remove",
"(",
"removable",
")",
"return",
"tag_groups"
]
| Gets an array of TagGroup objects in the Kepware device | [
"Gets",
"an",
"array",
"of",
"TagGroup",
"objects",
"in",
"the",
"Kepware",
"device"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L24-L38 | train |
jmbeach/KEP.py | src/keppy/device.py | Device.update | def update(self):
"""Updates the dictionary of the device"""
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict() | python | def update(self):
"""Updates the dictionary of the device"""
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict() | [
"def",
"update",
"(",
"self",
")",
":",
"if",
"\"tag_groups\"",
"not",
"in",
"self",
".",
"_device_dict",
":",
"return",
"for",
"group",
"in",
"self",
".",
"tag_groups",
":",
"group",
".",
"update",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_device_dict",
"[",
"\"tag_groups\"",
"]",
")",
")",
":",
"tag_group_dict",
"=",
"self",
".",
"_device_dict",
"[",
"\"tag_groups\"",
"]",
"[",
"i",
"]",
"for",
"group",
"in",
"self",
".",
"tag_groups",
":",
"if",
"group",
".",
"name",
"==",
"tag_group_dict",
"[",
"\"common.ALLTYPES_NAME\"",
"]",
":",
"self",
".",
"_device_dict",
"[",
"\"tag_groups\"",
"]",
"[",
"i",
"]",
"=",
"group",
".",
"as_dict",
"(",
")"
]
| Updates the dictionary of the device | [
"Updates",
"the",
"dictionary",
"of",
"the",
"device"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/device.py#L71-L81 | train |
drericstrong/pyedna | pyedna/serv.py | AddAnalogShortIdMsecRecord | def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
oor_low=False, oor_high=False, unreliable=False, manual=False):
"""
This function will add an analog value to the specified eDNA service and
tag, with many optional status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:param low_warn: TRUE if the point is in a low warning state
:param high_warn: TRUE if the point is in a high warning state
:param low_alarm: TRUE if the point is in a low alarm state
:param high_alarm: TRUE if the point is in a high alarm state
:param oor_low: TRUE if the point is out-of-range low
:param oor_high: TRUE if the point is out-of-range high
:param unreliable: TRUE if the point is unreliable
:param manual: TRUE if the point is manually set
:return: 0, if the data push is successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
bLowWarning = c_int(int(low_warn))
bHighWarning = c_int(int(high_warn))
bLowAlarm = c_int(int(low_alarm))
bHighAlarm = c_int(int(high_alarm))
bOutOfRangeLow = c_int(int(oor_low))
bOutOfRangeHigh = c_int(int(oor_high))
bUnReliable = c_int(int(unreliable))
bManual = c_int(int(manual))
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdMsecRecord(szService, szPointId,
tTime, dValue, bLowWarning, bHighWarning, bLowAlarm,
bHighAlarm, bOutOfRangeLow, bOutOfRangeHigh, bUnReliable,
bManual, usMsec)
return nRet | python | def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
oor_low=False, oor_high=False, unreliable=False, manual=False):
"""
This function will add an analog value to the specified eDNA service and
tag, with many optional status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:param low_warn: TRUE if the point is in a low warning state
:param high_warn: TRUE if the point is in a high warning state
:param low_alarm: TRUE if the point is in a low alarm state
:param high_alarm: TRUE if the point is in a high alarm state
:param oor_low: TRUE if the point is out-of-range low
:param oor_high: TRUE if the point is out-of-range high
:param unreliable: TRUE if the point is unreliable
:param manual: TRUE if the point is manually set
:return: 0, if the data push is successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
bLowWarning = c_int(int(low_warn))
bHighWarning = c_int(int(high_warn))
bLowAlarm = c_int(int(low_alarm))
bHighAlarm = c_int(int(high_alarm))
bOutOfRangeLow = c_int(int(oor_low))
bOutOfRangeHigh = c_int(int(oor_high))
bUnReliable = c_int(int(unreliable))
bManual = c_int(int(manual))
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdMsecRecord(szService, szPointId,
tTime, dValue, bLowWarning, bHighWarning, bLowAlarm,
bHighAlarm, bOutOfRangeLow, bOutOfRangeHigh, bUnReliable,
bManual, usMsec)
return nRet | [
"def",
"AddAnalogShortIdMsecRecord",
"(",
"site_service",
",",
"tag",
",",
"time_value",
",",
"msec",
",",
"value",
",",
"low_warn",
"=",
"False",
",",
"high_warn",
"=",
"False",
",",
"low_alarm",
"=",
"False",
",",
"high_alarm",
"=",
"False",
",",
"oor_low",
"=",
"False",
",",
"oor_high",
"=",
"False",
",",
"unreliable",
"=",
"False",
",",
"manual",
"=",
"False",
")",
":",
"# Define all required variables in the correct ctypes format",
"szService",
"=",
"c_char_p",
"(",
"site_service",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"szPointId",
"=",
"c_char_p",
"(",
"tag",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"tTime",
"=",
"c_long",
"(",
"int",
"(",
"time_value",
")",
")",
"dValue",
"=",
"c_double",
"(",
"value",
")",
"bLowWarning",
"=",
"c_int",
"(",
"int",
"(",
"low_warn",
")",
")",
"bHighWarning",
"=",
"c_int",
"(",
"int",
"(",
"high_warn",
")",
")",
"bLowAlarm",
"=",
"c_int",
"(",
"int",
"(",
"low_alarm",
")",
")",
"bHighAlarm",
"=",
"c_int",
"(",
"int",
"(",
"high_alarm",
")",
")",
"bOutOfRangeLow",
"=",
"c_int",
"(",
"int",
"(",
"oor_low",
")",
")",
"bOutOfRangeHigh",
"=",
"c_int",
"(",
"int",
"(",
"oor_high",
")",
")",
"bUnReliable",
"=",
"c_int",
"(",
"int",
"(",
"unreliable",
")",
")",
"bManual",
"=",
"c_int",
"(",
"int",
"(",
"manual",
")",
")",
"usMsec",
"=",
"c_ushort",
"(",
"msec",
")",
"# Try to push the data. Function will return 0 if successful.",
"nRet",
"=",
"dnaserv_dll",
".",
"DnaAddAnalogShortIdMsecRecord",
"(",
"szService",
",",
"szPointId",
",",
"tTime",
",",
"dValue",
",",
"bLowWarning",
",",
"bHighWarning",
",",
"bLowAlarm",
",",
"bHighAlarm",
",",
"bOutOfRangeLow",
",",
"bOutOfRangeHigh",
",",
"bUnReliable",
",",
"bManual",
",",
"usMsec",
")",
"return",
"nRet"
]
| This function will add an analog value to the specified eDNA service and
tag, with many optional status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:param low_warn: TRUE if the point is in a low warning state
:param high_warn: TRUE if the point is in a high warning state
:param low_alarm: TRUE if the point is in a low alarm state
:param high_alarm: TRUE if the point is in a high alarm state
:param oor_low: TRUE if the point is out-of-range low
:param oor_high: TRUE if the point is out-of-range high
:param unreliable: TRUE if the point is unreliable
:param manual: TRUE if the point is manually set
:return: 0, if the data push is successful | [
"This",
"function",
"will",
"add",
"an",
"analog",
"value",
"to",
"the",
"specified",
"eDNA",
"service",
"and",
"tag",
"with",
"many",
"optional",
"status",
"definitions",
"."
]
| b8f8f52def4f26bb4f3a993ce3400769518385f6 | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L171-L214 | train |
drericstrong/pyedna | pyedna/serv.py | FlushShortIdRecords | def FlushShortIdRecords(site_service):
"""
Flush all the queued records.
:param site_service: The site.service where data was pushed
:return: message whether function was successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szMessage = create_string_buffer(b" ")
nMessage = c_ushort(20)
# Try to flush the data. Function will return message regarding success.
nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
nMessage)
return str(nRet) + szMessage.value.decode('utf-8') | python | def FlushShortIdRecords(site_service):
"""
Flush all the queued records.
:param site_service: The site.service where data was pushed
:return: message whether function was successful
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szMessage = create_string_buffer(b" ")
nMessage = c_ushort(20)
# Try to flush the data. Function will return message regarding success.
nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
nMessage)
return str(nRet) + szMessage.value.decode('utf-8') | [
"def",
"FlushShortIdRecords",
"(",
"site_service",
")",
":",
"# Define all required variables in the correct ctypes format",
"szService",
"=",
"c_char_p",
"(",
"site_service",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"szMessage",
"=",
"create_string_buffer",
"(",
"b\" \"",
")",
"nMessage",
"=",
"c_ushort",
"(",
"20",
")",
"# Try to flush the data. Function will return message regarding success.",
"nRet",
"=",
"dnaserv_dll",
".",
"DnaFlushShortIdRecords",
"(",
"szService",
",",
"byref",
"(",
"szMessage",
")",
",",
"nMessage",
")",
"return",
"str",
"(",
"nRet",
")",
"+",
"szMessage",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")"
]
| Flush all the queued records.
:param site_service: The site.service where data was pushed
:return: message whether function was successful | [
"Flush",
"all",
"the",
"queued",
"records",
"."
]
| b8f8f52def4f26bb4f3a993ce3400769518385f6 | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L283-L298 | train |
tethysplatform/condorpy | condorpy/htcondor_object_base.py | HTCondorObjectBase.set_scheduler | def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
"""
Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler.
"""
self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
self._remote_id = uuid.uuid4().hex | python | def set_scheduler(self, host, username='root', password=None, private_key=None, private_key_pass=None):
"""
Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler.
"""
self._remote = RemoteClient(host, username, password, private_key, private_key_pass)
self._remote_id = uuid.uuid4().hex | [
"def",
"set_scheduler",
"(",
"self",
",",
"host",
",",
"username",
"=",
"'root'",
",",
"password",
"=",
"None",
",",
"private_key",
"=",
"None",
",",
"private_key_pass",
"=",
"None",
")",
":",
"self",
".",
"_remote",
"=",
"RemoteClient",
"(",
"host",
",",
"username",
",",
"password",
",",
"private_key",
",",
"private_key_pass",
")",
"self",
".",
"_remote_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex"
]
| Defines the remote scheduler
Args:
host (str): the hostname or ip address of the remote scheduler
username (str, optional): the username used to connect to the remote scheduler. Default is 'root'
password (str, optional): the password for username on the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key (str, optional): the path to the private ssh key used to connect to the remote scheduler. Either the password or the private_key must be defined. Default is None.
private_key_pass (str, optional): the passphrase for the private_key. Default is None.
Returns:
An RemoteClient representing the remote scheduler. | [
"Defines",
"the",
"remote",
"scheduler"
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/htcondor_object_base.py#L68-L83 | train |
tethysplatform/condorpy | condorpy/htcondor_object_base.py | HTCondorObjectBase.set_cwd | def set_cwd(fn):
"""
Decorator to set the specified working directory to execute the function, and then restore the previous cwd.
"""
def wrapped(self, *args, **kwargs):
log.info('Calling function: %s with args=%s', fn, args if args else [])
cwd = os.getcwd()
log.info('Saved cwd: %s', cwd)
os.chdir(self._cwd)
log.info('Changing working directory to: %s', self._cwd)
try:
return fn(self, *args, **kwargs)
finally:
os.chdir(cwd)
log.info('Restored working directory to: %s', cwd)
return wrapped | python | def set_cwd(fn):
"""
Decorator to set the specified working directory to execute the function, and then restore the previous cwd.
"""
def wrapped(self, *args, **kwargs):
log.info('Calling function: %s with args=%s', fn, args if args else [])
cwd = os.getcwd()
log.info('Saved cwd: %s', cwd)
os.chdir(self._cwd)
log.info('Changing working directory to: %s', self._cwd)
try:
return fn(self, *args, **kwargs)
finally:
os.chdir(cwd)
log.info('Restored working directory to: %s', cwd)
return wrapped | [
"def",
"set_cwd",
"(",
"fn",
")",
":",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"info",
"(",
"'Calling function: %s with args=%s'",
",",
"fn",
",",
"args",
"if",
"args",
"else",
"[",
"]",
")",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"log",
".",
"info",
"(",
"'Saved cwd: %s'",
",",
"cwd",
")",
"os",
".",
"chdir",
"(",
"self",
".",
"_cwd",
")",
"log",
".",
"info",
"(",
"'Changing working directory to: %s'",
",",
"self",
".",
"_cwd",
")",
"try",
":",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"cwd",
")",
"log",
".",
"info",
"(",
"'Restored working directory to: %s'",
",",
"cwd",
")",
"return",
"wrapped"
]
| Decorator to set the specified working directory to execute the function, and then restore the previous cwd. | [
"Decorator",
"to",
"set",
"the",
"specified",
"working",
"directory",
"to",
"execute",
"the",
"function",
"and",
"then",
"restore",
"the",
"previous",
"cwd",
"."
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/htcondor_object_base.py#L108-L124 | train |
tethysplatform/condorpy | condorpy/htcondor_object_base.py | HTCondorObjectBase.remove | def remove(self, options=[], sub_job_num=None):
"""Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None.
"""
args = ['condor_rm']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
args.append(job_id)
out, err = self._execute(args)
return out,err | python | def remove(self, options=[], sub_job_num=None):
"""Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None.
"""
args = ['condor_rm']
args.extend(options)
job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
args.append(job_id)
out, err = self._execute(args)
return out,err | [
"def",
"remove",
"(",
"self",
",",
"options",
"=",
"[",
"]",
",",
"sub_job_num",
"=",
"None",
")",
":",
"args",
"=",
"[",
"'condor_rm'",
"]",
"args",
".",
"extend",
"(",
"options",
")",
"job_id",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"cluster_id",
",",
"sub_job_num",
")",
"if",
"sub_job_num",
"else",
"str",
"(",
"self",
".",
"cluster_id",
")",
"args",
".",
"append",
"(",
"job_id",
")",
"out",
",",
"err",
"=",
"self",
".",
"_execute",
"(",
"args",
")",
"return",
"out",
",",
"err"
]
| Removes a job from the job queue, or from being executed.
Args:
options (list of str, optional): A list of command line options for the condor_rm command. For
details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html.
Defaults to an empty list.
job_num (int, optional): The number of sub_job to remove rather than the whole cluster. Defaults to None. | [
"Removes",
"a",
"job",
"from",
"the",
"job",
"queue",
"or",
"from",
"being",
"executed",
"."
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/htcondor_object_base.py#L144-L159 | train |
tethysplatform/condorpy | condorpy/htcondor_object_base.py | HTCondorObjectBase.close_remote | def close_remote(self):
"""Cleans up and closes connection to remote server if defined.
"""
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote | python | def close_remote(self):
"""Cleans up and closes connection to remote server if defined.
"""
if self._remote:
try:
# first see if remote dir is still there
self._remote.execute('ls %s' % (self._remote_id,))
if self.status != 'Completed':
self.remove()
self._remote.execute('rm -rf %s' % (self._remote_id,))
except RuntimeError:
pass
self._remote.close()
del self._remote | [
"def",
"close_remote",
"(",
"self",
")",
":",
"if",
"self",
".",
"_remote",
":",
"try",
":",
"# first see if remote dir is still there",
"self",
".",
"_remote",
".",
"execute",
"(",
"'ls %s'",
"%",
"(",
"self",
".",
"_remote_id",
",",
")",
")",
"if",
"self",
".",
"status",
"!=",
"'Completed'",
":",
"self",
".",
"remove",
"(",
")",
"self",
".",
"_remote",
".",
"execute",
"(",
"'rm -rf %s'",
"%",
"(",
"self",
".",
"_remote_id",
",",
")",
")",
"except",
"RuntimeError",
":",
"pass",
"self",
".",
"_remote",
".",
"close",
"(",
")",
"del",
"self",
".",
"_remote"
]
| Cleans up and closes connection to remote server if defined. | [
"Cleans",
"up",
"and",
"closes",
"connection",
"to",
"remote",
"server",
"if",
"defined",
"."
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/htcondor_object_base.py#L167-L181 | train |
VIVelev/PyDojoML | dojo/tree/utils/impurity_measurements.py | gini_impurity | def gini_impurity(s):
"""Calculate the Gini Impurity for a list of samples.
See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
"""
return 1 - sum(prop(s[i], s)**2 for i in range(len(s))) | python | def gini_impurity(s):
"""Calculate the Gini Impurity for a list of samples.
See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
"""
return 1 - sum(prop(s[i], s)**2 for i in range(len(s))) | [
"def",
"gini_impurity",
"(",
"s",
")",
":",
"return",
"1",
"-",
"sum",
"(",
"prop",
"(",
"s",
"[",
"i",
"]",
",",
"s",
")",
"**",
"2",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"s",
")",
")",
")"
]
| Calculate the Gini Impurity for a list of samples.
See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity | [
"Calculate",
"the",
"Gini",
"Impurity",
"for",
"a",
"list",
"of",
"samples",
"."
]
| 773fdce6866aa6decd306a5a85f94129fed816eb | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/tree/utils/impurity_measurements.py#L18-L24 | train |
VIVelev/PyDojoML | dojo/tree/utils/impurity_measurements.py | entropy | def entropy(s):
"""Calculate the Entropy Impurity for a list of samples.
"""
return -sum(
p*np.log(p) for i in range(len(s)) for p in [prop(s[i], s)]
) | python | def entropy(s):
"""Calculate the Entropy Impurity for a list of samples.
"""
return -sum(
p*np.log(p) for i in range(len(s)) for p in [prop(s[i], s)]
) | [
"def",
"entropy",
"(",
"s",
")",
":",
"return",
"-",
"sum",
"(",
"p",
"*",
"np",
".",
"log",
"(",
"p",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"s",
")",
")",
"for",
"p",
"in",
"[",
"prop",
"(",
"s",
"[",
"i",
"]",
",",
"s",
")",
"]",
")"
]
| Calculate the Entropy Impurity for a list of samples. | [
"Calculate",
"the",
"Entropy",
"Impurity",
"for",
"a",
"list",
"of",
"samples",
"."
]
| 773fdce6866aa6decd306a5a85f94129fed816eb | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/tree/utils/impurity_measurements.py#L28-L33 | train |
VIVelev/PyDojoML | dojo/tree/utils/impurity_measurements.py | info_gain | def info_gain(current_impurity, true_branch, false_branch, criterion):
"""Information Gain.
The uncertainty of the starting node, minus the weighted impurity of
two child nodes.
"""
measure_impurity = gini_impurity if criterion == "gini" else entropy
p = float(len(true_branch)) / (len(true_branch) + len(false_branch))
return current_impurity - p * measure_impurity(true_branch) - (1 - p) * measure_impurity(false_branch) | python | def info_gain(current_impurity, true_branch, false_branch, criterion):
"""Information Gain.
The uncertainty of the starting node, minus the weighted impurity of
two child nodes.
"""
measure_impurity = gini_impurity if criterion == "gini" else entropy
p = float(len(true_branch)) / (len(true_branch) + len(false_branch))
return current_impurity - p * measure_impurity(true_branch) - (1 - p) * measure_impurity(false_branch) | [
"def",
"info_gain",
"(",
"current_impurity",
",",
"true_branch",
",",
"false_branch",
",",
"criterion",
")",
":",
"measure_impurity",
"=",
"gini_impurity",
"if",
"criterion",
"==",
"\"gini\"",
"else",
"entropy",
"p",
"=",
"float",
"(",
"len",
"(",
"true_branch",
")",
")",
"/",
"(",
"len",
"(",
"true_branch",
")",
"+",
"len",
"(",
"false_branch",
")",
")",
"return",
"current_impurity",
"-",
"p",
"*",
"measure_impurity",
"(",
"true_branch",
")",
"-",
"(",
"1",
"-",
"p",
")",
"*",
"measure_impurity",
"(",
"false_branch",
")"
]
| Information Gain.
The uncertainty of the starting node, minus the weighted impurity of
two child nodes. | [
"Information",
"Gain",
".",
"The",
"uncertainty",
"of",
"the",
"starting",
"node",
"minus",
"the",
"weighted",
"impurity",
"of",
"two",
"child",
"nodes",
"."
]
| 773fdce6866aa6decd306a5a85f94129fed816eb | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/tree/utils/impurity_measurements.py#L37-L47 | train |
tethysplatform/condorpy | condorpy/workflow.py | Workflow._update_statuses | def _update_statuses(self, sub_job_num=None):
"""
Update statuses of jobs nodes in workflow.
"""
# initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for node in self.node_set:
job = node.job
try:
job_status = job.status
status_dict[job_status] += 1
except (KeyError, HTCondorError):
status_dict['Unexpanded'] += 1
return status_dict | python | def _update_statuses(self, sub_job_num=None):
"""
Update statuses of jobs nodes in workflow.
"""
# initialize status dictionary
status_dict = dict()
for val in CONDOR_JOB_STATUSES.values():
status_dict[val] = 0
for node in self.node_set:
job = node.job
try:
job_status = job.status
status_dict[job_status] += 1
except (KeyError, HTCondorError):
status_dict['Unexpanded'] += 1
return status_dict | [
"def",
"_update_statuses",
"(",
"self",
",",
"sub_job_num",
"=",
"None",
")",
":",
"# initialize status dictionary",
"status_dict",
"=",
"dict",
"(",
")",
"for",
"val",
"in",
"CONDOR_JOB_STATUSES",
".",
"values",
"(",
")",
":",
"status_dict",
"[",
"val",
"]",
"=",
"0",
"for",
"node",
"in",
"self",
".",
"node_set",
":",
"job",
"=",
"node",
".",
"job",
"try",
":",
"job_status",
"=",
"job",
".",
"status",
"status_dict",
"[",
"job_status",
"]",
"+=",
"1",
"except",
"(",
"KeyError",
",",
"HTCondorError",
")",
":",
"status_dict",
"[",
"'Unexpanded'",
"]",
"+=",
"1",
"return",
"status_dict"
]
| Update statuses of jobs nodes in workflow. | [
"Update",
"statuses",
"of",
"jobs",
"nodes",
"in",
"workflow",
"."
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L180-L198 | train |
tethysplatform/condorpy | condorpy/workflow.py | Workflow.update_node_ids | def update_node_ids(self, sub_job_num=None):
"""
Associate Jobs with respective cluster ids.
"""
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e)) | python | def update_node_ids(self, sub_job_num=None):
"""
Associate Jobs with respective cluster ids.
"""
# Build condor_q and condor_history commands
dag_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
job_delimiter = '+++'
attr_delimiter = ';;;'
format = [
'-format', '"%d' + attr_delimiter + '"', 'ClusterId',
'-format', '"%v' + attr_delimiter + '"', 'Cmd',
'-format', '"%v' + attr_delimiter + '"', 'Args', # Old way
'-format', '"%v' + job_delimiter + '"', 'Arguments' # New way
]
# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow
cmd = 'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'.format(dag_id, ' '.join(format))
# 'condor_q -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS && condor_history -constraint DAGManJobID==1018 -format "%d\n" ClusterId -format "%s\n" CMD -format "%s\n" ARGS'
_args = [cmd]
out, err = self._execute(_args, shell=True, run_in_job_dir=False)
if err:
log.error('Error while associating ids for jobs dag %s: %s', dag_id, err)
raise HTCondorError(err)
if not out:
log.warning('Error while associating ids for jobs in dag %s: No jobs found for dag.', dag_id)
try:
# Split into one line per job
jobs_out = out.split(job_delimiter)
# Match node to cluster id using combination of cmd and arguments
for node in self._node_set:
job = node.job
# Skip jobs that already have cluster id defined
if job.cluster_id != job.NULL_CLUSTER_ID:
continue
for job_out in jobs_out:
if not job_out or attr_delimiter not in job_out:
continue
# Split line by attributes
cluster_id, cmd, _args, _arguments = job_out.split(attr_delimiter)
# If new form of arguments is used, _args will be 'undefined' and _arguments will not
if _args == 'undefined' and _arguments != 'undefined':
args = _arguments.strip()
# If both are undefined, then there are no arguments
elif _args == 'undefined' and _arguments == 'undefined':
args = None
# Otherwise, using old form and _arguments will be 'undefined' and _args will not.
else:
args = _args.strip()
job_cmd = job.executable
job_args = job.arguments.strip() if job.arguments else None
if job_cmd in cmd and job_args == args:
log.info('Linking cluster_id %s to job with command and arguments: %s %s', cluster_id,
job_cmd, job_args)
job._cluster_id = int(cluster_id)
break
except ValueError as e:
log.warning(str(e)) | [
"def",
"update_node_ids",
"(",
"self",
",",
"sub_job_num",
"=",
"None",
")",
":",
"# Build condor_q and condor_history commands",
"dag_id",
"=",
"'%s.%s'",
"%",
"(",
"self",
".",
"cluster_id",
",",
"sub_job_num",
")",
"if",
"sub_job_num",
"else",
"str",
"(",
"self",
".",
"cluster_id",
")",
"job_delimiter",
"=",
"'+++'",
"attr_delimiter",
"=",
"';;;'",
"format",
"=",
"[",
"'-format'",
",",
"'\"%d'",
"+",
"attr_delimiter",
"+",
"'\"'",
",",
"'ClusterId'",
",",
"'-format'",
",",
"'\"%v'",
"+",
"attr_delimiter",
"+",
"'\"'",
",",
"'Cmd'",
",",
"'-format'",
",",
"'\"%v'",
"+",
"attr_delimiter",
"+",
"'\"'",
",",
"'Args'",
",",
"# Old way",
"'-format'",
",",
"'\"%v'",
"+",
"job_delimiter",
"+",
"'\"'",
",",
"'Arguments'",
"# New way",
"]",
"# Get ID, Executable, and Arguments for each job that is either started to be processed or finished in the workflow",
"cmd",
"=",
"'condor_q -constraint DAGManJobID=={0} {1} && condor_history -constraint DAGManJobID=={0} {1}'",
".",
"format",
"(",
"dag_id",
",",
"' '",
".",
"join",
"(",
"format",
")",
")",
"# 'condor_q -constraint DAGManJobID==1018 -format \"%d\\n\" ClusterId -format \"%s\\n\" CMD -format \"%s\\n\" ARGS && condor_history -constraint DAGManJobID==1018 -format \"%d\\n\" ClusterId -format \"%s\\n\" CMD -format \"%s\\n\" ARGS'",
"_args",
"=",
"[",
"cmd",
"]",
"out",
",",
"err",
"=",
"self",
".",
"_execute",
"(",
"_args",
",",
"shell",
"=",
"True",
",",
"run_in_job_dir",
"=",
"False",
")",
"if",
"err",
":",
"log",
".",
"error",
"(",
"'Error while associating ids for jobs dag %s: %s'",
",",
"dag_id",
",",
"err",
")",
"raise",
"HTCondorError",
"(",
"err",
")",
"if",
"not",
"out",
":",
"log",
".",
"warning",
"(",
"'Error while associating ids for jobs in dag %s: No jobs found for dag.'",
",",
"dag_id",
")",
"try",
":",
"# Split into one line per job",
"jobs_out",
"=",
"out",
".",
"split",
"(",
"job_delimiter",
")",
"# Match node to cluster id using combination of cmd and arguments",
"for",
"node",
"in",
"self",
".",
"_node_set",
":",
"job",
"=",
"node",
".",
"job",
"# Skip jobs that already have cluster id defined",
"if",
"job",
".",
"cluster_id",
"!=",
"job",
".",
"NULL_CLUSTER_ID",
":",
"continue",
"for",
"job_out",
"in",
"jobs_out",
":",
"if",
"not",
"job_out",
"or",
"attr_delimiter",
"not",
"in",
"job_out",
":",
"continue",
"# Split line by attributes",
"cluster_id",
",",
"cmd",
",",
"_args",
",",
"_arguments",
"=",
"job_out",
".",
"split",
"(",
"attr_delimiter",
")",
"# If new form of arguments is used, _args will be 'undefined' and _arguments will not",
"if",
"_args",
"==",
"'undefined'",
"and",
"_arguments",
"!=",
"'undefined'",
":",
"args",
"=",
"_arguments",
".",
"strip",
"(",
")",
"# If both are undefined, then there are no arguments",
"elif",
"_args",
"==",
"'undefined'",
"and",
"_arguments",
"==",
"'undefined'",
":",
"args",
"=",
"None",
"# Otherwise, using old form and _arguments will be 'undefined' and _args will not.",
"else",
":",
"args",
"=",
"_args",
".",
"strip",
"(",
")",
"job_cmd",
"=",
"job",
".",
"executable",
"job_args",
"=",
"job",
".",
"arguments",
".",
"strip",
"(",
")",
"if",
"job",
".",
"arguments",
"else",
"None",
"if",
"job_cmd",
"in",
"cmd",
"and",
"job_args",
"==",
"args",
":",
"log",
".",
"info",
"(",
"'Linking cluster_id %s to job with command and arguments: %s %s'",
",",
"cluster_id",
",",
"job_cmd",
",",
"job_args",
")",
"job",
".",
"_cluster_id",
"=",
"int",
"(",
"cluster_id",
")",
"break",
"except",
"ValueError",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"str",
"(",
"e",
")",
")"
]
| Associate Jobs with respective cluster ids. | [
"Associate",
"Jobs",
"with",
"respective",
"cluster",
"ids",
"."
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L200-L269 | train |
tethysplatform/condorpy | condorpy/workflow.py | Workflow.submit | def submit(self, options=[]):
"""
ensures that all relatives of nodes in node_set are also added to the set before submitting
"""
self.complete_node_set()
self._write_job_file()
args = ['condor_submit_dag']
args.extend(options)
args.append(self.dag_file)
log.info('Submitting workflow %s with options: %s', self.name, args)
return super(Workflow, self).submit(args) | python | def submit(self, options=[]):
"""
ensures that all relatives of nodes in node_set are also added to the set before submitting
"""
self.complete_node_set()
self._write_job_file()
args = ['condor_submit_dag']
args.extend(options)
args.append(self.dag_file)
log.info('Submitting workflow %s with options: %s', self.name, args)
return super(Workflow, self).submit(args) | [
"def",
"submit",
"(",
"self",
",",
"options",
"=",
"[",
"]",
")",
":",
"self",
".",
"complete_node_set",
"(",
")",
"self",
".",
"_write_job_file",
"(",
")",
"args",
"=",
"[",
"'condor_submit_dag'",
"]",
"args",
".",
"extend",
"(",
"options",
")",
"args",
".",
"append",
"(",
"self",
".",
"dag_file",
")",
"log",
".",
"info",
"(",
"'Submitting workflow %s with options: %s'",
",",
"self",
".",
"name",
",",
"args",
")",
"return",
"super",
"(",
"Workflow",
",",
"self",
")",
".",
"submit",
"(",
"args",
")"
]
| ensures that all relatives of nodes in node_set are also added to the set before submitting | [
"ensures",
"that",
"all",
"relatives",
"of",
"nodes",
"in",
"node_set",
"are",
"also",
"added",
"to",
"the",
"set",
"before",
"submitting"
]
| a5aaaef0d73198f7d9756dda7abe98b4e209f1f4 | https://github.com/tethysplatform/condorpy/blob/a5aaaef0d73198f7d9756dda7abe98b4e209f1f4/condorpy/workflow.py#L287-L299 | train |
tjcsl/cslbot | cslbot/helpers/modutils.py | safe_reload | def safe_reload(modname: types.ModuleType) -> Union[None, str]:
"""Catch and log any errors that arise from reimporting a module, but do not die.
:return: None when import was successful. String is the first line of the error message
"""
try:
importlib.reload(modname)
return None
except Exception as e:
logging.error("Failed to reimport module: %s", modname)
msg, _ = backtrace.output_traceback(e)
return msg | python | def safe_reload(modname: types.ModuleType) -> Union[None, str]:
"""Catch and log any errors that arise from reimporting a module, but do not die.
:return: None when import was successful. String is the first line of the error message
"""
try:
importlib.reload(modname)
return None
except Exception as e:
logging.error("Failed to reimport module: %s", modname)
msg, _ = backtrace.output_traceback(e)
return msg | [
"def",
"safe_reload",
"(",
"modname",
":",
"types",
".",
"ModuleType",
")",
"->",
"Union",
"[",
"None",
",",
"str",
"]",
":",
"try",
":",
"importlib",
".",
"reload",
"(",
"modname",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"\"Failed to reimport module: %s\"",
",",
"modname",
")",
"msg",
",",
"_",
"=",
"backtrace",
".",
"output_traceback",
"(",
"e",
")",
"return",
"msg"
]
| Catch and log any errors that arise from reimporting a module, but do not die.
:return: None when import was successful. String is the first line of the error message | [
"Catch",
"and",
"log",
"any",
"errors",
"that",
"arise",
"from",
"reimporting",
"a",
"module",
"but",
"do",
"not",
"die",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/modutils.py#L139-L151 | train |
tjcsl/cslbot | cslbot/helpers/modutils.py | safe_load | def safe_load(modname: str) -> Union[None, str]:
"""Load a module, logging errors instead of dying if it fails to load.
:return: None when import was successful. String is the first line of the error message
"""
try:
importlib.import_module(modname)
return None
except Exception as ex:
logging.error("Failed to import module: %s", modname)
msg, _ = backtrace.output_traceback(ex)
return msg | python | def safe_load(modname: str) -> Union[None, str]:
"""Load a module, logging errors instead of dying if it fails to load.
:return: None when import was successful. String is the first line of the error message
"""
try:
importlib.import_module(modname)
return None
except Exception as ex:
logging.error("Failed to import module: %s", modname)
msg, _ = backtrace.output_traceback(ex)
return msg | [
"def",
"safe_load",
"(",
"modname",
":",
"str",
")",
"->",
"Union",
"[",
"None",
",",
"str",
"]",
":",
"try",
":",
"importlib",
".",
"import_module",
"(",
"modname",
")",
"return",
"None",
"except",
"Exception",
"as",
"ex",
":",
"logging",
".",
"error",
"(",
"\"Failed to import module: %s\"",
",",
"modname",
")",
"msg",
",",
"_",
"=",
"backtrace",
".",
"output_traceback",
"(",
"ex",
")",
"return",
"msg"
]
| Load a module, logging errors instead of dying if it fails to load.
:return: None when import was successful. String is the first line of the error message | [
"Load",
"a",
"module",
"logging",
"errors",
"instead",
"of",
"dying",
"if",
"it",
"fails",
"to",
"load",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/modutils.py#L154-L166 | train |
tjcsl/cslbot | cslbot/helpers/modutils.py | scan_and_reimport | def scan_and_reimport(mod_type: str) -> List[Tuple[str, str]]:
"""Scans folder for modules."""
mod_enabled, mod_disabled = get_modules(mod_type)
errors = []
for mod in mod_enabled + mod_disabled:
if mod in sys.modules:
msg = safe_reload(sys.modules[mod])
else:
msg = safe_load(mod)
if msg is not None:
errors.append((mod, msg))
return errors | python | def scan_and_reimport(mod_type: str) -> List[Tuple[str, str]]:
"""Scans folder for modules."""
mod_enabled, mod_disabled = get_modules(mod_type)
errors = []
for mod in mod_enabled + mod_disabled:
if mod in sys.modules:
msg = safe_reload(sys.modules[mod])
else:
msg = safe_load(mod)
if msg is not None:
errors.append((mod, msg))
return errors | [
"def",
"scan_and_reimport",
"(",
"mod_type",
":",
"str",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"mod_enabled",
",",
"mod_disabled",
"=",
"get_modules",
"(",
"mod_type",
")",
"errors",
"=",
"[",
"]",
"for",
"mod",
"in",
"mod_enabled",
"+",
"mod_disabled",
":",
"if",
"mod",
"in",
"sys",
".",
"modules",
":",
"msg",
"=",
"safe_reload",
"(",
"sys",
".",
"modules",
"[",
"mod",
"]",
")",
"else",
":",
"msg",
"=",
"safe_load",
"(",
"mod",
")",
"if",
"msg",
"is",
"not",
"None",
":",
"errors",
".",
"append",
"(",
"(",
"mod",
",",
"msg",
")",
")",
"return",
"errors"
]
| Scans folder for modules. | [
"Scans",
"folder",
"for",
"modules",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/modutils.py#L169-L180 | train |
Yipit/pyeqs | pyeqs/dsl/aggregations.py | Aggregations._ranging | def _ranging(self):
"""
Should be a list of values to designate the buckets
"""
agg_ranges = []
for i, val in enumerate(self.range_list):
if i == 0:
agg_ranges.append({"to": val})
else:
previous = self.range_list[i - 1]
agg_ranges.append({"from": previous, "to": val})
if i + 1 == len(self.range_list):
agg_ranges.append({"from": val})
return agg_ranges | python | def _ranging(self):
"""
Should be a list of values to designate the buckets
"""
agg_ranges = []
for i, val in enumerate(self.range_list):
if i == 0:
agg_ranges.append({"to": val})
else:
previous = self.range_list[i - 1]
agg_ranges.append({"from": previous, "to": val})
if i + 1 == len(self.range_list):
agg_ranges.append({"from": val})
return agg_ranges | [
"def",
"_ranging",
"(",
"self",
")",
":",
"agg_ranges",
"=",
"[",
"]",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"self",
".",
"range_list",
")",
":",
"if",
"i",
"==",
"0",
":",
"agg_ranges",
".",
"append",
"(",
"{",
"\"to\"",
":",
"val",
"}",
")",
"else",
":",
"previous",
"=",
"self",
".",
"range_list",
"[",
"i",
"-",
"1",
"]",
"agg_ranges",
".",
"append",
"(",
"{",
"\"from\"",
":",
"previous",
",",
"\"to\"",
":",
"val",
"}",
")",
"if",
"i",
"+",
"1",
"==",
"len",
"(",
"self",
".",
"range_list",
")",
":",
"agg_ranges",
".",
"append",
"(",
"{",
"\"from\"",
":",
"val",
"}",
")",
"return",
"agg_ranges"
]
| Should be a list of values to designate the buckets | [
"Should",
"be",
"a",
"list",
"of",
"values",
"to",
"designate",
"the",
"buckets"
]
| 2e385c0a5d113af0e20be4d9393add2aabdd9565 | https://github.com/Yipit/pyeqs/blob/2e385c0a5d113af0e20be4d9393add2aabdd9565/pyeqs/dsl/aggregations.py#L81-L95 | train |
Keda87/python-quran-odoa | odoa.py | ODOA.get_random_surah | def get_random_surah(self, lang='id'):
"""
Perform http request to get random surah.
Parameter:
:lang -- String contains language code.
Return:
:dict -- Paired ayat, sound, description and the translation.
"""
# Ensure the language supported.
if lang not in self.SUPPORTED_LANGUAGES:
message = 'Currently your selected language not yet supported.'
raise ODOAException(message)
# Get random surah and construct the url.
rand_surah = random.randint(1, self.TOTAL_SURAH)
surah_url = '{base}/surah/surah_{pages}.json'.format(base=self.BASE_API,
pages=rand_surah)
try:
response = urlopen(surah_url) # Fetch data from given url.
data = json.loads(response.read().decode('utf-8')) # Get response and convert to dict.
except IOError:
traceback.print_exc(file=sys.stdout)
raise ODOAException
else:
# Get random ayat.
random_ayah = random.randint(1, int(data.get('count')))
ayah_key = 'verse_{index}'.format(index=random_ayah)
ayah = data['verse'][ayah_key].encode('utf-8')
surah_index = data.get('index')
surah_name = data.get('name')
# Get translation and sound url.
translation = self.__get_translation(surah=surah_index,
ayah=ayah_key,
lang=lang)
sound = self.__get_sound(surah=surah_index, ayah=random_ayah)
desc = '{name}:{ayah}'.format(name=surah_name, ayah=random_ayah)
meta = Metadata(ayah, desc, translation, sound)
return meta | python | def get_random_surah(self, lang='id'):
"""
Perform http request to get random surah.
Parameter:
:lang -- String contains language code.
Return:
:dict -- Paired ayat, sound, description and the translation.
"""
# Ensure the language supported.
if lang not in self.SUPPORTED_LANGUAGES:
message = 'Currently your selected language not yet supported.'
raise ODOAException(message)
# Get random surah and construct the url.
rand_surah = random.randint(1, self.TOTAL_SURAH)
surah_url = '{base}/surah/surah_{pages}.json'.format(base=self.BASE_API,
pages=rand_surah)
try:
response = urlopen(surah_url) # Fetch data from given url.
data = json.loads(response.read().decode('utf-8')) # Get response and convert to dict.
except IOError:
traceback.print_exc(file=sys.stdout)
raise ODOAException
else:
# Get random ayat.
random_ayah = random.randint(1, int(data.get('count')))
ayah_key = 'verse_{index}'.format(index=random_ayah)
ayah = data['verse'][ayah_key].encode('utf-8')
surah_index = data.get('index')
surah_name = data.get('name')
# Get translation and sound url.
translation = self.__get_translation(surah=surah_index,
ayah=ayah_key,
lang=lang)
sound = self.__get_sound(surah=surah_index, ayah=random_ayah)
desc = '{name}:{ayah}'.format(name=surah_name, ayah=random_ayah)
meta = Metadata(ayah, desc, translation, sound)
return meta | [
"def",
"get_random_surah",
"(",
"self",
",",
"lang",
"=",
"'id'",
")",
":",
"# Ensure the language supported.",
"if",
"lang",
"not",
"in",
"self",
".",
"SUPPORTED_LANGUAGES",
":",
"message",
"=",
"'Currently your selected language not yet supported.'",
"raise",
"ODOAException",
"(",
"message",
")",
"# Get random surah and construct the url.",
"rand_surah",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"self",
".",
"TOTAL_SURAH",
")",
"surah_url",
"=",
"'{base}/surah/surah_{pages}.json'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"BASE_API",
",",
"pages",
"=",
"rand_surah",
")",
"try",
":",
"response",
"=",
"urlopen",
"(",
"surah_url",
")",
"# Fetch data from given url.",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"# Get response and convert to dict.",
"except",
"IOError",
":",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"sys",
".",
"stdout",
")",
"raise",
"ODOAException",
"else",
":",
"# Get random ayat.",
"random_ayah",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"int",
"(",
"data",
".",
"get",
"(",
"'count'",
")",
")",
")",
"ayah_key",
"=",
"'verse_{index}'",
".",
"format",
"(",
"index",
"=",
"random_ayah",
")",
"ayah",
"=",
"data",
"[",
"'verse'",
"]",
"[",
"ayah_key",
"]",
".",
"encode",
"(",
"'utf-8'",
")",
"surah_index",
"=",
"data",
".",
"get",
"(",
"'index'",
")",
"surah_name",
"=",
"data",
".",
"get",
"(",
"'name'",
")",
"# Get translation and sound url.",
"translation",
"=",
"self",
".",
"__get_translation",
"(",
"surah",
"=",
"surah_index",
",",
"ayah",
"=",
"ayah_key",
",",
"lang",
"=",
"lang",
")",
"sound",
"=",
"self",
".",
"__get_sound",
"(",
"surah",
"=",
"surah_index",
",",
"ayah",
"=",
"random_ayah",
")",
"desc",
"=",
"'{name}:{ayah}'",
".",
"format",
"(",
"name",
"=",
"surah_name",
",",
"ayah",
"=",
"random_ayah",
")",
"meta",
"=",
"Metadata",
"(",
"ayah",
",",
"desc",
",",
"translation",
",",
"sound",
")",
"return",
"meta"
]
| Perform http request to get random surah.
Parameter:
:lang -- String contains language code.
Return:
:dict -- Paired ayat, sound, description and the translation. | [
"Perform",
"http",
"request",
"to",
"get",
"random",
"surah",
"."
]
| 1bf58feeef0355f5ca6c9ad678e54a8195f33eb8 | https://github.com/Keda87/python-quran-odoa/blob/1bf58feeef0355f5ca6c9ad678e54a8195f33eb8/odoa.py#L54-L92 | train |
Keda87/python-quran-odoa | odoa.py | ODOA.__get_translation | def __get_translation(self, surah, ayah, lang):
"""
Perform http request to get translation from given surah, ayah and
language.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
:lang -- Language code.
Return:
:string -- Translation from given surah and ayat.
"""
# Construct url to fetch translation data.
url = '{base}/translations/{lang}/{lang}_translation_{surah}.json'.format(
base=self.BASE_API, lang=lang, surah=int(surah)
)
try:
response = urlopen(url) # Fetch data from give url.
data = json.loads(response.read().decode('utf-8')) # Get response and convert to dict.
translation = data['verse'][ayah]
except ODOAException:
return None
else:
return translation | python | def __get_translation(self, surah, ayah, lang):
"""
Perform http request to get translation from given surah, ayah and
language.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
:lang -- Language code.
Return:
:string -- Translation from given surah and ayat.
"""
# Construct url to fetch translation data.
url = '{base}/translations/{lang}/{lang}_translation_{surah}.json'.format(
base=self.BASE_API, lang=lang, surah=int(surah)
)
try:
response = urlopen(url) # Fetch data from give url.
data = json.loads(response.read().decode('utf-8')) # Get response and convert to dict.
translation = data['verse'][ayah]
except ODOAException:
return None
else:
return translation | [
"def",
"__get_translation",
"(",
"self",
",",
"surah",
",",
"ayah",
",",
"lang",
")",
":",
"# Construct url to fetch translation data.",
"url",
"=",
"'{base}/translations/{lang}/{lang}_translation_{surah}.json'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"BASE_API",
",",
"lang",
"=",
"lang",
",",
"surah",
"=",
"int",
"(",
"surah",
")",
")",
"try",
":",
"response",
"=",
"urlopen",
"(",
"url",
")",
"# Fetch data from give url.",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"# Get response and convert to dict.",
"translation",
"=",
"data",
"[",
"'verse'",
"]",
"[",
"ayah",
"]",
"except",
"ODOAException",
":",
"return",
"None",
"else",
":",
"return",
"translation"
]
| Perform http request to get translation from given surah, ayah and
language.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
:lang -- Language code.
Return:
:string -- Translation from given surah and ayat. | [
"Perform",
"http",
"request",
"to",
"get",
"translation",
"from",
"given",
"surah",
"ayah",
"and",
"language",
"."
]
| 1bf58feeef0355f5ca6c9ad678e54a8195f33eb8 | https://github.com/Keda87/python-quran-odoa/blob/1bf58feeef0355f5ca6c9ad678e54a8195f33eb8/odoa.py#L94-L118 | train |
Keda87/python-quran-odoa | odoa.py | ODOA.__get_sound | def __get_sound(self, surah, ayah):
"""
Perform http request to get sound from given surah and ayah.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
Return:
:string -- URL for mp3 sound.
"""
# Formatting ayah with 0 leading.
# http://stackoverflow.com/questions/17118071/python-add-leading-zeroes-using-str-format
format_ayah = '{0:0>3}'.format(ayah)
sound_url = '{base}/sounds/{surah}/{ayah}.mp3'.format(
base=self.BASE_API, surah=surah, ayah=format_ayah
)
return sound_url | python | def __get_sound(self, surah, ayah):
"""
Perform http request to get sound from given surah and ayah.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
Return:
:string -- URL for mp3 sound.
"""
# Formatting ayah with 0 leading.
# http://stackoverflow.com/questions/17118071/python-add-leading-zeroes-using-str-format
format_ayah = '{0:0>3}'.format(ayah)
sound_url = '{base}/sounds/{surah}/{ayah}.mp3'.format(
base=self.BASE_API, surah=surah, ayah=format_ayah
)
return sound_url | [
"def",
"__get_sound",
"(",
"self",
",",
"surah",
",",
"ayah",
")",
":",
"# Formatting ayah with 0 leading.",
"# http://stackoverflow.com/questions/17118071/python-add-leading-zeroes-using-str-format",
"format_ayah",
"=",
"'{0:0>3}'",
".",
"format",
"(",
"ayah",
")",
"sound_url",
"=",
"'{base}/sounds/{surah}/{ayah}.mp3'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"BASE_API",
",",
"surah",
"=",
"surah",
",",
"ayah",
"=",
"format_ayah",
")",
"return",
"sound_url"
]
| Perform http request to get sound from given surah and ayah.
Parameter:
:surah -- Surah index from API pages.
:ayat -- Ayat key.
Return:
:string -- URL for mp3 sound. | [
"Perform",
"http",
"request",
"to",
"get",
"sound",
"from",
"given",
"surah",
"and",
"ayah",
"."
]
| 1bf58feeef0355f5ca6c9ad678e54a8195f33eb8 | https://github.com/Keda87/python-quran-odoa/blob/1bf58feeef0355f5ca6c9ad678e54a8195f33eb8/odoa.py#L120-L137 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_fipscode | def get_fipscode(self, obj):
"""County FIPS code"""
if obj.division.level.name == DivisionLevel.COUNTY:
return obj.division.code
return None | python | def get_fipscode(self, obj):
"""County FIPS code"""
if obj.division.level.name == DivisionLevel.COUNTY:
return obj.division.code
return None | [
"def",
"get_fipscode",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"division",
".",
"level",
".",
"name",
"==",
"DivisionLevel",
".",
"COUNTY",
":",
"return",
"obj",
".",
"division",
".",
"code",
"return",
"None"
]
| County FIPS code | [
"County",
"FIPS",
"code"
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L63-L67 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_statepostal | def get_statepostal(self, obj):
"""State postal abbreviation if county or state else ``None``."""
if obj.division.level.name == DivisionLevel.STATE:
return us.states.lookup(obj.division.code).abbr
elif obj.division.level.name == DivisionLevel.COUNTY:
return us.states.lookup(obj.division.parent.code).abbr
return None | python | def get_statepostal(self, obj):
"""State postal abbreviation if county or state else ``None``."""
if obj.division.level.name == DivisionLevel.STATE:
return us.states.lookup(obj.division.code).abbr
elif obj.division.level.name == DivisionLevel.COUNTY:
return us.states.lookup(obj.division.parent.code).abbr
return None | [
"def",
"get_statepostal",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"division",
".",
"level",
".",
"name",
"==",
"DivisionLevel",
".",
"STATE",
":",
"return",
"us",
".",
"states",
".",
"lookup",
"(",
"obj",
".",
"division",
".",
"code",
")",
".",
"abbr",
"elif",
"obj",
".",
"division",
".",
"level",
".",
"name",
"==",
"DivisionLevel",
".",
"COUNTY",
":",
"return",
"us",
".",
"states",
".",
"lookup",
"(",
"obj",
".",
"division",
".",
"parent",
".",
"code",
")",
".",
"abbr",
"return",
"None"
]
| State postal abbreviation if county or state else ``None``. | [
"State",
"postal",
"abbreviation",
"if",
"county",
"or",
"state",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L69-L75 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_polid | def get_polid(self, obj):
"""AP polid minus 'polid' prefix if polid else ``None``."""
ap_id = obj.candidate_election.candidate.ap_candidate_id
if 'polid-' in ap_id:
return ap_id.replace('polid-', '')
return None | python | def get_polid(self, obj):
"""AP polid minus 'polid' prefix if polid else ``None``."""
ap_id = obj.candidate_election.candidate.ap_candidate_id
if 'polid-' in ap_id:
return ap_id.replace('polid-', '')
return None | [
"def",
"get_polid",
"(",
"self",
",",
"obj",
")",
":",
"ap_id",
"=",
"obj",
".",
"candidate_election",
".",
"candidate",
".",
"ap_candidate_id",
"if",
"'polid-'",
"in",
"ap_id",
":",
"return",
"ap_id",
".",
"replace",
"(",
"'polid-'",
",",
"''",
")",
"return",
"None"
]
| AP polid minus 'polid' prefix if polid else ``None``. | [
"AP",
"polid",
"minus",
"polid",
"prefix",
"if",
"polid",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L77-L82 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_polnum | def get_polnum(self, obj):
"""AP polnum minus 'polnum' prefix if polnum else ``None``."""
ap_id = obj.candidate_election.candidate.ap_candidate_id
if 'polnum-' in ap_id:
return ap_id.replace('polnum-', '')
return None | python | def get_polnum(self, obj):
"""AP polnum minus 'polnum' prefix if polnum else ``None``."""
ap_id = obj.candidate_election.candidate.ap_candidate_id
if 'polnum-' in ap_id:
return ap_id.replace('polnum-', '')
return None | [
"def",
"get_polnum",
"(",
"self",
",",
"obj",
")",
":",
"ap_id",
"=",
"obj",
".",
"candidate_election",
".",
"candidate",
".",
"ap_candidate_id",
"if",
"'polnum-'",
"in",
"ap_id",
":",
"return",
"ap_id",
".",
"replace",
"(",
"'polnum-'",
",",
"''",
")",
"return",
"None"
]
| AP polnum minus 'polnum' prefix if polnum else ``None``. | [
"AP",
"polnum",
"minus",
"polnum",
"prefix",
"if",
"polnum",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L84-L89 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_precinctsreporting | def get_precinctsreporting(self, obj):
"""Precincts reporting if vote is top level result else ``None``."""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_reporting
return None | python | def get_precinctsreporting(self, obj):
"""Precincts reporting if vote is top level result else ``None``."""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_reporting
return None | [
"def",
"get_precinctsreporting",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"division",
".",
"level",
"==",
"obj",
".",
"candidate_election",
".",
"election",
".",
"division",
".",
"level",
":",
"return",
"obj",
".",
"candidate_election",
".",
"election",
".",
"meta",
".",
"precincts_reporting",
"return",
"None"
]
| Precincts reporting if vote is top level result else ``None``. | [
"Precincts",
"reporting",
"if",
"vote",
"is",
"top",
"level",
"result",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L91-L96 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_precinctsreportingpct | def get_precinctsreportingpct(self, obj):
"""
Precincts reporting percent if vote is top level result else ``None``.
"""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_reporting_pct
return None | python | def get_precinctsreportingpct(self, obj):
"""
Precincts reporting percent if vote is top level result else ``None``.
"""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_reporting_pct
return None | [
"def",
"get_precinctsreportingpct",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"division",
".",
"level",
"==",
"obj",
".",
"candidate_election",
".",
"election",
".",
"division",
".",
"level",
":",
"return",
"obj",
".",
"candidate_election",
".",
"election",
".",
"meta",
".",
"precincts_reporting_pct",
"return",
"None"
]
| Precincts reporting percent if vote is top level result else ``None``. | [
"Precincts",
"reporting",
"percent",
"if",
"vote",
"is",
"top",
"level",
"result",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L98-L105 | train |
The-Politico/politico-civic-election-night | electionnight/serializers/votes.py | VotesSerializer.get_precinctstotal | def get_precinctstotal(self, obj):
"""Precincts total if vote is top level result else ``None``."""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_total
return None | python | def get_precinctstotal(self, obj):
"""Precincts total if vote is top level result else ``None``."""
if obj.division.level == \
obj.candidate_election.election.division.level:
return obj.candidate_election.election.meta.precincts_total
return None | [
"def",
"get_precinctstotal",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"division",
".",
"level",
"==",
"obj",
".",
"candidate_election",
".",
"election",
".",
"division",
".",
"level",
":",
"return",
"obj",
".",
"candidate_election",
".",
"election",
".",
"meta",
".",
"precincts_total",
"return",
"None"
]
| Precincts total if vote is top level result else ``None``. | [
"Precincts",
"total",
"if",
"vote",
"is",
"top",
"level",
"result",
"else",
"None",
"."
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L107-L112 | train |
CI-WATER/mapkit | mapkit/RasterLoader.py | RasterLoader.load | def load(self, tableName='rasters', rasters=[]):
'''
Accepts a list of paths to raster files to load into the database.
Returns the ids of the rasters loaded successfully in the same order
as the list passed in.
'''
# Create table if necessary
Base.metadata.create_all(self._engine)
# Create a session
Session = sessionmaker(bind=self._engine)
session = Session()
for raster in rasters:
# Must read in using the raster2pgsql commandline tool.
rasterPath = raster['path']
if 'srid' in raster:
srid = str(raster['srid'])
else:
srid = '4326'
if 'no-data' in raster:
noData = str(raster['no-data'])
else:
noData = '-1'
wellKnownBinary = RasterLoader.rasterToWKB(rasterPath, srid, noData, self._raster2pgsql)
rasterBinary = wellKnownBinary
# Get the filename
filename = os.path.split(rasterPath)[1]
# Populate raster record
mapKitRaster = MapKitRaster()
mapKitRaster.filename = filename
mapKitRaster.raster = rasterBinary
if 'timestamp' in raster:
mapKitRaster.timestamp = raster['timestamp']
# Add to session
session.add(mapKitRaster)
session.commit() | python | def load(self, tableName='rasters', rasters=[]):
'''
Accepts a list of paths to raster files to load into the database.
Returns the ids of the rasters loaded successfully in the same order
as the list passed in.
'''
# Create table if necessary
Base.metadata.create_all(self._engine)
# Create a session
Session = sessionmaker(bind=self._engine)
session = Session()
for raster in rasters:
# Must read in using the raster2pgsql commandline tool.
rasterPath = raster['path']
if 'srid' in raster:
srid = str(raster['srid'])
else:
srid = '4326'
if 'no-data' in raster:
noData = str(raster['no-data'])
else:
noData = '-1'
wellKnownBinary = RasterLoader.rasterToWKB(rasterPath, srid, noData, self._raster2pgsql)
rasterBinary = wellKnownBinary
# Get the filename
filename = os.path.split(rasterPath)[1]
# Populate raster record
mapKitRaster = MapKitRaster()
mapKitRaster.filename = filename
mapKitRaster.raster = rasterBinary
if 'timestamp' in raster:
mapKitRaster.timestamp = raster['timestamp']
# Add to session
session.add(mapKitRaster)
session.commit() | [
"def",
"load",
"(",
"self",
",",
"tableName",
"=",
"'rasters'",
",",
"rasters",
"=",
"[",
"]",
")",
":",
"# Create table if necessary",
"Base",
".",
"metadata",
".",
"create_all",
"(",
"self",
".",
"_engine",
")",
"# Create a session",
"Session",
"=",
"sessionmaker",
"(",
"bind",
"=",
"self",
".",
"_engine",
")",
"session",
"=",
"Session",
"(",
")",
"for",
"raster",
"in",
"rasters",
":",
"# Must read in using the raster2pgsql commandline tool.",
"rasterPath",
"=",
"raster",
"[",
"'path'",
"]",
"if",
"'srid'",
"in",
"raster",
":",
"srid",
"=",
"str",
"(",
"raster",
"[",
"'srid'",
"]",
")",
"else",
":",
"srid",
"=",
"'4326'",
"if",
"'no-data'",
"in",
"raster",
":",
"noData",
"=",
"str",
"(",
"raster",
"[",
"'no-data'",
"]",
")",
"else",
":",
"noData",
"=",
"'-1'",
"wellKnownBinary",
"=",
"RasterLoader",
".",
"rasterToWKB",
"(",
"rasterPath",
",",
"srid",
",",
"noData",
",",
"self",
".",
"_raster2pgsql",
")",
"rasterBinary",
"=",
"wellKnownBinary",
"# Get the filename",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"rasterPath",
")",
"[",
"1",
"]",
"# Populate raster record",
"mapKitRaster",
"=",
"MapKitRaster",
"(",
")",
"mapKitRaster",
".",
"filename",
"=",
"filename",
"mapKitRaster",
".",
"raster",
"=",
"rasterBinary",
"if",
"'timestamp'",
"in",
"raster",
":",
"mapKitRaster",
".",
"timestamp",
"=",
"raster",
"[",
"'timestamp'",
"]",
"# Add to session",
"session",
".",
"add",
"(",
"mapKitRaster",
")",
"session",
".",
"commit",
"(",
")"
]
| Accepts a list of paths to raster files to load into the database.
Returns the ids of the rasters loaded successfully in the same order
as the list passed in. | [
"Accepts",
"a",
"list",
"of",
"paths",
"to",
"raster",
"files",
"to",
"load",
"into",
"the",
"database",
".",
"Returns",
"the",
"ids",
"of",
"the",
"rasters",
"loaded",
"successfully",
"in",
"the",
"same",
"order",
"as",
"the",
"list",
"passed",
"in",
"."
]
| ce5fbded6af7adabdf1eec85631c6811ef8ecc34 | https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L35-L80 | train |
CI-WATER/mapkit | mapkit/RasterLoader.py | RasterLoader.rasterToWKB | def rasterToWKB(cls, rasterPath, srid, noData, raster2pgsql):
"""
Accepts a raster file and converts it to Well Known Binary text using the raster2pgsql
executable that comes with PostGIS. This is the format that rasters are stored in a
PostGIS database.
"""
raster2pgsqlProcess = subprocess.Popen([raster2pgsql,
'-s', srid,
'-N', noData,
rasterPath,
'n_a'],stdout=subprocess.PIPE)
# This commandline tool generates the SQL to load the raster into the database
# However, we want to use SQLAlchemy to load the values into the database.
# We do this by extracting the value from the sql that is generated.
sql, error = raster2pgsqlProcess.communicate()
if sql:
# This esoteric line is used to extract only the value of the raster (which is stored as a Well Know Binary string)
# Example of Output:
# BEGIN;
# INSERT INTO "idx_index_maps" ("rast") VALUES ('0100...56C096CE87'::raster);
# END;
# The WKB is wrapped in single quotes. Splitting on single quotes isolates it as the
# second item in the resulting list.
wellKnownBinary = sql.split("'")[1]
else:
print(error)
raise
return wellKnownBinary | python | def rasterToWKB(cls, rasterPath, srid, noData, raster2pgsql):
"""
Accepts a raster file and converts it to Well Known Binary text using the raster2pgsql
executable that comes with PostGIS. This is the format that rasters are stored in a
PostGIS database.
"""
raster2pgsqlProcess = subprocess.Popen([raster2pgsql,
'-s', srid,
'-N', noData,
rasterPath,
'n_a'],stdout=subprocess.PIPE)
# This commandline tool generates the SQL to load the raster into the database
# However, we want to use SQLAlchemy to load the values into the database.
# We do this by extracting the value from the sql that is generated.
sql, error = raster2pgsqlProcess.communicate()
if sql:
# This esoteric line is used to extract only the value of the raster (which is stored as a Well Know Binary string)
# Example of Output:
# BEGIN;
# INSERT INTO "idx_index_maps" ("rast") VALUES ('0100...56C096CE87'::raster);
# END;
# The WKB is wrapped in single quotes. Splitting on single quotes isolates it as the
# second item in the resulting list.
wellKnownBinary = sql.split("'")[1]
else:
print(error)
raise
return wellKnownBinary | [
"def",
"rasterToWKB",
"(",
"cls",
",",
"rasterPath",
",",
"srid",
",",
"noData",
",",
"raster2pgsql",
")",
":",
"raster2pgsqlProcess",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"raster2pgsql",
",",
"'-s'",
",",
"srid",
",",
"'-N'",
",",
"noData",
",",
"rasterPath",
",",
"'n_a'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"# This commandline tool generates the SQL to load the raster into the database",
"# However, we want to use SQLAlchemy to load the values into the database.",
"# We do this by extracting the value from the sql that is generated.",
"sql",
",",
"error",
"=",
"raster2pgsqlProcess",
".",
"communicate",
"(",
")",
"if",
"sql",
":",
"# This esoteric line is used to extract only the value of the raster (which is stored as a Well Know Binary string)",
"# Example of Output:",
"# BEGIN;",
"# INSERT INTO \"idx_index_maps\" (\"rast\") VALUES ('0100...56C096CE87'::raster);",
"# END;",
"# The WKB is wrapped in single quotes. Splitting on single quotes isolates it as the",
"# second item in the resulting list.",
"wellKnownBinary",
"=",
"sql",
".",
"split",
"(",
"\"'\"",
")",
"[",
"1",
"]",
"else",
":",
"print",
"(",
"error",
")",
"raise",
"return",
"wellKnownBinary"
]
| Accepts a raster file and converts it to Well Known Binary text using the raster2pgsql
executable that comes with PostGIS. This is the format that rasters are stored in a
PostGIS database. | [
"Accepts",
"a",
"raster",
"file",
"and",
"converts",
"it",
"to",
"Well",
"Known",
"Binary",
"text",
"using",
"the",
"raster2pgsql",
"executable",
"that",
"comes",
"with",
"PostGIS",
".",
"This",
"is",
"the",
"format",
"that",
"rasters",
"are",
"stored",
"in",
"a",
"PostGIS",
"database",
"."
]
| ce5fbded6af7adabdf1eec85631c6811ef8ecc34 | https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L83-L111 | train |
CI-WATER/mapkit | mapkit/RasterLoader.py | RasterLoader.grassAsciiRasterToWKB | def grassAsciiRasterToWKB(cls, session, grassRasterPath, srid, noData=0):
"""
Load GRASS ASCII rasters directly using the makeSingleBandWKBRaster method. Do this to eliminate the raster2pgsql
dependency.
"""
# Constants
NUM_HEADER_LINES = 6
# Defaults
north = 0.0
east = 0.0
west = 0.0
rows = 0
columns = 0
if grassRasterPath is not None:
# If the path to the file is given, open the file and extract contents.
with open(grassRasterPath, 'r') as f:
rasterLines = f.readlines()
else:
print("RASTER LOAD ERROR: Must provide the path the raster.")
raise
# Extract the headers from the file and derive metadata
for line in rasterLines[0:NUM_HEADER_LINES]:
spline = line.split()
if 'north' in spline[0].lower():
north = float(spline[1])
elif 'east' in spline[0].lower():
east = float(spline[1])
elif 'west' in spline[0].lower():
west = float(spline[1])
elif 'rows' in spline[0].lower():
rows = int(spline[1])
elif 'cols' in spline[0].lower():
columns = int(spline[1])
# Define raster metadata from headers
width = columns
height = rows
upperLeftX = west
upperLeftY = north
cellSizeX = int(abs(west - east) / columns)
cellSizeY = -1 * cellSizeX
# Assemble the data array string
dataArrayList = []
for line in rasterLines[NUM_HEADER_LINES:len(rasterLines)]:
dataArrayList.append('[{0}]'.format(', '.join(line.split())))
dataArrayString = '[{0}]'.format(', '.join(dataArrayList))
# Create well known binary raster
wellKnownBinary = cls.makeSingleBandWKBRaster(session=session,
width=width, height=height,
upperLeftX=upperLeftX, upperLeftY=upperLeftY,
cellSizeX=cellSizeX, cellSizeY=cellSizeY,
skewX=0, skewY=0,
srid=srid,
dataArray=dataArrayString,
noDataValue=noData)
return wellKnownBinary | python | def grassAsciiRasterToWKB(cls, session, grassRasterPath, srid, noData=0):
"""
Load GRASS ASCII rasters directly using the makeSingleBandWKBRaster method. Do this to eliminate the raster2pgsql
dependency.
"""
# Constants
NUM_HEADER_LINES = 6
# Defaults
north = 0.0
east = 0.0
west = 0.0
rows = 0
columns = 0
if grassRasterPath is not None:
# If the path to the file is given, open the file and extract contents.
with open(grassRasterPath, 'r') as f:
rasterLines = f.readlines()
else:
print("RASTER LOAD ERROR: Must provide the path the raster.")
raise
# Extract the headers from the file and derive metadata
for line in rasterLines[0:NUM_HEADER_LINES]:
spline = line.split()
if 'north' in spline[0].lower():
north = float(spline[1])
elif 'east' in spline[0].lower():
east = float(spline[1])
elif 'west' in spline[0].lower():
west = float(spline[1])
elif 'rows' in spline[0].lower():
rows = int(spline[1])
elif 'cols' in spline[0].lower():
columns = int(spline[1])
# Define raster metadata from headers
width = columns
height = rows
upperLeftX = west
upperLeftY = north
cellSizeX = int(abs(west - east) / columns)
cellSizeY = -1 * cellSizeX
# Assemble the data array string
dataArrayList = []
for line in rasterLines[NUM_HEADER_LINES:len(rasterLines)]:
dataArrayList.append('[{0}]'.format(', '.join(line.split())))
dataArrayString = '[{0}]'.format(', '.join(dataArrayList))
# Create well known binary raster
wellKnownBinary = cls.makeSingleBandWKBRaster(session=session,
width=width, height=height,
upperLeftX=upperLeftX, upperLeftY=upperLeftY,
cellSizeX=cellSizeX, cellSizeY=cellSizeY,
skewX=0, skewY=0,
srid=srid,
dataArray=dataArrayString,
noDataValue=noData)
return wellKnownBinary | [
"def",
"grassAsciiRasterToWKB",
"(",
"cls",
",",
"session",
",",
"grassRasterPath",
",",
"srid",
",",
"noData",
"=",
"0",
")",
":",
"# Constants",
"NUM_HEADER_LINES",
"=",
"6",
"# Defaults",
"north",
"=",
"0.0",
"east",
"=",
"0.0",
"west",
"=",
"0.0",
"rows",
"=",
"0",
"columns",
"=",
"0",
"if",
"grassRasterPath",
"is",
"not",
"None",
":",
"# If the path to the file is given, open the file and extract contents.",
"with",
"open",
"(",
"grassRasterPath",
",",
"'r'",
")",
"as",
"f",
":",
"rasterLines",
"=",
"f",
".",
"readlines",
"(",
")",
"else",
":",
"print",
"(",
"\"RASTER LOAD ERROR: Must provide the path the raster.\"",
")",
"raise",
"# Extract the headers from the file and derive metadata",
"for",
"line",
"in",
"rasterLines",
"[",
"0",
":",
"NUM_HEADER_LINES",
"]",
":",
"spline",
"=",
"line",
".",
"split",
"(",
")",
"if",
"'north'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"north",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'east'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"east",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'west'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"west",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'rows'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"rows",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'cols'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"columns",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"# Define raster metadata from headers",
"width",
"=",
"columns",
"height",
"=",
"rows",
"upperLeftX",
"=",
"west",
"upperLeftY",
"=",
"north",
"cellSizeX",
"=",
"int",
"(",
"abs",
"(",
"west",
"-",
"east",
")",
"/",
"columns",
")",
"cellSizeY",
"=",
"-",
"1",
"*",
"cellSizeX",
"# Assemble the data array string",
"dataArrayList",
"=",
"[",
"]",
"for",
"line",
"in",
"rasterLines",
"[",
"NUM_HEADER_LINES",
":",
"len",
"(",
"rasterLines",
")",
"]",
":",
"dataArrayList",
".",
"append",
"(",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"line",
".",
"split",
"(",
")",
")",
")",
")",
"dataArrayString",
"=",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"dataArrayList",
")",
")",
"# Create well known binary raster",
"wellKnownBinary",
"=",
"cls",
".",
"makeSingleBandWKBRaster",
"(",
"session",
"=",
"session",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"upperLeftX",
"=",
"upperLeftX",
",",
"upperLeftY",
"=",
"upperLeftY",
",",
"cellSizeX",
"=",
"cellSizeX",
",",
"cellSizeY",
"=",
"cellSizeY",
",",
"skewX",
"=",
"0",
",",
"skewY",
"=",
"0",
",",
"srid",
"=",
"srid",
",",
"dataArray",
"=",
"dataArrayString",
",",
"noDataValue",
"=",
"noData",
")",
"return",
"wellKnownBinary"
]
| Load GRASS ASCII rasters directly using the makeSingleBandWKBRaster method. Do this to eliminate the raster2pgsql
dependency. | [
"Load",
"GRASS",
"ASCII",
"rasters",
"directly",
"using",
"the",
"makeSingleBandWKBRaster",
"method",
".",
"Do",
"this",
"to",
"eliminate",
"the",
"raster2pgsql",
"dependency",
"."
]
| ce5fbded6af7adabdf1eec85631c6811ef8ecc34 | https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L114-L178 | train |
tjcsl/cslbot | cslbot/commands/defersay.py | cmd | def cmd(send, msg, args):
"""Says something at a later time.
Syntax: {command} <delay> <msg>
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('delay')
parser.add_argument('msg', nargs='+')
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if isinstance(cmdargs.msg, list):
cmdargs.msg = ' '.join(cmdargs.msg)
cmdargs.delay = parse_time(cmdargs.delay)
if cmdargs.delay is None:
send("Invalid unit.")
elif cmdargs.delay < 0:
send("Time travel not yet implemented, sorry.")
else:
ident = args['handler'].workers.defer(cmdargs.delay, False, send, cmdargs.msg)
send("Message deferred, ident: %s" % ident) | python | def cmd(send, msg, args):
"""Says something at a later time.
Syntax: {command} <delay> <msg>
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('delay')
parser.add_argument('msg', nargs='+')
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if isinstance(cmdargs.msg, list):
cmdargs.msg = ' '.join(cmdargs.msg)
cmdargs.delay = parse_time(cmdargs.delay)
if cmdargs.delay is None:
send("Invalid unit.")
elif cmdargs.delay < 0:
send("Time travel not yet implemented, sorry.")
else:
ident = args['handler'].workers.defer(cmdargs.delay, False, send, cmdargs.msg)
send("Message deferred, ident: %s" % ident) | [
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"parser",
"=",
"arguments",
".",
"ArgParser",
"(",
"args",
"[",
"'config'",
"]",
")",
"parser",
".",
"add_argument",
"(",
"'delay'",
")",
"parser",
".",
"add_argument",
"(",
"'msg'",
",",
"nargs",
"=",
"'+'",
")",
"try",
":",
"cmdargs",
"=",
"parser",
".",
"parse_args",
"(",
"msg",
")",
"except",
"arguments",
".",
"ArgumentException",
"as",
"e",
":",
"send",
"(",
"str",
"(",
"e",
")",
")",
"return",
"if",
"isinstance",
"(",
"cmdargs",
".",
"msg",
",",
"list",
")",
":",
"cmdargs",
".",
"msg",
"=",
"' '",
".",
"join",
"(",
"cmdargs",
".",
"msg",
")",
"cmdargs",
".",
"delay",
"=",
"parse_time",
"(",
"cmdargs",
".",
"delay",
")",
"if",
"cmdargs",
".",
"delay",
"is",
"None",
":",
"send",
"(",
"\"Invalid unit.\"",
")",
"elif",
"cmdargs",
".",
"delay",
"<",
"0",
":",
"send",
"(",
"\"Time travel not yet implemented, sorry.\"",
")",
"else",
":",
"ident",
"=",
"args",
"[",
"'handler'",
"]",
".",
"workers",
".",
"defer",
"(",
"cmdargs",
".",
"delay",
",",
"False",
",",
"send",
",",
"cmdargs",
".",
"msg",
")",
"send",
"(",
"\"Message deferred, ident: %s\"",
"%",
"ident",
")"
]
| Says something at a later time.
Syntax: {command} <delay> <msg> | [
"Says",
"something",
"at",
"a",
"later",
"time",
"."
]
| aebe07be47141f61d7c180706bddfb707f19b2b5 | https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/defersay.py#L24-L47 | train |
marrow/util | marrow/util/tuple.py | NamedTuple._replace | def _replace(self, **kwds):
'Return a new NamedTuple object replacing specified fields with new values'
result = self._make(map(kwds.pop, self._fields, self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result | python | def _replace(self, **kwds):
'Return a new NamedTuple object replacing specified fields with new values'
result = self._make(map(kwds.pop, self._fields, self))
if kwds:
raise ValueError('Got unexpected field names: %r' % kwds.keys())
return result | [
"def",
"_replace",
"(",
"self",
",",
"*",
"*",
"kwds",
")",
":",
"result",
"=",
"self",
".",
"_make",
"(",
"map",
"(",
"kwds",
".",
"pop",
",",
"self",
".",
"_fields",
",",
"self",
")",
")",
"if",
"kwds",
":",
"raise",
"ValueError",
"(",
"'Got unexpected field names: %r'",
"%",
"kwds",
".",
"keys",
"(",
")",
")",
"return",
"result"
]
| Return a new NamedTuple object replacing specified fields with new values | [
"Return",
"a",
"new",
"NamedTuple",
"object",
"replacing",
"specified",
"fields",
"with",
"new",
"values"
]
| abb8163dbd1fa0692d42a44d129b12ae2b39cdf2 | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/tuple.py#L63-L70 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | concat_cols | def concat_cols(df1,df2,idx_col,df1_cols,df2_cols,
df1_suffix,df2_suffix,wc_cols=[],suffix_all=False):
"""
Concatenates two pandas tables
:param df1: dataframe 1
:param df2: dataframe 2
:param idx_col: column name which will be used as a common index
"""
df1=df1.set_index(idx_col)
df2=df2.set_index(idx_col)
if not len(wc_cols)==0:
for wc in wc_cols:
df1_cols=df1_cols+[c for c in df1.columns if wc in c]
df2_cols=df2_cols+[c for c in df2.columns if wc in c]
combo=pd.concat([df1.loc[:,df1_cols],df2.loc[:,df2_cols]],axis=1)
# find common columns and rename them
# print df1_cols
# print df2_cols
if suffix_all:
df1_cols=["%s%s" % (c,df1_suffix) for c in df1_cols]
df2_cols=["%s%s" % (c,df2_suffix) for c in df2_cols]
# df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix)
# df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix)
else:
common_cols=[col for col in df1_cols if col in df2_cols]
for col in common_cols:
df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix)
df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix)
combo.columns=df1_cols+df2_cols
combo.index.name=idx_col
return combo | python | def concat_cols(df1,df2,idx_col,df1_cols,df2_cols,
df1_suffix,df2_suffix,wc_cols=[],suffix_all=False):
"""
Concatenates two pandas tables
:param df1: dataframe 1
:param df2: dataframe 2
:param idx_col: column name which will be used as a common index
"""
df1=df1.set_index(idx_col)
df2=df2.set_index(idx_col)
if not len(wc_cols)==0:
for wc in wc_cols:
df1_cols=df1_cols+[c for c in df1.columns if wc in c]
df2_cols=df2_cols+[c for c in df2.columns if wc in c]
combo=pd.concat([df1.loc[:,df1_cols],df2.loc[:,df2_cols]],axis=1)
# find common columns and rename them
# print df1_cols
# print df2_cols
if suffix_all:
df1_cols=["%s%s" % (c,df1_suffix) for c in df1_cols]
df2_cols=["%s%s" % (c,df2_suffix) for c in df2_cols]
# df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix)
# df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix)
else:
common_cols=[col for col in df1_cols if col in df2_cols]
for col in common_cols:
df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix)
df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix)
combo.columns=df1_cols+df2_cols
combo.index.name=idx_col
return combo | [
"def",
"concat_cols",
"(",
"df1",
",",
"df2",
",",
"idx_col",
",",
"df1_cols",
",",
"df2_cols",
",",
"df1_suffix",
",",
"df2_suffix",
",",
"wc_cols",
"=",
"[",
"]",
",",
"suffix_all",
"=",
"False",
")",
":",
"df1",
"=",
"df1",
".",
"set_index",
"(",
"idx_col",
")",
"df2",
"=",
"df2",
".",
"set_index",
"(",
"idx_col",
")",
"if",
"not",
"len",
"(",
"wc_cols",
")",
"==",
"0",
":",
"for",
"wc",
"in",
"wc_cols",
":",
"df1_cols",
"=",
"df1_cols",
"+",
"[",
"c",
"for",
"c",
"in",
"df1",
".",
"columns",
"if",
"wc",
"in",
"c",
"]",
"df2_cols",
"=",
"df2_cols",
"+",
"[",
"c",
"for",
"c",
"in",
"df2",
".",
"columns",
"if",
"wc",
"in",
"c",
"]",
"combo",
"=",
"pd",
".",
"concat",
"(",
"[",
"df1",
".",
"loc",
"[",
":",
",",
"df1_cols",
"]",
",",
"df2",
".",
"loc",
"[",
":",
",",
"df2_cols",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"# find common columns and rename them",
"# print df1_cols",
"# print df2_cols ",
"if",
"suffix_all",
":",
"df1_cols",
"=",
"[",
"\"%s%s\"",
"%",
"(",
"c",
",",
"df1_suffix",
")",
"for",
"c",
"in",
"df1_cols",
"]",
"df2_cols",
"=",
"[",
"\"%s%s\"",
"%",
"(",
"c",
",",
"df2_suffix",
")",
"for",
"c",
"in",
"df2_cols",
"]",
"# df1_cols[df1_cols.index(col)]=\"%s%s\" % (col,df1_suffix)",
"# df2_cols[df2_cols.index(col)]=\"%s%s\" % (col,df2_suffix)",
"else",
":",
"common_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"df1_cols",
"if",
"col",
"in",
"df2_cols",
"]",
"for",
"col",
"in",
"common_cols",
":",
"df1_cols",
"[",
"df1_cols",
".",
"index",
"(",
"col",
")",
"]",
"=",
"\"%s%s\"",
"%",
"(",
"col",
",",
"df1_suffix",
")",
"df2_cols",
"[",
"df2_cols",
".",
"index",
"(",
"col",
")",
"]",
"=",
"\"%s%s\"",
"%",
"(",
"col",
",",
"df2_suffix",
")",
"combo",
".",
"columns",
"=",
"df1_cols",
"+",
"df2_cols",
"combo",
".",
"index",
".",
"name",
"=",
"idx_col",
"return",
"combo"
]
| Concatenates two pandas tables
:param df1: dataframe 1
:param df2: dataframe 2
:param idx_col: column name which will be used as a common index | [
"Concatenates",
"two",
"pandas",
"tables"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L174-L206 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | get_colmin | def get_colmin(data):
"""
Get rowwise column names with minimum values
:param data: pandas dataframe
"""
data=data.T
colmins=[]
for col in data:
colmins.append(data[col].idxmin())
return colmins | python | def get_colmin(data):
"""
Get rowwise column names with minimum values
:param data: pandas dataframe
"""
data=data.T
colmins=[]
for col in data:
colmins.append(data[col].idxmin())
return colmins | [
"def",
"get_colmin",
"(",
"data",
")",
":",
"data",
"=",
"data",
".",
"T",
"colmins",
"=",
"[",
"]",
"for",
"col",
"in",
"data",
":",
"colmins",
".",
"append",
"(",
"data",
"[",
"col",
"]",
".",
"idxmin",
"(",
")",
")",
"return",
"colmins"
]
| Get rowwise column names with minimum values
:param data: pandas dataframe | [
"Get",
"rowwise",
"column",
"names",
"with",
"minimum",
"values"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L208-L218 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | fhs2data_combo_appended | def fhs2data_combo_appended(fhs, cols=None,labels=None,labels_coln='labels',sep=',',
error_bad_lines=True):
"""
to be deprecated
Collates data from multiple csv files vertically
:param fhs: list of paths to csv files
:param cols: list of column names to concatenate
"""
if labels is None:
labels=[basename(fh) for fh in fhs]
if len(fhs)>0:
data_all=pd.DataFrame(columns=cols)
for fhi,fh in enumerate(fhs):
label=labels[fhi]
try:
data=pd.read_csv(fh,sep=sep,error_bad_lines=error_bad_lines)
except:
raise ValueError(f"something wrong with file pd.read_csv({fh},sep={sep})")
if len(data)!=0:
data.loc[:,labels_coln]=label
if not cols is None:
data=data.loc[:,cols]
data_all=data_all.append(data,sort=True)
return del_Unnamed(data_all) | python | def fhs2data_combo_appended(fhs, cols=None,labels=None,labels_coln='labels',sep=',',
error_bad_lines=True):
"""
to be deprecated
Collates data from multiple csv files vertically
:param fhs: list of paths to csv files
:param cols: list of column names to concatenate
"""
if labels is None:
labels=[basename(fh) for fh in fhs]
if len(fhs)>0:
data_all=pd.DataFrame(columns=cols)
for fhi,fh in enumerate(fhs):
label=labels[fhi]
try:
data=pd.read_csv(fh,sep=sep,error_bad_lines=error_bad_lines)
except:
raise ValueError(f"something wrong with file pd.read_csv({fh},sep={sep})")
if len(data)!=0:
data.loc[:,labels_coln]=label
if not cols is None:
data=data.loc[:,cols]
data_all=data_all.append(data,sort=True)
return del_Unnamed(data_all) | [
"def",
"fhs2data_combo_appended",
"(",
"fhs",
",",
"cols",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"labels_coln",
"=",
"'labels'",
",",
"sep",
"=",
"','",
",",
"error_bad_lines",
"=",
"True",
")",
":",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"[",
"basename",
"(",
"fh",
")",
"for",
"fh",
"in",
"fhs",
"]",
"if",
"len",
"(",
"fhs",
")",
">",
"0",
":",
"data_all",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"cols",
")",
"for",
"fhi",
",",
"fh",
"in",
"enumerate",
"(",
"fhs",
")",
":",
"label",
"=",
"labels",
"[",
"fhi",
"]",
"try",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"fh",
",",
"sep",
"=",
"sep",
",",
"error_bad_lines",
"=",
"error_bad_lines",
")",
"except",
":",
"raise",
"ValueError",
"(",
"f\"something wrong with file pd.read_csv({fh},sep={sep})\"",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"0",
":",
"data",
".",
"loc",
"[",
":",
",",
"labels_coln",
"]",
"=",
"label",
"if",
"not",
"cols",
"is",
"None",
":",
"data",
"=",
"data",
".",
"loc",
"[",
":",
",",
"cols",
"]",
"data_all",
"=",
"data_all",
".",
"append",
"(",
"data",
",",
"sort",
"=",
"True",
")",
"return",
"del_Unnamed",
"(",
"data_all",
")"
]
| to be deprecated
Collates data from multiple csv files vertically
:param fhs: list of paths to csv files
:param cols: list of column names to concatenate | [
"to",
"be",
"deprecated",
"Collates",
"data",
"from",
"multiple",
"csv",
"files",
"vertically"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L248-L272 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | rename_cols | def rename_cols(df,names,renames=None,prefix=None,suffix=None):
"""
rename columns of a pandas table
:param df: pandas dataframe
:param names: list of new column names
"""
if not prefix is None:
renames=[ "%s%s" % (prefix,s) for s in names]
if not suffix is None:
renames=[ "%s%s" % (s,suffix) for s in names]
if not renames is None:
for i,name in enumerate(names):
# names=[renames[i] if s==names[i] else s for s in names]
rename=renames[i]
df.loc[:,rename]=df.loc[:,name]
df=df.drop(names,axis=1)
return df | python | def rename_cols(df,names,renames=None,prefix=None,suffix=None):
"""
rename columns of a pandas table
:param df: pandas dataframe
:param names: list of new column names
"""
if not prefix is None:
renames=[ "%s%s" % (prefix,s) for s in names]
if not suffix is None:
renames=[ "%s%s" % (s,suffix) for s in names]
if not renames is None:
for i,name in enumerate(names):
# names=[renames[i] if s==names[i] else s for s in names]
rename=renames[i]
df.loc[:,rename]=df.loc[:,name]
df=df.drop(names,axis=1)
return df | [
"def",
"rename_cols",
"(",
"df",
",",
"names",
",",
"renames",
"=",
"None",
",",
"prefix",
"=",
"None",
",",
"suffix",
"=",
"None",
")",
":",
"if",
"not",
"prefix",
"is",
"None",
":",
"renames",
"=",
"[",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"s",
")",
"for",
"s",
"in",
"names",
"]",
"if",
"not",
"suffix",
"is",
"None",
":",
"renames",
"=",
"[",
"\"%s%s\"",
"%",
"(",
"s",
",",
"suffix",
")",
"for",
"s",
"in",
"names",
"]",
"if",
"not",
"renames",
"is",
"None",
":",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"names",
")",
":",
"# names=[renames[i] if s==names[i] else s for s in names] ",
"rename",
"=",
"renames",
"[",
"i",
"]",
"df",
".",
"loc",
"[",
":",
",",
"rename",
"]",
"=",
"df",
".",
"loc",
"[",
":",
",",
"name",
"]",
"df",
"=",
"df",
".",
"drop",
"(",
"names",
",",
"axis",
"=",
"1",
")",
"return",
"df"
]
| rename columns of a pandas table
:param df: pandas dataframe
:param names: list of new column names | [
"rename",
"columns",
"of",
"a",
"pandas",
"table"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L275-L292 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | reorderbydf | def reorderbydf(df2,df1):
"""
Reorder rows of a dataframe by other dataframe
:param df2: input dataframe
:param df1: template dataframe
"""
df3=pd.DataFrame()
for idx,row in df1.iterrows():
df3=df3.append(df2.loc[idx,:])
return df3 | python | def reorderbydf(df2,df1):
"""
Reorder rows of a dataframe by other dataframe
:param df2: input dataframe
:param df1: template dataframe
"""
df3=pd.DataFrame()
for idx,row in df1.iterrows():
df3=df3.append(df2.loc[idx,:])
return df3 | [
"def",
"reorderbydf",
"(",
"df2",
",",
"df1",
")",
":",
"df3",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"idx",
",",
"row",
"in",
"df1",
".",
"iterrows",
"(",
")",
":",
"df3",
"=",
"df3",
".",
"append",
"(",
"df2",
".",
"loc",
"[",
"idx",
",",
":",
"]",
")",
"return",
"df3"
]
| Reorder rows of a dataframe by other dataframe
:param df2: input dataframe
:param df1: template dataframe | [
"Reorder",
"rows",
"of",
"a",
"dataframe",
"by",
"other",
"dataframe"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L295-L305 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | df2unstack | def df2unstack(df,coln='columns',idxn='index',col='value'):
"""
will be deprecated
"""
return dmap2lin(df,idxn=idxn,coln=coln,colvalue_name=col) | python | def df2unstack(df,coln='columns',idxn='index',col='value'):
"""
will be deprecated
"""
return dmap2lin(df,idxn=idxn,coln=coln,colvalue_name=col) | [
"def",
"df2unstack",
"(",
"df",
",",
"coln",
"=",
"'columns'",
",",
"idxn",
"=",
"'index'",
",",
"col",
"=",
"'value'",
")",
":",
"return",
"dmap2lin",
"(",
"df",
",",
"idxn",
"=",
"idxn",
",",
"coln",
"=",
"coln",
",",
"colvalue_name",
"=",
"col",
")"
]
| will be deprecated | [
"will",
"be",
"deprecated"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L325-L329 | train |
rraadd88/rohan | rohan/dandage/io_dfs.py | get_offdiag_vals | def get_offdiag_vals(dcorr):
"""
for lin dcorr i guess
"""
del_indexes=[]
for spc1 in np.unique(dcorr.index.get_level_values(0)):
for spc2 in np.unique(dcorr.index.get_level_values(0)):
if (not (spc1,spc2) in del_indexes) and (not (spc2,spc1) in del_indexes):
del_indexes.append((spc1,spc2))
# break
for spc1 in np.unique(dcorr.index.get_level_values(0)):
for spc2 in np.unique(dcorr.index.get_level_values(0)):
if spc1==spc2:
del_indexes.append((spc1,spc2))
return dcorr.drop(del_indexes) | python | def get_offdiag_vals(dcorr):
"""
for lin dcorr i guess
"""
del_indexes=[]
for spc1 in np.unique(dcorr.index.get_level_values(0)):
for spc2 in np.unique(dcorr.index.get_level_values(0)):
if (not (spc1,spc2) in del_indexes) and (not (spc2,spc1) in del_indexes):
del_indexes.append((spc1,spc2))
# break
for spc1 in np.unique(dcorr.index.get_level_values(0)):
for spc2 in np.unique(dcorr.index.get_level_values(0)):
if spc1==spc2:
del_indexes.append((spc1,spc2))
return dcorr.drop(del_indexes) | [
"def",
"get_offdiag_vals",
"(",
"dcorr",
")",
":",
"del_indexes",
"=",
"[",
"]",
"for",
"spc1",
"in",
"np",
".",
"unique",
"(",
"dcorr",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
")",
":",
"for",
"spc2",
"in",
"np",
".",
"unique",
"(",
"dcorr",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
")",
":",
"if",
"(",
"not",
"(",
"spc1",
",",
"spc2",
")",
"in",
"del_indexes",
")",
"and",
"(",
"not",
"(",
"spc2",
",",
"spc1",
")",
"in",
"del_indexes",
")",
":",
"del_indexes",
".",
"append",
"(",
"(",
"spc1",
",",
"spc2",
")",
")",
"# break",
"for",
"spc1",
"in",
"np",
".",
"unique",
"(",
"dcorr",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
")",
":",
"for",
"spc2",
"in",
"np",
".",
"unique",
"(",
"dcorr",
".",
"index",
".",
"get_level_values",
"(",
"0",
")",
")",
":",
"if",
"spc1",
"==",
"spc2",
":",
"del_indexes",
".",
"append",
"(",
"(",
"spc1",
",",
"spc2",
")",
")",
"return",
"dcorr",
".",
"drop",
"(",
"del_indexes",
")"
]
| for lin dcorr i guess | [
"for",
"lin",
"dcorr",
"i",
"guess"
]
| b0643a3582a2fffc0165ace69fb80880d92bfb10 | https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_dfs.py#L490-L505 | train |
ymyzk/python-gyazo | gyazo/image.py | Image.from_dict | def from_dict(data):
"""Create a new instance from dict
:param data: A JSON dict
"""
data = deepcopy(data)
created_at = data.get('created_at', None)
if created_at is not None:
data['created_at'] = dateutil.parser.parse(created_at)
return Image(**data) | python | def from_dict(data):
"""Create a new instance from dict
:param data: A JSON dict
"""
data = deepcopy(data)
created_at = data.get('created_at', None)
if created_at is not None:
data['created_at'] = dateutil.parser.parse(created_at)
return Image(**data) | [
"def",
"from_dict",
"(",
"data",
")",
":",
"data",
"=",
"deepcopy",
"(",
"data",
")",
"created_at",
"=",
"data",
".",
"get",
"(",
"'created_at'",
",",
"None",
")",
"if",
"created_at",
"is",
"not",
"None",
":",
"data",
"[",
"'created_at'",
"]",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"created_at",
")",
"return",
"Image",
"(",
"*",
"*",
"data",
")"
]
| Create a new instance from dict
:param data: A JSON dict | [
"Create",
"a",
"new",
"instance",
"from",
"dict"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L31-L42 | train |
ymyzk/python-gyazo | gyazo/image.py | Image.to_json | def to_json(self, indent=None, sort_keys=True):
"""Return a JSON string representation of this instance
:param indent: specify an indent level or a string used to indent each
level
:param sort_keys: the output is sorted by key
"""
return json.dumps(self.to_dict(), indent=indent, sort_keys=sort_keys) | python | def to_json(self, indent=None, sort_keys=True):
"""Return a JSON string representation of this instance
:param indent: specify an indent level or a string used to indent each
level
:param sort_keys: the output is sorted by key
"""
return json.dumps(self.to_dict(), indent=indent, sort_keys=sort_keys) | [
"def",
"to_json",
"(",
"self",
",",
"indent",
"=",
"None",
",",
"sort_keys",
"=",
"True",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"to_dict",
"(",
")",
",",
"indent",
"=",
"indent",
",",
"sort_keys",
"=",
"sort_keys",
")"
]
| Return a JSON string representation of this instance
:param indent: specify an indent level or a string used to indent each
level
:param sort_keys: the output is sorted by key | [
"Return",
"a",
"JSON",
"string",
"representation",
"of",
"this",
"instance"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L102-L109 | train |
ymyzk/python-gyazo | gyazo/image.py | Image.to_dict | def to_dict(self):
"""Return a dict representation of this instance"""
data = {}
if self.created_at:
data['created_at'] = self.created_at.strftime(
'%Y-%m-%dT%H:%M:%S%z')
if self.image_id:
data['image_id'] = self.image_id
if self.permalink_url:
data['permalink_url'] = self.permalink_url
if self.thumb_url:
data['thumb_url'] = self.thumb_url
if self.type:
data['type'] = self.type
if self.url:
data['url'] = self.url
return data | python | def to_dict(self):
"""Return a dict representation of this instance"""
data = {}
if self.created_at:
data['created_at'] = self.created_at.strftime(
'%Y-%m-%dT%H:%M:%S%z')
if self.image_id:
data['image_id'] = self.image_id
if self.permalink_url:
data['permalink_url'] = self.permalink_url
if self.thumb_url:
data['thumb_url'] = self.thumb_url
if self.type:
data['type'] = self.type
if self.url:
data['url'] = self.url
return data | [
"def",
"to_dict",
"(",
"self",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"created_at",
":",
"data",
"[",
"'created_at'",
"]",
"=",
"self",
".",
"created_at",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S%z'",
")",
"if",
"self",
".",
"image_id",
":",
"data",
"[",
"'image_id'",
"]",
"=",
"self",
".",
"image_id",
"if",
"self",
".",
"permalink_url",
":",
"data",
"[",
"'permalink_url'",
"]",
"=",
"self",
".",
"permalink_url",
"if",
"self",
".",
"thumb_url",
":",
"data",
"[",
"'thumb_url'",
"]",
"=",
"self",
".",
"thumb_url",
"if",
"self",
".",
"type",
":",
"data",
"[",
"'type'",
"]",
"=",
"self",
".",
"type",
"if",
"self",
".",
"url",
":",
"data",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"return",
"data"
]
| Return a dict representation of this instance | [
"Return",
"a",
"dict",
"representation",
"of",
"this",
"instance"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L111-L129 | train |
ymyzk/python-gyazo | gyazo/image.py | Image.download | def download(self):
"""Download an image file if it exists
:raise GyazoError:
"""
if self.url:
try:
return requests.get(self.url).content
except requests.RequestException as e:
raise GyazoError(str(e))
return None | python | def download(self):
"""Download an image file if it exists
:raise GyazoError:
"""
if self.url:
try:
return requests.get(self.url).content
except requests.RequestException as e:
raise GyazoError(str(e))
return None | [
"def",
"download",
"(",
"self",
")",
":",
"if",
"self",
".",
"url",
":",
"try",
":",
"return",
"requests",
".",
"get",
"(",
"self",
".",
"url",
")",
".",
"content",
"except",
"requests",
".",
"RequestException",
"as",
"e",
":",
"raise",
"GyazoError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"None"
]
| Download an image file if it exists
:raise GyazoError: | [
"Download",
"an",
"image",
"file",
"if",
"it",
"exists"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L131-L141 | train |
ymyzk/python-gyazo | gyazo/image.py | Image.download_thumb | def download_thumb(self):
"""Download a thumbnail image file
:raise GyazoError:
"""
try:
return requests.get(self.thumb_url).content
except requests.RequestException as e:
raise GyazoError(str(e)) | python | def download_thumb(self):
"""Download a thumbnail image file
:raise GyazoError:
"""
try:
return requests.get(self.thumb_url).content
except requests.RequestException as e:
raise GyazoError(str(e)) | [
"def",
"download_thumb",
"(",
"self",
")",
":",
"try",
":",
"return",
"requests",
".",
"get",
"(",
"self",
".",
"thumb_url",
")",
".",
"content",
"except",
"requests",
".",
"RequestException",
"as",
"e",
":",
"raise",
"GyazoError",
"(",
"str",
"(",
"e",
")",
")"
]
| Download a thumbnail image file
:raise GyazoError: | [
"Download",
"a",
"thumbnail",
"image",
"file"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L143-L151 | train |
ymyzk/python-gyazo | gyazo/image.py | ImageList.has_next_page | def has_next_page(self):
"""Whether there is a next page or not
:getter: Return true if there is a next page
"""
return self.current_page < math.ceil(self.total_count / self.per_page) | python | def has_next_page(self):
"""Whether there is a next page or not
:getter: Return true if there is a next page
"""
return self.current_page < math.ceil(self.total_count / self.per_page) | [
"def",
"has_next_page",
"(",
"self",
")",
":",
"return",
"self",
".",
"current_page",
"<",
"math",
".",
"ceil",
"(",
"self",
".",
"total_count",
"/",
"self",
".",
"per_page",
")"
]
| Whether there is a next page or not
:getter: Return true if there is a next page | [
"Whether",
"there",
"is",
"a",
"next",
"page",
"or",
"not"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L198-L203 | train |
ymyzk/python-gyazo | gyazo/image.py | ImageList.set_attributes_from_headers | def set_attributes_from_headers(self, headers):
"""Set instance attributes with HTTP header
:param headers: HTTP header
"""
self.total_count = headers.get('x-total-count', None)
self.current_page = headers.get('x-current-page', None)
self.per_page = headers.get('x-per-page', None)
self.user_type = headers.get('x-user-type', None)
if self.total_count:
self.total_count = int(self.total_count)
if self.current_page:
self.current_page = int(self.current_page)
if self.per_page:
self.per_page = int(self.per_page) | python | def set_attributes_from_headers(self, headers):
"""Set instance attributes with HTTP header
:param headers: HTTP header
"""
self.total_count = headers.get('x-total-count', None)
self.current_page = headers.get('x-current-page', None)
self.per_page = headers.get('x-per-page', None)
self.user_type = headers.get('x-user-type', None)
if self.total_count:
self.total_count = int(self.total_count)
if self.current_page:
self.current_page = int(self.current_page)
if self.per_page:
self.per_page = int(self.per_page) | [
"def",
"set_attributes_from_headers",
"(",
"self",
",",
"headers",
")",
":",
"self",
".",
"total_count",
"=",
"headers",
".",
"get",
"(",
"'x-total-count'",
",",
"None",
")",
"self",
".",
"current_page",
"=",
"headers",
".",
"get",
"(",
"'x-current-page'",
",",
"None",
")",
"self",
".",
"per_page",
"=",
"headers",
".",
"get",
"(",
"'x-per-page'",
",",
"None",
")",
"self",
".",
"user_type",
"=",
"headers",
".",
"get",
"(",
"'x-user-type'",
",",
"None",
")",
"if",
"self",
".",
"total_count",
":",
"self",
".",
"total_count",
"=",
"int",
"(",
"self",
".",
"total_count",
")",
"if",
"self",
".",
"current_page",
":",
"self",
".",
"current_page",
"=",
"int",
"(",
"self",
".",
"current_page",
")",
"if",
"self",
".",
"per_page",
":",
"self",
".",
"per_page",
"=",
"int",
"(",
"self",
".",
"per_page",
")"
]
| Set instance attributes with HTTP header
:param headers: HTTP header | [
"Set",
"instance",
"attributes",
"with",
"HTTP",
"header"
]
| 52893118899ed308ff75245b55f73d745c98ed1d | https://github.com/ymyzk/python-gyazo/blob/52893118899ed308ff75245b55f73d745c98ed1d/gyazo/image.py#L212-L227 | train |
Xion/taipan | taipan/objective/base.py | ObjectMetaclass._validate_base_classes | def _validate_base_classes(meta, bases):
"""Validate the base classes of the new class to be created,
making sure none of them are ``@final``.
"""
for base in bases:
if meta._is_final(base):
raise ClassError(
"cannot inherit from @final class %s" % (base.__name__,)) | python | def _validate_base_classes(meta, bases):
"""Validate the base classes of the new class to be created,
making sure none of them are ``@final``.
"""
for base in bases:
if meta._is_final(base):
raise ClassError(
"cannot inherit from @final class %s" % (base.__name__,)) | [
"def",
"_validate_base_classes",
"(",
"meta",
",",
"bases",
")",
":",
"for",
"base",
"in",
"bases",
":",
"if",
"meta",
".",
"_is_final",
"(",
"base",
")",
":",
"raise",
"ClassError",
"(",
"\"cannot inherit from @final class %s\"",
"%",
"(",
"base",
".",
"__name__",
",",
")",
")"
]
| Validate the base classes of the new class to be created,
making sure none of them are ``@final``. | [
"Validate",
"the",
"base",
"classes",
"of",
"the",
"new",
"class",
"to",
"be",
"created",
"making",
"sure",
"none",
"of",
"them",
"are"
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/base.py#L50-L57 | train |
Xion/taipan | taipan/objective/base.py | ObjectMetaclass._validate_method_decoration | def _validate_method_decoration(meta, class_):
"""Validate the usage of ``@override`` and ``@final`` modifiers
on methods of the given ``class_``.
"""
# TODO(xion): employ some code inspection tricks to serve ClassErrors
# as if they were thrown at the offending class's/method's definition
super_mro = class_.__mro__[1:]
own_methods = ((name, member)
for name, member in class_.__dict__.items()
if is_method(member))
# check that ``@override`` modifier is present where it should be
# and absent where it shouldn't (e.g. ``@final`` methods)
for name, method in own_methods:
shadowed_method, base_class = next(
((getattr(base, name), base)
for base in super_mro if hasattr(base, name)),
(None, None)
)
if meta._is_override(method):
# ``@override`` is legal only when the method actually shadows
# a method from a superclass, and that metod is not ``@final``
if not shadowed_method:
raise ClassError("unnecessary @override on %s.%s" % (
class_.__name__, name), class_=class_)
if meta._is_final(shadowed_method):
raise ClassError(
"illegal @override on a @final method %s.%s" % (
base_class.__name__, name), class_=class_)
# if @override had parameter supplied, verify if it was
# the same class as the base of shadowed method
override_base = meta._get_override_base(method)
if override_base and base_class is not override_base:
if is_class(override_base):
raise ClassError(
"incorrect override base: expected %s, got %s" % (
base_class.__name__, override_base.__name__))
else:
raise ClassError(
"invalid override base specified: %s" % (
override_base,))
setattr(class_, name, method.method)
else:
if shadowed_method and name not in meta.OVERRIDE_EXEMPTIONS:
if meta._is_final(shadowed_method):
msg = "%s.%s is hiding a @final method %s.%s" % (
class_.__name__, name, base_class.__name__, name)
else:
msg = ("overridden method %s.%s "
"must be marked with @override" % (
class_.__name__, name))
raise ClassError(msg, class_=class_) | python | def _validate_method_decoration(meta, class_):
"""Validate the usage of ``@override`` and ``@final`` modifiers
on methods of the given ``class_``.
"""
# TODO(xion): employ some code inspection tricks to serve ClassErrors
# as if they were thrown at the offending class's/method's definition
super_mro = class_.__mro__[1:]
own_methods = ((name, member)
for name, member in class_.__dict__.items()
if is_method(member))
# check that ``@override`` modifier is present where it should be
# and absent where it shouldn't (e.g. ``@final`` methods)
for name, method in own_methods:
shadowed_method, base_class = next(
((getattr(base, name), base)
for base in super_mro if hasattr(base, name)),
(None, None)
)
if meta._is_override(method):
# ``@override`` is legal only when the method actually shadows
# a method from a superclass, and that metod is not ``@final``
if not shadowed_method:
raise ClassError("unnecessary @override on %s.%s" % (
class_.__name__, name), class_=class_)
if meta._is_final(shadowed_method):
raise ClassError(
"illegal @override on a @final method %s.%s" % (
base_class.__name__, name), class_=class_)
# if @override had parameter supplied, verify if it was
# the same class as the base of shadowed method
override_base = meta._get_override_base(method)
if override_base and base_class is not override_base:
if is_class(override_base):
raise ClassError(
"incorrect override base: expected %s, got %s" % (
base_class.__name__, override_base.__name__))
else:
raise ClassError(
"invalid override base specified: %s" % (
override_base,))
setattr(class_, name, method.method)
else:
if shadowed_method and name not in meta.OVERRIDE_EXEMPTIONS:
if meta._is_final(shadowed_method):
msg = "%s.%s is hiding a @final method %s.%s" % (
class_.__name__, name, base_class.__name__, name)
else:
msg = ("overridden method %s.%s "
"must be marked with @override" % (
class_.__name__, name))
raise ClassError(msg, class_=class_) | [
"def",
"_validate_method_decoration",
"(",
"meta",
",",
"class_",
")",
":",
"# TODO(xion): employ some code inspection tricks to serve ClassErrors",
"# as if they were thrown at the offending class's/method's definition",
"super_mro",
"=",
"class_",
".",
"__mro__",
"[",
"1",
":",
"]",
"own_methods",
"=",
"(",
"(",
"name",
",",
"member",
")",
"for",
"name",
",",
"member",
"in",
"class_",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"is_method",
"(",
"member",
")",
")",
"# check that ``@override`` modifier is present where it should be",
"# and absent where it shouldn't (e.g. ``@final`` methods)",
"for",
"name",
",",
"method",
"in",
"own_methods",
":",
"shadowed_method",
",",
"base_class",
"=",
"next",
"(",
"(",
"(",
"getattr",
"(",
"base",
",",
"name",
")",
",",
"base",
")",
"for",
"base",
"in",
"super_mro",
"if",
"hasattr",
"(",
"base",
",",
"name",
")",
")",
",",
"(",
"None",
",",
"None",
")",
")",
"if",
"meta",
".",
"_is_override",
"(",
"method",
")",
":",
"# ``@override`` is legal only when the method actually shadows",
"# a method from a superclass, and that metod is not ``@final``",
"if",
"not",
"shadowed_method",
":",
"raise",
"ClassError",
"(",
"\"unnecessary @override on %s.%s\"",
"%",
"(",
"class_",
".",
"__name__",
",",
"name",
")",
",",
"class_",
"=",
"class_",
")",
"if",
"meta",
".",
"_is_final",
"(",
"shadowed_method",
")",
":",
"raise",
"ClassError",
"(",
"\"illegal @override on a @final method %s.%s\"",
"%",
"(",
"base_class",
".",
"__name__",
",",
"name",
")",
",",
"class_",
"=",
"class_",
")",
"# if @override had parameter supplied, verify if it was",
"# the same class as the base of shadowed method",
"override_base",
"=",
"meta",
".",
"_get_override_base",
"(",
"method",
")",
"if",
"override_base",
"and",
"base_class",
"is",
"not",
"override_base",
":",
"if",
"is_class",
"(",
"override_base",
")",
":",
"raise",
"ClassError",
"(",
"\"incorrect override base: expected %s, got %s\"",
"%",
"(",
"base_class",
".",
"__name__",
",",
"override_base",
".",
"__name__",
")",
")",
"else",
":",
"raise",
"ClassError",
"(",
"\"invalid override base specified: %s\"",
"%",
"(",
"override_base",
",",
")",
")",
"setattr",
"(",
"class_",
",",
"name",
",",
"method",
".",
"method",
")",
"else",
":",
"if",
"shadowed_method",
"and",
"name",
"not",
"in",
"meta",
".",
"OVERRIDE_EXEMPTIONS",
":",
"if",
"meta",
".",
"_is_final",
"(",
"shadowed_method",
")",
":",
"msg",
"=",
"\"%s.%s is hiding a @final method %s.%s\"",
"%",
"(",
"class_",
".",
"__name__",
",",
"name",
",",
"base_class",
".",
"__name__",
",",
"name",
")",
"else",
":",
"msg",
"=",
"(",
"\"overridden method %s.%s \"",
"\"must be marked with @override\"",
"%",
"(",
"class_",
".",
"__name__",
",",
"name",
")",
")",
"raise",
"ClassError",
"(",
"msg",
",",
"class_",
"=",
"class_",
")"
]
| Validate the usage of ``@override`` and ``@final`` modifiers
on methods of the given ``class_``. | [
"Validate",
"the",
"usage",
"of"
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/base.py#L60-L114 | train |
Xion/taipan | taipan/objective/base.py | ObjectMetaclass._is_final | def _is_final(meta, arg):
"""Checks whether given class or method has been marked
with the ``@final`` decorator.
"""
if inspect.isclass(arg) and not isinstance(arg, ObjectMetaclass):
return False # of classes, only subclasses of Object can be final
# account for method wrappers, such as the one introduced by @override
from taipan.objective.modifiers import _WrappedMethod
if isinstance(arg, _WrappedMethod):
arg = arg.method
return getattr(arg, '__final__', False) | python | def _is_final(meta, arg):
"""Checks whether given class or method has been marked
with the ``@final`` decorator.
"""
if inspect.isclass(arg) and not isinstance(arg, ObjectMetaclass):
return False # of classes, only subclasses of Object can be final
# account for method wrappers, such as the one introduced by @override
from taipan.objective.modifiers import _WrappedMethod
if isinstance(arg, _WrappedMethod):
arg = arg.method
return getattr(arg, '__final__', False) | [
"def",
"_is_final",
"(",
"meta",
",",
"arg",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"arg",
")",
"and",
"not",
"isinstance",
"(",
"arg",
",",
"ObjectMetaclass",
")",
":",
"return",
"False",
"# of classes, only subclasses of Object can be final",
"# account for method wrappers, such as the one introduced by @override",
"from",
"taipan",
".",
"objective",
".",
"modifiers",
"import",
"_WrappedMethod",
"if",
"isinstance",
"(",
"arg",
",",
"_WrappedMethod",
")",
":",
"arg",
"=",
"arg",
".",
"method",
"return",
"getattr",
"(",
"arg",
",",
"'__final__'",
",",
"False",
")"
]
| Checks whether given class or method has been marked
with the ``@final`` decorator. | [
"Checks",
"whether",
"given",
"class",
"or",
"method",
"has",
"been",
"marked",
"with",
"the"
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/base.py#L117-L129 | train |
Xion/taipan | taipan/objective/base.py | ObjectMetaclass._is_override | def _is_override(meta, method):
"""Checks whether given class or instance method has been marked
with the ``@override`` decorator.
"""
from taipan.objective.modifiers import _OverriddenMethod
return isinstance(method, _OverriddenMethod) | python | def _is_override(meta, method):
"""Checks whether given class or instance method has been marked
with the ``@override`` decorator.
"""
from taipan.objective.modifiers import _OverriddenMethod
return isinstance(method, _OverriddenMethod) | [
"def",
"_is_override",
"(",
"meta",
",",
"method",
")",
":",
"from",
"taipan",
".",
"objective",
".",
"modifiers",
"import",
"_OverriddenMethod",
"return",
"isinstance",
"(",
"method",
",",
"_OverriddenMethod",
")"
]
| Checks whether given class or instance method has been marked
with the ``@override`` decorator. | [
"Checks",
"whether",
"given",
"class",
"or",
"instance",
"method",
"has",
"been",
"marked",
"with",
"the"
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/base.py#L132-L137 | train |
fangpenlin/gluttony | gluttony/dependency.py | trace_dependencies | def trace_dependencies(req, requirement_set, dependencies, _visited=None):
"""Trace all dependency relationship
@param req: requirements to trace
@param requirement_set: RequirementSet
@param dependencies: list for storing dependencies relationships
@param _visited: visited requirement set
"""
_visited = _visited or set()
if req in _visited:
return
_visited.add(req)
for reqName in req.requirements():
try:
name = pkg_resources.Requirement.parse(reqName).project_name
except ValueError, e:
logger.error('Invalid requirement: %r (%s) in requirement %s' % (
reqName, e, req))
continue
subreq = requirement_set.get_requirement(name)
dependencies.append((req, subreq))
trace_dependencies(subreq, requirement_set, dependencies, _visited) | python | def trace_dependencies(req, requirement_set, dependencies, _visited=None):
"""Trace all dependency relationship
@param req: requirements to trace
@param requirement_set: RequirementSet
@param dependencies: list for storing dependencies relationships
@param _visited: visited requirement set
"""
_visited = _visited or set()
if req in _visited:
return
_visited.add(req)
for reqName in req.requirements():
try:
name = pkg_resources.Requirement.parse(reqName).project_name
except ValueError, e:
logger.error('Invalid requirement: %r (%s) in requirement %s' % (
reqName, e, req))
continue
subreq = requirement_set.get_requirement(name)
dependencies.append((req, subreq))
trace_dependencies(subreq, requirement_set, dependencies, _visited) | [
"def",
"trace_dependencies",
"(",
"req",
",",
"requirement_set",
",",
"dependencies",
",",
"_visited",
"=",
"None",
")",
":",
"_visited",
"=",
"_visited",
"or",
"set",
"(",
")",
"if",
"req",
"in",
"_visited",
":",
"return",
"_visited",
".",
"add",
"(",
"req",
")",
"for",
"reqName",
"in",
"req",
".",
"requirements",
"(",
")",
":",
"try",
":",
"name",
"=",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"reqName",
")",
".",
"project_name",
"except",
"ValueError",
",",
"e",
":",
"logger",
".",
"error",
"(",
"'Invalid requirement: %r (%s) in requirement %s'",
"%",
"(",
"reqName",
",",
"e",
",",
"req",
")",
")",
"continue",
"subreq",
"=",
"requirement_set",
".",
"get_requirement",
"(",
"name",
")",
"dependencies",
".",
"append",
"(",
"(",
"req",
",",
"subreq",
")",
")",
"trace_dependencies",
"(",
"subreq",
",",
"requirement_set",
",",
"dependencies",
",",
"_visited",
")"
]
| Trace all dependency relationship
@param req: requirements to trace
@param requirement_set: RequirementSet
@param dependencies: list for storing dependencies relationships
@param _visited: visited requirement set | [
"Trace",
"all",
"dependency",
"relationship"
]
| 86c24c7555dbc8de073aee66edb07a030f77275e | https://github.com/fangpenlin/gluttony/blob/86c24c7555dbc8de073aee66edb07a030f77275e/gluttony/dependency.py#L7-L28 | train |
jmbeach/KEP.py | src/keppy/register.py | pad_zeroes | def pad_zeroes(addr, n_zeroes):
"""Padds the address with zeroes"""
if len(addr) < n_zeroes:
return pad_zeroes("0" + addr, n_zeroes)
return addr | python | def pad_zeroes(addr, n_zeroes):
"""Padds the address with zeroes"""
if len(addr) < n_zeroes:
return pad_zeroes("0" + addr, n_zeroes)
return addr | [
"def",
"pad_zeroes",
"(",
"addr",
",",
"n_zeroes",
")",
":",
"if",
"len",
"(",
"addr",
")",
"<",
"n_zeroes",
":",
"return",
"pad_zeroes",
"(",
"\"0\"",
"+",
"addr",
",",
"n_zeroes",
")",
"return",
"addr"
]
| Padds the address with zeroes | [
"Padds",
"the",
"address",
"with",
"zeroes"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L2-L6 | train |
jmbeach/KEP.py | src/keppy/register.py | next_addr | def next_addr(addr, i):
"""Gets address after the current + i"""
str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
return addr[0] + str_addr | python | def next_addr(addr, i):
"""Gets address after the current + i"""
str_addr = pad_zeroes(str(int_addr(addr) + i), len(addr[1:]))
return addr[0] + str_addr | [
"def",
"next_addr",
"(",
"addr",
",",
"i",
")",
":",
"str_addr",
"=",
"pad_zeroes",
"(",
"str",
"(",
"int_addr",
"(",
"addr",
")",
"+",
"i",
")",
",",
"len",
"(",
"addr",
"[",
"1",
":",
"]",
")",
")",
"return",
"addr",
"[",
"0",
"]",
"+",
"str_addr"
]
| Gets address after the current + i | [
"Gets",
"address",
"after",
"the",
"current",
"+",
"i"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L12-L15 | train |
jmbeach/KEP.py | src/keppy/register.py | Register.mark_address | def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1 | python | def mark_address(self, addr, size):
"""Marks address as being used in simulator"""
i = 0
while i < size:
self._register_map[addr] = True
i += 1 | [
"def",
"mark_address",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"size",
":",
"self",
".",
"_register_map",
"[",
"addr",
"]",
"=",
"True",
"i",
"+=",
"1"
]
| Marks address as being used in simulator | [
"Marks",
"address",
"as",
"being",
"used",
"in",
"simulator"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L53-L58 | train |
jmbeach/KEP.py | src/keppy/register.py | Register.next_address_avoid_collision | def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i) | python | def next_address_avoid_collision(self, start_addr):
"""Finds the next address recursively which does not collide with any other address"""
i = 1
while self.is_address_in_use(next_addr(start_addr, i)):
i += 1
return next_addr(start_addr, i) | [
"def",
"next_address_avoid_collision",
"(",
"self",
",",
"start_addr",
")",
":",
"i",
"=",
"1",
"while",
"self",
".",
"is_address_in_use",
"(",
"next_addr",
"(",
"start_addr",
",",
"i",
")",
")",
":",
"i",
"+=",
"1",
"return",
"next_addr",
"(",
"start_addr",
",",
"i",
")"
]
| Finds the next address recursively which does not collide with any other address | [
"Finds",
"the",
"next",
"address",
"recursively",
"which",
"does",
"not",
"collide",
"with",
"any",
"other",
"address"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L64-L69 | train |
jmbeach/KEP.py | src/keppy/register.py | Register.move_to_next_bit_address | def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address) | python | def move_to_next_bit_address(self):
"""Moves to next available bit address position"""
self._current_bit_address = self.next_bit_address()
self.mark_address(self._current_bit_address.split('.')[0], self._size_of_current_register_address) | [
"def",
"move_to_next_bit_address",
"(",
"self",
")",
":",
"self",
".",
"_current_bit_address",
"=",
"self",
".",
"next_bit_address",
"(",
")",
"self",
".",
"mark_address",
"(",
"self",
".",
"_current_bit_address",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
",",
"self",
".",
"_size_of_current_register_address",
")"
]
| Moves to next available bit address position | [
"Moves",
"to",
"next",
"available",
"bit",
"address",
"position"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L82-L85 | train |
jmbeach/KEP.py | src/keppy/register.py | Register.next_bit_address | def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0") | python | def next_bit_address(self):
"""Gets the next boolean address"""
if self._current_bit_address == "":
if self._is_16bit:
return "{0}.{1}".format(
self.next_address(),
"00")
return "{0}.{1}".format(
self.next_address(),
"0")
if self._is_16bit:
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 4:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
pad_zeroes(str(bool_half + 1), 2))
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"00")
bool_half = int(self._current_bit_address.split(".")[1])
if bool_half < 3:
register_half = self._current_bit_address.split(".")[0]
return "{0}.{1}".format(
register_half,
bool_half + 1)
self.move_to_next_address(self._size_of_current_register_address)
return "{0}.{1}".format(
self.next_address(),
"0") | [
"def",
"next_bit_address",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current_bit_address",
"==",
"\"\"",
":",
"if",
"self",
".",
"_is_16bit",
":",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"self",
".",
"next_address",
"(",
")",
",",
"\"00\"",
")",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"self",
".",
"next_address",
"(",
")",
",",
"\"0\"",
")",
"if",
"self",
".",
"_is_16bit",
":",
"bool_half",
"=",
"int",
"(",
"self",
".",
"_current_bit_address",
".",
"split",
"(",
"\".\"",
")",
"[",
"1",
"]",
")",
"if",
"bool_half",
"<",
"4",
":",
"register_half",
"=",
"self",
".",
"_current_bit_address",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"register_half",
",",
"pad_zeroes",
"(",
"str",
"(",
"bool_half",
"+",
"1",
")",
",",
"2",
")",
")",
"self",
".",
"move_to_next_address",
"(",
"self",
".",
"_size_of_current_register_address",
")",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"self",
".",
"next_address",
"(",
")",
",",
"\"00\"",
")",
"bool_half",
"=",
"int",
"(",
"self",
".",
"_current_bit_address",
".",
"split",
"(",
"\".\"",
")",
"[",
"1",
"]",
")",
"if",
"bool_half",
"<",
"3",
":",
"register_half",
"=",
"self",
".",
"_current_bit_address",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"register_half",
",",
"bool_half",
"+",
"1",
")",
"self",
".",
"move_to_next_address",
"(",
"self",
".",
"_size_of_current_register_address",
")",
"return",
"\"{0}.{1}\"",
".",
"format",
"(",
"self",
".",
"next_address",
"(",
")",
",",
"\"0\"",
")"
]
| Gets the next boolean address | [
"Gets",
"the",
"next",
"boolean",
"address"
]
| 68cda64ab649640a486534867c81274c41e39446 | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/register.py#L91-L121 | train |
The-Politico/politico-civic-election-night | electionnight/management/commands/bootstrap_electionnight_content.py | Command.route_election | def route_election(self, election):
"""
Legislative or executive office?
"""
if (
election.election_type.slug == ElectionType.GENERAL
or ElectionType.GENERAL_RUNOFF
):
self.bootstrap_general_election(election)
elif election.race.special:
self.bootstrap_special_election(election)
if election.race.office.is_executive:
self.bootstrap_executive_office(election)
else:
self.bootstrap_legislative_office(election) | python | def route_election(self, election):
"""
Legislative or executive office?
"""
if (
election.election_type.slug == ElectionType.GENERAL
or ElectionType.GENERAL_RUNOFF
):
self.bootstrap_general_election(election)
elif election.race.special:
self.bootstrap_special_election(election)
if election.race.office.is_executive:
self.bootstrap_executive_office(election)
else:
self.bootstrap_legislative_office(election) | [
"def",
"route_election",
"(",
"self",
",",
"election",
")",
":",
"if",
"(",
"election",
".",
"election_type",
".",
"slug",
"==",
"ElectionType",
".",
"GENERAL",
"or",
"ElectionType",
".",
"GENERAL_RUNOFF",
")",
":",
"self",
".",
"bootstrap_general_election",
"(",
"election",
")",
"elif",
"election",
".",
"race",
".",
"special",
":",
"self",
".",
"bootstrap_special_election",
"(",
"election",
")",
"if",
"election",
".",
"race",
".",
"office",
".",
"is_executive",
":",
"self",
".",
"bootstrap_executive_office",
"(",
"election",
")",
"else",
":",
"self",
".",
"bootstrap_legislative_office",
"(",
"election",
")"
]
| Legislative or executive office? | [
"Legislative",
"or",
"executive",
"office?"
]
| a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/management/commands/bootstrap_electionnight_content.py#L23-L38 | train |
venthur/python-debianbts | debianbts/debianbts.py | get_status | def get_status(*nrs):
"""Returns a list of Bugreport objects.
Given a list of bugnumbers this method returns a list of Bugreport
objects.
Parameters
----------
nrs : int or list of ints
the bugnumbers
Returns
-------
bugs : list of Bugreport objects
"""
# If we called get_status with one single bug, we get a single bug,
# if we called it with a list of bugs, we get a list,
# No available bugreports returns an empty list
bugs = []
list_ = []
for nr in nrs:
if isinstance(nr, list):
list_.extend(nr)
else:
list_.append(nr)
# Process the input in batches to avoid hitting resource limits on the BTS
soap_client = _build_soap_client()
for i in range(0, len(list_), BATCH_SIZE):
slice_ = list_[i:i + BATCH_SIZE]
# I build body by hand, pysimplesoap doesn't generate soap Arrays
# without using wsdl
method_el = SimpleXMLElement('<get_status></get_status>')
_build_int_array_el('arg0', method_el, slice_)
reply = soap_client.call('get_status', method_el)
for bug_item_el in reply('s-gensym3').children() or []:
bug_el = bug_item_el.children()[1]
bugs.append(_parse_status(bug_el))
return bugs | python | def get_status(*nrs):
"""Returns a list of Bugreport objects.
Given a list of bugnumbers this method returns a list of Bugreport
objects.
Parameters
----------
nrs : int or list of ints
the bugnumbers
Returns
-------
bugs : list of Bugreport objects
"""
# If we called get_status with one single bug, we get a single bug,
# if we called it with a list of bugs, we get a list,
# No available bugreports returns an empty list
bugs = []
list_ = []
for nr in nrs:
if isinstance(nr, list):
list_.extend(nr)
else:
list_.append(nr)
# Process the input in batches to avoid hitting resource limits on the BTS
soap_client = _build_soap_client()
for i in range(0, len(list_), BATCH_SIZE):
slice_ = list_[i:i + BATCH_SIZE]
# I build body by hand, pysimplesoap doesn't generate soap Arrays
# without using wsdl
method_el = SimpleXMLElement('<get_status></get_status>')
_build_int_array_el('arg0', method_el, slice_)
reply = soap_client.call('get_status', method_el)
for bug_item_el in reply('s-gensym3').children() or []:
bug_el = bug_item_el.children()[1]
bugs.append(_parse_status(bug_el))
return bugs | [
"def",
"get_status",
"(",
"*",
"nrs",
")",
":",
"# If we called get_status with one single bug, we get a single bug,",
"# if we called it with a list of bugs, we get a list,",
"# No available bugreports returns an empty list",
"bugs",
"=",
"[",
"]",
"list_",
"=",
"[",
"]",
"for",
"nr",
"in",
"nrs",
":",
"if",
"isinstance",
"(",
"nr",
",",
"list",
")",
":",
"list_",
".",
"extend",
"(",
"nr",
")",
"else",
":",
"list_",
".",
"append",
"(",
"nr",
")",
"# Process the input in batches to avoid hitting resource limits on the BTS",
"soap_client",
"=",
"_build_soap_client",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"list_",
")",
",",
"BATCH_SIZE",
")",
":",
"slice_",
"=",
"list_",
"[",
"i",
":",
"i",
"+",
"BATCH_SIZE",
"]",
"# I build body by hand, pysimplesoap doesn't generate soap Arrays",
"# without using wsdl",
"method_el",
"=",
"SimpleXMLElement",
"(",
"'<get_status></get_status>'",
")",
"_build_int_array_el",
"(",
"'arg0'",
",",
"method_el",
",",
"slice_",
")",
"reply",
"=",
"soap_client",
".",
"call",
"(",
"'get_status'",
",",
"method_el",
")",
"for",
"bug_item_el",
"in",
"reply",
"(",
"'s-gensym3'",
")",
".",
"children",
"(",
")",
"or",
"[",
"]",
":",
"bug_el",
"=",
"bug_item_el",
".",
"children",
"(",
")",
"[",
"1",
"]",
"bugs",
".",
"append",
"(",
"_parse_status",
"(",
"bug_el",
")",
")",
"return",
"bugs"
]
| Returns a list of Bugreport objects.
Given a list of bugnumbers this method returns a list of Bugreport
objects.
Parameters
----------
nrs : int or list of ints
the bugnumbers
Returns
-------
bugs : list of Bugreport objects | [
"Returns",
"a",
"list",
"of",
"Bugreport",
"objects",
"."
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L208-L246 | train |
venthur/python-debianbts | debianbts/debianbts.py | get_usertag | def get_usertag(email, *tags):
"""Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist
"""
reply = _soap_client_call('get_usertag', email, *tags)
map_el = reply('s-gensym3')
mapping = {}
# element <s-gensys3> in response can have standard type
# xsi:type=apachens:Map (example, for email [email protected])
# OR no type, in this case keys are the names of child elements and
# the array is contained in the child elements
type_attr = map_el.attributes().get('xsi:type')
if type_attr and type_attr.value == 'apachens:Map':
for usertag_el in map_el.children() or []:
tag = _uc(str(usertag_el('key')))
buglist_el = usertag_el('value')
mapping[tag] = [int(bug) for bug in buglist_el.children() or []]
else:
for usertag_el in map_el.children() or []:
tag = _uc(usertag_el.get_name())
mapping[tag] = [int(bug) for bug in usertag_el.children() or []]
return mapping | python | def get_usertag(email, *tags):
"""Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist
"""
reply = _soap_client_call('get_usertag', email, *tags)
map_el = reply('s-gensym3')
mapping = {}
# element <s-gensys3> in response can have standard type
# xsi:type=apachens:Map (example, for email [email protected])
# OR no type, in this case keys are the names of child elements and
# the array is contained in the child elements
type_attr = map_el.attributes().get('xsi:type')
if type_attr and type_attr.value == 'apachens:Map':
for usertag_el in map_el.children() or []:
tag = _uc(str(usertag_el('key')))
buglist_el = usertag_el('value')
mapping[tag] = [int(bug) for bug in buglist_el.children() or []]
else:
for usertag_el in map_el.children() or []:
tag = _uc(usertag_el.get_name())
mapping[tag] = [int(bug) for bug in usertag_el.children() or []]
return mapping | [
"def",
"get_usertag",
"(",
"email",
",",
"*",
"tags",
")",
":",
"reply",
"=",
"_soap_client_call",
"(",
"'get_usertag'",
",",
"email",
",",
"*",
"tags",
")",
"map_el",
"=",
"reply",
"(",
"'s-gensym3'",
")",
"mapping",
"=",
"{",
"}",
"# element <s-gensys3> in response can have standard type",
"# xsi:type=apachens:Map (example, for email [email protected])",
"# OR no type, in this case keys are the names of child elements and",
"# the array is contained in the child elements",
"type_attr",
"=",
"map_el",
".",
"attributes",
"(",
")",
".",
"get",
"(",
"'xsi:type'",
")",
"if",
"type_attr",
"and",
"type_attr",
".",
"value",
"==",
"'apachens:Map'",
":",
"for",
"usertag_el",
"in",
"map_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
":",
"tag",
"=",
"_uc",
"(",
"str",
"(",
"usertag_el",
"(",
"'key'",
")",
")",
")",
"buglist_el",
"=",
"usertag_el",
"(",
"'value'",
")",
"mapping",
"[",
"tag",
"]",
"=",
"[",
"int",
"(",
"bug",
")",
"for",
"bug",
"in",
"buglist_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]",
"else",
":",
"for",
"usertag_el",
"in",
"map_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
":",
"tag",
"=",
"_uc",
"(",
"usertag_el",
".",
"get_name",
"(",
")",
")",
"mapping",
"[",
"tag",
"]",
"=",
"[",
"int",
"(",
"bug",
")",
"for",
"bug",
"in",
"usertag_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]",
"return",
"mapping"
]
| Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist | [
"Get",
"buglists",
"by",
"usertags",
"."
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L249-L282 | train |
venthur/python-debianbts | debianbts/debianbts.py | get_bug_log | def get_bug_log(nr):
"""Get Buglogs.
A buglog is a dictionary with the following mappings:
* "header" => string
* "body" => string
* "attachments" => list
* "msg_num" => int
* "message" => email.message.Message
Parameters
----------
nr : int
the bugnumber
Returns
-------
buglogs : list of dicts
"""
reply = _soap_client_call('get_bug_log', nr)
items_el = reply('soapenc:Array')
buglogs = []
for buglog_el in items_el.children():
buglog = {}
buglog["header"] = _parse_string_el(buglog_el("header"))
buglog["body"] = _parse_string_el(buglog_el("body"))
buglog["msg_num"] = int(buglog_el("msg_num"))
# server always returns an empty attachments array ?
buglog["attachments"] = []
mail_parser = email.feedparser.FeedParser()
mail_parser.feed(buglog["header"])
mail_parser.feed("\n\n")
mail_parser.feed(buglog["body"])
buglog["message"] = mail_parser.close()
buglogs.append(buglog)
return buglogs | python | def get_bug_log(nr):
"""Get Buglogs.
A buglog is a dictionary with the following mappings:
* "header" => string
* "body" => string
* "attachments" => list
* "msg_num" => int
* "message" => email.message.Message
Parameters
----------
nr : int
the bugnumber
Returns
-------
buglogs : list of dicts
"""
reply = _soap_client_call('get_bug_log', nr)
items_el = reply('soapenc:Array')
buglogs = []
for buglog_el in items_el.children():
buglog = {}
buglog["header"] = _parse_string_el(buglog_el("header"))
buglog["body"] = _parse_string_el(buglog_el("body"))
buglog["msg_num"] = int(buglog_el("msg_num"))
# server always returns an empty attachments array ?
buglog["attachments"] = []
mail_parser = email.feedparser.FeedParser()
mail_parser.feed(buglog["header"])
mail_parser.feed("\n\n")
mail_parser.feed(buglog["body"])
buglog["message"] = mail_parser.close()
buglogs.append(buglog)
return buglogs | [
"def",
"get_bug_log",
"(",
"nr",
")",
":",
"reply",
"=",
"_soap_client_call",
"(",
"'get_bug_log'",
",",
"nr",
")",
"items_el",
"=",
"reply",
"(",
"'soapenc:Array'",
")",
"buglogs",
"=",
"[",
"]",
"for",
"buglog_el",
"in",
"items_el",
".",
"children",
"(",
")",
":",
"buglog",
"=",
"{",
"}",
"buglog",
"[",
"\"header\"",
"]",
"=",
"_parse_string_el",
"(",
"buglog_el",
"(",
"\"header\"",
")",
")",
"buglog",
"[",
"\"body\"",
"]",
"=",
"_parse_string_el",
"(",
"buglog_el",
"(",
"\"body\"",
")",
")",
"buglog",
"[",
"\"msg_num\"",
"]",
"=",
"int",
"(",
"buglog_el",
"(",
"\"msg_num\"",
")",
")",
"# server always returns an empty attachments array ?",
"buglog",
"[",
"\"attachments\"",
"]",
"=",
"[",
"]",
"mail_parser",
"=",
"email",
".",
"feedparser",
".",
"FeedParser",
"(",
")",
"mail_parser",
".",
"feed",
"(",
"buglog",
"[",
"\"header\"",
"]",
")",
"mail_parser",
".",
"feed",
"(",
"\"\\n\\n\"",
")",
"mail_parser",
".",
"feed",
"(",
"buglog",
"[",
"\"body\"",
"]",
")",
"buglog",
"[",
"\"message\"",
"]",
"=",
"mail_parser",
".",
"close",
"(",
")",
"buglogs",
".",
"append",
"(",
"buglog",
")",
"return",
"buglogs"
]
| Get Buglogs.
A buglog is a dictionary with the following mappings:
* "header" => string
* "body" => string
* "attachments" => list
* "msg_num" => int
* "message" => email.message.Message
Parameters
----------
nr : int
the bugnumber
Returns
-------
buglogs : list of dicts | [
"Get",
"Buglogs",
"."
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L285-L323 | train |
venthur/python-debianbts | debianbts/debianbts.py | newest_bugs | def newest_bugs(amount):
"""Returns the newest bugs.
This method can be used to query the BTS for the n newest bugs.
Parameters
----------
amount : int
the number of desired bugs. E.g. if `amount` is 10 the method
will return the 10 latest bugs.
Returns
-------
bugs : list of int
the bugnumbers
"""
reply = _soap_client_call('newest_bugs', amount)
items_el = reply('soapenc:Array')
return [int(item_el) for item_el in items_el.children() or []] | python | def newest_bugs(amount):
"""Returns the newest bugs.
This method can be used to query the BTS for the n newest bugs.
Parameters
----------
amount : int
the number of desired bugs. E.g. if `amount` is 10 the method
will return the 10 latest bugs.
Returns
-------
bugs : list of int
the bugnumbers
"""
reply = _soap_client_call('newest_bugs', amount)
items_el = reply('soapenc:Array')
return [int(item_el) for item_el in items_el.children() or []] | [
"def",
"newest_bugs",
"(",
"amount",
")",
":",
"reply",
"=",
"_soap_client_call",
"(",
"'newest_bugs'",
",",
"amount",
")",
"items_el",
"=",
"reply",
"(",
"'soapenc:Array'",
")",
"return",
"[",
"int",
"(",
"item_el",
")",
"for",
"item_el",
"in",
"items_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]"
]
| Returns the newest bugs.
This method can be used to query the BTS for the n newest bugs.
Parameters
----------
amount : int
the number of desired bugs. E.g. if `amount` is 10 the method
will return the 10 latest bugs.
Returns
-------
bugs : list of int
the bugnumbers | [
"Returns",
"the",
"newest",
"bugs",
"."
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L326-L345 | train |
venthur/python-debianbts | debianbts/debianbts.py | get_bugs | def get_bugs(*key_value):
"""Get list of bugs matching certain criteria.
The conditions are defined by key value pairs.
Possible keys are:
* "package": bugs for the given package
* "submitter": bugs from the submitter
* "maint": bugs belonging to a maintainer
* "src": bugs belonging to a source package
* "severity": bugs with a certain severity
* "status": can be either "done", "forwarded", or "open"
* "tag": see http://www.debian.org/Bugs/Developer#tags for
available tags
* "owner": bugs which are assigned to `owner`
* "bugs": takes single int or list of bugnumbers, filters the list
according to given criteria
* "correspondent": bugs where `correspondent` has sent a mail to
Arguments
---------
key_value : str
Returns
-------
bugs : list of ints
the bugnumbers
Examples
--------
>>> get_bugs('package', 'gtk-qt-engine', 'severity', 'normal')
[12345, 23456]
"""
# previous versions also accepted
# get_bugs(['package', 'gtk-qt-engine', 'severity', 'normal'])
# if key_value is a list in a one elemented tuple, remove the
# wrapping list
if len(key_value) == 1 and isinstance(key_value[0], list):
key_value = tuple(key_value[0])
# pysimplesoap doesn't generate soap Arrays without using wsdl
# I build body by hand, converting list to array and using standard
# pysimplesoap marshalling for other types
method_el = SimpleXMLElement('<get_bugs></get_bugs>')
for arg_n, kv in enumerate(key_value):
arg_name = 'arg' + str(arg_n)
if isinstance(kv, (list, tuple)):
_build_int_array_el(arg_name, method_el, kv)
else:
method_el.marshall(arg_name, kv)
soap_client = _build_soap_client()
reply = soap_client.call('get_bugs', method_el)
items_el = reply('soapenc:Array')
return [int(item_el) for item_el in items_el.children() or []] | python | def get_bugs(*key_value):
"""Get list of bugs matching certain criteria.
The conditions are defined by key value pairs.
Possible keys are:
* "package": bugs for the given package
* "submitter": bugs from the submitter
* "maint": bugs belonging to a maintainer
* "src": bugs belonging to a source package
* "severity": bugs with a certain severity
* "status": can be either "done", "forwarded", or "open"
* "tag": see http://www.debian.org/Bugs/Developer#tags for
available tags
* "owner": bugs which are assigned to `owner`
* "bugs": takes single int or list of bugnumbers, filters the list
according to given criteria
* "correspondent": bugs where `correspondent` has sent a mail to
Arguments
---------
key_value : str
Returns
-------
bugs : list of ints
the bugnumbers
Examples
--------
>>> get_bugs('package', 'gtk-qt-engine', 'severity', 'normal')
[12345, 23456]
"""
# previous versions also accepted
# get_bugs(['package', 'gtk-qt-engine', 'severity', 'normal'])
# if key_value is a list in a one elemented tuple, remove the
# wrapping list
if len(key_value) == 1 and isinstance(key_value[0], list):
key_value = tuple(key_value[0])
# pysimplesoap doesn't generate soap Arrays without using wsdl
# I build body by hand, converting list to array and using standard
# pysimplesoap marshalling for other types
method_el = SimpleXMLElement('<get_bugs></get_bugs>')
for arg_n, kv in enumerate(key_value):
arg_name = 'arg' + str(arg_n)
if isinstance(kv, (list, tuple)):
_build_int_array_el(arg_name, method_el, kv)
else:
method_el.marshall(arg_name, kv)
soap_client = _build_soap_client()
reply = soap_client.call('get_bugs', method_el)
items_el = reply('soapenc:Array')
return [int(item_el) for item_el in items_el.children() or []] | [
"def",
"get_bugs",
"(",
"*",
"key_value",
")",
":",
"# previous versions also accepted",
"# get_bugs(['package', 'gtk-qt-engine', 'severity', 'normal'])",
"# if key_value is a list in a one elemented tuple, remove the",
"# wrapping list",
"if",
"len",
"(",
"key_value",
")",
"==",
"1",
"and",
"isinstance",
"(",
"key_value",
"[",
"0",
"]",
",",
"list",
")",
":",
"key_value",
"=",
"tuple",
"(",
"key_value",
"[",
"0",
"]",
")",
"# pysimplesoap doesn't generate soap Arrays without using wsdl",
"# I build body by hand, converting list to array and using standard",
"# pysimplesoap marshalling for other types",
"method_el",
"=",
"SimpleXMLElement",
"(",
"'<get_bugs></get_bugs>'",
")",
"for",
"arg_n",
",",
"kv",
"in",
"enumerate",
"(",
"key_value",
")",
":",
"arg_name",
"=",
"'arg'",
"+",
"str",
"(",
"arg_n",
")",
"if",
"isinstance",
"(",
"kv",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"_build_int_array_el",
"(",
"arg_name",
",",
"method_el",
",",
"kv",
")",
"else",
":",
"method_el",
".",
"marshall",
"(",
"arg_name",
",",
"kv",
")",
"soap_client",
"=",
"_build_soap_client",
"(",
")",
"reply",
"=",
"soap_client",
".",
"call",
"(",
"'get_bugs'",
",",
"method_el",
")",
"items_el",
"=",
"reply",
"(",
"'soapenc:Array'",
")",
"return",
"[",
"int",
"(",
"item_el",
")",
"for",
"item_el",
"in",
"items_el",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]"
]
| Get list of bugs matching certain criteria.
The conditions are defined by key value pairs.
Possible keys are:
* "package": bugs for the given package
* "submitter": bugs from the submitter
* "maint": bugs belonging to a maintainer
* "src": bugs belonging to a source package
* "severity": bugs with a certain severity
* "status": can be either "done", "forwarded", or "open"
* "tag": see http://www.debian.org/Bugs/Developer#tags for
available tags
* "owner": bugs which are assigned to `owner`
* "bugs": takes single int or list of bugnumbers, filters the list
according to given criteria
* "correspondent": bugs where `correspondent` has sent a mail to
Arguments
---------
key_value : str
Returns
-------
bugs : list of ints
the bugnumbers
Examples
--------
>>> get_bugs('package', 'gtk-qt-engine', 'severity', 'normal')
[12345, 23456] | [
"Get",
"list",
"of",
"bugs",
"matching",
"certain",
"criteria",
"."
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L348-L403 | train |
venthur/python-debianbts | debianbts/debianbts.py | _parse_status | def _parse_status(bug_el):
"""Return a bugreport object from a given status xml element"""
bug = Bugreport()
# plain fields
for field in ('originator', 'subject', 'msgid', 'package', 'severity',
'owner', 'summary', 'location', 'source', 'pending',
'forwarded'):
setattr(bug, field, _parse_string_el(bug_el(field)))
bug.date = datetime.utcfromtimestamp(float(bug_el('date')))
bug.log_modified = datetime.utcfromtimestamp(float(bug_el('log_modified')))
bug.tags = [_uc(tag) for tag in str(bug_el('tags')).split()]
bug.done = _parse_bool(bug_el('done'))
bug.archived = _parse_bool(bug_el('archived'))
bug.unarchived = _parse_bool(bug_el('unarchived'))
bug.bug_num = int(bug_el('bug_num'))
bug.mergedwith = [int(i) for i in str(bug_el('mergedwith')).split()]
bug.blockedby = [int(i) for i in str(bug_el('blockedby')).split()]
bug.blocks = [int(i) for i in str(bug_el('blocks')).split()]
bug.found_versions = [_uc(str(el)) for el in
bug_el('found_versions').children() or []]
bug.fixed_versions = [_uc(str(el)) for el in
bug_el('fixed_versions').children() or []]
affects = [_f for _f in str(bug_el('affects')).split(',') if _f]
bug.affects = [_uc(a).strip() for a in affects]
# Also available, but unused or broken
# bug.keywords = [_uc(keyword) for keyword in
# str(bug_el('keywords')).split()]
# bug.fixed = _parse_crappy_soap(tmp, "fixed")
# bug.found = _parse_crappy_soap(tmp, "found")
# bug.found_date = \
# [datetime.utcfromtimestamp(i) for i in tmp["found_date"]]
# bug.fixed_date = \
# [datetime.utcfromtimestamp(i) for i in tmp["fixed_date"]]
return bug | python | def _parse_status(bug_el):
"""Return a bugreport object from a given status xml element"""
bug = Bugreport()
# plain fields
for field in ('originator', 'subject', 'msgid', 'package', 'severity',
'owner', 'summary', 'location', 'source', 'pending',
'forwarded'):
setattr(bug, field, _parse_string_el(bug_el(field)))
bug.date = datetime.utcfromtimestamp(float(bug_el('date')))
bug.log_modified = datetime.utcfromtimestamp(float(bug_el('log_modified')))
bug.tags = [_uc(tag) for tag in str(bug_el('tags')).split()]
bug.done = _parse_bool(bug_el('done'))
bug.archived = _parse_bool(bug_el('archived'))
bug.unarchived = _parse_bool(bug_el('unarchived'))
bug.bug_num = int(bug_el('bug_num'))
bug.mergedwith = [int(i) for i in str(bug_el('mergedwith')).split()]
bug.blockedby = [int(i) for i in str(bug_el('blockedby')).split()]
bug.blocks = [int(i) for i in str(bug_el('blocks')).split()]
bug.found_versions = [_uc(str(el)) for el in
bug_el('found_versions').children() or []]
bug.fixed_versions = [_uc(str(el)) for el in
bug_el('fixed_versions').children() or []]
affects = [_f for _f in str(bug_el('affects')).split(',') if _f]
bug.affects = [_uc(a).strip() for a in affects]
# Also available, but unused or broken
# bug.keywords = [_uc(keyword) for keyword in
# str(bug_el('keywords')).split()]
# bug.fixed = _parse_crappy_soap(tmp, "fixed")
# bug.found = _parse_crappy_soap(tmp, "found")
# bug.found_date = \
# [datetime.utcfromtimestamp(i) for i in tmp["found_date"]]
# bug.fixed_date = \
# [datetime.utcfromtimestamp(i) for i in tmp["fixed_date"]]
return bug | [
"def",
"_parse_status",
"(",
"bug_el",
")",
":",
"bug",
"=",
"Bugreport",
"(",
")",
"# plain fields",
"for",
"field",
"in",
"(",
"'originator'",
",",
"'subject'",
",",
"'msgid'",
",",
"'package'",
",",
"'severity'",
",",
"'owner'",
",",
"'summary'",
",",
"'location'",
",",
"'source'",
",",
"'pending'",
",",
"'forwarded'",
")",
":",
"setattr",
"(",
"bug",
",",
"field",
",",
"_parse_string_el",
"(",
"bug_el",
"(",
"field",
")",
")",
")",
"bug",
".",
"date",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"bug_el",
"(",
"'date'",
")",
")",
")",
"bug",
".",
"log_modified",
"=",
"datetime",
".",
"utcfromtimestamp",
"(",
"float",
"(",
"bug_el",
"(",
"'log_modified'",
")",
")",
")",
"bug",
".",
"tags",
"=",
"[",
"_uc",
"(",
"tag",
")",
"for",
"tag",
"in",
"str",
"(",
"bug_el",
"(",
"'tags'",
")",
")",
".",
"split",
"(",
")",
"]",
"bug",
".",
"done",
"=",
"_parse_bool",
"(",
"bug_el",
"(",
"'done'",
")",
")",
"bug",
".",
"archived",
"=",
"_parse_bool",
"(",
"bug_el",
"(",
"'archived'",
")",
")",
"bug",
".",
"unarchived",
"=",
"_parse_bool",
"(",
"bug_el",
"(",
"'unarchived'",
")",
")",
"bug",
".",
"bug_num",
"=",
"int",
"(",
"bug_el",
"(",
"'bug_num'",
")",
")",
"bug",
".",
"mergedwith",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"str",
"(",
"bug_el",
"(",
"'mergedwith'",
")",
")",
".",
"split",
"(",
")",
"]",
"bug",
".",
"blockedby",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"str",
"(",
"bug_el",
"(",
"'blockedby'",
")",
")",
".",
"split",
"(",
")",
"]",
"bug",
".",
"blocks",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"str",
"(",
"bug_el",
"(",
"'blocks'",
")",
")",
".",
"split",
"(",
")",
"]",
"bug",
".",
"found_versions",
"=",
"[",
"_uc",
"(",
"str",
"(",
"el",
")",
")",
"for",
"el",
"in",
"bug_el",
"(",
"'found_versions'",
")",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]",
"bug",
".",
"fixed_versions",
"=",
"[",
"_uc",
"(",
"str",
"(",
"el",
")",
")",
"for",
"el",
"in",
"bug_el",
"(",
"'fixed_versions'",
")",
".",
"children",
"(",
")",
"or",
"[",
"]",
"]",
"affects",
"=",
"[",
"_f",
"for",
"_f",
"in",
"str",
"(",
"bug_el",
"(",
"'affects'",
")",
")",
".",
"split",
"(",
"','",
")",
"if",
"_f",
"]",
"bug",
".",
"affects",
"=",
"[",
"_uc",
"(",
"a",
")",
".",
"strip",
"(",
")",
"for",
"a",
"in",
"affects",
"]",
"# Also available, but unused or broken",
"# bug.keywords = [_uc(keyword) for keyword in",
"# str(bug_el('keywords')).split()]",
"# bug.fixed = _parse_crappy_soap(tmp, \"fixed\")",
"# bug.found = _parse_crappy_soap(tmp, \"found\")",
"# bug.found_date = \\",
"# [datetime.utcfromtimestamp(i) for i in tmp[\"found_date\"]]",
"# bug.fixed_date = \\",
"# [datetime.utcfromtimestamp(i) for i in tmp[\"fixed_date\"]]",
"return",
"bug"
]
| Return a bugreport object from a given status xml element | [
"Return",
"a",
"bugreport",
"object",
"from",
"a",
"given",
"status",
"xml",
"element"
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L406-L442 | train |
venthur/python-debianbts | debianbts/debianbts.py | _convert_soap_method_args | def _convert_soap_method_args(*args):
"""Convert arguments to be consumed by a SoapClient method
Soap client required a list of named arguments:
>>> _convert_soap_method_args('a', 1)
[('arg0', 'a'), ('arg1', 1)]
"""
soap_args = []
for arg_n, arg in enumerate(args):
soap_args.append(('arg' + str(arg_n), arg))
return soap_args | python | def _convert_soap_method_args(*args):
"""Convert arguments to be consumed by a SoapClient method
Soap client required a list of named arguments:
>>> _convert_soap_method_args('a', 1)
[('arg0', 'a'), ('arg1', 1)]
"""
soap_args = []
for arg_n, arg in enumerate(args):
soap_args.append(('arg' + str(arg_n), arg))
return soap_args | [
"def",
"_convert_soap_method_args",
"(",
"*",
"args",
")",
":",
"soap_args",
"=",
"[",
"]",
"for",
"arg_n",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"soap_args",
".",
"append",
"(",
"(",
"'arg'",
"+",
"str",
"(",
"arg_n",
")",
",",
"arg",
")",
")",
"return",
"soap_args"
]
| Convert arguments to be consumed by a SoapClient method
Soap client required a list of named arguments:
>>> _convert_soap_method_args('a', 1)
[('arg0', 'a'), ('arg1', 1)] | [
"Convert",
"arguments",
"to",
"be",
"consumed",
"by",
"a",
"SoapClient",
"method"
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L497-L508 | train |
venthur/python-debianbts | debianbts/debianbts.py | _soap_client_call | def _soap_client_call(method_name, *args):
"""Wrapper to call SoapClient method"""
# a new client instance is built for threading issues
soap_client = _build_soap_client()
soap_args = _convert_soap_method_args(*args)
# if pysimplesoap version requires it, apply a workaround for
# https://github.com/pysimplesoap/pysimplesoap/issues/31
if PYSIMPLESOAP_1_16_2:
return getattr(soap_client, method_name)(*soap_args)
else:
return getattr(soap_client, method_name)(soap_client, *soap_args) | python | def _soap_client_call(method_name, *args):
"""Wrapper to call SoapClient method"""
# a new client instance is built for threading issues
soap_client = _build_soap_client()
soap_args = _convert_soap_method_args(*args)
# if pysimplesoap version requires it, apply a workaround for
# https://github.com/pysimplesoap/pysimplesoap/issues/31
if PYSIMPLESOAP_1_16_2:
return getattr(soap_client, method_name)(*soap_args)
else:
return getattr(soap_client, method_name)(soap_client, *soap_args) | [
"def",
"_soap_client_call",
"(",
"method_name",
",",
"*",
"args",
")",
":",
"# a new client instance is built for threading issues",
"soap_client",
"=",
"_build_soap_client",
"(",
")",
"soap_args",
"=",
"_convert_soap_method_args",
"(",
"*",
"args",
")",
"# if pysimplesoap version requires it, apply a workaround for",
"# https://github.com/pysimplesoap/pysimplesoap/issues/31",
"if",
"PYSIMPLESOAP_1_16_2",
":",
"return",
"getattr",
"(",
"soap_client",
",",
"method_name",
")",
"(",
"*",
"soap_args",
")",
"else",
":",
"return",
"getattr",
"(",
"soap_client",
",",
"method_name",
")",
"(",
"soap_client",
",",
"*",
"soap_args",
")"
]
| Wrapper to call SoapClient method | [
"Wrapper",
"to",
"call",
"SoapClient",
"method"
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L511-L521 | train |
venthur/python-debianbts | debianbts/debianbts.py | _parse_string_el | def _parse_string_el(el):
"""read a string element, maybe encoded in base64"""
value = str(el)
el_type = el.attributes().get('xsi:type')
if el_type and el_type.value == 'xsd:base64Binary':
value = base64.b64decode(value)
if not PY2:
value = value.decode('utf-8', errors='replace')
value = _uc(value)
return value | python | def _parse_string_el(el):
"""read a string element, maybe encoded in base64"""
value = str(el)
el_type = el.attributes().get('xsi:type')
if el_type and el_type.value == 'xsd:base64Binary':
value = base64.b64decode(value)
if not PY2:
value = value.decode('utf-8', errors='replace')
value = _uc(value)
return value | [
"def",
"_parse_string_el",
"(",
"el",
")",
":",
"value",
"=",
"str",
"(",
"el",
")",
"el_type",
"=",
"el",
".",
"attributes",
"(",
")",
".",
"get",
"(",
"'xsi:type'",
")",
"if",
"el_type",
"and",
"el_type",
".",
"value",
"==",
"'xsd:base64Binary'",
":",
"value",
"=",
"base64",
".",
"b64decode",
"(",
"value",
")",
"if",
"not",
"PY2",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
"value",
"=",
"_uc",
"(",
"value",
")",
"return",
"value"
]
| read a string element, maybe encoded in base64 | [
"read",
"a",
"string",
"element",
"maybe",
"encoded",
"in",
"base64"
]
| 72cf11ae3458a8544142e9f365aaafe25634dd4f | https://github.com/venthur/python-debianbts/blob/72cf11ae3458a8544142e9f365aaafe25634dd4f/debianbts/debianbts.py#L544-L553 | train |
omisego/py-solc-simple | solc_simple/builder.py | Builder.get_solc_input | def get_solc_input(self):
"""Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict
"""
def legal(r, file_name):
hidden = file_name[0] == '.'
dotsol = len(file_name) > 3 and file_name[-4:] == '.sol'
path = os.path.normpath(os.path.join(r, file_name))
notfile = not os.path.isfile(path)
symlink = Path(path).is_symlink()
return dotsol and (not (symlink or hidden or notfile))
solc_input = {
'language': 'Solidity',
'sources': {
file_name: {
'urls': [os.path.realpath(os.path.join(r, file_name))]
} for r, d, f in os.walk(self.contracts_dir) for file_name in f if legal(r, file_name)
},
'settings': {
'optimizer': {
'enabled': 1,
'runs': 10000
},
'outputSelection': {
"*": {
"": [
"legacyAST",
"ast"
],
"*": [
"abi",
"evm.bytecode.object",
"evm.bytecode.sourceMap",
"evm.deployedBytecode.object",
"evm.deployedBytecode.sourceMap"
]
}
}
}
}
return solc_input | python | def get_solc_input(self):
"""Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict
"""
def legal(r, file_name):
hidden = file_name[0] == '.'
dotsol = len(file_name) > 3 and file_name[-4:] == '.sol'
path = os.path.normpath(os.path.join(r, file_name))
notfile = not os.path.isfile(path)
symlink = Path(path).is_symlink()
return dotsol and (not (symlink or hidden or notfile))
solc_input = {
'language': 'Solidity',
'sources': {
file_name: {
'urls': [os.path.realpath(os.path.join(r, file_name))]
} for r, d, f in os.walk(self.contracts_dir) for file_name in f if legal(r, file_name)
},
'settings': {
'optimizer': {
'enabled': 1,
'runs': 10000
},
'outputSelection': {
"*": {
"": [
"legacyAST",
"ast"
],
"*": [
"abi",
"evm.bytecode.object",
"evm.bytecode.sourceMap",
"evm.deployedBytecode.object",
"evm.deployedBytecode.sourceMap"
]
}
}
}
}
return solc_input | [
"def",
"get_solc_input",
"(",
"self",
")",
":",
"def",
"legal",
"(",
"r",
",",
"file_name",
")",
":",
"hidden",
"=",
"file_name",
"[",
"0",
"]",
"==",
"'.'",
"dotsol",
"=",
"len",
"(",
"file_name",
")",
">",
"3",
"and",
"file_name",
"[",
"-",
"4",
":",
"]",
"==",
"'.sol'",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"r",
",",
"file_name",
")",
")",
"notfile",
"=",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"symlink",
"=",
"Path",
"(",
"path",
")",
".",
"is_symlink",
"(",
")",
"return",
"dotsol",
"and",
"(",
"not",
"(",
"symlink",
"or",
"hidden",
"or",
"notfile",
")",
")",
"solc_input",
"=",
"{",
"'language'",
":",
"'Solidity'",
",",
"'sources'",
":",
"{",
"file_name",
":",
"{",
"'urls'",
":",
"[",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"r",
",",
"file_name",
")",
")",
"]",
"}",
"for",
"r",
",",
"d",
",",
"f",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"contracts_dir",
")",
"for",
"file_name",
"in",
"f",
"if",
"legal",
"(",
"r",
",",
"file_name",
")",
"}",
",",
"'settings'",
":",
"{",
"'optimizer'",
":",
"{",
"'enabled'",
":",
"1",
",",
"'runs'",
":",
"10000",
"}",
",",
"'outputSelection'",
":",
"{",
"\"*\"",
":",
"{",
"\"\"",
":",
"[",
"\"legacyAST\"",
",",
"\"ast\"",
"]",
",",
"\"*\"",
":",
"[",
"\"abi\"",
",",
"\"evm.bytecode.object\"",
",",
"\"evm.bytecode.sourceMap\"",
",",
"\"evm.deployedBytecode.object\"",
",",
"\"evm.deployedBytecode.sourceMap\"",
"]",
"}",
"}",
"}",
"}",
"return",
"solc_input"
]
| Walks the contract directory and returns a Solidity input dict
Learn more about Solidity input JSON here: https://goo.gl/7zKBvj
Returns:
dict: A Solidity input JSON object as a dict | [
"Walks",
"the",
"contract",
"directory",
"and",
"returns",
"a",
"Solidity",
"input",
"dict"
]
| 770116fdaa5aae01c2a247e2477f80621e98f0cc | https://github.com/omisego/py-solc-simple/blob/770116fdaa5aae01c2a247e2477f80621e98f0cc/solc_simple/builder.py#L17-L64 | train |
omisego/py-solc-simple | solc_simple/builder.py | Builder.compile_all | def compile_all(self):
"""Compiles all of the contracts in the self.contracts_dir directory
Creates {contract name}.json files in self.output_dir that contain
the build output for each contract.
"""
# Solidity input JSON
solc_input = self.get_solc_input()
# Compile the contracts
real_path = os.path.realpath(self.contracts_dir)
compilation_result = compile_standard(solc_input, allow_paths=real_path)
# Create the output folder if it doesn't already exist
os.makedirs(self.output_dir, exist_ok=True)
# Write the contract ABI to output files
compiled_contracts = compilation_result['contracts']
for contract_file in compiled_contracts:
for contract in compiled_contracts[contract_file]:
contract_name = contract.split('.')[0]
contract_data = compiled_contracts[contract_file][contract_name]
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, "w+") as contract_data_file:
json.dump(contract_data, contract_data_file) | python | def compile_all(self):
"""Compiles all of the contracts in the self.contracts_dir directory
Creates {contract name}.json files in self.output_dir that contain
the build output for each contract.
"""
# Solidity input JSON
solc_input = self.get_solc_input()
# Compile the contracts
real_path = os.path.realpath(self.contracts_dir)
compilation_result = compile_standard(solc_input, allow_paths=real_path)
# Create the output folder if it doesn't already exist
os.makedirs(self.output_dir, exist_ok=True)
# Write the contract ABI to output files
compiled_contracts = compilation_result['contracts']
for contract_file in compiled_contracts:
for contract in compiled_contracts[contract_file]:
contract_name = contract.split('.')[0]
contract_data = compiled_contracts[contract_file][contract_name]
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, "w+") as contract_data_file:
json.dump(contract_data, contract_data_file) | [
"def",
"compile_all",
"(",
"self",
")",
":",
"# Solidity input JSON",
"solc_input",
"=",
"self",
".",
"get_solc_input",
"(",
")",
"# Compile the contracts",
"real_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"self",
".",
"contracts_dir",
")",
"compilation_result",
"=",
"compile_standard",
"(",
"solc_input",
",",
"allow_paths",
"=",
"real_path",
")",
"# Create the output folder if it doesn't already exist",
"os",
".",
"makedirs",
"(",
"self",
".",
"output_dir",
",",
"exist_ok",
"=",
"True",
")",
"# Write the contract ABI to output files",
"compiled_contracts",
"=",
"compilation_result",
"[",
"'contracts'",
"]",
"for",
"contract_file",
"in",
"compiled_contracts",
":",
"for",
"contract",
"in",
"compiled_contracts",
"[",
"contract_file",
"]",
":",
"contract_name",
"=",
"contract",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"contract_data",
"=",
"compiled_contracts",
"[",
"contract_file",
"]",
"[",
"contract_name",
"]",
"contract_data_path",
"=",
"self",
".",
"output_dir",
"+",
"'/{0}.json'",
".",
"format",
"(",
"contract_name",
")",
"with",
"open",
"(",
"contract_data_path",
",",
"\"w+\"",
")",
"as",
"contract_data_file",
":",
"json",
".",
"dump",
"(",
"contract_data",
",",
"contract_data_file",
")"
]
| Compiles all of the contracts in the self.contracts_dir directory
Creates {contract name}.json files in self.output_dir that contain
the build output for each contract. | [
"Compiles",
"all",
"of",
"the",
"contracts",
"in",
"the",
"self",
".",
"contracts_dir",
"directory"
]
| 770116fdaa5aae01c2a247e2477f80621e98f0cc | https://github.com/omisego/py-solc-simple/blob/770116fdaa5aae01c2a247e2477f80621e98f0cc/solc_simple/builder.py#L66-L92 | train |
omisego/py-solc-simple | solc_simple/builder.py | Builder.get_contract_data | def get_contract_data(self, contract_name):
"""Returns the contract data for a given contract
Args:
contract_name (str): Name of the contract to return.
Returns:
str, str: ABI and bytecode of the contract
"""
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, 'r') as contract_data_file:
contract_data = json.load(contract_data_file)
abi = contract_data['abi']
bytecode = contract_data['evm']['bytecode']['object']
return abi, bytecode | python | def get_contract_data(self, contract_name):
"""Returns the contract data for a given contract
Args:
contract_name (str): Name of the contract to return.
Returns:
str, str: ABI and bytecode of the contract
"""
contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
with open(contract_data_path, 'r') as contract_data_file:
contract_data = json.load(contract_data_file)
abi = contract_data['abi']
bytecode = contract_data['evm']['bytecode']['object']
return abi, bytecode | [
"def",
"get_contract_data",
"(",
"self",
",",
"contract_name",
")",
":",
"contract_data_path",
"=",
"self",
".",
"output_dir",
"+",
"'/{0}.json'",
".",
"format",
"(",
"contract_name",
")",
"with",
"open",
"(",
"contract_data_path",
",",
"'r'",
")",
"as",
"contract_data_file",
":",
"contract_data",
"=",
"json",
".",
"load",
"(",
"contract_data_file",
")",
"abi",
"=",
"contract_data",
"[",
"'abi'",
"]",
"bytecode",
"=",
"contract_data",
"[",
"'evm'",
"]",
"[",
"'bytecode'",
"]",
"[",
"'object'",
"]",
"return",
"abi",
",",
"bytecode"
]
| Returns the contract data for a given contract
Args:
contract_name (str): Name of the contract to return.
Returns:
str, str: ABI and bytecode of the contract | [
"Returns",
"the",
"contract",
"data",
"for",
"a",
"given",
"contract"
]
| 770116fdaa5aae01c2a247e2477f80621e98f0cc | https://github.com/omisego/py-solc-simple/blob/770116fdaa5aae01c2a247e2477f80621e98f0cc/solc_simple/builder.py#L94-L111 | train |
marrow/util | marrow/util/compat.py | exception | def exception(maxTBlevel=None):
"""Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x.
"""
try:
from marrow.util.bunch import Bunch
cls, exc, trbk = sys.exc_info()
excName = cls.__name__
excArgs = getattr(exc, 'args', None)
excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
return Bunch(
name=excName,
cls=cls,
exception=exc,
trace=trbk,
formatted=excTb,
args=excArgs
)
finally:
del cls, exc, trbk | python | def exception(maxTBlevel=None):
"""Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x.
"""
try:
from marrow.util.bunch import Bunch
cls, exc, trbk = sys.exc_info()
excName = cls.__name__
excArgs = getattr(exc, 'args', None)
excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel))
return Bunch(
name=excName,
cls=cls,
exception=exc,
trace=trbk,
formatted=excTb,
args=excArgs
)
finally:
del cls, exc, trbk | [
"def",
"exception",
"(",
"maxTBlevel",
"=",
"None",
")",
":",
"try",
":",
"from",
"marrow",
".",
"util",
".",
"bunch",
"import",
"Bunch",
"cls",
",",
"exc",
",",
"trbk",
"=",
"sys",
".",
"exc_info",
"(",
")",
"excName",
"=",
"cls",
".",
"__name__",
"excArgs",
"=",
"getattr",
"(",
"exc",
",",
"'args'",
",",
"None",
")",
"excTb",
"=",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"cls",
",",
"exc",
",",
"trbk",
",",
"maxTBlevel",
")",
")",
"return",
"Bunch",
"(",
"name",
"=",
"excName",
",",
"cls",
"=",
"cls",
",",
"exception",
"=",
"exc",
",",
"trace",
"=",
"trbk",
",",
"formatted",
"=",
"excTb",
",",
"args",
"=",
"excArgs",
")",
"finally",
":",
"del",
"cls",
",",
"exc",
",",
"trbk"
]
| Retrieve useful information about an exception.
Returns a bunch (attribute-access dict) with the following information:
* name: exception class name
* cls: the exception class
* exception: the exception instance
* trace: the traceback instance
* formatted: formatted traceback
* args: arguments to the exception instance
This functionality allows you to trap an exception in a method agnostic to
differences between Python 2.x and 3.x. | [
"Retrieve",
"useful",
"information",
"about",
"an",
"exception",
"."
]
| abb8163dbd1fa0692d42a44d129b12ae2b39cdf2 | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L55-L90 | train |
marrow/util | marrow/util/compat.py | native | def native(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a native string."""
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback) | python | def native(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a given string into a native string."""
if isinstance(s, str):
return s
if str is unicode: # Python 3.x ->
return unicodestr(s, encoding, fallback)
return bytestring(s, encoding, fallback) | [
"def",
"native",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
",",
"fallback",
"=",
"'iso-8859-1'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"return",
"s",
"if",
"str",
"is",
"unicode",
":",
"# Python 3.x ->",
"return",
"unicodestr",
"(",
"s",
",",
"encoding",
",",
"fallback",
")",
"return",
"bytestring",
"(",
"s",
",",
"encoding",
",",
"fallback",
")"
]
| Convert a given string into a native string. | [
"Convert",
"a",
"given",
"string",
"into",
"a",
"native",
"string",
"."
]
| abb8163dbd1fa0692d42a44d129b12ae2b39cdf2 | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L106-L115 | train |
marrow/util | marrow/util/compat.py | unicodestr | def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a string to unicode if it isn't already."""
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback) | python | def unicodestr(s, encoding='utf-8', fallback='iso-8859-1'):
"""Convert a string to unicode if it isn't already."""
if isinstance(s, unicode):
return s
try:
return s.decode(encoding)
except UnicodeError:
return s.decode(fallback) | [
"def",
"unicodestr",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
",",
"fallback",
"=",
"'iso-8859-1'",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
"try",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
")",
"except",
"UnicodeError",
":",
"return",
"s",
".",
"decode",
"(",
"fallback",
")"
]
| Convert a string to unicode if it isn't already. | [
"Convert",
"a",
"string",
"to",
"unicode",
"if",
"it",
"isn",
"t",
"already",
"."
]
| abb8163dbd1fa0692d42a44d129b12ae2b39cdf2 | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L118-L127 | train |
marrow/util | marrow/util/compat.py | uvalues | def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a] | python | def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a] | [
"def",
"uvalues",
"(",
"a",
",",
"encoding",
"=",
"'utf-8'",
",",
"fallback",
"=",
"'iso-8859-1'",
")",
":",
"try",
":",
"return",
"encoding",
",",
"[",
"s",
".",
"decode",
"(",
"encoding",
")",
"for",
"s",
"in",
"a",
"]",
"except",
"UnicodeError",
":",
"return",
"fallback",
",",
"[",
"s",
".",
"decode",
"(",
"fallback",
")",
"for",
"s",
"in",
"a",
"]"
]
| Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback. | [
"Return",
"a",
"list",
"of",
"decoded",
"values",
"from",
"an",
"iterator",
"."
]
| abb8163dbd1fa0692d42a44d129b12ae2b39cdf2 | https://github.com/marrow/util/blob/abb8163dbd1fa0692d42a44d129b12ae2b39cdf2/marrow/util/compat.py#L130-L141 | train |
Yipit/pyeqs | pyeqs/query_builder.py | QueryBuilder._build_query | def _build_query(self):
"""
Build the base query dictionary
"""
if isinstance(self._query_string, QueryString):
self._query_dsl = self._query_string
elif isinstance(self._query_string, string_types):
self._query_dsl = QueryString(self._query_string)
else:
self._query_dsl = MatchAll() | python | def _build_query(self):
"""
Build the base query dictionary
"""
if isinstance(self._query_string, QueryString):
self._query_dsl = self._query_string
elif isinstance(self._query_string, string_types):
self._query_dsl = QueryString(self._query_string)
else:
self._query_dsl = MatchAll() | [
"def",
"_build_query",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_query_string",
",",
"QueryString",
")",
":",
"self",
".",
"_query_dsl",
"=",
"self",
".",
"_query_string",
"elif",
"isinstance",
"(",
"self",
".",
"_query_string",
",",
"string_types",
")",
":",
"self",
".",
"_query_dsl",
"=",
"QueryString",
"(",
"self",
".",
"_query_string",
")",
"else",
":",
"self",
".",
"_query_dsl",
"=",
"MatchAll",
"(",
")"
]
| Build the base query dictionary | [
"Build",
"the",
"base",
"query",
"dictionary"
]
| 2e385c0a5d113af0e20be4d9393add2aabdd9565 | https://github.com/Yipit/pyeqs/blob/2e385c0a5d113af0e20be4d9393add2aabdd9565/pyeqs/query_builder.py#L30-L39 | train |
Yipit/pyeqs | pyeqs/query_builder.py | QueryBuilder._build_filtered_query | def _build_filtered_query(self, f, operator):
"""
Create the root of the filter tree
"""
self._filtered = True
if isinstance(f, Filter):
filter_object = f
else:
filter_object = Filter(operator).filter(f)
self._filter_dsl = filter_object | python | def _build_filtered_query(self, f, operator):
"""
Create the root of the filter tree
"""
self._filtered = True
if isinstance(f, Filter):
filter_object = f
else:
filter_object = Filter(operator).filter(f)
self._filter_dsl = filter_object | [
"def",
"_build_filtered_query",
"(",
"self",
",",
"f",
",",
"operator",
")",
":",
"self",
".",
"_filtered",
"=",
"True",
"if",
"isinstance",
"(",
"f",
",",
"Filter",
")",
":",
"filter_object",
"=",
"f",
"else",
":",
"filter_object",
"=",
"Filter",
"(",
"operator",
")",
".",
"filter",
"(",
"f",
")",
"self",
".",
"_filter_dsl",
"=",
"filter_object"
]
| Create the root of the filter tree | [
"Create",
"the",
"root",
"of",
"the",
"filter",
"tree"
]
| 2e385c0a5d113af0e20be4d9393add2aabdd9565 | https://github.com/Yipit/pyeqs/blob/2e385c0a5d113af0e20be4d9393add2aabdd9565/pyeqs/query_builder.py#L41-L50 | train |
Yipit/pyeqs | pyeqs/query_builder.py | QueryBuilder.filter | def filter(self, f, operator="and"):
"""
Add a filter to the query
Takes a Filter object, or a filterable DSL object.
"""
if self._filtered:
self._filter_dsl.filter(f)
else:
self._build_filtered_query(f, operator)
return self | python | def filter(self, f, operator="and"):
"""
Add a filter to the query
Takes a Filter object, or a filterable DSL object.
"""
if self._filtered:
self._filter_dsl.filter(f)
else:
self._build_filtered_query(f, operator)
return self | [
"def",
"filter",
"(",
"self",
",",
"f",
",",
"operator",
"=",
"\"and\"",
")",
":",
"if",
"self",
".",
"_filtered",
":",
"self",
".",
"_filter_dsl",
".",
"filter",
"(",
"f",
")",
"else",
":",
"self",
".",
"_build_filtered_query",
"(",
"f",
",",
"operator",
")",
"return",
"self"
]
| Add a filter to the query
Takes a Filter object, or a filterable DSL object. | [
"Add",
"a",
"filter",
"to",
"the",
"query"
]
| 2e385c0a5d113af0e20be4d9393add2aabdd9565 | https://github.com/Yipit/pyeqs/blob/2e385c0a5d113af0e20be4d9393add2aabdd9565/pyeqs/query_builder.py#L52-L62 | train |
brandjon/simplestruct | simplestruct/fields.py | TypedField.check | def check(self, inst, value):
"""Raise TypeError if value doesn't satisfy the constraints
for use on instance inst.
"""
if not (self.or_none and value is None):
if self.seq:
self.checktype_seq(value, self.kind,
unique=self.unique, inst=inst)
else:
self.checktype(value, self.kind, inst=inst) | python | def check(self, inst, value):
"""Raise TypeError if value doesn't satisfy the constraints
for use on instance inst.
"""
if not (self.or_none and value is None):
if self.seq:
self.checktype_seq(value, self.kind,
unique=self.unique, inst=inst)
else:
self.checktype(value, self.kind, inst=inst) | [
"def",
"check",
"(",
"self",
",",
"inst",
",",
"value",
")",
":",
"if",
"not",
"(",
"self",
".",
"or_none",
"and",
"value",
"is",
"None",
")",
":",
"if",
"self",
".",
"seq",
":",
"self",
".",
"checktype_seq",
"(",
"value",
",",
"self",
".",
"kind",
",",
"unique",
"=",
"self",
".",
"unique",
",",
"inst",
"=",
"inst",
")",
"else",
":",
"self",
".",
"checktype",
"(",
"value",
",",
"self",
".",
"kind",
",",
"inst",
"=",
"inst",
")"
]
| Raise TypeError if value doesn't satisfy the constraints
for use on instance inst. | [
"Raise",
"TypeError",
"if",
"value",
"doesn",
"t",
"satisfy",
"the",
"constraints",
"for",
"use",
"on",
"instance",
"inst",
"."
]
| f2bba77278838b5904fd72b35741da162f337c37 | https://github.com/brandjon/simplestruct/blob/f2bba77278838b5904fd72b35741da162f337c37/simplestruct/fields.py#L49-L58 | train |
brandjon/simplestruct | simplestruct/fields.py | TypedField.normalize | def normalize(self, inst, value):
"""Return value or a normalized form of it for use on
instance inst.
"""
if (not (self.or_none and value is None) and
self.seq):
value = tuple(value)
return value | python | def normalize(self, inst, value):
"""Return value or a normalized form of it for use on
instance inst.
"""
if (not (self.or_none and value is None) and
self.seq):
value = tuple(value)
return value | [
"def",
"normalize",
"(",
"self",
",",
"inst",
",",
"value",
")",
":",
"if",
"(",
"not",
"(",
"self",
".",
"or_none",
"and",
"value",
"is",
"None",
")",
"and",
"self",
".",
"seq",
")",
":",
"value",
"=",
"tuple",
"(",
"value",
")",
"return",
"value"
]
| Return value or a normalized form of it for use on
instance inst. | [
"Return",
"value",
"or",
"a",
"normalized",
"form",
"of",
"it",
"for",
"use",
"on",
"instance",
"inst",
"."
]
| f2bba77278838b5904fd72b35741da162f337c37 | https://github.com/brandjon/simplestruct/blob/f2bba77278838b5904fd72b35741da162f337c37/simplestruct/fields.py#L60-L67 | train |
VIVelev/PyDojoML | dojo/plot/decision.py | plot_decision_boundary | def plot_decision_boundary(model, X, y, step=0.1, figsize=(10, 8), alpha=0.4, size=20):
"""Plots the classification decision boundary of `model` on `X` with labels `y`.
Using numpy and matplotlib.
"""
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
np.arange(y_min, y_max, step))
f, ax = plt.subplots(figsize=figsize)
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=alpha)
ax.scatter(X[:, 0], X[:, 1], c=y, s=size, edgecolor='k')
plt.show() | python | def plot_decision_boundary(model, X, y, step=0.1, figsize=(10, 8), alpha=0.4, size=20):
"""Plots the classification decision boundary of `model` on `X` with labels `y`.
Using numpy and matplotlib.
"""
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
np.arange(y_min, y_max, step))
f, ax = plt.subplots(figsize=figsize)
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=alpha)
ax.scatter(X[:, 0], X[:, 1], c=y, s=size, edgecolor='k')
plt.show() | [
"def",
"plot_decision_boundary",
"(",
"model",
",",
"X",
",",
"y",
",",
"step",
"=",
"0.1",
",",
"figsize",
"=",
"(",
"10",
",",
"8",
")",
",",
"alpha",
"=",
"0.4",
",",
"size",
"=",
"20",
")",
":",
"x_min",
",",
"x_max",
"=",
"X",
"[",
":",
",",
"0",
"]",
".",
"min",
"(",
")",
"-",
"1",
",",
"X",
"[",
":",
",",
"0",
"]",
".",
"max",
"(",
")",
"+",
"1",
"y_min",
",",
"y_max",
"=",
"X",
"[",
":",
",",
"1",
"]",
".",
"min",
"(",
")",
"-",
"1",
",",
"X",
"[",
":",
",",
"1",
"]",
".",
"max",
"(",
")",
"+",
"1",
"xx",
",",
"yy",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"arange",
"(",
"x_min",
",",
"x_max",
",",
"step",
")",
",",
"np",
".",
"arange",
"(",
"y_min",
",",
"y_max",
",",
"step",
")",
")",
"f",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"figsize",
")",
"Z",
"=",
"model",
".",
"predict",
"(",
"np",
".",
"c_",
"[",
"xx",
".",
"ravel",
"(",
")",
",",
"yy",
".",
"ravel",
"(",
")",
"]",
")",
"Z",
"=",
"Z",
".",
"reshape",
"(",
"xx",
".",
"shape",
")",
"ax",
".",
"contourf",
"(",
"xx",
",",
"yy",
",",
"Z",
",",
"alpha",
"=",
"alpha",
")",
"ax",
".",
"scatter",
"(",
"X",
"[",
":",
",",
"0",
"]",
",",
"X",
"[",
":",
",",
"1",
"]",
",",
"c",
"=",
"y",
",",
"s",
"=",
"size",
",",
"edgecolor",
"=",
"'k'",
")",
"plt",
".",
"show",
"(",
")"
]
| Plots the classification decision boundary of `model` on `X` with labels `y`.
Using numpy and matplotlib. | [
"Plots",
"the",
"classification",
"decision",
"boundary",
"of",
"model",
"on",
"X",
"with",
"labels",
"y",
".",
"Using",
"numpy",
"and",
"matplotlib",
"."
]
| 773fdce6866aa6decd306a5a85f94129fed816eb | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/plot/decision.py#L9-L25 | train |
Xion/taipan | taipan/functional/__init__.py | ensure_argcount | def ensure_argcount(args, min_=None, max_=None):
"""Checks whether iterable of positional arguments satisfies conditions.
:param args: Iterable of positional arguments, received via ``*args``
:param min_: Minimum number of arguments
:param max_: Maximum number of arguments
:return: ``args`` if the conditions are met
:raise TypeError: When conditions are not met
"""
ensure_sequence(args)
has_min = min_ is not None
has_max = max_ is not None
if not (has_min or has_max):
raise ValueError(
"minimum and/or maximum number of arguments must be provided")
if has_min and has_max and min_ > max_:
raise ValueError(
"maximum number of arguments must be greater or equal to minimum")
if has_min and len(args) < min_:
raise TypeError(
"expected at least %s arguments, got %s" % (min_, len(args)))
if has_max and len(args) > max_:
raise TypeError(
"expected at most %s arguments, got %s" % (max_, len(args)))
return args | python | def ensure_argcount(args, min_=None, max_=None):
"""Checks whether iterable of positional arguments satisfies conditions.
:param args: Iterable of positional arguments, received via ``*args``
:param min_: Minimum number of arguments
:param max_: Maximum number of arguments
:return: ``args`` if the conditions are met
:raise TypeError: When conditions are not met
"""
ensure_sequence(args)
has_min = min_ is not None
has_max = max_ is not None
if not (has_min or has_max):
raise ValueError(
"minimum and/or maximum number of arguments must be provided")
if has_min and has_max and min_ > max_:
raise ValueError(
"maximum number of arguments must be greater or equal to minimum")
if has_min and len(args) < min_:
raise TypeError(
"expected at least %s arguments, got %s" % (min_, len(args)))
if has_max and len(args) > max_:
raise TypeError(
"expected at most %s arguments, got %s" % (max_, len(args)))
return args | [
"def",
"ensure_argcount",
"(",
"args",
",",
"min_",
"=",
"None",
",",
"max_",
"=",
"None",
")",
":",
"ensure_sequence",
"(",
"args",
")",
"has_min",
"=",
"min_",
"is",
"not",
"None",
"has_max",
"=",
"max_",
"is",
"not",
"None",
"if",
"not",
"(",
"has_min",
"or",
"has_max",
")",
":",
"raise",
"ValueError",
"(",
"\"minimum and/or maximum number of arguments must be provided\"",
")",
"if",
"has_min",
"and",
"has_max",
"and",
"min_",
">",
"max_",
":",
"raise",
"ValueError",
"(",
"\"maximum number of arguments must be greater or equal to minimum\"",
")",
"if",
"has_min",
"and",
"len",
"(",
"args",
")",
"<",
"min_",
":",
"raise",
"TypeError",
"(",
"\"expected at least %s arguments, got %s\"",
"%",
"(",
"min_",
",",
"len",
"(",
"args",
")",
")",
")",
"if",
"has_max",
"and",
"len",
"(",
"args",
")",
">",
"max_",
":",
"raise",
"TypeError",
"(",
"\"expected at most %s arguments, got %s\"",
"%",
"(",
"max_",
",",
"len",
"(",
"args",
")",
")",
")",
"return",
"args"
]
| Checks whether iterable of positional arguments satisfies conditions.
:param args: Iterable of positional arguments, received via ``*args``
:param min_: Minimum number of arguments
:param max_: Maximum number of arguments
:return: ``args`` if the conditions are met
:raise TypeError: When conditions are not met | [
"Checks",
"whether",
"iterable",
"of",
"positional",
"arguments",
"satisfies",
"conditions",
"."
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/__init__.py#L17-L45 | train |
Xion/taipan | taipan/functional/__init__.py | ensure_keyword_args | def ensure_keyword_args(kwargs, mandatory=(), optional=()):
"""Checks whether dictionary of keyword arguments satisfies conditions.
:param kwargs: Dictionary of keyword arguments, received via ``*kwargs``
:param mandatory: Iterable of mandatory argument names
:param optional: Iterable of optional argument names
:return: ``kwargs`` if the conditions are met:
all ``mandatory`` arguments are present, and besides that
no arguments outside of ``optional`` ones are.
:raise TypeError: When conditions are not met
"""
from taipan.strings import ensure_string
ensure_mapping(kwargs)
mandatory = list(map(ensure_string, ensure_iterable(mandatory)))
optional = list(map(ensure_string, ensure_iterable(optional)))
if not (mandatory or optional):
raise ValueError(
"mandatory and/or optional argument names must be provided")
names = set(kwargs)
for name in mandatory:
try:
names.remove(name)
except KeyError:
raise TypeError(
"no value for mandatory keyword argument '%s'" % name)
excess = names - set(optional)
if excess:
if len(excess) == 1:
raise TypeError("unexpected keyword argument '%s'" % excess.pop())
else:
raise TypeError(
"unexpected keyword arguments: %s" % (tuple(excess),))
return kwargs | python | def ensure_keyword_args(kwargs, mandatory=(), optional=()):
"""Checks whether dictionary of keyword arguments satisfies conditions.
:param kwargs: Dictionary of keyword arguments, received via ``*kwargs``
:param mandatory: Iterable of mandatory argument names
:param optional: Iterable of optional argument names
:return: ``kwargs`` if the conditions are met:
all ``mandatory`` arguments are present, and besides that
no arguments outside of ``optional`` ones are.
:raise TypeError: When conditions are not met
"""
from taipan.strings import ensure_string
ensure_mapping(kwargs)
mandatory = list(map(ensure_string, ensure_iterable(mandatory)))
optional = list(map(ensure_string, ensure_iterable(optional)))
if not (mandatory or optional):
raise ValueError(
"mandatory and/or optional argument names must be provided")
names = set(kwargs)
for name in mandatory:
try:
names.remove(name)
except KeyError:
raise TypeError(
"no value for mandatory keyword argument '%s'" % name)
excess = names - set(optional)
if excess:
if len(excess) == 1:
raise TypeError("unexpected keyword argument '%s'" % excess.pop())
else:
raise TypeError(
"unexpected keyword arguments: %s" % (tuple(excess),))
return kwargs | [
"def",
"ensure_keyword_args",
"(",
"kwargs",
",",
"mandatory",
"=",
"(",
")",
",",
"optional",
"=",
"(",
")",
")",
":",
"from",
"taipan",
".",
"strings",
"import",
"ensure_string",
"ensure_mapping",
"(",
"kwargs",
")",
"mandatory",
"=",
"list",
"(",
"map",
"(",
"ensure_string",
",",
"ensure_iterable",
"(",
"mandatory",
")",
")",
")",
"optional",
"=",
"list",
"(",
"map",
"(",
"ensure_string",
",",
"ensure_iterable",
"(",
"optional",
")",
")",
")",
"if",
"not",
"(",
"mandatory",
"or",
"optional",
")",
":",
"raise",
"ValueError",
"(",
"\"mandatory and/or optional argument names must be provided\"",
")",
"names",
"=",
"set",
"(",
"kwargs",
")",
"for",
"name",
"in",
"mandatory",
":",
"try",
":",
"names",
".",
"remove",
"(",
"name",
")",
"except",
"KeyError",
":",
"raise",
"TypeError",
"(",
"\"no value for mandatory keyword argument '%s'\"",
"%",
"name",
")",
"excess",
"=",
"names",
"-",
"set",
"(",
"optional",
")",
"if",
"excess",
":",
"if",
"len",
"(",
"excess",
")",
"==",
"1",
":",
"raise",
"TypeError",
"(",
"\"unexpected keyword argument '%s'\"",
"%",
"excess",
".",
"pop",
"(",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"unexpected keyword arguments: %s\"",
"%",
"(",
"tuple",
"(",
"excess",
")",
",",
")",
")",
"return",
"kwargs"
]
| Checks whether dictionary of keyword arguments satisfies conditions.
:param kwargs: Dictionary of keyword arguments, received via ``*kwargs``
:param mandatory: Iterable of mandatory argument names
:param optional: Iterable of optional argument names
:return: ``kwargs`` if the conditions are met:
all ``mandatory`` arguments are present, and besides that
no arguments outside of ``optional`` ones are.
:raise TypeError: When conditions are not met | [
"Checks",
"whether",
"dictionary",
"of",
"keyword",
"arguments",
"satisfies",
"conditions",
"."
]
| f333f0287c8bd0915182c7d5308e5f05ef0cca78 | https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/functional/__init__.py#L48-L86 | train |
intuition-io/insights | insights/plugins/hipchat.py | Bot._api_call | def _api_call(self, path, data={}, http_method=requests.get):
''' Process an http call against the hipchat api '''
log.info('performing api request', path=path)
response = http_method('/'.join([self.api_url, path]),
params={'auth_token': self.api_key},
data=data)
log.debug('{} remaining calls'.format(
response.headers['x-ratelimit-remaining']))
return response.json() | python | def _api_call(self, path, data={}, http_method=requests.get):
''' Process an http call against the hipchat api '''
log.info('performing api request', path=path)
response = http_method('/'.join([self.api_url, path]),
params={'auth_token': self.api_key},
data=data)
log.debug('{} remaining calls'.format(
response.headers['x-ratelimit-remaining']))
return response.json() | [
"def",
"_api_call",
"(",
"self",
",",
"path",
",",
"data",
"=",
"{",
"}",
",",
"http_method",
"=",
"requests",
".",
"get",
")",
":",
"log",
".",
"info",
"(",
"'performing api request'",
",",
"path",
"=",
"path",
")",
"response",
"=",
"http_method",
"(",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"api_url",
",",
"path",
"]",
")",
",",
"params",
"=",
"{",
"'auth_token'",
":",
"self",
".",
"api_key",
"}",
",",
"data",
"=",
"data",
")",
"log",
".",
"debug",
"(",
"'{} remaining calls'",
".",
"format",
"(",
"response",
".",
"headers",
"[",
"'x-ratelimit-remaining'",
"]",
")",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| Process an http call against the hipchat api | [
"Process",
"an",
"http",
"call",
"against",
"the",
"hipchat",
"api"
]
| a4eae53a1886164db96751d2b0964aa2acb7c2d7 | https://github.com/intuition-io/insights/blob/a4eae53a1886164db96751d2b0964aa2acb7c2d7/insights/plugins/hipchat.py#L43-L51 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.