| repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
brunato/lograptor | lograptor/cache.py | LookupCache.map_value | def map_value(self, value, gid):
"""
Return the value for a group id, applying requested mapping.
Map only groups related to a filter, i.e., when the basename of
the group is identical to the name of a filter.
"""
base_gid = self.base_gid_pattern.search(gid).group(1)
if self.anonymyze:
try:
if value in self._maps[base_gid]:
return self._maps[base_gid][value]
else:
k = (len(self._maps[base_gid]) + 1) % self.mapmax
new_item = u'{0}_{1:0{2}d}'.format(base_gid.upper(), k, self.mapexp)
self._maps[base_gid][value] = new_item
return new_item
except KeyError:
return value
elif base_gid in ['client', 'mail', 'from', 'rcpt', 'user'] and self.ip_lookup:
ip_match = self.ip_pattern.search(value)
if ip_match is None:
return value
host = self.gethost(ip_match.group(1))
if host == ip_match.group(1) or value.startswith(host):
return value
return u''.join([
value[:ip_match.start(1)],
self.gethost(ip_match.group(1)),
value[ip_match.end(1):]])
elif (base_gid == 'user' or base_gid == 'uid') and self.uid_lookup:
return self.getuname(value)
else:
return value | python | def map_value(self, value, gid):
"""
Return the value for a group id, applying requested mapping.
Map only groups related to a filter, i.e., when the basename of
the group is identical to the name of a filter.
"""
base_gid = self.base_gid_pattern.search(gid).group(1)
if self.anonymyze:
try:
if value in self._maps[base_gid]:
return self._maps[base_gid][value]
else:
k = (len(self._maps[base_gid]) + 1) % self.mapmax
new_item = u'{0}_{1:0{2}d}'.format(base_gid.upper(), k, self.mapexp)
self._maps[base_gid][value] = new_item
return new_item
except KeyError:
return value
elif base_gid in ['client', 'mail', 'from', 'rcpt', 'user'] and self.ip_lookup:
ip_match = self.ip_pattern.search(value)
if ip_match is None:
return value
host = self.gethost(ip_match.group(1))
if host == ip_match.group(1) or value.startswith(host):
return value
return u''.join([
value[:ip_match.start(1)],
self.gethost(ip_match.group(1)),
value[ip_match.end(1):]])
elif (base_gid == 'user' or base_gid == 'uid') and self.uid_lookup:
return self.getuname(value)
else:
return value | [
"def",
"map_value",
"(",
"self",
",",
"value",
",",
"gid",
")",
":",
"base_gid",
"=",
"self",
".",
"base_gid_pattern",
".",
"search",
"(",
"gid",
")",
".",
"group",
"(",
"1",
")",
"if",
"self",
".",
"anonymyze",
":",
"try",
":",
"if",
"value",
"in",
"self",
".",
"_maps",
"[",
"base_gid",
"]",
":",
"return",
"self",
".",
"_maps",
"[",
"base_gid",
"]",
"[",
"value",
"]",
"else",
":",
"k",
"=",
"(",
"len",
"(",
"self",
".",
"_maps",
"[",
"base_gid",
"]",
")",
"+",
"1",
")",
"%",
"self",
".",
"mapmax",
"new_item",
"=",
"u'{0}_{1:0{2}d}'",
".",
"format",
"(",
"base_gid",
".",
"upper",
"(",
")",
",",
"k",
",",
"self",
".",
"mapexp",
")",
"self",
".",
"_maps",
"[",
"base_gid",
"]",
"[",
"value",
"]",
"=",
"new_item",
"return",
"new_item",
"except",
"KeyError",
":",
"return",
"value",
"elif",
"base_gid",
"in",
"[",
"'client'",
",",
"'mail'",
",",
"'from'",
",",
"'rcpt'",
",",
"'user'",
"]",
"and",
"self",
".",
"ip_lookup",
":",
"ip_match",
"=",
"self",
".",
"ip_pattern",
".",
"search",
"(",
"value",
")",
"if",
"ip_match",
"is",
"None",
":",
"return",
"value",
"host",
"=",
"self",
".",
"gethost",
"(",
"ip_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"host",
"==",
"ip_match",
".",
"group",
"(",
"1",
")",
"or",
"value",
".",
"startswith",
"(",
"host",
")",
":",
"return",
"value",
"return",
"u''",
".",
"join",
"(",
"[",
"value",
"[",
":",
"ip_match",
".",
"start",
"(",
"1",
")",
"]",
",",
"self",
".",
"gethost",
"(",
"ip_match",
".",
"group",
"(",
"1",
")",
")",
",",
"value",
"[",
"ip_match",
".",
"end",
"(",
"1",
")",
":",
"]",
"]",
")",
"elif",
"(",
"base_gid",
"==",
"'user'",
"or",
"base_gid",
"==",
"'uid'",
")",
"and",
"self",
".",
"uid_lookup",
":",
"return",
"self",
".",
"getuname",
"(",
"value",
")",
"else",
":",
"return",
"value"
]
| Return the value for a group id, applying requested mapping.
Map only groups related to a filter, i.e., when the basename of
the group is identical to the name of a filter. | [
"Return",
"the",
"value",
"for",
"a",
"group",
"id",
"applying",
"requested",
"mapping",
".",
"Map",
"only",
"groups",
"related",
"to",
"a",
"filter",
"ie",
"when",
"the",
"basename",
"of",
"the",
"group",
"is",
"identical",
"to",
"the",
"name",
"of",
"a",
"filter",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L69-L101 | train |
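A standalone sketch of the anonymization scheme that `map_value` applies when the `anonymyze` flag (so spelled in the source) is set: each distinct value seen for a filter group gets a stable, zero-padded label. The names `maps`, `mapmax`, and `mapexp` mirror the instance attributes, but this is an illustration, not lograptor's API.

```python
# Illustrative only: reproduces map_value's anonymization bookkeeping.
maps = {"user": {}}          # stands in for self._maps
mapmax, mapexp = 10000, 4    # stands in for self.mapmax / self.mapexp

def anonymize(value, base_gid="user"):
    table = maps[base_gid]
    if value not in table:
        k = (len(table) + 1) % mapmax
        table[value] = "{0}_{1:0{2}d}".format(base_gid.upper(), k, mapexp)
    return table[value]

print(anonymize("alice"))  # USER_0001
print(anonymize("bob"))    # USER_0002
print(anonymize("alice"))  # USER_0001 -- stable on repeats
```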
brunato/lograptor | lograptor/cache.py | LookupCache.match_to_dict | def match_to_dict(self, match, gids):
"""
Map values from match into a dictionary.
"""
values = {}
for gid in gids:
try:
values[gid] = self.map_value(match.group(gid), gid)
except IndexError:
pass
return values | python | def match_to_dict(self, match, gids):
"""
Map values from match into a dictionary.
"""
values = {}
for gid in gids:
try:
values[gid] = self.map_value(match.group(gid), gid)
except IndexError:
pass
return values | [
"def",
"match_to_dict",
"(",
"self",
",",
"match",
",",
"gids",
")",
":",
"values",
"=",
"{",
"}",
"for",
"gid",
"in",
"gids",
":",
"try",
":",
"values",
"[",
"gid",
"]",
"=",
"self",
".",
"map_value",
"(",
"match",
".",
"group",
"(",
"gid",
")",
",",
"gid",
")",
"except",
"IndexError",
":",
"pass",
"return",
"values"
]
| Map values from match into a dictionary. | [
"Map",
"values",
"from",
"match",
"into",
"a",
"dictionary",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L103-L113 | train |
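A minimal usage sketch, with a hypothetical regex standing in for a lograptor pattern. `re` raises `IndexError` for unknown group names, which is exactly the case `match_to_dict` swallows.

```python
import re

pattern = re.compile(r"user=(?P<user>\w+) client=(?P<client>[\d.]+)")
match = pattern.search("user=alice client=192.0.2.1")

values = {}
for gid in ("user", "client", "rcpt"):   # "rcpt" is not in the pattern
    try:
        values[gid] = match.group(gid)   # LookupCache would route this through map_value
    except IndexError:
        pass                             # absent groups are simply skipped
print(values)  # {'user': 'alice', 'client': '192.0.2.1'}
```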
brunato/lograptor | lograptor/cache.py | LookupCache.match_to_string | def match_to_string(self, match, gids, values=None):
"""
Return the mapped string from match object. If a dictionary of
values is provided then use it to build the string.
"""
s = match.string
parts = []
k = 0
for gid in sorted(gids, key=lambda x: gids[x]):
if values is None:
try:
value = self.map_value(match.group(gid), gid)
parts.append(s[k:match.start(gid)])
parts.append(value)
k = match.end(gid)
except IndexError:
continue
elif gid in values:
parts.append(s[k:match.start(gid)])
parts.append(values[gid])
k = match.end(gid)
parts.append(s[k:])
return u"".join(parts) | python | def match_to_string(self, match, gids, values=None):
"""
Return the mapped string from match object. If a dictionary of
values is provided then use it to build the string.
"""
s = match.string
parts = []
k = 0
for gid in sorted(gids, key=lambda x: gids[x]):
if values is None:
try:
value = self.map_value(match.group(gid), gid)
parts.append(s[k:match.start(gid)])
parts.append(value)
k = match.end(gid)
except IndexError:
continue
elif gid in values:
parts.append(s[k:match.start(gid)])
parts.append(values[gid])
k = match.end(gid)
parts.append(s[k:])
return u"".join(parts) | [
"def",
"match_to_string",
"(",
"self",
",",
"match",
",",
"gids",
",",
"values",
"=",
"None",
")",
":",
"s",
"=",
"match",
".",
"string",
"parts",
"=",
"[",
"]",
"k",
"=",
"0",
"for",
"gid",
"in",
"sorted",
"(",
"gids",
",",
"key",
"=",
"lambda",
"x",
":",
"gids",
"[",
"x",
"]",
")",
":",
"if",
"values",
"is",
"None",
":",
"try",
":",
"value",
"=",
"self",
".",
"map_value",
"(",
"match",
".",
"group",
"(",
"gid",
")",
",",
"gid",
")",
"parts",
".",
"append",
"(",
"s",
"[",
"k",
":",
"match",
".",
"start",
"(",
"gid",
")",
"]",
")",
"parts",
".",
"append",
"(",
"value",
")",
"k",
"=",
"match",
".",
"end",
"(",
"gid",
")",
"except",
"IndexError",
":",
"continue",
"elif",
"gid",
"in",
"values",
":",
"parts",
".",
"append",
"(",
"s",
"[",
"k",
":",
"match",
".",
"start",
"(",
"gid",
")",
"]",
")",
"parts",
".",
"append",
"(",
"values",
"[",
"gid",
"]",
")",
"k",
"=",
"match",
".",
"end",
"(",
"gid",
")",
"parts",
".",
"append",
"(",
"s",
"[",
"k",
":",
"]",
")",
"return",
"u\"\"",
".",
"join",
"(",
"parts",
")"
]
| Return the mapped string from match object. If a dictionary of
values is provided then use it to build the string. | [
"Return",
"the",
"mapped",
"string",
"from",
"match",
"object",
".",
"If",
"a",
"dictionary",
"of",
"values",
"is",
"provided",
"then",
"use",
"it",
"to",
"build",
"the",
"string",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L115-L137 | train |
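The splicing logic of `match_to_string`, isolated into a runnable sketch. The pattern and replacement here are hypothetical, and the real method orders groups by their index in the `gids` dict and pulls replacements from `map_value` or the `values` dict; this version orders by position in the string, which is equivalent for non-overlapping groups.

```python
import re

pattern = re.compile(r"client=(?P<client>[\d.]+)")
match = pattern.search("connect from client=192.0.2.1 port=25")
values = {"client": "mail.example.org"}

s, parts, k = match.string, [], 0
for gid in sorted(values, key=match.start):   # order groups by their span start
    parts.append(s[k:match.start(gid)])       # text before the group
    parts.append(values[gid])                 # the replacement value
    k = match.end(gid)
parts.append(s[k:])                           # trailing text
print("".join(parts))  # connect from client=mail.example.org port=25
```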
brunato/lograptor | lograptor/cache.py | LookupCache.gethost | def gethost(self, ip_addr):
"""
Do a reverse lookup on an IP address.
"""
# Handle silly fake ipv6 addresses
try:
if ip_addr[:7] == '::ffff:':
ip_addr = ip_addr[7:]
except TypeError:
pass
if ip_addr[0] in string.letters:
return ip_addr
try:
return self.hostsmap[ip_addr]
except KeyError:
pass
try:
name = socket.gethostbyaddr(ip_addr)[0]
except socket.error:
name = ip_addr
self.hostsmap[ip_addr] = name
return name | python | def gethost(self, ip_addr):
"""
Do a reverse lookup on an IP address.
"""
# Handle silly fake ipv6 addresses
try:
if ip_addr[:7] == '::ffff:':
ip_addr = ip_addr[7:]
except TypeError:
pass
if ip_addr[0] in string.letters:
return ip_addr
try:
return self.hostsmap[ip_addr]
except KeyError:
pass
try:
name = socket.gethostbyaddr(ip_addr)[0]
except socket.error:
name = ip_addr
self.hostsmap[ip_addr] = name
return name | [
"def",
"gethost",
"(",
"self",
",",
"ip_addr",
")",
":",
"# Handle silly fake ipv6 addresses",
"try",
":",
"if",
"ip_addr",
"[",
":",
"7",
"]",
"==",
"'::ffff:'",
":",
"ip_addr",
"=",
"ip_addr",
"[",
"7",
":",
"]",
"except",
"TypeError",
":",
"pass",
"if",
"ip_addr",
"[",
"0",
"]",
"in",
"string",
".",
"letters",
":",
"return",
"ip_addr",
"try",
":",
"return",
"self",
".",
"hostsmap",
"[",
"ip_addr",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"name",
"=",
"socket",
".",
"gethostbyaddr",
"(",
"ip_addr",
")",
"[",
"0",
"]",
"except",
"socket",
".",
"error",
":",
"name",
"=",
"ip_addr",
"self",
".",
"hostsmap",
"[",
"ip_addr",
"]",
"=",
"name",
"return",
"name"
]
| Do a reverse lookup on an IP address. | [
"Do",
"reverse",
"lookup",
"on",
"an",
"ip",
"address"
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L139-L164 | train |
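Two portability notes on `gethost`: `string.letters` exists only on Python 2 (Python 3 renamed it to `string.ascii_letters`), and `socket.error` is an alias of `OSError` on Python 3. A version-agnostic sketch of the same memoized reverse lookup, with illustrative names:

```python
import socket
import string

hostsmap = {}   # memo cache, as in LookupCache

def gethost(ip_addr):
    if ip_addr.startswith("::ffff:"):            # strip IPv4-mapped IPv6 prefix
        ip_addr = ip_addr[7:]
    if ip_addr[:1] in string.ascii_letters:      # already a hostname, not an IP
        return ip_addr
    if ip_addr not in hostsmap:
        try:
            hostsmap[ip_addr] = socket.gethostbyaddr(ip_addr)[0]
        except OSError:
            hostsmap[ip_addr] = ip_addr          # lookup failed: keep the IP
    return hostsmap[ip_addr]

print(gethost("127.0.0.1"))   # typically "localhost"
```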
brunato/lograptor | lograptor/cache.py | LookupCache.getuname | def getuname(self, uid):
"""
Get the username of a given uid.
"""
uid = int(uid)
try:
return self.uidsmap[uid]
except KeyError:
pass
try:
name = pwd.getpwuid(uid)[0]
except (KeyError, AttributeError):
name = "uid=%d" % uid
self.uidsmap[uid] = name
return name | python | def getuname(self, uid):
"""
Get the username of a given uid.
"""
uid = int(uid)
try:
return self.uidsmap[uid]
except KeyError:
pass
try:
name = pwd.getpwuid(uid)[0]
except (KeyError, AttributeError):
name = "uid=%d" % uid
self.uidsmap[uid] = name
return name | [
"def",
"getuname",
"(",
"self",
",",
"uid",
")",
":",
"uid",
"=",
"int",
"(",
"uid",
")",
"try",
":",
"return",
"self",
".",
"uidsmap",
"[",
"uid",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"name",
"=",
"pwd",
".",
"getpwuid",
"(",
"uid",
")",
"[",
"0",
"]",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"name",
"=",
"\"uid=%d\"",
"%",
"uid",
"self",
".",
"uidsmap",
"[",
"uid",
"]",
"=",
"name",
"return",
"name"
]
| Get the username of a given uid. | [
"Get",
"the",
"username",
"of",
"a",
"given",
"uid",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/cache.py#L166-L182 | train |
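A self-contained equivalent of the memoized UID lookup. The `pwd` module is Unix-only, which is presumably why the original also guards against `AttributeError`.

```python
import pwd   # Unix-only

uidsmap = {}

def getuname(uid):
    uid = int(uid)
    if uid not in uidsmap:
        try:
            uidsmap[uid] = pwd.getpwuid(uid)[0]
        except (KeyError, AttributeError):
            uidsmap[uid] = "uid=%d" % uid   # unknown uid: fall back to a label
    return uidsmap[uid]

print(getuname(0))   # "root" on most Unix systems
```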
mardix/Mocha | mocha/core.py | redirect | def redirect(endpoint, **kw):
"""
Redirect allow to redirect dynamically using the classes methods without
knowing the right endpoint.
Expecting all endpoint have GET as method, it will try to pick the first
match, based on the endpoint provided or the based on the Rule map_url
An endpoint can also be passed along with **kw
An http: or https: can also be passed, and will redirect to that site.
example:
redirect(self.hello_world)
redirect(self.other_page, name="x", value="v")
redirect("https://google.com")
redirect(views.ContactPage.index)
:param endpoint:
:return: redirect url
"""
_endpoint = None
if isinstance(endpoint, six.string_types):
_endpoint = endpoint
# valid for https:// or /path/
# Endpoint should not have slashes. Use : (colon) to build endpoint
if "/" in endpoint:
return f_redirect(endpoint)
else:
for r in Mocha._app.url_map.iter_rules():
_endpoint = endpoint
if 'GET' in r.methods and endpoint in r.endpoint:
_endpoint = r.endpoint
break
else:
# self, will refer the caller method, by getting the method name
if isinstance(endpoint, Mocha):
fn = sys._getframe().f_back.f_code.co_name
endpoint = getattr(endpoint, fn)
if is_method(endpoint):
_endpoint = _get_action_endpoint(endpoint)
if not _endpoint:
_endpoint = _build_endpoint_route_name(endpoint)
if _endpoint:
return f_redirect(url_for(_endpoint, **kw))
else:
raise exceptions.MochaError("Invalid endpoint") | python | def redirect(endpoint, **kw):
"""
Redirect allows redirecting dynamically using the class's methods without
knowing the right endpoint.
Expecting all endpoints to have GET as a method, it will try to pick the first
match, based on the endpoint provided or on the rules in the URL map.
An endpoint can also be passed along with **kw.
An http: or https: URL can also be passed, and will redirect to that site.
example:
redirect(self.hello_world)
redirect(self.other_page, name="x", value="v")
redirect("https://google.com")
redirect(views.ContactPage.index)
:param endpoint:
:return: redirect url
"""
_endpoint = None
if isinstance(endpoint, six.string_types):
_endpoint = endpoint
# valid for https:// or /path/
# Endpoint should not have slashes. Use : (colon) to build endpoint
if "/" in endpoint:
return f_redirect(endpoint)
else:
for r in Mocha._app.url_map.iter_rules():
_endpoint = endpoint
if 'GET' in r.methods and endpoint in r.endpoint:
_endpoint = r.endpoint
break
else:
# self, will refer the caller method, by getting the method name
if isinstance(endpoint, Mocha):
fn = sys._getframe().f_back.f_code.co_name
endpoint = getattr(endpoint, fn)
if is_method(endpoint):
_endpoint = _get_action_endpoint(endpoint)
if not _endpoint:
_endpoint = _build_endpoint_route_name(endpoint)
if _endpoint:
return f_redirect(url_for(_endpoint, **kw))
else:
raise exceptions.MochaError("Invalid endpoint") | [
"def",
"redirect",
"(",
"endpoint",
",",
"*",
"*",
"kw",
")",
":",
"_endpoint",
"=",
"None",
"if",
"isinstance",
"(",
"endpoint",
",",
"six",
".",
"string_types",
")",
":",
"_endpoint",
"=",
"endpoint",
"# valid for https:// or /path/",
"# Endpoint should not have slashes. Use : (colon) to build endpoint",
"if",
"\"/\"",
"in",
"endpoint",
":",
"return",
"f_redirect",
"(",
"endpoint",
")",
"else",
":",
"for",
"r",
"in",
"Mocha",
".",
"_app",
".",
"url_map",
".",
"iter_rules",
"(",
")",
":",
"_endpoint",
"=",
"endpoint",
"if",
"'GET'",
"in",
"r",
".",
"methods",
"and",
"endpoint",
"in",
"r",
".",
"endpoint",
":",
"_endpoint",
"=",
"r",
".",
"endpoint",
"break",
"else",
":",
"# self, will refer the caller method, by getting the method name",
"if",
"isinstance",
"(",
"endpoint",
",",
"Mocha",
")",
":",
"fn",
"=",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_back",
".",
"f_code",
".",
"co_name",
"endpoint",
"=",
"getattr",
"(",
"endpoint",
",",
"fn",
")",
"if",
"is_method",
"(",
"endpoint",
")",
":",
"_endpoint",
"=",
"_get_action_endpoint",
"(",
"endpoint",
")",
"if",
"not",
"_endpoint",
":",
"_endpoint",
"=",
"_build_endpoint_route_name",
"(",
"endpoint",
")",
"if",
"_endpoint",
":",
"return",
"f_redirect",
"(",
"url_for",
"(",
"_endpoint",
",",
"*",
"*",
"kw",
")",
")",
"else",
":",
"raise",
"exceptions",
".",
"MochaError",
"(",
"\"Invalid endpoint\"",
")"
]
| Redirect allows redirecting dynamically using the class's methods without
knowing the right endpoint.
Expecting all endpoints to have GET as a method, it will try to pick the first
match, based on the endpoint provided or on the rules in the URL map.
An endpoint can also be passed along with **kw.
An http: or https: URL can also be passed, and will redirect to that site.
example:
redirect(self.hello_world)
redirect(self.other_page, name="x", value="v")
redirect("https://google.com")
redirect(views.ContactPage.index)
:param endpoint:
:return: redirect url | [
"Redirect",
"allow",
"to",
"redirect",
"dynamically",
"using",
"the",
"classes",
"methods",
"without",
"knowing",
"the",
"right",
"endpoint",
".",
"Expecting",
"all",
"endpoint",
"have",
"GET",
"as",
"method",
"it",
"will",
"try",
"to",
"pick",
"the",
"first",
"match",
"based",
"on",
"the",
"endpoint",
"provided",
"or",
"the",
"based",
"on",
"the",
"Rule",
"map_url"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L395-L442 | train |
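The first dispatch decision in `redirect` is purely lexical: anything containing a slash is treated as a path or URL and passed straight to `f_redirect`; everything else is resolved as an endpoint. A tiny runnable illustration of that rule (the helper name is hypothetical):

```python
def is_direct_target(endpoint):
    # Mirrors redirect()'s check: a slash means "path or URL", not an endpoint name.
    return isinstance(endpoint, str) and "/" in endpoint

print(is_direct_target("https://google.com"))   # True  -> f_redirect(endpoint)
print(is_direct_target("/contact/"))            # True  -> f_redirect(endpoint)
print(is_direct_target("ContactPage:index"))    # False -> resolve via the URL map
```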
mardix/Mocha | mocha/core.py | get_true_argspec | def get_true_argspec(method):
"""Drills through layers of decorators attempting to locate the actual argspec for the method."""
argspec = inspect.getargspec(method)
args = argspec[0]
if args and args[0] == 'self':
return argspec
if hasattr(method, '__func__'):
method = method.__func__
if not hasattr(method, '__closure__') or method.__closure__ is None:
raise DecoratorCompatibilityError
closure = method.__closure__
for cell in closure:
inner_method = cell.cell_contents
if inner_method is method:
continue
if not inspect.isfunction(inner_method) \
and not inspect.ismethod(inner_method):
continue
true_argspec = get_true_argspec(inner_method)
if true_argspec:
return true_argspec | python | def get_true_argspec(method):
"""Drills through layers of decorators attempting to locate the actual argspec for the method."""
argspec = inspect.getargspec(method)
args = argspec[0]
if args and args[0] == 'self':
return argspec
if hasattr(method, '__func__'):
method = method.__func__
if not hasattr(method, '__closure__') or method.__closure__ is None:
raise DecoratorCompatibilityError
closure = method.__closure__
for cell in closure:
inner_method = cell.cell_contents
if inner_method is method:
continue
if not inspect.isfunction(inner_method) \
and not inspect.ismethod(inner_method):
continue
true_argspec = get_true_argspec(inner_method)
if true_argspec:
return true_argspec | [
"def",
"get_true_argspec",
"(",
"method",
")",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"method",
")",
"args",
"=",
"argspec",
"[",
"0",
"]",
"if",
"args",
"and",
"args",
"[",
"0",
"]",
"==",
"'self'",
":",
"return",
"argspec",
"if",
"hasattr",
"(",
"method",
",",
"'__func__'",
")",
":",
"method",
"=",
"method",
".",
"__func__",
"if",
"not",
"hasattr",
"(",
"method",
",",
"'__closure__'",
")",
"or",
"method",
".",
"__closure__",
"is",
"None",
":",
"raise",
"DecoratorCompatibilityError",
"closure",
"=",
"method",
".",
"__closure__",
"for",
"cell",
"in",
"closure",
":",
"inner_method",
"=",
"cell",
".",
"cell_contents",
"if",
"inner_method",
"is",
"method",
":",
"continue",
"if",
"not",
"inspect",
".",
"isfunction",
"(",
"inner_method",
")",
"and",
"not",
"inspect",
".",
"ismethod",
"(",
"inner_method",
")",
":",
"continue",
"true_argspec",
"=",
"get_true_argspec",
"(",
"inner_method",
")",
"if",
"true_argspec",
":",
"return",
"true_argspec"
]
| Drills through layers of decorators attempting to locate the actual argspec for the method. | [
"Drills",
"through",
"layers",
"of",
"decorators",
"attempting",
"to",
"locate",
"the",
"actual",
"argspec",
"for",
"the",
"method",
"."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L1215-L1237 | train |
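Note that `inspect.getargspec` was long deprecated and removed in Python 3.11; `inspect.getfullargspec` is the modern equivalent. A runnable illustration of why the closure drilling is needed at all, using a plain wrapper decorator with made-up names:

```python
import inspect

def noisy(fn):                      # a decorator that hides fn's signature
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapper

@noisy
def view(self, page_id):
    pass

print(inspect.getfullargspec(view).args)    # [] -- only the wrapper's signature
inner = view.__closure__[0].cell_contents   # drill into the closure, as above
print(inspect.getfullargspec(inner).args)   # ['self', 'page_id']
```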
mardix/Mocha | mocha/core.py | Mocha.setup_installed_apps | def setup_installed_apps(cls):
"""
To import 3rd-party applications along with associated properties.
It is a list of dicts or strings.
When a dict, it contains the `app` key and the configuration;
if it's a string, it is just the app name.
If you require dependencies from other packages, dependencies
must be placed before the calling package.
It is required that __init__ in the package app has an entry point method
-> 'main(**kw)' which will be used to set up the default app.
Example:
INSTALLED_APPS = [
"it.can.be.a.string.to.the.module",
("in.a.tuple.with.props.dict", {options}),
[
("multi.app.list.in.a.list.of.tuple", {options}),
("multi.app.list.in.a.list.of.tuple2", {options})
]
]
:return:
"""
cls._installed_apps = cls._app.config.get("INSTALLED_APPS", [])
if cls._installed_apps:
def import_app(module, props={}):
_ = werkzeug.import_string(module)
setattr(_, "__options__", utils.dict_dot(props))
for k in cls._installed_apps:
if isinstance(k, six.string_types): # One string
import_app(k, {})
elif isinstance(k, tuple):
import_app(k[0], k[1])
elif isinstance(k, list): # list of tuple[(module props), ...]
for t in k:
import_app(t[0], t[1]) | python | def setup_installed_apps(cls):
"""
To import 3rd-party applications along with associated properties.
It is a list of dicts or strings.
When a dict, it contains the `app` key and the configuration;
if it's a string, it is just the app name.
If you require dependencies from other packages, dependencies
must be placed before the calling package.
It is required that __init__ in the package app has an entry point method
-> 'main(**kw)' which will be used to set up the default app.
Example:
INSTALLED_APPS = [
"it.can.be.a.string.to.the.module",
("in.a.tuple.with.props.dict", {options}),
[
("multi.app.list.in.a.list.of.tuple", {options}),
("multi.app.list.in.a.list.of.tuple2", {options})
]
]
:return:
"""
cls._installed_apps = cls._app.config.get("INSTALLED_APPS", [])
if cls._installed_apps:
def import_app(module, props={}):
_ = werkzeug.import_string(module)
setattr(_, "__options__", utils.dict_dot(props))
for k in cls._installed_apps:
if isinstance(k, six.string_types): # One string
import_app(k, {})
elif isinstance(k, tuple):
import_app(k[0], k[1])
elif isinstance(k, list): # list of tuple[(module props), ...]
for t in k:
import_app(t[0], t[1]) | [
"def",
"setup_installed_apps",
"(",
"cls",
")",
":",
"cls",
".",
"_installed_apps",
"=",
"cls",
".",
"_app",
".",
"config",
".",
"get",
"(",
"\"INSTALLED_APPS\"",
",",
"[",
"]",
")",
"if",
"cls",
".",
"_installed_apps",
":",
"def",
"import_app",
"(",
"module",
",",
"props",
"=",
"{",
"}",
")",
":",
"_",
"=",
"werkzeug",
".",
"import_string",
"(",
"module",
")",
"setattr",
"(",
"_",
",",
"\"__options__\"",
",",
"utils",
".",
"dict_dot",
"(",
"props",
")",
")",
"for",
"k",
"in",
"cls",
".",
"_installed_apps",
":",
"if",
"isinstance",
"(",
"k",
",",
"six",
".",
"string_types",
")",
":",
"# One string",
"import_app",
"(",
"k",
",",
"{",
"}",
")",
"elif",
"isinstance",
"(",
"k",
",",
"tuple",
")",
":",
"import_app",
"(",
"k",
"[",
"0",
"]",
",",
"k",
"[",
"1",
"]",
")",
"elif",
"isinstance",
"(",
"k",
",",
"list",
")",
":",
"# list of tuple[(module props), ...]",
"for",
"t",
"in",
"k",
":",
"import_app",
"(",
"t",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
")"
]
| To import 3rd-party applications along with associated properties.
It is a list of dicts or strings.
When a dict, it contains the `app` key and the configuration;
if it's a string, it is just the app name.
If you require dependencies from other packages, dependencies
must be placed before the calling package.
It is required that __init__ in the package app has an entry point method
-> 'main(**kw)' which will be used to set up the default app.
Example:
INSTALLED_APPS = [
"it.can.be.a.string.to.the.module",
("in.a.tuple.with.props.dict", {options}),
[
("multi.app.list.in.a.list.of.tuple", {options}),
("multi.app.list.in.a.list.of.tuple2", {options})
]
]
:return: | [
"To",
"import",
"3rd",
"party",
"applications",
"along",
"with",
"associated",
"properties"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L746-L787 | train |
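A dependency-free sketch of the shape dispatch in `setup_installed_apps`, with `print` standing in for `werkzeug.import_string` and made-up app names:

```python
INSTALLED_APPS = [
    "blog",                                        # bare string
    ("shop", {"currency": "USD"}),                 # (module, props) tuple
    [("auth", {}), ("admin", {"theme": "dark"})],  # list of tuples
]

def import_app(module, props):
    print("importing", module, "with", props)      # real code imports the module here

for entry in INSTALLED_APPS:
    if isinstance(entry, str):
        import_app(entry, {})
    elif isinstance(entry, tuple):
        import_app(entry[0], entry[1])
    elif isinstance(entry, list):
        for module, props in entry:
            import_app(module, props)
```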
mardix/Mocha | mocha/core.py | Mocha._add_asset_bundle | def _add_asset_bundle(cls, path):
"""
Add a webassets bundle yml file
"""
f = "%s/assets.yml" % path
if os.path.isfile(f):
cls._asset_bundles.add(f) | python | def _add_asset_bundle(cls, path):
"""
Add a webassets bundle yml file
"""
f = "%s/assets.yml" % path
if os.path.isfile(f):
cls._asset_bundles.add(f) | [
"def",
"_add_asset_bundle",
"(",
"cls",
",",
"path",
")",
":",
"f",
"=",
"\"%s/assets.yml\"",
"%",
"path",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"cls",
".",
"_asset_bundles",
".",
"add",
"(",
"f",
")"
]
| Add a webassets bundle yml file | [
"Add",
"a",
"webassets",
"bundle",
"yml",
"file"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L825-L831 | train |
mardix/Mocha | mocha/core.py | Mocha._setup_db | def _setup_db(cls):
"""
Setup the DB connection if DB_URL is set
"""
uri = cls._app.config.get("DB_URL")
if uri:
db.connect__(uri, cls._app) | python | def _setup_db(cls):
"""
Setup the DB connection if DB_URL is set
"""
uri = cls._app.config.get("DB_URL")
if uri:
db.connect__(uri, cls._app) | [
"def",
"_setup_db",
"(",
"cls",
")",
":",
"uri",
"=",
"cls",
".",
"_app",
".",
"config",
".",
"get",
"(",
"\"DB_URL\"",
")",
"if",
"uri",
":",
"db",
".",
"connect__",
"(",
"uri",
",",
"cls",
".",
"_app",
")"
]
| Setup the DB connection if DB_URL is set | [
"Setup",
"the",
"DB",
"connection",
"if",
"DB_URL",
"is",
"set"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L858-L864 | train |
mardix/Mocha | mocha/core.py | Mocha.parse_options | def parse_options(cls, options):
"""Extracts subdomain and endpoint values from the options dict and returns
them along with a new dict without those values.
"""
options = options.copy()
subdomain = options.pop('subdomain', None)
endpoint = options.pop('endpoint', None)
return subdomain, endpoint, options, | python | def parse_options(cls, options):
"""Extracts subdomain and endpoint values from the options dict and returns
them along with a new dict without those values.
"""
options = options.copy()
subdomain = options.pop('subdomain', None)
endpoint = options.pop('endpoint', None)
return subdomain, endpoint, options, | [
"def",
"parse_options",
"(",
"cls",
",",
"options",
")",
":",
"options",
"=",
"options",
".",
"copy",
"(",
")",
"subdomain",
"=",
"options",
".",
"pop",
"(",
"'subdomain'",
",",
"None",
")",
"endpoint",
"=",
"options",
".",
"pop",
"(",
"'endpoint'",
",",
"None",
")",
"return",
"subdomain",
",",
"endpoint",
",",
"options",
","
]
| Extracts subdomain and endpoint values from the options dict and returns
them along with a new dict without those values. | [
"Extracts",
"subdomain",
"and",
"endpoint",
"values",
"from",
"the",
"options",
"dict",
"and",
"returns",
"them",
"along",
"with",
"a",
"new",
"dict",
"without",
"those",
"values",
"."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L1002-L1009 | train |
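The copy-then-pop pattern keeps the caller's dict intact; a quick check of that behavior:

```python
options = {"subdomain": "api", "endpoint": "user.show", "strict_slashes": False}

opts = options.copy()
subdomain = opts.pop("subdomain", None)
endpoint = opts.pop("endpoint", None)

print(subdomain, endpoint, opts)  # api user.show {'strict_slashes': False}
print(options)                    # the original dict is untouched
```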
mardix/Mocha | mocha/core.py | Mocha.get_base_route | def get_base_route(cls):
"""Returns the route base to use for the current class."""
base_route = cls.__name__.lower()
if cls.base_route is not None:
base_route = cls.base_route
base_rule = parse_rule(base_route)
cls.base_args = [r[2] for r in base_rule]
return base_route.strip("/") | python | def get_base_route(cls):
"""Returns the route base to use for the current class."""
base_route = cls.__name__.lower()
if cls.base_route is not None:
base_route = cls.base_route
base_rule = parse_rule(base_route)
cls.base_args = [r[2] for r in base_rule]
return base_route.strip("/") | [
"def",
"get_base_route",
"(",
"cls",
")",
":",
"base_route",
"=",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
"if",
"cls",
".",
"base_route",
"is",
"not",
"None",
":",
"base_route",
"=",
"cls",
".",
"base_route",
"base_rule",
"=",
"parse_rule",
"(",
"base_route",
")",
"cls",
".",
"base_args",
"=",
"[",
"r",
"[",
"2",
"]",
"for",
"r",
"in",
"base_rule",
"]",
"return",
"base_route",
".",
"strip",
"(",
"\"/\"",
")"
]
| Returns the route base to use for the current class. | [
"Returns",
"the",
"route",
"base",
"to",
"use",
"for",
"the",
"current",
"class",
"."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L1119-L1126 | train |
clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph.find_gene_by_name | def find_gene_by_name(self, gene_name: str) -> Gene:
"""
Find and return a gene in the influence graph with the given name.
Raise an AttributeError if there is no gene in the graph with the given name.
"""
for gene in self.genes:
if gene.name == gene_name:
return gene
raise AttributeError(f'gene "{gene_name}" does not exist') | python | def find_gene_by_name(self, gene_name: str) -> Gene:
"""
Find and return a gene in the influence graph with the given name.
Raise an AttributeError if there is no gene in the graph with the given name.
"""
for gene in self.genes:
if gene.name == gene_name:
return gene
raise AttributeError(f'gene "{gene_name}" does not exist') | [
"def",
"find_gene_by_name",
"(",
"self",
",",
"gene_name",
":",
"str",
")",
"->",
"Gene",
":",
"for",
"gene",
"in",
"self",
".",
"genes",
":",
"if",
"gene",
".",
"name",
"==",
"gene_name",
":",
"return",
"gene",
"raise",
"AttributeError",
"(",
"f'gene \"{gene_name}\" does not exist'",
")"
]
| Find and return a gene in the influence graph with the given name.
Raise an AttributeError if there is no gene in the graph with the given name. | [
"Find",
"and",
"return",
"a",
"gene",
"in",
"the",
"influence",
"graph",
"with",
"the",
"given",
"name",
".",
"Raise",
"an",
"AttributeError",
"if",
"there",
"is",
"no",
"gene",
"in",
"the",
"graph",
"with",
"the",
"given",
"name",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L24-L32 | train |
clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph.find_multiplex_by_name | def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex:
"""
Find and return a multiplex in the influence graph with the given name.
Raise an AttributeError if there is no multiplex in the graph with the given name.
"""
for multiplex in self.multiplexes:
if multiplex.name == multiplex_name:
return multiplex
raise AttributeError(f'multiplex "{multiplex_name}" does not exist') | python | def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex:
"""
Find and return a multiplex in the influence graph with the given name.
Raise an AttributeError if there is no multiplex in the graph with the given name.
"""
for multiplex in self.multiplexes:
if multiplex.name == multiplex_name:
return multiplex
raise AttributeError(f'multiplex "{multiplex_name}" does not exist') | [
"def",
"find_multiplex_by_name",
"(",
"self",
",",
"multiplex_name",
":",
"str",
")",
"->",
"Multiplex",
":",
"for",
"multiplex",
"in",
"self",
".",
"multiplexes",
":",
"if",
"multiplex",
".",
"name",
"==",
"multiplex_name",
":",
"return",
"multiplex",
"raise",
"AttributeError",
"(",
"f'multiplex \"{multiplex_name}\" does not exist'",
")"
]
| Find and return a multiplex in the influence graph with the given name.
Raise an AttributeError if there is no multiplex in the graph with the given name. | [
"Find",
"and",
"return",
"a",
"multiplex",
"in",
"the",
"influence",
"graph",
"with",
"the",
"given",
"name",
".",
"Raise",
"an",
"AttributeError",
"if",
"there",
"is",
"no",
"multiplex",
"in",
"the",
"graph",
"with",
"the",
"given",
"name",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L38-L46 | train |
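The two finders above are the same linear search over differently named collections; a generic helper (hypothetical, not part of InfluenceGraph) would remove the duplication:

```python
def find_by_name(items, name, kind):
    # Linear scan, raising the same AttributeError shape as both finders.
    for obj in items:
        if obj.name == name:
            return obj
    raise AttributeError(f'{kind} "{name}" does not exist')

class Named:
    def __init__(self, name):
        self.name = name

genes = [Named("operon"), Named("mucuB")]
print(find_by_name(genes, "mucuB", "gene").name)   # mucuB
```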
clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph.all_states | def all_states(self) -> Tuple[State, ...]:
""" Return all the possible states of this influence graph. """
return tuple(self._transform_list_of_states_to_state(states)
for states in self._cartesian_product_of_every_states_of_each_genes()) | python | def all_states(self) -> Tuple[State, ...]:
""" Return all the possible states of this influence graph. """
return tuple(self._transform_list_of_states_to_state(states)
for states in self._cartesian_product_of_every_states_of_each_genes()) | [
"def",
"all_states",
"(",
"self",
")",
"->",
"Tuple",
"[",
"State",
",",
"...",
"]",
":",
"return",
"tuple",
"(",
"self",
".",
"_transform_list_of_states_to_state",
"(",
"states",
")",
"for",
"states",
"in",
"self",
".",
"_cartesian_product_of_every_states_of_each_genes",
"(",
")",
")"
]
| Return all the possible states of this influence graph. | [
"Return",
"all",
"the",
"possible",
"states",
"of",
"this",
"influence",
"graph",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L48-L51 | train |
clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph._cartesian_product_of_every_states_of_each_genes | def _cartesian_product_of_every_states_of_each_genes(self) -> Tuple[Tuple[int, ...]]:
"""
Private method which returns the cartesian product of the states
of the genes in the model. It represents all the possible states for a given model.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
Then this method returns ((0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1))
For each tuple, the first element is the state of the operon gene, and the
second element stands for the state of the mucuB gene.
"""
if not self.genes:
return ()
return tuple(product(*[gene.states for gene in self.genes])) | python | def _cartesian_product_of_every_states_of_each_genes(self) -> Tuple[Tuple[int, ...]]:
"""
Private method which returns the cartesian product of the states
of the genes in the model. It represents all the possible states for a given model.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
Then this method returns ((0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1))
For each tuple, the first element is the state of the operon gene, and the
second element stands for the state of the mucuB gene.
"""
if not self.genes:
return ()
return tuple(product(*[gene.states for gene in self.genes])) | [
"def",
"_cartesian_product_of_every_states_of_each_genes",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"int",
",",
"...",
"]",
"]",
":",
"if",
"not",
"self",
".",
"genes",
":",
"return",
"(",
")",
"return",
"tuple",
"(",
"product",
"(",
"*",
"[",
"gene",
".",
"states",
"for",
"gene",
"in",
"self",
".",
"genes",
"]",
")",
")"
]
| Private method which returns the cartesian product of the states
of the genes in the model. It represents all the possible states for a given model.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
Then this method returns ((0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1))
For each tuple, the first element is the state of the operon gene, and the
second element stands for the state of the mucuB gene. | [
"Private",
"method",
"which",
"return",
"the",
"cartesian",
"product",
"of",
"the",
"states",
"of",
"the",
"genes",
"in",
"the",
"model",
".",
"It",
"represents",
"all",
"the",
"possible",
"state",
"for",
"a",
"given",
"model",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L53-L69 | train |
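The docstring's example, reproduced directly with `itertools.product`:

```python
from itertools import product

gene_states = [(0, 1, 2), (0, 1)]   # operon states, mucuB states
print(tuple(product(*gene_states)))
# ((0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1))
```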
clement-alexandre/TotemBionet | totembionet/src/discrete_model/influence_graph.py | InfluenceGraph._transform_list_of_states_to_state | def _transform_list_of_states_to_state(self, state: List[int]) -> State:
"""
Private method which transforms a list containing the state of each gene
in the model into a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_state([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_state([2, 0])
{operon: 2, mucuB: 0}
"""
return State({gene: state[i] for i, gene in enumerate(self.genes)}) | python | def _transform_list_of_states_to_state(self, state: List[int]) -> State:
"""
Private method which transforms a list containing the state of each gene
in the model into a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_state([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_state([2, 0])
{operon: 2, mucuB: 0}
"""
return State({gene: state[i] for i, gene in enumerate(self.genes)}) | [
"def",
"_transform_list_of_states_to_state",
"(",
"self",
",",
"state",
":",
"List",
"[",
"int",
"]",
")",
"->",
"State",
":",
"return",
"State",
"(",
"{",
"gene",
":",
"state",
"[",
"i",
"]",
"for",
"i",
",",
"gene",
"in",
"enumerate",
"(",
"self",
".",
"genes",
")",
"}",
")"
]
| Private method which transforms a list containing the state of each gene
in the model into a State object.
Examples
--------
The model contains 2 genes: operon = {0, 1, 2}
mucuB = {0, 1}
>>> graph._transform_list_of_states_to_state([0, 1])
{operon: 0, mucuB: 1}
>>> graph._transform_list_of_states_to_state([2, 0])
{operon: 2, mucuB: 0} | [
"Private",
"method",
"which",
"transform",
"a",
"list",
"which",
"contains",
"the",
"state",
"of",
"the",
"gene",
"in",
"the",
"models",
"to",
"a",
"State",
"object",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/influence_graph.py#L71-L86 | train |
Kortemme-Lab/klab | klab/box_backup.py | read_sha1 | def read_sha1(
file_path,
buf_size = None,
start_byte = 0,
read_size = None,
extra_hashers = [], # update(data) will be called on all of these
):
'''
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
'''
read_size = read_size or os.stat(file_path).st_size
buf_size = buf_size or DEFAULT_BUFFER_SIZE
data_read = 0
total_sha1 = hashlib.sha1()
while data_read < read_size:
with open( file_path, 'rb', buffering = 0 ) as f:
f.seek( start_byte )
data = f.read( min(buf_size, read_size - data_read) )
assert( len(data) > 0 )
total_sha1.update( data )
for hasher in extra_hashers:
hasher.update( data )
data_read += len(data)
start_byte += len(data)
assert( data_read == read_size )
return total_sha1 | python | def read_sha1(
file_path,
buf_size = None,
start_byte = 0,
read_size = None,
extra_hashers = [], # update(data) will be called on all of these
):
'''
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
'''
read_size = read_size or os.stat(file_path).st_size
buf_size = buf_size or DEFAULT_BUFFER_SIZE
data_read = 0
total_sha1 = hashlib.sha1()
while data_read < read_size:
with open( file_path, 'rb', buffering = 0 ) as f:
f.seek( start_byte )
data = f.read( min(buf_size, read_size - data_read) )
assert( len(data) > 0 )
total_sha1.update( data )
for hasher in extra_hashers:
hasher.update( data )
data_read += len(data)
start_byte += len(data)
assert( data_read == read_size )
return total_sha1 | [
"def",
"read_sha1",
"(",
"file_path",
",",
"buf_size",
"=",
"None",
",",
"start_byte",
"=",
"0",
",",
"read_size",
"=",
"None",
",",
"extra_hashers",
"=",
"[",
"]",
",",
"# update(data) will be called on all of these",
")",
":",
"read_size",
"=",
"read_size",
"or",
"os",
".",
"stat",
"(",
"file_path",
")",
".",
"st_size",
"buf_size",
"=",
"buf_size",
"or",
"DEFAULT_BUFFER_SIZE",
"data_read",
"=",
"0",
"total_sha1",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"while",
"data_read",
"<",
"read_size",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
",",
"buffering",
"=",
"0",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"start_byte",
")",
"data",
"=",
"f",
".",
"read",
"(",
"min",
"(",
"buf_size",
",",
"read_size",
"-",
"data_read",
")",
")",
"assert",
"(",
"len",
"(",
"data",
")",
">",
"0",
")",
"total_sha1",
".",
"update",
"(",
"data",
")",
"for",
"hasher",
"in",
"extra_hashers",
":",
"hasher",
".",
"update",
"(",
"data",
")",
"data_read",
"+=",
"len",
"(",
"data",
")",
"start_byte",
"+=",
"len",
"(",
"data",
")",
"assert",
"(",
"data_read",
"==",
"read_size",
")",
"return",
"total_sha1"
]
| Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory | [
"Determines",
"the",
"sha1",
"hash",
"of",
"a",
"file",
"in",
"chunks",
"to",
"prevent",
"loading",
"the",
"entire",
"file",
"at",
"once",
"into",
"memory"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/box_backup.py#L665-L692 | train |
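Note that `read_sha1` reopens the file on every loop iteration; a single open with sequential buffered reads is equivalent and cheaper. A self-contained check that chunked hashing matches one-shot hashing:

```python
import hashlib
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(os.urandom(100000))
    path = f.name

with open(path, "rb") as f:
    one_shot = hashlib.sha1(f.read()).hexdigest()

chunked = hashlib.sha1()
with open(path, "rb") as f:
    for data in iter(lambda: f.read(8192), b""):   # read until EOF in 8 KiB chunks
        chunked.update(data)

os.unlink(path)
assert chunked.hexdigest() == one_shot
print("chunked sha1 matches:", one_shot)
```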
Kortemme-Lab/klab | klab/box_backup.py | BoxAPI.verify_uploaded_file | def verify_uploaded_file(
self,
destination_folder_id,
source_path,
verbose = True,
):
'''
Verifies the integrity of a file uploaded to Box
'''
source_file_size = os.stat(source_path).st_size
total_part_size = 0
file_position = 0
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
total_sha1 = hashlib.sha1()
for i, file_id in enumerate(uploaded_box_file_ids):
file_info = self.client.file( file_id = file_id ).get()
uploaded_sha1 = file_info.response_object['sha1']
uploaded_size = file_info.response_object['size']
part_sha1 = read_sha1( source_path, start_byte = file_position, read_size = uploaded_size, extra_hashers = [total_sha1] )
if part_sha1.hexdigest() != uploaded_sha1:
print( '\n' )
print( 'Part sha1: ' + part_sha1.hexdigest() )
print( 'Uploaded sha1: ' + uploaded_sha1 )
print('Sha1 hash of uploaded file {0} ({1}) does not match'.format(file_info.response_object['name'], file_id) )
return False
file_position += uploaded_size
total_part_size += uploaded_size
if len(uploaded_box_file_ids) > 1:
print( 'Finished verifying part {0} of {1} of {2}'.format( i+1, len(uploaded_box_file_ids), file_id ) )
assert( source_file_size == total_part_size )
if verbose:
print( 'Verified uploaded file {0} ({1}) with sha1: {2}'.format(source_path, file_id, total_sha1.hexdigest()) )
return True | python | def verify_uploaded_file(
self,
destination_folder_id,
source_path,
verbose = True,
):
'''
Verifies the integrity of a file uploaded to Box
'''
source_file_size = os.stat(source_path).st_size
total_part_size = 0
file_position = 0
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
total_sha1 = hashlib.sha1()
for i, file_id in enumerate(uploaded_box_file_ids):
file_info = self.client.file( file_id = file_id ).get()
uploaded_sha1 = file_info.response_object['sha1']
uploaded_size = file_info.response_object['size']
part_sha1 = read_sha1( source_path, start_byte = file_position, read_size = uploaded_size, extra_hashers = [total_sha1] )
if part_sha1.hexdigest() != uploaded_sha1:
print( '\n' )
print( 'Part sha1: ' + part_sha1.hexdigest() )
print( 'Uploaded sha1: ' + uploaded_sha1 )
print('Sha1 hash of uploaded file {0} ({1}) does not match'.format(file_info.response_object['name'], file_id) )
return False
file_position += uploaded_size
total_part_size += uploaded_size
if len(uploaded_box_file_ids) > 1:
print( 'Finished verifying part {0} of {1} of {2}'.format( i+1, len(uploaded_box_file_ids), file_id ) )
assert( source_file_size == total_part_size )
if verbose:
print( 'Verified uploaded file {0} ({1}) with sha1: {2}'.format(source_path, file_id, total_sha1.hexdigest()) )
return True | [
"def",
"verify_uploaded_file",
"(",
"self",
",",
"destination_folder_id",
",",
"source_path",
",",
"verbose",
"=",
"True",
",",
")",
":",
"source_file_size",
"=",
"os",
".",
"stat",
"(",
"source_path",
")",
".",
"st_size",
"total_part_size",
"=",
"0",
"file_position",
"=",
"0",
"uploaded_box_file_ids",
"=",
"self",
".",
"find_file",
"(",
"destination_folder_id",
",",
"os",
".",
"path",
".",
"basename",
"(",
"source_path",
")",
")",
"total_sha1",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"for",
"i",
",",
"file_id",
"in",
"enumerate",
"(",
"uploaded_box_file_ids",
")",
":",
"file_info",
"=",
"self",
".",
"client",
".",
"file",
"(",
"file_id",
"=",
"file_id",
")",
".",
"get",
"(",
")",
"uploaded_sha1",
"=",
"file_info",
".",
"response_object",
"[",
"'sha1'",
"]",
"uploaded_size",
"=",
"file_info",
".",
"response_object",
"[",
"'size'",
"]",
"part_sha1",
"=",
"read_sha1",
"(",
"source_path",
",",
"start_byte",
"=",
"file_position",
",",
"read_size",
"=",
"uploaded_size",
",",
"extra_hashers",
"=",
"[",
"total_sha1",
"]",
")",
"if",
"part_sha1",
".",
"hexdigest",
"(",
")",
"!=",
"uploaded_sha1",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Part sha1: '",
"+",
"part_sha1",
".",
"hexdigest",
"(",
")",
")",
"print",
"(",
"'Uploaded sha1: '",
"+",
"uploaded_sha1",
")",
"print",
"(",
"'Sha1 hash of uploaded file {0} ({1}) does not match'",
".",
"format",
"(",
"file_info",
".",
"response_object",
"[",
"'name'",
"]",
",",
"file_id",
")",
")",
"return",
"False",
"file_position",
"+=",
"uploaded_size",
"total_part_size",
"+=",
"uploaded_size",
"if",
"len",
"(",
"uploaded_box_file_ids",
")",
">",
"1",
":",
"print",
"(",
"'Finished verifying part {0} of {1} of {2}'",
".",
"format",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"uploaded_box_file_ids",
")",
",",
"file_id",
")",
")",
"assert",
"(",
"source_file_size",
"==",
"total_part_size",
")",
"if",
"verbose",
":",
"print",
"(",
"'Verified uploaded file {0} ({1}) with sha1: {2}'",
".",
"format",
"(",
"source_path",
",",
"file_id",
",",
"total_sha1",
".",
"hexdigest",
"(",
")",
")",
")",
"return",
"True"
]
| Verifies the integrity of a file uploaded to Box | [
"Verifies",
"the",
"integrity",
"of",
"a",
"file",
"uploaded",
"to",
"Box"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/box_backup.py#L242-L280 | train |
uogbuji/versa | tools/py/reader/md.py | handle_resourcelist | def handle_resourcelist(ltext, **kwargs):
'''
A helper that converts lists of resources from a textual format such as Markdown, including absolutizing relative IRIs
'''
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
newlist = model.generate_resource()
for i in iris:
model.add(newlist, VERSA_BASEIRI + 'item', I(iri.absolutize(i, base)))
return newlist | python | def handle_resourcelist(ltext, **kwargs):
'''
A helper that converts lists of resources from a textual format such as Markdown, including absolutizing relative IRIs
'''
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
newlist = model.generate_resource()
for i in iris:
model.add(newlist, VERSA_BASEIRI + 'item', I(iri.absolutize(i, base)))
return newlist | [
"def",
"handle_resourcelist",
"(",
"ltext",
",",
"*",
"*",
"kwargs",
")",
":",
"base",
"=",
"kwargs",
".",
"get",
"(",
"'base'",
",",
"VERSA_BASEIRI",
")",
"model",
"=",
"kwargs",
".",
"get",
"(",
"'model'",
")",
"iris",
"=",
"ltext",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"newlist",
"=",
"model",
".",
"generate_resource",
"(",
")",
"for",
"i",
"in",
"iris",
":",
"model",
".",
"add",
"(",
"newlist",
",",
"VERSA_BASEIRI",
"+",
"'item'",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"i",
",",
"base",
")",
")",
")",
"return",
"newlist"
]
| A helper that converts lists of resources from a textual format such as Markdown, including absolutizing relative IRIs | [
"A",
"helper",
"that",
"converts",
"lists",
"of",
"resources",
"from",
"a",
"textual",
"format",
"such",
"as",
"Markdown",
"including",
"absolutizing",
"relative",
"IRIs"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/reader/md.py#L63-L73 | train |
uogbuji/versa | tools/py/reader/md.py | handle_resourceset | def handle_resourceset(ltext, **kwargs):
'''
A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs
'''
fullprop=kwargs.get('fullprop')
rid=kwargs.get('rid')
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
for i in iris:
model.add(rid, fullprop, I(iri.absolutize(i, base)))
return None | python | def handle_resourceset(ltext, **kwargs):
'''
A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs
'''
fullprop=kwargs.get('fullprop')
rid=kwargs.get('rid')
base=kwargs.get('base', VERSA_BASEIRI)
model=kwargs.get('model')
iris = ltext.strip().split()
for i in iris:
model.add(rid, fullprop, I(iri.absolutize(i, base)))
return None | [
"def",
"handle_resourceset",
"(",
"ltext",
",",
"*",
"*",
"kwargs",
")",
":",
"fullprop",
"=",
"kwargs",
".",
"get",
"(",
"'fullprop'",
")",
"rid",
"=",
"kwargs",
".",
"get",
"(",
"'rid'",
")",
"base",
"=",
"kwargs",
".",
"get",
"(",
"'base'",
",",
"VERSA_BASEIRI",
")",
"model",
"=",
"kwargs",
".",
"get",
"(",
"'model'",
")",
"iris",
"=",
"ltext",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"for",
"i",
"in",
"iris",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"i",
",",
"base",
")",
")",
")",
"return",
"None"
]
| A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs | [
"A",
"helper",
"that",
"converts",
"sets",
"of",
"resources",
"from",
"a",
"textual",
"format",
"such",
"as",
"Markdown",
"including",
"absolutizing",
"relative",
"IRIs"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/reader/md.py#L76-L87 | train |
Alveo/pyalveo | pyalveo/cache.py | Cache.create_cache_database | def create_cache_database(self):
""" Create a new SQLite3 database for use with Cache objects
:raises: IOError if there is a problem creating the database file
"""
conn = sqlite3.connect(self.database)
conn.text_factory = str
c = conn.cursor()
c.execute("""CREATE TABLE items
(url text, metadata text, datetime text)""")
c.execute("""CREATE TABLE documents
(url text, path text, datetime text)""")
c.execute("""CREATE TABLE primary_texts
(item_url text, primary_text text, datetime text)""")
conn.commit()
conn.close() | python | def create_cache_database(self):
""" Create a new SQLite3 database for use with Cache objects
:raises: IOError if there is a problem creating the database file
"""
conn = sqlite3.connect(self.database)
conn.text_factory = str
c = conn.cursor()
c.execute("""CREATE TABLE items
(url text, metadata text, datetime text)""")
c.execute("""CREATE TABLE documents
(url text, path text, datetime text)""")
c.execute("""CREATE TABLE primary_texts
(item_url text, primary_text text, datetime text)""")
conn.commit()
conn.close() | [
"def",
"create_cache_database",
"(",
"self",
")",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"database",
")",
"conn",
".",
"text_factory",
"=",
"str",
"c",
"=",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"\"\"CREATE TABLE items\n (url text, metadata text, datetime text)\"\"\"",
")",
"c",
".",
"execute",
"(",
"\"\"\"CREATE TABLE documents\n (url text, path text, datetime text)\"\"\"",
")",
"c",
".",
"execute",
"(",
"\"\"\"CREATE TABLE primary_texts\n (item_url text, primary_text text, datetime text)\"\"\"",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")"
]
| Create a new SQLite3 database for use with Cache objects
:raises: IOError if there is a problem creating the database file | [
"Create",
"a",
"new",
"SQLite3",
"database",
"for",
"use",
"with",
"Cache",
"objects"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L77-L96 | train |
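An in-memory run of the same schema, to show the three tables it creates:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("CREATE TABLE items (url text, metadata text, datetime text)")
c.execute("CREATE TABLE documents (url text, path text, datetime text)")
c.execute("CREATE TABLE primary_texts (item_url text, primary_text text, datetime text)")
conn.commit()
print([r[0] for r in c.execute("SELECT name FROM sqlite_master WHERE type='table'")])
# ['items', 'documents', 'primary_texts']
```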
Alveo/pyalveo | pyalveo/cache.py | Cache.__exists_row_not_too_old | def __exists_row_not_too_old(self, row):
""" Check if the given row exists and is not too old """
if row is None:
return False
record_time = dateutil.parser.parse(row[2])
now = datetime.datetime.now(dateutil.tz.gettz())
age = (record_time - now).total_seconds()
if age > self.max_age:
return False
return True | python | def __exists_row_not_too_old(self, row):
""" Check if the given row exists and is not too old """
if row is None:
return False
record_time = dateutil.parser.parse(row[2])
now = datetime.datetime.now(dateutil.tz.gettz())
age = (record_time - now).total_seconds()
if age > self.max_age:
return False
return True | [
"def",
"__exists_row_not_too_old",
"(",
"self",
",",
"row",
")",
":",
"if",
"row",
"is",
"None",
":",
"return",
"False",
"record_time",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"row",
"[",
"2",
"]",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"dateutil",
".",
"tz",
".",
"gettz",
"(",
")",
")",
"age",
"=",
"(",
"record_time",
"-",
"now",
")",
".",
"total_seconds",
"(",
")",
"if",
"age",
">",
"self",
".",
"max_age",
":",
"return",
"False",
"return",
"True"
]
| Check if the given row exists and is not too old | [
"Check",
"if",
"the",
"given",
"row",
"exists",
"and",
"is",
"not",
"too",
"old"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L135-L145 | train |
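The age computation here looks inverted: `(record_time - now)` is negative for any record written in the past, so with a positive `max_age` the `age > self.max_age` rejection can never fire; the conventional freshness check subtracts the other way. A sketch with the subtraction the usual way round, plus a guard for the documented "zero means no limit" case (an illustration assuming python-dateutil, as the original code does, not the library's code):

```python
import datetime
import dateutil.parser
import dateutil.tz

def row_is_fresh(row, max_age):
    if row is None:
        return False
    if not max_age:                      # zero/None: no age limit, per has_item's docstring
        return True
    record_time = dateutil.parser.parse(row[2])
    now = datetime.datetime.now(dateutil.tz.gettz())
    return (now - record_time).total_seconds() <= max_age
```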
Alveo/pyalveo | pyalveo/cache.py | Cache.has_item | def has_item(self, item_url):
""" Check if the metadata for the given item is present in
the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: Boolean
:returns: True if the item is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM items WHERE url=?", (str(item_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) | python | def has_item(self, item_url):
""" Check if the metadata for the given item is present in
the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: Boolean
:returns: True if the item is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM items WHERE url=?", (str(item_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) | [
"def",
"has_item",
"(",
"self",
",",
"item_url",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT * FROM items WHERE url=?\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"c",
".",
"close",
"(",
")",
"return",
"self",
".",
"__exists_row_not_too_old",
"(",
"row",
")"
]
| Check if the metadata for the given item is present in
the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: Boolean
:returns: True if the item is present, False otherwise | [
"Check",
"if",
"the",
"metadata",
"for",
"the",
"given",
"item",
"is",
"present",
"in",
"the",
"cache"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L153-L172 | train |
Alveo/pyalveo | pyalveo/cache.py | Cache.has_document | def has_document(self, doc_url):
""" Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) | python | def has_document(self, doc_url):
""" Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
return self.__exists_row_not_too_old(row) | [
"def",
"has_document",
"(",
"self",
",",
"doc_url",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT * FROM documents WHERE url=?\"",
",",
"(",
"str",
"(",
"doc_url",
")",
",",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"c",
".",
"close",
"(",
")",
"return",
"self",
".",
"__exists_row_not_too_old",
"(",
"row",
")"
]
| Check if the content of the given document is present
in the cache
If the max_age attribute of this Cache is set to a nonzero value,
entries older than the value of max_age in seconds will be ignored
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: Boolean
:returns: True if the data is present, False otherwise | [
"Check",
"if",
"the",
"content",
"of",
"the",
"given",
"document",
"is",
"present",
"in",
"the",
"cache"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L175-L194 | train |
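A minimal usage sketch for the two existence checks above, assuming a connected `Cache` instance named `cache`; the URLs are placeholders, and both methods accept either a plain string or an Item/Document object, since they call str() on the argument before querying.

item_url = 'https://example.alveo.org/item/1'   # illustrative URL
doc_url = 'https://example.alveo.org/document/1'
if cache.has_item(item_url):
    print('item metadata is cached and fresh enough')
if cache.has_document(doc_url):
    print('document content is cached and fresh enough')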
Alveo/pyalveo | pyalveo/cache.py | Cache.get_document | def get_document(self, doc_url):
""" Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError("Item not present in cache")
file_path = row[1]
try:
with open(file_path, 'rb') as f:
return f.read()
except IOError as e:
raise IOError("Error reading file " + file_path +
" to retrieve document " + doc_url +
": " + e.message) | python | def get_document(self, doc_url):
""" Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError("Item not present in cache")
file_path = row[1]
try:
with open(file_path, 'rb') as f:
return f.read()
except IOError as e:
raise IOError("Error reading file " + file_path +
" to retrieve document " + doc_url +
": " + e.message) | [
"def",
"get_document",
"(",
"self",
",",
"doc_url",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT * FROM documents WHERE url=?\"",
",",
"(",
"str",
"(",
"doc_url",
")",
",",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"c",
".",
"close",
"(",
")",
"if",
"row",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Item not present in cache\"",
")",
"file_path",
"=",
"row",
"[",
"1",
"]",
"try",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"IOError",
"(",
"\"Error reading file \"",
"+",
"file_path",
"+",
"\" to retrieve document \"",
"+",
"doc_url",
"+",
"\": \"",
"+",
"e",
".",
"message",
")"
]
| Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache | [
"Retrieve",
"the",
"content",
"for",
"the",
"given",
"document",
"from",
"the",
"cache",
"."
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L242-L268 | train |
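A sketch of the lookup-with-fallback pattern `get_document` supports, again assuming a connected `cache` and a placeholder `doc_url`; the ValueError branch is the documented cache-miss signal.

try:
    data = cache.get_document(doc_url)  # bytes read back from the cached file
except ValueError:
    data = None  # not cached; a caller would fetch from the server here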
Alveo/pyalveo | pyalveo/cache.py | Cache.get_primary_text | def get_primary_text(self, item_url):
""" Retrieve the primary text for the given item from the cache.
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: String
:returns: the primary text
:raises: ValueError if the primary text is not in the cache
"""
c = self.conn.cursor()
c.execute("SELECT * FROM primary_texts WHERE item_url=?",
(str(item_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError("Item not present in cache")
return row[1] | python | def get_primary_text(self, item_url):
""" Retrieve the primary text for the given item from the cache.
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: String
:returns: the primary text
:raises: ValueError if the primary text is not in the cache
"""
c = self.conn.cursor()
c.execute("SELECT * FROM primary_texts WHERE item_url=?",
(str(item_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError("Item not present in cache")
return row[1] | [
"def",
"get_primary_text",
"(",
"self",
",",
"item_url",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT * FROM primary_texts WHERE item_url=?\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"c",
".",
"close",
"(",
")",
"if",
"row",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Item not present in cache\"",
")",
"return",
"row",
"[",
"1",
"]"
]
| Retrieve the primary text for the given item from the cache.
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:rtype: String
:returns: the primary text
:raises: ValueError if the primary text is not in the cache | [
"Retrieve",
"the",
"primary",
"text",
"for",
"the",
"given",
"item",
"from",
"the",
"cache",
"."
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L271-L291 | train |
Alveo/pyalveo | pyalveo/cache.py | Cache.add_item | def add_item(self, item_url, item_metadata):
""" Add the given item to the cache database, updating
the existing metadata if the item is already present
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:type item_metadata: String
:param item_metadata: the item's metadata, as a JSON string
"""
c = self.conn.cursor()
c.execute("DELETE FROM items WHERE url=?", (str(item_url),))
self.conn.commit()
c.execute("INSERT INTO items VALUES (?, ?, ?)",
(str(item_url), item_metadata, self.__now_iso_8601()))
self.conn.commit()
c.close() | python | def add_item(self, item_url, item_metadata):
""" Add the given item to the cache database, updating
the existing metadata if the item is already present
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:type item_metadata: String
:param item_metadata: the item's metadata, as a JSON string
"""
c = self.conn.cursor()
c.execute("DELETE FROM items WHERE url=?", (str(item_url),))
self.conn.commit()
c.execute("INSERT INTO items VALUES (?, ?, ?)",
(str(item_url), item_metadata, self.__now_iso_8601()))
self.conn.commit()
c.close() | [
"def",
"add_item",
"(",
"self",
",",
"item_url",
",",
"item_metadata",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"DELETE FROM items WHERE url=?\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO items VALUES (?, ?, ?)\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
"item_metadata",
",",
"self",
".",
"__now_iso_8601",
"(",
")",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"close",
"(",
")"
]
| Add the given item to the cache database, updating
the existing metadata if the item is already present
:type item_url: String or Item
:param item_url: the URL of the item, or an Item object
:type item_metadata: String
:param item_metadata: the item's metadata, as a JSON string | [
"Add",
"the",
"given",
"item",
"to",
"the",
"cache",
"database",
"updating",
"the",
"existing",
"metadata",
"if",
"the",
"item",
"is",
"already",
"present"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L294-L311 | train |
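A usage sketch for `add_item`, with the same assumed `cache` and `item_url` placeholders; the method stores the metadata string verbatim, so it should already be serialized JSON.

import json

metadata = json.dumps({'dcterms:title': 'demo item'})  # illustrative metadata
cache.add_item(item_url, metadata)  # deletes any existing row, then inserts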
Alveo/pyalveo | pyalveo/cache.py | Cache.add_document | def add_document(self, doc_url, data):
""" Add the given document to the cache, updating
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data
"""
file_path = self.__generate_filepath()
with open(file_path, 'wb') as f:
f.write(data)
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
for row in c.fetchall():
old_file_path = row[1]
if os.path.isfile(old_file_path):
os.unlink(old_file_path)
c.execute("DELETE FROM documents WHERE url=?", (str(doc_url),))
self.conn.commit()
c.execute("INSERT INTO documents VALUES (?, ?, ?)",
(str(doc_url), file_path, self.__now_iso_8601()))
self.conn.commit()
c.close() | python | def add_document(self, doc_url, data):
""" Add the given document to the cache, updating
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data
"""
file_path = self.__generate_filepath()
with open(file_path, 'wb') as f:
f.write(data)
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
for row in c.fetchall():
old_file_path = row[1]
if os.path.isfile(old_file_path):
os.unlink(old_file_path)
c.execute("DELETE FROM documents WHERE url=?", (str(doc_url),))
self.conn.commit()
c.execute("INSERT INTO documents VALUES (?, ?, ?)",
(str(doc_url), file_path, self.__now_iso_8601()))
self.conn.commit()
c.close() | [
"def",
"add_document",
"(",
"self",
",",
"doc_url",
",",
"data",
")",
":",
"file_path",
"=",
"self",
".",
"__generate_filepath",
"(",
")",
"with",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"SELECT * FROM documents WHERE url=?\"",
",",
"(",
"str",
"(",
"doc_url",
")",
",",
")",
")",
"for",
"row",
"in",
"c",
".",
"fetchall",
"(",
")",
":",
"old_file_path",
"=",
"row",
"[",
"1",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"old_file_path",
")",
":",
"os",
".",
"unlink",
"(",
"old_file_path",
")",
"c",
".",
"execute",
"(",
"\"DELETE FROM documents WHERE url=?\"",
",",
"(",
"str",
"(",
"doc_url",
")",
",",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO documents VALUES (?, ?, ?)\"",
",",
"(",
"str",
"(",
"doc_url",
")",
",",
"file_path",
",",
"self",
".",
"__now_iso_8601",
"(",
")",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"close",
"(",
")"
]
| Add the given document to the cache, updating
the existing content data if the document is already present
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:type data: String
:param data: the document's content data | [
"Add",
"the",
"given",
"document",
"to",
"the",
"cache",
"updating",
"the",
"existing",
"content",
"data",
"if",
"the",
"document",
"is",
"already",
"present"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L329-L355 | train |
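A round-trip sketch for `add_document`, with the same placeholder names; the content is written to a generated file and the path recorded, so reading it back should return the identical bytes.

payload = b'raw document bytes'
cache.add_document(doc_url, payload)  # replaces any older cached copy and its file
assert cache.get_document(doc_url) == payload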
Alveo/pyalveo | pyalveo/cache.py | Cache.add_primary_text | def add_primary_text(self, item_url, primary_text):
""" Add the given primary text to the cache database, updating
the existing record if the primary text is already present
:type item_url: String or Item
:param item_url: the URL of the corresponding item, or an Item object
:type primary_text: String
:param primary_text: the item's primary text
"""
c = self.conn.cursor()
c.execute("DELETE FROM primary_texts WHERE item_url=?",
(str(item_url),))
self.conn.commit()
c.execute("INSERT INTO primary_texts VALUES (?, ?, ?)",
(str(item_url), primary_text, self.__now_iso_8601()))
self.conn.commit()
c.close() | python | def add_primary_text(self, item_url, primary_text):
""" Add the given primary text to the cache database, updating
the existing record if the primary text is already present
:type item_url: String or Item
:param item_url: the URL of the corresponding item, or an Item object
:type primary_text: String
:param primary_text: the item's primary text
"""
c = self.conn.cursor()
c.execute("DELETE FROM primary_texts WHERE item_url=?",
(str(item_url),))
self.conn.commit()
c.execute("INSERT INTO primary_texts VALUES (?, ?, ?)",
(str(item_url), primary_text, self.__now_iso_8601()))
self.conn.commit()
c.close() | [
"def",
"add_primary_text",
"(",
"self",
",",
"item_url",
",",
"primary_text",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"DELETE FROM primary_texts WHERE item_url=?\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"execute",
"(",
"\"INSERT INTO primary_texts VALUES (?, ?, ?)\"",
",",
"(",
"str",
"(",
"item_url",
")",
",",
"primary_text",
",",
"self",
".",
"__now_iso_8601",
"(",
")",
")",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")",
"c",
".",
"close",
"(",
")"
]
| Add the given primary text to the cache database, updating
the existing record if the primary text is already present
:type item_url: String or Item
:param item_url: the URL of the corresponding item, or an Item object
:type primary_text: String
:param primary_text: the item's primary text | [
"Add",
"the",
"given",
"primary",
"text",
"to",
"the",
"cache",
"database",
"updating",
"the",
"existing",
"record",
"if",
"the",
"primary",
"text",
"is",
"already",
"present"
]
| 1e9eec22bc031bc9a08066f9966565a546e6242e | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/cache.py#L357-L375 | train |
cgrok/cr-async | examples/crcog.py | ClashCog.profile | async def profile(self, ctx, tag):
'''Example command for use inside a discord bot cog.'''
if not self.check_valid_tag(tag):
return await ctx.send('Invalid tag!')
profile = await self.cr.get_profile(tag)
em = discord.Embed(color=0x00FFFFF)
em.set_author(name=str(profile), icon_url=profile.clan_badge_url)
em.set_thumbnail(url=profile.arena.badge_url)
# Example of adding data. (Bad)
for attr in self.cdir(profile):
value = getattr(profile, attr)
if not callable(value):
em.add_field(
name=attr.replace('_', ' ').title(),
value=str(value)
)
await ctx.send(embed=em) | python | async def profile(self, ctx, tag):
'''Example command for use inside a discord bot cog.'''
if not self.check_valid_tag(tag):
return await ctx.send('Invalid tag!')
profile = await self.cr.get_profile(tag)
em = discord.Embed(color=0x00FFFFF)
em.set_author(name=str(profile), icon_url=profile.clan_badge_url)
em.set_thumbnail(url=profile.arena.badge_url)
# Example of adding data. (Bad)
for attr in self.cdir(profile):
value = getattr(profile, attr)
if not callable(value):
em.add_field(
name=attr.replace('_', ' ').title(),
value=str(value)
)
await ctx.send(embed=em) | [
"async",
"def",
"profile",
"(",
"self",
",",
"ctx",
",",
"tag",
")",
":",
"if",
"not",
"self",
".",
"check_valid_tag",
"(",
"tag",
")",
":",
"return",
"await",
"ctx",
".",
"send",
"(",
"'Invalid tag!'",
")",
"profile",
"=",
"await",
"self",
".",
"cr",
".",
"get_profile",
"(",
"tag",
")",
"em",
"=",
"discord",
".",
"Embed",
"(",
"color",
"=",
"0x00FFFFF",
")",
"em",
".",
"set_author",
"(",
"name",
"=",
"str",
"(",
"profile",
")",
",",
"icon_url",
"=",
"profile",
".",
"clan_badge_url",
")",
"em",
".",
"set_thumbnail",
"(",
"url",
"=",
"profile",
".",
"arena",
".",
"badge_url",
")",
"# Example of adding data. (Bad)",
"for",
"attr",
"in",
"self",
".",
"cdir",
"(",
"profile",
")",
":",
"value",
"=",
"getattr",
"(",
"profile",
",",
"attr",
")",
"if",
"not",
"callable",
"(",
"value",
")",
":",
"em",
".",
"add_field",
"(",
"name",
"=",
"attr",
".",
"replace",
"(",
"'_'",
")",
".",
"title",
"(",
")",
",",
"value",
"=",
"str",
"(",
"value",
")",
")",
"await",
"ctx",
".",
"send",
"(",
"embed",
"=",
"em",
")"
]
| Example command for use inside a discord bot cog. | [
"Example",
"command",
"for",
"use",
"inside",
"a",
"discord",
"bot",
"cog",
"."
]
| f65a968e54704168706d137d1ba662f55f8ab852 | https://github.com/cgrok/cr-async/blob/f65a968e54704168706d137d1ba662f55f8ab852/examples/crcog.py#L22-L42 | train |
TheGhouls/oct | oct/results/stats_handler.py | StatsHandler.write_remaining | def write_remaining(self):
"""Write the remaning stack content
"""
if not self.results:
return
with db.execution_context():
with db.atomic():
Result.insert_many(self.results).execute()
del self.results[:] | python | def write_remaining(self):
"""Write the remaning stack content
"""
if not self.results:
return
with db.execution_context():
with db.atomic():
Result.insert_many(self.results).execute()
del self.results[:] | [
"def",
"write_remaining",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"results",
":",
"return",
"with",
"db",
".",
"execution_context",
"(",
")",
":",
"with",
"db",
".",
"atomic",
"(",
")",
":",
"Result",
".",
"insert_many",
"(",
"self",
".",
"results",
")",
".",
"execute",
"(",
")",
"del",
"self",
".",
"results",
"[",
":",
"]"
]
| Write the remaining stack content | [
"Write",
"the",
"remaning",
"stack",
"content"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/stats_handler.py#L50-L58 | train |
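A sketch of the flush pattern `write_remaining` completes, assuming a `StatsHandler` instance named `handler` whose `results` list is filled elsewhere; the dict keys below are illustrative stand-ins for whatever columns the Result model defines.

handler.results.append({'turret_name': 'demo', 'scriptrun_time': 0.42})  # hypothetical fields
handler.results.append({'turret_name': 'demo', 'scriptrun_time': 0.38})
handler.write_remaining()  # bulk-inserts both rows, then clears the stack
handler.write_remaining()  # no-op: the early return fires on the empty list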
TheGhouls/oct | oct/utilities/configuration.py | configure | def configure(project_path, config_file=None):
"""Get the configuration of the test and return it as a config object
:return: the configured config object
:rtype: Object
"""
if config_file is None:
config_file = os.path.join(project_path, 'config.json')
try:
with open(config_file, 'r') as f:
config = json.load(f)
except ValueError as e:
raise OctConfigurationError("Configuration setting failed with error: %s" % e)
for key in REQUIRED_CONFIG_KEYS:
if key not in config:
raise OctConfigurationError("Error: the required configuration key %s is not define" % key)
return config | python | def configure(project_path, config_file=None):
"""Get the configuration of the test and return it as a config object
:return: the configured config object
:rtype: Object
"""
if config_file is None:
config_file = os.path.join(project_path, 'config.json')
try:
with open(config_file, 'r') as f:
config = json.load(f)
except ValueError as e:
raise OctConfigurationError("Configuration setting failed with error: %s" % e)
for key in REQUIRED_CONFIG_KEYS:
if key not in config:
raise OctConfigurationError("Error: the required configuration key %s is not define" % key)
return config | [
"def",
"configure",
"(",
"project_path",
",",
"config_file",
"=",
"None",
")",
":",
"if",
"config_file",
"is",
"None",
":",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'config.json'",
")",
"try",
":",
"with",
"open",
"(",
"config_file",
",",
"'r'",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"OctConfigurationError",
"(",
"\"Configuration setting failed with error: %s\"",
"%",
"e",
")",
"for",
"key",
"in",
"REQUIRED_CONFIG_KEYS",
":",
"if",
"key",
"not",
"in",
"config",
":",
"raise",
"OctConfigurationError",
"(",
"\"Error: the required configuration key %s is not define\"",
"%",
"key",
")",
"return",
"config"
]
| Get the configuration of the test and return it as a config object
:return: the configured config object
:rtype: Object | [
"Get",
"the",
"configuration",
"of",
"the",
"test",
"and",
"return",
"it",
"as",
"a",
"config",
"object"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/configuration.py#L27-L43 | train |
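A usage sketch for `configure`, writing a throwaway config.json first; the keys shown are illustrative, since the real REQUIRED_CONFIG_KEYS list is defined elsewhere in the module and a genuine project may need more.

import json
import os

project = '/tmp/demo_project'
os.makedirs(project, exist_ok=True)
with open(os.path.join(project, 'config.json'), 'w') as f:
    json.dump({'run_time': 60, 'turrets': []}, f)  # hypothetical keys

config = configure(project)  # reads <project>/config.json by default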
TheGhouls/oct | oct/utilities/configuration.py | configure_for_turret | def configure_for_turret(project_name, config_file):
"""Load the configuration file in python dict and check for keys that will be set to default value if not present
:param str project_name: the name of the project
:param str config_file: the path of the configuration file
:return: the loaded configuration
:rtype: dict
"""
config = configure(project_name, config_file)
for key in WARNING_CONFIG_KEYS:
if key not in config:
print("WARNING: %s configuration key not present, the value will be set to default value" % key)
common_config = {
'hq_address': config.get('hq_address', '127.0.0.1'),
'hq_publisher': config.get('publish_port', 5000),
'hq_rc': config.get('rc_port', 5001),
'turrets_requirements': config.get('turrets_requirements', [])
}
configs = []
for turret in config['turrets']:
if isinstance(turret, six.string_types):
turret = load_turret_config(project_name, turret)
turret.update(common_config)
turret.update(config.get('extra_turret_config', {}))
configs.append(turret)
return configs | python | def configure_for_turret(project_name, config_file):
"""Load the configuration file in python dict and check for keys that will be set to default value if not present
:param str project_name: the name of the project
:param str config_file: the path of the configuration file
:return: the loaded configuration
:rtype: dict
"""
config = configure(project_name, config_file)
for key in WARNING_CONFIG_KEYS:
if key not in config:
print("WARNING: %s configuration key not present, the value will be set to default value" % key)
common_config = {
'hq_address': config.get('hq_address', '127.0.0.1'),
'hq_publisher': config.get('publish_port', 5000),
'hq_rc': config.get('rc_port', 5001),
'turrets_requirements': config.get('turrets_requirements', [])
}
configs = []
for turret in config['turrets']:
if isinstance(turret, six.string_types):
turret = load_turret_config(project_name, turret)
turret.update(common_config)
turret.update(config.get('extra_turret_config', {}))
configs.append(turret)
return configs | [
"def",
"configure_for_turret",
"(",
"project_name",
",",
"config_file",
")",
":",
"config",
"=",
"configure",
"(",
"project_name",
",",
"config_file",
")",
"for",
"key",
"in",
"WARNING_CONFIG_KEYS",
":",
"if",
"key",
"not",
"in",
"config",
":",
"print",
"(",
"\"WARNING: %s configuration key not present, the value will be set to default value\"",
"%",
"key",
")",
"common_config",
"=",
"{",
"'hq_address'",
":",
"config",
".",
"get",
"(",
"'hq_address'",
",",
"'127.0.0.1'",
")",
",",
"'hq_publisher'",
":",
"config",
".",
"get",
"(",
"'publish_port'",
",",
"5000",
")",
",",
"'hq_rc'",
":",
"config",
".",
"get",
"(",
"'rc_port'",
",",
"5001",
")",
",",
"'turrets_requirements'",
":",
"config",
".",
"get",
"(",
"'turrets_requirements'",
",",
"[",
"]",
")",
"}",
"configs",
"=",
"[",
"]",
"for",
"turret",
"in",
"config",
"[",
"'turrets'",
"]",
":",
"if",
"isinstance",
"(",
"turret",
",",
"six",
".",
"string_types",
")",
":",
"turret",
"=",
"load_turret_config",
"(",
"project_name",
",",
"turret",
")",
"turret",
".",
"update",
"(",
"common_config",
")",
"turret",
".",
"update",
"(",
"config",
".",
"get",
"(",
"'extra_turret_config'",
",",
"{",
"}",
")",
")",
"configs",
".",
"append",
"(",
"turret",
")",
"return",
"configs"
]
| Load the configuration file into a python dict and check for keys that will be set to default values if not present
:param str project_name: the name of the project
:param str config_file: the path of the configuration file
:return: the loaded configuration
:rtype: dict | [
"Load",
"the",
"configuration",
"file",
"in",
"python",
"dict",
"and",
"check",
"for",
"keys",
"that",
"will",
"be",
"set",
"to",
"default",
"value",
"if",
"not",
"present"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/configuration.py#L56-L81 | train |
TheGhouls/oct | oct/utilities/configuration.py | get_db_uri | def get_db_uri(config, output_dir):
"""Process results_database parameters in config to format them for
set database function
:param dict config: project configuration dict
:param str output_dir: output directory for results
:return: string for db uri
"""
db_config = config.get("results_database", {"db_uri": "default"})
if db_config['db_uri'] == 'default':
return os.path.join(output_dir, "results.sqlite")
return db_config['db_uri'] | python | def get_db_uri(config, output_dir):
"""Process results_database parameters in config to format them for
set database function
:param dict config: project configuration dict
:param str output_dir: output directory for results
:return: string for db uri
"""
db_config = config.get("results_database", {"db_uri": "default"})
if db_config['db_uri'] == 'default':
return os.path.join(output_dir, "results.sqlite")
return db_config['db_uri'] | [
"def",
"get_db_uri",
"(",
"config",
",",
"output_dir",
")",
":",
"db_config",
"=",
"config",
".",
"get",
"(",
"\"results_database\"",
",",
"{",
"\"db_uri\"",
":",
"\"default\"",
"}",
")",
"if",
"db_config",
"[",
"'db_uri'",
"]",
"==",
"'default'",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"\"results.sqlite\"",
")",
"return",
"db_config",
"[",
"'db_uri'",
"]"
]
| Process results_database parameters in config to format them for
set database function
:param dict config: project configuration dict
:param str output_dir: output directory for results
:return: string for db uri | [
"Process",
"results_database",
"parameters",
"in",
"config",
"to",
"format",
"them",
"for",
"set",
"database",
"function"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/utilities/configuration.py#L98-L109 | train |
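The branch in `get_db_uri` is easy to exercise directly; all three calls below use throwaway values.

out = '/tmp/results'
print(get_db_uri({}, out))  # '/tmp/results/results.sqlite'
print(get_db_uri({'results_database': {'db_uri': 'default'}}, out))  # same default path
print(get_db_uri({'results_database': {'db_uri': 'sqlite:///x.db'}}, out))  # 'sqlite:///x.db'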
ethan92429/onshapepy | onshapepy/document.py | Document.update | def update(self):
"""All client calls to update this instance with Onshape."""
self.json = c.get_document(self.uri.did).json()
self.e_list = c.element_list(self.uri.as_dict()).json() | python | def update(self):
"""All client calls to update this instance with Onshape."""
self.json = c.get_document(self.uri.did).json()
self.e_list = c.element_list(self.uri.as_dict()).json() | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"json",
"=",
"c",
".",
"get_document",
"(",
"self",
".",
"uri",
".",
"did",
")",
".",
"json",
"(",
")",
"self",
".",
"e_list",
"=",
"c",
".",
"element_list",
"(",
"self",
".",
"uri",
".",
"as_dict",
"(",
")",
")",
".",
"json",
"(",
")"
]
| All client calls to update this instance with Onshape. | [
"All",
"client",
"calls",
"to",
"update",
"this",
"instance",
"with",
"Onshape",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/document.py#L54-L57 | train |
ethan92429/onshapepy | onshapepy/document.py | Document.find_element | def find_element(self, name, type=ElementType.ANY):
"""Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element
"""
for e in self.e_list:
# if a type is specified and this isn't it, move to the next loop.
if type.value and not e['elementType'] == type:
continue
if e["name"] == name:
uri = self.uri
uri.eid = e["id"]
return uri | python | def find_element(self, name, type=ElementType.ANY):
"""Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element
"""
for e in self.e_list:
# if a type is specified and this isn't it, move to the next loop.
if type.value and not e['elementType'] == type:
continue
if e["name"] == name:
uri = self.uri
uri.eid = e["id"]
return uri | [
"def",
"find_element",
"(",
"self",
",",
"name",
",",
"type",
"=",
"ElementType",
".",
"ANY",
")",
":",
"for",
"e",
"in",
"self",
".",
"e_list",
":",
"# if a type is specified and this isn't it, move to the next loop.",
"if",
"type",
".",
"value",
"and",
"not",
"e",
"[",
"'elementType'",
"]",
"==",
"type",
":",
"continue",
"if",
"e",
"[",
"\"name\"",
"]",
"==",
"name",
":",
"uri",
"=",
"self",
".",
"uri",
"uri",
".",
"eid",
"=",
"e",
"[",
"\"id\"",
"]",
"return",
"uri"
]
| Find an element in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element | [
"Find",
"an",
"elemnent",
"in",
"the",
"document",
"with",
"the",
"given",
"name",
"-",
"could",
"be",
"a",
"PartStudio",
"Assembly",
"or",
"blob",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/document.py#L63-L81 | train |
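A usage sketch for `find_element`, assuming an already-constructed `Document` named `doc`; the element name is a placeholder, and the default `ElementType.ANY` matches every element kind.

uri = doc.find_element('Part Studio 1')  # returns None if nothing matches
if uri is not None:
    print(uri.eid)  # element id filled in by the lookup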
DsixTools/python-smeftrunner | smeftrunner/beta.py | beta_array | def beta_array(C, HIGHSCALE, *args, **kwargs):
"""Return the beta functions of all SM parameters and SMEFT Wilson
coefficients as a 1D numpy array."""
beta_odict = beta(C, HIGHSCALE, *args, **kwargs)
return np.hstack([np.asarray(b).ravel() for b in beta_odict.values()]) | python | def beta_array(C, HIGHSCALE, *args, **kwargs):
"""Return the beta functions of all SM parameters and SMEFT Wilson
coefficients as a 1D numpy array."""
beta_odict = beta(C, HIGHSCALE, *args, **kwargs)
return np.hstack([np.asarray(b).ravel() for b in beta_odict.values()]) | [
"def",
"beta_array",
"(",
"C",
",",
"HIGHSCALE",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"beta_odict",
"=",
"beta",
"(",
"C",
",",
"HIGHSCALE",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"hstack",
"(",
"[",
"np",
".",
"asarray",
"(",
"b",
")",
".",
"ravel",
"(",
")",
"for",
"b",
"in",
"beta_odict",
".",
"values",
"(",
")",
"]",
")"
]
| Return the beta functions of all SM parameters and SMEFT Wilson
coefficients as a 1D numpy array. | [
"Return",
"the",
"beta",
"functions",
"of",
"all",
"SM",
"parameters",
"and",
"SMEFT",
"Wilson",
"coefficients",
"as",
"a",
"1D",
"numpy",
"array",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/beta.py#L1823-L1827 | train |
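The flattening idiom in `beta_array` can be checked in isolation; the OrderedDict below is a stand-in for what `beta(...)` returns, not its real content.

import numpy as np
from collections import OrderedDict

beta_odict = OrderedDict([('g', 0.65), ('Gu', np.zeros((3, 3)))])  # toy values
flat = np.hstack([np.asarray(b).ravel() for b in beta_odict.values()])
print(flat.shape)  # (10,) -- one scalar plus nine matrix entries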
peergradeio/flask-mongo-profiler | flask_mongo_profiler/contrib/flask_admin/views/base.py | RelationalSearchMixin._search | def _search(self, query, search_term):
"""
Improved search between words.
The original _search for MongoEngine dates back to November 12th, 2013 [1]_.
In this ref it's stated that there is a bug with complex Q queries preventing
multi-word searches. During this time, the MongoEngine version was earlier than
0.4 (predating PyPI) [2]_. Since then, there have been multiple releases [3]_
which appear to have fixed the query issue.
Treats id (_id) implicitly as a member of column_searchable_list, except it's
not computed in an OR/AND, a direct lookup is checked for.
References
----------
.. [1] Search for MongoEngine. 02b936b. November 23, 2013.
https://git.io/fxf8C. Accessed September, 29th, 2018.
.. [2] MongoEngine releases on PyPI.
https://pypi.org/project/mongoengine/#history. Accessed September 29th, 2018.
.. [3] MongoEngine release notes. http://docs.mongoengine.org/changelog.html.
Accessed September 29th, 2018.
"""
criterias = mongoengine.Q()
rel_criterias = mongoengine.Q()
terms = shlex.split(search_term)
# If an ObjectId pattern, see if we can get an instant lookup.
if len(terms) == 1 and re.match(RE_OBJECTID, terms[0]):
q = query.filter(id=bson.ObjectId(terms[0]))
if q.count() == 1: # Note: .get doesn't work, they need a QuerySet
return q
for term in terms:
op, term = parse_like_term(term)
# Case insensitive by default
if op == 'contains':
op = 'icontains'
criteria = mongoengine.Q()
for field in self._search_fields:
if isinstance(field, mongoengine.fields.ReferenceField):
rel_model = field.document_type
rel_fields = (
getattr(self, 'column_searchable_refs', {})
.get(field.name, {})
.get('fields', ['id'])
)
# If term isn't an ID, don't do an ID lookup
if rel_fields == ['id'] and not re.match(RE_OBJECTID, term):
continue
ids = [
o.id for o in search_relative_field(rel_model, rel_fields, term)
]
rel_criterias |= mongoengine.Q(**{'%s__in' % field.name: ids})
elif isinstance(field, mongoengine.fields.ListField):
if not isinstance(field.field, mongoengine.fields.ReferenceField):
continue # todo: support lists of other types
rel_model = field.field.document_type_obj
rel_fields = (
getattr(self, 'column_searchable_refs', {})
.get(field.name, {})
.get('fields', 'id')
)
ids = [
o.id for o in search_relative_field(rel_model, rel_fields, term)
]
rel_criterias |= mongoengine.Q(**{'%s__in' % field.name: ids})
else:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
criteria |= q
criterias &= criteria
# import pprint
# pp = pprint.PrettyPrinter(indent=4).pprint
# print(pp(query.filter(criterias)._query))
return query.filter(criterias | rel_criterias) | python | def _search(self, query, search_term):
"""
Improved search between words.
The original _search for MongoEngine dates back to November 12th, 2013 [1]_.
In this ref it's stated that there is a bug with complex Q queries preventing
multi-word searches. During this time, the MongoEngine version was earlier than
0.4 (predating PyPI) [2]_. Since then, there have been multiple releases [3]_
which appear to have fixed the query issue.
Treats id (_id) implicitly as a member of column_searchable_list, except it's
not computed in an OR/AND, a direct lookup is checked for.
References
----------
.. [1] Search for MongoEngine. 02b936b. November 23, 2013.
https://git.io/fxf8C. Accessed September, 29th, 2018.
.. [2] MongoEngine releases on PyPI.
https://pypi.org/project/mongoengine/#history. Accessed September 29th, 2018.
.. [3] MongoEngine release notes. http://docs.mongoengine.org/changelog.html.
Accessed September 29th, 2018.
"""
criterias = mongoengine.Q()
rel_criterias = mongoengine.Q()
terms = shlex.split(search_term)
# If an ObjectId pattern, see if we can get an instant lookup.
if len(terms) == 1 and re.match(RE_OBJECTID, terms[0]):
q = query.filter(id=bson.ObjectId(terms[0]))
if q.count() == 1: # Note: .get doesn't work, they need a QuerySet
return q
for term in terms:
op, term = parse_like_term(term)
# Case insensitive by default
if op == 'contains':
op = 'icontains'
criteria = mongoengine.Q()
for field in self._search_fields:
if isinstance(field, mongoengine.fields.ReferenceField):
rel_model = field.document_type
rel_fields = (
getattr(self, 'column_searchable_refs', {})
.get(field.name, {})
.get('fields', ['id'])
)
# If term isn't an ID, don't do an ID lookup
if rel_fields == ['id'] and not re.match(RE_OBJECTID, term):
continue
ids = [
o.id for o in search_relative_field(rel_model, rel_fields, term)
]
rel_criterias |= mongoengine.Q(**{'%s__in' % field.name: ids})
elif isinstance(field, mongoengine.fields.ListField):
if not isinstance(field.field, mongoengine.fields.ReferenceField):
continue # todo: support lists of other types
rel_model = field.field.document_type_obj
rel_fields = (
getattr(self, 'column_searchable_refs', {})
.get(field.name, {})
.get('fields', 'id')
)
ids = [
o.id for o in search_relative_field(rel_model, rel_fields, term)
]
rel_criterias |= mongoengine.Q(**{'%s__in' % field.name: ids})
else:
flt = {'%s__%s' % (field.name, op): term}
q = mongoengine.Q(**flt)
criteria |= q
criterias &= criteria
# import pprint
# pp = pprint.PrettyPrinter(indent=4).pprint
# print(pp(query.filter(criterias)._query))
return query.filter(criterias | rel_criterias) | [
"def",
"_search",
"(",
"self",
",",
"query",
",",
"search_term",
")",
":",
"criterias",
"=",
"mongoengine",
".",
"Q",
"(",
")",
"rel_criterias",
"=",
"mongoengine",
".",
"Q",
"(",
")",
"terms",
"=",
"shlex",
".",
"split",
"(",
"search_term",
")",
"# If an ObjectId pattern, see if we can get an instant lookup.",
"if",
"len",
"(",
"terms",
")",
"==",
"1",
"and",
"re",
".",
"match",
"(",
"RE_OBJECTID",
",",
"terms",
"[",
"0",
"]",
")",
":",
"q",
"=",
"query",
".",
"filter",
"(",
"id",
"=",
"bson",
".",
"ObjectId",
"(",
"terms",
"[",
"0",
"]",
")",
")",
"if",
"q",
".",
"count",
"(",
")",
"==",
"1",
":",
"# Note: .get doesn't work, they need a QuerySet",
"return",
"q",
"for",
"term",
"in",
"terms",
":",
"op",
",",
"term",
"=",
"parse_like_term",
"(",
"term",
")",
"# Case insensitive by default",
"if",
"op",
"==",
"'contains'",
":",
"op",
"=",
"'icontains'",
"criteria",
"=",
"mongoengine",
".",
"Q",
"(",
")",
"for",
"field",
"in",
"self",
".",
"_search_fields",
":",
"if",
"isinstance",
"(",
"field",
",",
"mongoengine",
".",
"fields",
".",
"ReferenceField",
")",
":",
"rel_model",
"=",
"field",
".",
"document_type",
"rel_fields",
"=",
"(",
"getattr",
"(",
"self",
",",
"'column_searchable_refs'",
",",
"{",
"}",
")",
".",
"get",
"(",
"field",
".",
"name",
",",
"{",
"}",
")",
".",
"get",
"(",
"'fields'",
",",
"[",
"'id'",
"]",
")",
")",
"# If term isn't an ID, don't do an ID lookup",
"if",
"rel_fields",
"==",
"[",
"'id'",
"]",
"and",
"not",
"re",
".",
"match",
"(",
"RE_OBJECTID",
",",
"term",
")",
":",
"continue",
"ids",
"=",
"[",
"o",
".",
"id",
"for",
"o",
"in",
"search_relative_field",
"(",
"rel_model",
",",
"rel_fields",
",",
"term",
")",
"]",
"rel_criterias",
"|=",
"mongoengine",
".",
"Q",
"(",
"*",
"*",
"{",
"'%s__in'",
"%",
"field",
".",
"name",
":",
"ids",
"}",
")",
"elif",
"isinstance",
"(",
"field",
",",
"mongoengine",
".",
"fields",
".",
"ListField",
")",
":",
"if",
"not",
"isinstance",
"(",
"field",
".",
"field",
",",
"mongoengine",
".",
"fields",
".",
"ReferenceField",
")",
":",
"continue",
"# todo: support lists of other types",
"rel_model",
"=",
"field",
".",
"field",
".",
"document_type_obj",
"rel_fields",
"=",
"(",
"getattr",
"(",
"self",
",",
"'column_searchable_refs'",
",",
"{",
"}",
")",
".",
"get",
"(",
"field",
".",
"name",
",",
"{",
"}",
")",
".",
"get",
"(",
"'fields'",
",",
"'id'",
")",
")",
"ids",
"=",
"[",
"o",
".",
"id",
"for",
"o",
"in",
"search_relative_field",
"(",
"rel_model",
",",
"rel_fields",
",",
"term",
")",
"]",
"rel_criterias",
"|=",
"mongoengine",
".",
"Q",
"(",
"*",
"*",
"{",
"'%s__in'",
"%",
"field",
".",
"name",
":",
"ids",
"}",
")",
"else",
":",
"flt",
"=",
"{",
"'%s__%s'",
"%",
"(",
"field",
".",
"name",
",",
"op",
")",
":",
"term",
"}",
"q",
"=",
"mongoengine",
".",
"Q",
"(",
"*",
"*",
"flt",
")",
"criteria",
"|=",
"q",
"criterias",
"&=",
"criteria",
"# import pprint",
"# pp = pprint.PrettyPrinter(indent=4).pprint",
"# print(pp(query.filter(criterias)._query))",
"return",
"query",
".",
"filter",
"(",
"criterias",
"|",
"rel_criterias",
")"
]
| Improved search between words.
The original _search for MongoEngine dates back to November 12th, 2013 [1]_.
In this ref it's stated that there is a bug with complex Q queries preventing
multi-word searches. During this time, the MongoEngine version was earlier than
0.4 (predating PyPI) [2]_. Since then, there have been multiple releases [3]_
which appear to have fixed the query issue.
Treats id (_id) implicitly as a member of column_searchable_list, except it's
not computed in an OR/AND, a direct lookup is checked for.
References
----------
.. [1] Search for MongoEngine. 02b936b. November 23, 2013.
https://git.io/fxf8C. Accessed September, 29th, 2018.
.. [2] MongoEngine releases on PyPI.
https://pypi.org/project/mongoengine/#history. Accessed September 29th, 2018.
.. [3] MongoEngine release notes. http://docs.mongoengine.org/changelog.html.
Accessed September 29th, 2018. | [
"Improved",
"search",
"between",
"words",
"."
]
| a267eeb49fea07c9a24fb370bd9d7a90ed313ccf | https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/views/base.py#L114-L195 | train |
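The AND-of-ORs shape of the query above can be illustrated on its own: every whitespace-separated term must match at least one searchable field, and shlex keeps quoted phrases together. A toy sketch, assuming a MongoEngine document class with `title` and `body` string fields.

import shlex

import mongoengine

def toy_search(queryset, search_term, fields=('title', 'body')):
    criterias = mongoengine.Q()
    for term in shlex.split(search_term):  # 'foo "bar baz"' -> ['foo', 'bar baz']
        criteria = mongoengine.Q()
        for name in fields:
            criteria |= mongoengine.Q(**{'%s__icontains' % name: term})  # OR over fields
        criterias &= criteria  # AND over terms
    return queryset.filter(criterias)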
DavidDoukhan/py_sonicvisualiser | py_sonicvisualiser/SVDataset.py | SVDataset2D.set_data_from_iterable | def set_data_from_iterable(self, frames, values, labels=None):
"""
Initialize a dataset structure from iterable parameters
:param frames: The temporal indices of the dataset
:param values: The values of the dataset
:param labels: Optional labels for the dataset points
:type frames: iterable
:type values: iterable
:type labels: iterable or None
"""
if not isinstance(frames, collections.Iterable):
raise TypeError, "frames must be an iterable"
if not isinstance(values, collections.Iterable):
raise TypeError, "values must be an iterable"
assert(len(frames) == len(values))
self.frames = frames
self.values = values
if labels is None:
self.label2int['New Point'] = 0
self.int2label[0] = 'New Point'
self.labels = [0 for i in xrange(len(frames))]
else:
if not isinstance(labels, collections.Iterable):
raise TypeError, "labels must be an iterable"
for l in labels:
if l not in self.label2int:
self.label2int[l] = len(self.label2int)
self.int2label[len(self.int2label)] = l
self.labels.append(self.label2int[l]) | python | def set_data_from_iterable(self, frames, values, labels=None):
"""
Initialize a dataset structure from iterable parameters
:param frames: The temporal indices of the dataset
:param values: The values of the dataset
:param labels: Optional labels for the dataset points
:type frames: iterable
:type values: iterable
:type labels: iterable or None
"""
if not isinstance(frames, collections.Iterable):
raise TypeError, "frames must be an iterable"
if not isinstance(values, collections.Iterable):
raise TypeError, "values must be an iterable"
assert(len(frames) == len(values))
self.frames = frames
self.values = values
if labels is None:
self.label2int['New Point'] = 0
self.int2label[0] = 'New Point'
self.labels = [0 for i in xrange(len(frames))]
else:
if not isinstance(labels, collections.Iterable):
raise TypeError, "labels must be an iterable"
for l in labels:
if l not in self.label2int:
self.label2int[l] = len(self.label2int)
self.int2label[len(self.int2label)] = l
self.labels.append(self.label2int[l]) | [
"def",
"set_data_from_iterable",
"(",
"self",
",",
"frames",
",",
"values",
",",
"labels",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"frames",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
",",
"\"frames must be an iterable\"",
"if",
"not",
"isinstance",
"(",
"values",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
",",
"\"values must be an iterable\"",
"assert",
"(",
"len",
"(",
"frames",
")",
"==",
"len",
"(",
"values",
")",
")",
"self",
".",
"frames",
"=",
"frames",
"self",
".",
"values",
"=",
"values",
"if",
"labels",
"is",
"None",
":",
"self",
".",
"label2int",
"[",
"'New Point'",
"]",
"=",
"0",
"self",
".",
"int2label",
"[",
"0",
"]",
"=",
"'New Point'",
"self",
".",
"labels",
"=",
"[",
"0",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"frames",
")",
")",
"]",
"else",
":",
"if",
"not",
"isinstance",
"(",
"labels",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
",",
"\"labels must be an iterable\"",
"for",
"l",
"in",
"labels",
":",
"if",
"l",
"not",
"in",
"self",
".",
"label2int",
":",
"self",
".",
"label2int",
"[",
"l",
"]",
"=",
"len",
"(",
"self",
".",
"label2int",
")",
"self",
".",
"int2label",
"[",
"len",
"(",
"self",
".",
"int2label",
")",
"]",
"=",
"l",
"self",
".",
"labels",
".",
"append",
"(",
"self",
".",
"label2int",
"[",
"l",
"]",
")"
]
| Initialize a dataset structure from iterable parameters
:param frames: The temporal indices of the dataset
:param values: The values of the dataset
:param labels: Optional labels for the dataset points
:type frames: iterable
:type values: iterable
:type labels: iterable or None | [
"Initialize",
"a",
"dataset",
"structure",
"from",
"iterable",
"parameters"
]
| ebe83bd7dffb0275393255dcbcc6671cf0ade4a5 | https://github.com/DavidDoukhan/py_sonicvisualiser/blob/ebe83bd7dffb0275393255dcbcc6671cf0ade4a5/py_sonicvisualiser/SVDataset.py#L50-L77 | train |
DavidDoukhan/py_sonicvisualiser | py_sonicvisualiser/SVDataset.py | SVDataset2D.writexml | def writexml(self, writer, indent="", addindent="", newl=""):
"""
Write the continuous dataset using sonic visualiser xml conventions
"""
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '2')
writer.write('%s<dataset id="%s" dimensions="%s">%s' % (indent, self.datasetid, self.dimensions, newl))
indent2 = indent + addindent
for l, x, y in zip(self.labels, self.frames, self.values):
writer.write('%s<point label="%s" frame="%d" value="%f"/>%s' % (indent2, self.int2label[l], x, y, newl))
writer.write('%s</dataset>%s' % (indent, newl)) | python | def writexml(self, writer, indent="", addindent="", newl=""):
"""
Write the continuous dataset using sonic visualiser xml conventions
"""
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '2')
writer.write('%s<dataset id="%s" dimensions="%s">%s' % (indent, self.datasetid, self.dimensions, newl))
indent2 = indent + addindent
for l, x, y in zip(self.labels, self.frames, self.values):
writer.write('%s<point label="%s" frame="%d" value="%f"/>%s' % (indent2, self.int2label[l], x, y, newl))
writer.write('%s</dataset>%s' % (indent, newl)) | [
"def",
"writexml",
"(",
"self",
",",
"writer",
",",
"indent",
"=",
"\"\"",
",",
"addindent",
"=",
"\"\"",
",",
"newl",
"=",
"\"\"",
")",
":",
"# dataset = self.data.appendChild(self.doc.createElement('dataset'))",
"# dataset.setAttribute('id', str(imodel))",
"# dataset.setAttribute('dimensions', '2')",
"writer",
".",
"write",
"(",
"'%s<dataset id=\"%s\" dimensions=\"%s\">%s'",
"%",
"(",
"indent",
",",
"self",
".",
"datasetid",
",",
"self",
".",
"dimensions",
",",
"newl",
")",
")",
"indent2",
"=",
"indent",
"+",
"addindent",
"for",
"l",
",",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"labels",
",",
"self",
".",
"frames",
",",
"self",
".",
"values",
")",
":",
"writer",
".",
"write",
"(",
"'%s<point label=\"%s\" frame=\"%d\" value=\"%f\"/>%s'",
"%",
"(",
"indent2",
",",
"self",
".",
"int2label",
"[",
"l",
"]",
",",
"x",
",",
"y",
",",
"newl",
")",
")",
"writer",
".",
"write",
"(",
"'%s</dataset>%s'",
"%",
"(",
"indent",
",",
"newl",
")",
")"
]
| Write the continuous dataset using sonic visualiser xml conventions | [
"Write",
"the",
"continuous",
"dataset",
"using",
"sonic",
"visualiser",
"xml",
"conventions"
]
| ebe83bd7dffb0275393255dcbcc6671cf0ade4a5 | https://github.com/DavidDoukhan/py_sonicvisualiser/blob/ebe83bd7dffb0275393255dcbcc6671cf0ade4a5/py_sonicvisualiser/SVDataset.py#L89-L100 | train |
assamite/creamas | creamas/math.py | gaus_pdf | def gaus_pdf(x, mean, std):
'''Gaussian distribution's probability density function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float mean: mean or expectation
:param float std: standard deviation
:returns: pdf(s) in point **x**
:rtype: float or numpy.ndarray
'''
return exp(-((x - mean) / std)**2 / 2) / sqrt(2 * pi) / std | python | def gaus_pdf(x, mean, std):
'''Gaussian distribution's probability density function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float mean: mean or expectation
:param float std: standard deviation
:returns: pdf(s) in point **x**
:rtype: float or numpy.ndarray
'''
return exp(-((x - mean) / std)**2 / 2) / sqrt(2 * pi) / std | [
"def",
"gaus_pdf",
"(",
"x",
",",
"mean",
",",
"std",
")",
":",
"return",
"exp",
"(",
"-",
"(",
"(",
"x",
"-",
"mean",
")",
"/",
"std",
")",
"**",
"2",
"/",
"2",
")",
"/",
"sqrt",
"(",
"2",
"*",
"pi",
")",
"/",
"std"
]
| Gaussian distribution's probability density function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float mean: mean or expectation
:param float std: standard deviation
:returns: pdf(s) in point **x**
:rtype: float or numpy.ndarray | [
"Gaussian",
"distribution",
"s",
"probability",
"density",
"function",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/math.py#L10-L22 | train |
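A quick numeric check of `gaus_pdf`: at the mean the density equals 1 / (sqrt(2*pi) * std), and the function broadcasts over arrays.

import numpy as np

print(gaus_pdf(0.0, mean=0.0, std=2.0))  # ~0.19947, i.e. 1/(sqrt(2*pi)*2)
print(gaus_pdf(np.array([-2.0, 0.0, 2.0]), 0.0, 2.0))  # symmetric about the mean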
assamite/creamas | creamas/math.py | logistic | def logistic(x, x0, k, L):
'''Logistic function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray
'''
return L / (1 + exp(-k * (x - x0))) | python | def logistic(x, x0, k, L):
'''Logistic function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray
'''
return L / (1 + exp(-k * (x - x0))) | [
"def",
"logistic",
"(",
"x",
",",
"x0",
",",
"k",
",",
"L",
")",
":",
"return",
"L",
"/",
"(",
"1",
"+",
"exp",
"(",
"-",
"k",
"*",
"(",
"x",
"-",
"x0",
")",
")",
")"
]
| Logistic function.
See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray | [
"Logistic",
"function",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/math.py#L25-L38 | train |
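A quick numeric check of `logistic`: at the midpoint x0 the curve sits at exactly half its maximum L, and it saturates toward L as x grows.

print(logistic(0.0, x0=0.0, k=1.0, L=2.0))  # 1.0 == L/2
print(logistic(10.0, x0=0.0, k=1.0, L=2.0))  # ~1.99991, approaching L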
assamite/creamas | creamas/examples/grid/grid_node.py | populate_menv | def populate_menv(menv, agent_cls_name, log_folder):
'''Populate given multiprocessing grid environment with agents.
:param menv: Instance of :py:class:`GridMultiEnvironment`
:param str agent_cls_name: Name of the agent class, e.g. 'grip_mp:GridAgent'
:param str log_folder: Root logging folder for the agents.
'''
gs = menv.gs
n_agents = gs[0] * gs[1]
n_slaves = len(menv.addrs)
logger.info("Populating {} with {} agents".format(HOST, n_agents*n_slaves))
run(menv.populate(agent_cls_name, n_agents, log_folder=log_folder))
logger.info("Populating complete.") | python | def populate_menv(menv, agent_cls_name, log_folder):
'''Populate given multiprocessing grid environment with agents.
:param menv: Instance of :py:class:`GridMultiEnvironment`
:param str agent_cls_name: Name of the agent class, e.g. 'grip_mp:GridAgent'
:param str log_folder: Root logging folder for the agents.
'''
gs = menv.gs
n_agents = gs[0] * gs[1]
n_slaves = len(menv.addrs)
logger.info("Populating {} with {} agents".format(HOST, n_agents*n_slaves))
run(menv.populate(agent_cls_name, n_agents, log_folder=log_folder))
logger.info("Populating complete.") | [
"def",
"populate_menv",
"(",
"menv",
",",
"agent_cls_name",
",",
"log_folder",
")",
":",
"gs",
"=",
"menv",
".",
"gs",
"n_agents",
"=",
"gs",
"[",
"0",
"]",
"*",
"gs",
"[",
"1",
"]",
"n_slaves",
"=",
"len",
"(",
"menv",
".",
"addrs",
")",
"logger",
".",
"info",
"(",
"\"Populating {} with {} agents\"",
".",
"format",
"(",
"HOST",
",",
"n_agents",
"*",
"n_slaves",
")",
")",
"run",
"(",
"menv",
".",
"populate",
"(",
"agent_cls_name",
",",
"n_agents",
",",
"log_folder",
"=",
"log_folder",
")",
")",
"logger",
".",
"info",
"(",
"\"Populating complete.\"",
")"
]
| Populate given multiprocessing grid environment with agents.
:param menv: Instance of :py:class:`GridMultiEnvironment`
:param str agent_cls_name: Name of the agent class, e.g. 'grip_mp:GridAgent'
:param str log_folder: Root logging folder for the agents. | [
"Populate",
"given",
"multiprocessing",
"grid",
"environment",
"with",
"agents",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/grid/grid_node.py#L70-L82 | train |
assamite/creamas | creamas/examples/grid/grid_node.py | get_slave_addrs | def get_slave_addrs(mgr_addr, N):
'''Get ports for the slave environments.
Currently the ports are not checked for availability.
'''
return [(HOST, p) for p in range(mgr_addr+1, mgr_addr+1+N)] | python | def get_slave_addrs(mgr_addr, N):
'''Get ports for the slave environments.
Currently the ports are not checked for availability.
'''
return [(HOST, p) for p in range(mgr_addr+1, mgr_addr+1+N)] | [
"def",
"get_slave_addrs",
"(",
"mgr_addr",
",",
"N",
")",
":",
"return",
"[",
"(",
"HOST",
",",
"p",
")",
"for",
"p",
"in",
"range",
"(",
"mgr_addr",
"+",
"1",
",",
"mgr_addr",
"+",
"1",
"+",
"N",
")",
"]"
]
| Get ports for the slave environments.
Currently the ports are not checked for availability. | [
"Get",
"ports",
"for",
"the",
"slave",
"environments",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/grid/grid_node.py#L85-L90 | train |
assamite/creamas | creamas/rules/rule.py | weighted_average | def weighted_average(rule, artifact):
"""Evaluate artifact's value to be weighted average of values returned by
rule's subrules.
"""
e = 0
w = 0
for i in range(len(rule.R)):
r = rule.R[i](artifact)
if r is not None:
e += r * rule.W[i]
w += abs(rule.W[i])
if w == 0.0:
return 0.0
return e / w | python | def weighted_average(rule, artifact):
"""Evaluate artifact's value to be weighted average of values returned by
rule's subrules.
"""
e = 0
w = 0
for i in range(len(rule.R)):
r = rule.R[i](artifact)
if r is not None:
e += r * rule.W[i]
w += abs(rule.W[i])
if w == 0.0:
return 0.0
return e / w | [
"def",
"weighted_average",
"(",
"rule",
",",
"artifact",
")",
":",
"e",
"=",
"0",
"w",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"rule",
".",
"R",
")",
")",
":",
"r",
"=",
"rule",
".",
"R",
"[",
"i",
"]",
"(",
"artifact",
")",
"if",
"r",
"is",
"not",
"None",
":",
"e",
"+=",
"r",
"*",
"rule",
".",
"W",
"[",
"i",
"]",
"w",
"+=",
"abs",
"(",
"rule",
".",
"W",
"[",
"i",
"]",
")",
"if",
"w",
"==",
"0.0",
":",
"return",
"0.0",
"return",
"e",
"/",
"w"
]
| Evaluate artifact's value to be weighted average of values returned by
rule's subrules. | [
"Evaluate",
"artifact",
"s",
"value",
"to",
"be",
"weighted",
"average",
"of",
"values",
"returned",
"by",
"rule",
"s",
"subrules",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/rule.py#L207-L220 | train |
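The weighting in `weighted_average` can be exercised with a stand-in rule object; only the `R` and `W` attributes the function reads are mocked, and `None` subrule results are skipped as in the real code.

class ToyRule:
    R = [lambda a: 0.2, lambda a: 1.0, lambda a: None]
    W = [1.0, 3.0, 5.0]

print(weighted_average(ToyRule(), artifact=None))  # (0.2*1 + 1.0*3) / (1 + 3) = 0.8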
assamite/creamas | creamas/rules/rule.py | minimum | def minimum(rule, artifact):
"""Evaluate artifact's value to be minimum of values returned by rule's
subrules.
This evaluation function ignores subrule weights.
"""
m = 1.0
for i in range(len(rule.R)):
e = rule.R[i](artifact)
if e is not None:
if e < m:
m = e
return m | python | def minimum(rule, artifact):
"""Evaluate artifact's value to be minimum of values returned by rule's
subrules.
This evaluation function ignores subrule weights.
"""
m = 1.0
for i in range(len(rule.R)):
e = rule.R[i](artifact)
if e is not None:
if e < m:
m = e
return m | [
"def",
"minimum",
"(",
"rule",
",",
"artifact",
")",
":",
"m",
"=",
"1.0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"rule",
".",
"R",
")",
")",
":",
"e",
"=",
"rule",
".",
"R",
"[",
"i",
"]",
"(",
"artifact",
")",
"if",
"e",
"is",
"not",
"None",
":",
"if",
"e",
"<",
"m",
":",
"m",
"=",
"e",
"return",
"m"
]
| Evaluate artifact's value to be minimum of values returned by rule's
subrules.
This evaluation function ignores subrule weights. | [
"Evaluate",
"artifact",
"s",
"value",
"to",
"be",
"minimum",
"of",
"values",
"returned",
"by",
"rule",
"s",
"subrules",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/rule.py#L223-L235 | train |
assamite/creamas | creamas/rules/rule.py | Rule.add_subrule | def add_subrule(self, subrule, weight):
"""Add subrule to the rule.
:param subrule:
Subrule to add to this rule, an instance of :class:`Rule` or
:class:`RuleLeaf`.
:param float weight: Weight of the subrule
"""
if not issubclass(subrule.__class__, (Rule, RuleLeaf)):
raise TypeError("Rule's class must be (subclass of) {} or {}, got "
"{}.".format(Rule, RuleLeaf, subrule.__class__))
self.__domains = set.union(self.__domains, subrule.domains)
self.R.append(subrule)
self.W.append(weight) | python | def add_subrule(self, subrule, weight):
"""Add subrule to the rule.
:param subrule:
Subrule to add to this rule, an instance of :class:`Rule` or
:class:`RuleLeaf`.
:param float weight: Weight of the subrule
"""
if not issubclass(subrule.__class__, (Rule, RuleLeaf)):
raise TypeError("Rule's class must be (subclass of) {} or {}, got "
"{}.".format(Rule, RuleLeaf, subrule.__class__))
self.__domains = set.union(self.__domains, subrule.domains)
self.R.append(subrule)
self.W.append(weight) | [
"def",
"add_subrule",
"(",
"self",
",",
"subrule",
",",
"weight",
")",
":",
"if",
"not",
"issubclass",
"(",
"subrule",
".",
"__class__",
",",
"(",
"Rule",
",",
"RuleLeaf",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Rule's class must be (subclass of) {} or {}, got \"",
"\"{}.\"",
".",
"format",
"(",
"Rule",
",",
"RuleLeaf",
",",
"subrule",
".",
"__class__",
")",
")",
"self",
".",
"__domains",
"=",
"set",
".",
"union",
"(",
"self",
".",
"__domains",
",",
"subrule",
".",
"domains",
")",
"self",
".",
"R",
".",
"append",
"(",
"subrule",
")",
"self",
".",
"W",
".",
"append",
"(",
"weight",
")"
]
| Add subrule to the rule.
:param subrule:
Subrule to add to this rule, an instance of :class:`Rule` or
:class:`RuleLeaf`.
:param float weight: Weight of the subrule | [
"Add",
"subrule",
"to",
"the",
"rule",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/rules/rule.py#L153-L167 | train |
Kortemme-Lab/klab | klab/chainsequence.py | ChainSequences.parse_seqres | def parse_seqres(self, pdb):
"""Parse the SEQRES entries into the object"""
seqresre = re.compile("SEQRES")
seqreslines = [line for line in pdb.lines if seqresre.match(line)]
for line in seqreslines:
chain = line[11]
resnames = line[19:70].strip()
self.setdefault(chain, [])
self[chain] += resnames.split() | python | def parse_seqres(self, pdb):
"""Parse the SEQRES entries into the object"""
seqresre = re.compile("SEQRES")
seqreslines = [line for line in pdb.lines if seqresre.match(line)]
for line in seqreslines:
chain = line[11]
resnames = line[19:70].strip()
self.setdefault(chain, [])
self[chain] += resnames.split() | [
"def",
"parse_seqres",
"(",
"self",
",",
"pdb",
")",
":",
"seqresre",
"=",
"re",
".",
"compile",
"(",
"\"SEQRES\"",
")",
"seqreslines",
"=",
"[",
"line",
"for",
"line",
"in",
"pdb",
".",
"lines",
"if",
"seqresre",
".",
"match",
"(",
"line",
")",
"]",
"for",
"line",
"in",
"seqreslines",
":",
"chain",
"=",
"line",
"[",
"11",
"]",
"resnames",
"=",
"line",
"[",
"19",
":",
"70",
"]",
".",
"strip",
"(",
")",
"self",
".",
"setdefault",
"(",
"chain",
",",
"[",
"]",
")",
"self",
"[",
"chain",
"]",
"+=",
"resnames",
".",
"split",
"(",
")"
]
| Parse the SEQRES entries into the object | [
"Parse",
"the",
"SEQRES",
"entries",
"into",
"the",
"object"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L44-L55 | train |
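A hedged sketch of parse_seqres in isolation: the pdb argument only needs a lines attribute, so a SimpleNamespace stands in for klab's PDB object, and the SEQRES line is padded to the standard PDB columns the parser slices (chain ID in column 12, residue names from column 20). klab is a Python 2-era codebase, so run these sketches under whichever interpreter your install supports.

from types import SimpleNamespace
from klab.chainsequence import ChainSequences

pdb = SimpleNamespace(lines=["SEQRES   1 A    3  MET ALA GLY"])
cs = ChainSequences()
cs.parse_seqres(pdb)
print(cs["A"])  # ['MET', 'ALA', 'GLY']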
Kortemme-Lab/klab | klab/chainsequence.py | ChainSequences.parse_atoms | def parse_atoms(self, pdb):
"""Parse the ATOM entries into the object"""
atomre = re.compile("ATOM")
atomlines = [line for line in pdb.lines if atomre.match(line)]
chainresnums = {}
for line in atomlines:
chain = line[21]
resname = line[17:20]
resnum = line[22:27]
#print resnum
chainresnums.setdefault(chain, [])
if resnum in chainresnums[chain]:
assert self[chain][chainresnums[chain].index(resnum)] == resname
else:
if resnum[-1] == ' ':
self.setdefault(chain, [])
self[chain] += [resname]
chainresnums[chain] += [resnum]
return chainresnums | python | def parse_atoms(self, pdb):
"""Parse the ATOM entries into the object"""
atomre = re.compile("ATOM")
atomlines = [line for line in pdb.lines if atomre.match(line)]
chainresnums = {}
for line in atomlines:
chain = line[21]
resname = line[17:20]
resnum = line[22:27]
#print resnum
chainresnums.setdefault(chain, [])
if resnum in chainresnums[chain]:
assert self[chain][chainresnums[chain].index(resnum)] == resname
else:
if resnum[-1] == ' ':
self.setdefault(chain, [])
self[chain] += [resname]
chainresnums[chain] += [resnum]
return chainresnums | [
"def",
"parse_atoms",
"(",
"self",
",",
"pdb",
")",
":",
"atomre",
"=",
"re",
".",
"compile",
"(",
"\"ATOM\"",
")",
"atomlines",
"=",
"[",
"line",
"for",
"line",
"in",
"pdb",
".",
"lines",
"if",
"atomre",
".",
"match",
"(",
"line",
")",
"]",
"chainresnums",
"=",
"{",
"}",
"for",
"line",
"in",
"atomlines",
":",
"chain",
"=",
"line",
"[",
"21",
"]",
"resname",
"=",
"line",
"[",
"17",
":",
"20",
"]",
"resnum",
"=",
"line",
"[",
"22",
":",
"27",
"]",
"#print resnum",
"chainresnums",
".",
"setdefault",
"(",
"chain",
",",
"[",
"]",
")",
"if",
"resnum",
"in",
"chainresnums",
"[",
"chain",
"]",
":",
"assert",
"self",
"[",
"chain",
"]",
"[",
"chainresnums",
"[",
"chain",
"]",
".",
"index",
"(",
"resnum",
")",
"]",
"==",
"resname",
"else",
":",
"if",
"resnum",
"[",
"-",
"1",
"]",
"==",
"' '",
":",
"self",
".",
"setdefault",
"(",
"chain",
",",
"[",
"]",
")",
"self",
"[",
"chain",
"]",
"+=",
"[",
"resname",
"]",
"chainresnums",
"[",
"chain",
"]",
"+=",
"[",
"resnum",
"]",
"return",
"chainresnums"
]
| Parse the ATOM entries into the object | [
"Parse",
"the",
"ATOM",
"entries",
"into",
"the",
"object"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L57-L81 | train |
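The same stand-in trick works for parse_atoms. The fixed-width ATOM lines below follow the columns the code slices (residue name 18-20, chain 22, residue number 23-27, with a blank insertion-code column); a repeated residue number adds no new sequence entry, and the returned dict maps each chain to its residue-number strings.

from types import SimpleNamespace
from klab.chainsequence import ChainSequences

pdb = SimpleNamespace(lines=[
    "ATOM      1  N   MET A   1 ",
    "ATOM      2  CA  MET A   1 ",  # same residue: only the assert runs
    "ATOM      3  N   ALA A   2 ",
])
cs = ChainSequences()
chainresnums = cs.parse_atoms(pdb)
print(cs["A"])            # ['MET', 'ALA']
print(chainresnums["A"])  # ['   1 ', '   2 ']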
Kortemme-Lab/klab | klab/chainsequence.py | ChainSequences.seqres_lines | def seqres_lines(self):
"""Generate SEQRES lines representing the contents"""
lines = []
for chain in self.keys():
seq = self[chain]
serNum = 1
startidx = 0
while startidx < len(seq):
endidx = min(startidx+13, len(seq))
lines += ["SEQRES %2i %s %4i %s\n" % (serNum, chain, len(seq), " ".join(seq[startidx:endidx]))]
serNum += 1
startidx += 13
return lines | python | def seqres_lines(self):
"""Generate SEQRES lines representing the contents"""
lines = []
for chain in self.keys():
seq = self[chain]
serNum = 1
startidx = 0
while startidx < len(seq):
endidx = min(startidx+13, len(seq))
lines += ["SEQRES %2i %s %4i %s\n" % (serNum, chain, len(seq), " ".join(seq[startidx:endidx]))]
serNum += 1
startidx += 13
return lines | [
"def",
"seqres_lines",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"chain",
"in",
"self",
".",
"keys",
"(",
")",
":",
"seq",
"=",
"self",
"[",
"chain",
"]",
"serNum",
"=",
"1",
"startidx",
"=",
"0",
"while",
"startidx",
"<",
"len",
"(",
"seq",
")",
":",
"endidx",
"=",
"min",
"(",
"startidx",
"+",
"13",
",",
"len",
"(",
"seq",
")",
")",
"lines",
"+=",
"[",
"\"SEQRES %2i %s %4i %s\\n\"",
"%",
"(",
"serNum",
",",
"chain",
",",
"len",
"(",
"seq",
")",
",",
"\" \"",
".",
"join",
"(",
"seq",
"[",
"startidx",
":",
"endidx",
"]",
")",
")",
"]",
"serNum",
"+=",
"1",
"startidx",
"+=",
"13",
"return",
"lines"
]
| Generate SEQRES lines representing the contents | [
"Generate",
"SEQRES",
"lines",
"representing",
"the",
"contents"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L83-L98 | train |
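seqres_lines goes the other way, from an in-memory sequence back to SEQRES records, wrapping at 13 residues per line as the format string above shows. A short sketch:

from klab.chainsequence import ChainSequences

cs = ChainSequences()
cs["A"] = ["MET", "ALA", "GLY"] * 5   # 15 residues -> two SEQRES lines
for line in cs.seqres_lines():
    print(line, end="")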
Kortemme-Lab/klab | klab/chainsequence.py | ChainSequences.replace_seqres | def replace_seqres(self, pdb, update_atoms = True):
"""Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains"""
newpdb = PDB()
inserted_seqres = False
entries_before_seqres = set(["HEADER", "OBSLTE", "TITLE", "CAVEAT", "COMPND", "SOURCE",
"KEYWDS", "EXPDTA", "AUTHOR", "REVDAT", "SPRSDE", "JRNL",
"REMARK", "DBREF", "SEQADV"])
mutated_resids = {}
if update_atoms:
old_seqs = ChainSequences()
chainresnums = old_seqs.parse_atoms(pdb)
assert self.keys() == old_seqs.keys()
for chain in self.keys():
assert len(self[chain]) == len(old_seqs[chain])
for i in xrange(len(self[chain])):
if self[chain][i] != old_seqs[chain][i]:
resid = chain + chainresnums[chain][i]
mutated_resids[resid] = self[chain][i]
for line in pdb.lines:
entry = line[0:6]
if (not inserted_seqres) and entry not in entries_before_seqres:
inserted_seqres = True
newpdb.lines += self.seqres_lines()
if update_atoms and entry == "ATOM ":
resid = line[21:27]
atom = line[12:16].strip()
if not mutated_resids.has_key(resid):
newpdb.lines += [line]
else:
newpdb.lines += [line[:17] + mutated_resids[resid] + line[20:]]
elif entry != "SEQRES":
newpdb.lines += [line]
if update_atoms:
newpdb.remove_nonbackbone_atoms(mutated_resids.keys())
return newpdb | python | def replace_seqres(self, pdb, update_atoms = True):
"""Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains"""
newpdb = PDB()
inserted_seqres = False
entries_before_seqres = set(["HEADER", "OBSLTE", "TITLE", "CAVEAT", "COMPND", "SOURCE",
"KEYWDS", "EXPDTA", "AUTHOR", "REVDAT", "SPRSDE", "JRNL",
"REMARK", "DBREF", "SEQADV"])
mutated_resids = {}
if update_atoms:
old_seqs = ChainSequences()
chainresnums = old_seqs.parse_atoms(pdb)
assert self.keys() == old_seqs.keys()
for chain in self.keys():
assert len(self[chain]) == len(old_seqs[chain])
for i in xrange(len(self[chain])):
if self[chain][i] != old_seqs[chain][i]:
resid = chain + chainresnums[chain][i]
mutated_resids[resid] = self[chain][i]
for line in pdb.lines:
entry = line[0:6]
if (not inserted_seqres) and entry not in entries_before_seqres:
inserted_seqres = True
newpdb.lines += self.seqres_lines()
if update_atoms and entry == "ATOM ":
resid = line[21:27]
atom = line[12:16].strip()
if not mutated_resids.has_key(resid):
newpdb.lines += [line]
else:
newpdb.lines += [line[:17] + mutated_resids[resid] + line[20:]]
elif entry != "SEQRES":
newpdb.lines += [line]
if update_atoms:
newpdb.remove_nonbackbone_atoms(mutated_resids.keys())
return newpdb | [
"def",
"replace_seqres",
"(",
"self",
",",
"pdb",
",",
"update_atoms",
"=",
"True",
")",
":",
"newpdb",
"=",
"PDB",
"(",
")",
"inserted_seqres",
"=",
"False",
"entries_before_seqres",
"=",
"set",
"(",
"[",
"\"HEADER\"",
",",
"\"OBSLTE\"",
",",
"\"TITLE\"",
",",
"\"CAVEAT\"",
",",
"\"COMPND\"",
",",
"\"SOURCE\"",
",",
"\"KEYWDS\"",
",",
"\"EXPDTA\"",
",",
"\"AUTHOR\"",
",",
"\"REVDAT\"",
",",
"\"SPRSDE\"",
",",
"\"JRNL\"",
",",
"\"REMARK\"",
",",
"\"DBREF\"",
",",
"\"SEQADV\"",
"]",
")",
"mutated_resids",
"=",
"{",
"}",
"if",
"update_atoms",
":",
"old_seqs",
"=",
"ChainSequences",
"(",
")",
"chainresnums",
"=",
"old_seqs",
".",
"parse_atoms",
"(",
"pdb",
")",
"assert",
"self",
".",
"keys",
"(",
")",
"==",
"old_seqs",
".",
"keys",
"(",
")",
"for",
"chain",
"in",
"self",
".",
"keys",
"(",
")",
":",
"assert",
"len",
"(",
"self",
"[",
"chain",
"]",
")",
"==",
"len",
"(",
"old_seqs",
"[",
"chain",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"self",
"[",
"chain",
"]",
")",
")",
":",
"if",
"self",
"[",
"chain",
"]",
"[",
"i",
"]",
"!=",
"old_seqs",
"[",
"chain",
"]",
"[",
"i",
"]",
":",
"resid",
"=",
"chain",
"+",
"chainresnums",
"[",
"chain",
"]",
"[",
"i",
"]",
"mutated_resids",
"[",
"resid",
"]",
"=",
"self",
"[",
"chain",
"]",
"[",
"i",
"]",
"for",
"line",
"in",
"pdb",
".",
"lines",
":",
"entry",
"=",
"line",
"[",
"0",
":",
"6",
"]",
"if",
"(",
"not",
"inserted_seqres",
")",
"and",
"entry",
"not",
"in",
"entries_before_seqres",
":",
"inserted_seqres",
"=",
"True",
"newpdb",
".",
"lines",
"+=",
"self",
".",
"seqres_lines",
"(",
")",
"if",
"update_atoms",
"and",
"entry",
"==",
"\"ATOM \"",
":",
"resid",
"=",
"line",
"[",
"21",
":",
"27",
"]",
"atom",
"=",
"line",
"[",
"12",
":",
"16",
"]",
".",
"strip",
"(",
")",
"if",
"not",
"mutated_resids",
".",
"has_key",
"(",
"resid",
")",
":",
"newpdb",
".",
"lines",
"+=",
"[",
"line",
"]",
"else",
":",
"newpdb",
".",
"lines",
"+=",
"[",
"line",
"[",
":",
"17",
"]",
"+",
"mutated_resids",
"[",
"resid",
"]",
"+",
"line",
"[",
"20",
":",
"]",
"]",
"elif",
"entry",
"!=",
"\"SEQRES\"",
":",
"newpdb",
".",
"lines",
"+=",
"[",
"line",
"]",
"if",
"update_atoms",
":",
"newpdb",
".",
"remove_nonbackbone_atoms",
"(",
"mutated_resids",
".",
"keys",
"(",
")",
")",
"return",
"newpdb"
]
| Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains | [
"Replace",
"SEQRES",
"lines",
"with",
"a",
"new",
"sequence",
"optionally",
"removing",
"mutated",
"sidechains"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/chainsequence.py#L100-L145 | train |
brunato/lograptor | lograptor/matcher.py | has_host_match | def has_host_match(log_data, hosts):
"""
Match the data against a list of hostname patterns. If the log line data
doesn't include host information, the line is considered matched.
"""
hostname = getattr(log_data, 'host', None)
if hostname and hostname not in host_cache:
for host_pattern in hosts:
if host_pattern.search(hostname) is not None:
host_cache.add(hostname)
return True
else:
return False
return True | python | def has_host_match(log_data, hosts):
"""
Match the data against a list of hostname patterns. If the log line data
doesn't include host information, the line is considered matched.
"""
hostname = getattr(log_data, 'host', None)
if hostname and hostname not in host_cache:
for host_pattern in hosts:
if host_pattern.search(hostname) is not None:
host_cache.add(hostname)
return True
else:
return False
return True | [
"def",
"has_host_match",
"(",
"log_data",
",",
"hosts",
")",
":",
"hostname",
"=",
"getattr",
"(",
"log_data",
",",
"'host'",
",",
"None",
")",
"if",
"hostname",
"and",
"hostname",
"not",
"in",
"host_cache",
":",
"for",
"host_pattern",
"in",
"hosts",
":",
"if",
"host_pattern",
".",
"search",
"(",
"hostname",
")",
"is",
"not",
"None",
":",
"host_cache",
".",
"add",
"(",
"hostname",
")",
"return",
"True",
"else",
":",
"return",
"False",
"return",
"True"
]
| Match the data against a list of hostname patterns. If the log line data
doesn't include host information, the line is considered matched. | [
"Match",
"the",
"data",
"with",
"a",
"list",
"of",
"hostname",
"patterns",
".",
"If",
"the",
"log",
"line",
"data",
"doesn",
"t",
"include",
"host",
"information",
"considers",
"the",
"line",
"as",
"matched",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/matcher.py#L159-L172 | train |
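A usage sketch for has_host_match, with the import assumed from the record's module path. Because host_cache is module-level state, a hostname that matched once short-circuits the pattern loop on later calls.

import re
from types import SimpleNamespace
from lograptor.matcher import has_host_match

hosts = [re.compile(r"^web\d+$")]
print(has_host_match(SimpleNamespace(host="web01"), hosts))  # True (and cached)
print(has_host_match(SimpleNamespace(host="db01"), hosts))   # False
print(has_host_match(SimpleNamespace(), hosts))              # True: no host attribute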
aacanakin/glim | glim/commands.py | StartCommand.run | def run(self, app):
"""Function starts the web server given configuration."""
GlimLog.info('Glim server started on %s environment' % self.args.env)
try:
kwargs = Config.get('app.server.options')
run(app.wsgi,
host=Config.get('app.server.host'),
port=Config.get('app.server.port'),
debug=Config.get('app.server.debugger'),
reloader=Config.get('app.server.reloader'),
server=Config.get('app.server.wsgi'),
**kwargs)
except Exception as e:
print(traceback.format_exc())
exit() | python | def run(self, app):
"""Function starts the web server given configuration."""
GlimLog.info('Glim server started on %s environment' % self.args.env)
try:
kwargs = Config.get('app.server.options')
run(app.wsgi,
host=Config.get('app.server.host'),
port=Config.get('app.server.port'),
debug=Config.get('app.server.debugger'),
reloader=Config.get('app.server.reloader'),
server=Config.get('app.server.wsgi'),
**kwargs)
except Exception as e:
print(traceback.format_exc())
exit() | [
"def",
"run",
"(",
"self",
",",
"app",
")",
":",
"GlimLog",
".",
"info",
"(",
"'Glim server started on %s environment'",
"%",
"self",
".",
"args",
".",
"env",
")",
"try",
":",
"kwargs",
"=",
"Config",
".",
"get",
"(",
"'app.server.options'",
")",
"run",
"(",
"app",
".",
"wsgi",
",",
"host",
"=",
"Config",
".",
"get",
"(",
"'app.server.host'",
")",
",",
"port",
"=",
"Config",
".",
"get",
"(",
"'app.server.port'",
")",
",",
"debug",
"=",
"Config",
".",
"get",
"(",
"'app.server.debugger'",
")",
",",
"reloader",
"=",
"Config",
".",
"get",
"(",
"'app.server.reloader'",
")",
",",
"server",
"=",
"Config",
".",
"get",
"(",
"'app.server.wsgi'",
")",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"exit",
"(",
")"
]
| Start the web server with the given configuration. | [
"Function",
"starts",
"the",
"web",
"server",
"given",
"configuration",
"."
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/commands.py#L66-L80 | train |
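StartCommand.run is entirely configuration-driven. The sketch below lists the keys it reads through Config.get; the key names come from the code above, while the values are illustrative only, and 'wsgiref' stands for any backend name bottle's run() accepts.

server_config = {
    'app.server.host': '0.0.0.0',
    'app.server.port': 8080,
    'app.server.debugger': True,
    'app.server.reloader': True,
    'app.server.wsgi': 'wsgiref',  # backend name handed to bottle's run()
    'app.server.options': {},      # extra kwargs forwarded via **kwargs
}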
Kortemme-Lab/klab | klab/benchmarking/analysis/ssm.py | get_symmetrical_std_devs | def get_symmetrical_std_devs(values, ignore_zeros = True):
"""Takes a list of values and splits it into positive and negative values. For both of these subsets, a symmetrical
distribution is created by mirroring each value along the origin and the standard deviation for both subsets is returned.
:param values: A list of numerical values.
:param ignore_zeros: Whether or not zeroes should be considered when determining the standard deviations.
:return: A pair of values - the standard deviations of the positive and negative subsets respectively.
"""
pos_stdeviation = get_symmetrical_std_dev(values, True, ignore_zeros = ignore_zeros)
neg_stdeviation = get_symmetrical_std_dev(values, False, ignore_zeros = ignore_zeros)
return pos_stdeviation, neg_stdeviation | python | def get_symmetrical_std_devs(values, ignore_zeros = True):
"""Takes a list of values and splits it into positive and negative values. For both of these subsets, a symmetrical
distribution is created by mirroring each value along the origin and the standard deviation for both subsets is returned.
:param values: A list of numerical values.
:param ignore_zeros: Whether or not zeroes should be considered when determining the standard deviations.
:return: A pair of values - the standard deviations of the positive and negative subsets respectively.
"""
pos_stdeviation = get_symmetrical_std_dev(values, True, ignore_zeros = ignore_zeros)
neg_stdeviation = get_symmetrical_std_dev(values, False, ignore_zeros = ignore_zeros)
return pos_stdeviation, neg_stdeviation | [
"def",
"get_symmetrical_std_devs",
"(",
"values",
",",
"ignore_zeros",
"=",
"True",
")",
":",
"pos_stdeviation",
"=",
"get_symmetrical_std_dev",
"(",
"values",
",",
"True",
",",
"ignore_zeros",
"=",
"ignore_zeros",
")",
"neg_stdeviation",
"=",
"get_symmetrical_std_dev",
"(",
"values",
",",
"False",
",",
"ignore_zeros",
"=",
"ignore_zeros",
")",
"return",
"pos_stdeviation",
",",
"neg_stdeviation"
]
| Takes a list of values and splits it into positive and negative values. For both of these subsets, a symmetrical
distribution is created by mirroring each value along the origin and the standard deviation for both subsets is returned.
:param values: A list of numerical values.
:param ignore_zeros: Whether or not zeroes should be considered when determining the standard deviations.
:return: A pair of values - the standard deviations of the positive and negative subsets respectively. | [
"Takes",
"a",
"list",
"of",
"values",
"and",
"splits",
"it",
"into",
"positive",
"and",
"negative",
"values",
".",
"For",
"both",
"of",
"these",
"subsets",
"a",
"symmetrical",
"distribution",
"is",
"created",
"by",
"mirroring",
"each",
"value",
"along",
"the",
"origin",
"and",
"the",
"standard",
"deviation",
"for",
"both",
"subsets",
"is",
"returned",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ssm.py#L160-L170 | train |
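A usage sketch with the import path taken from the record. The exact numbers depend on get_symmetrical_std_dev (not shown here), so only the shape of the call is illustrated.

from klab.benchmarking.analysis.ssm import get_symmetrical_std_devs

values = [-2.0, -1.0, 0.0, 0.5, 1.5]
pos_std, neg_std = get_symmetrical_std_devs(values, ignore_zeros=True)
# pos_std: std dev of [0.5, 1.5] mirrored through the origin;
# neg_std: the same construction over [-2.0, -1.0]; the zero is ignored.
print(pos_std, neg_std)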
Kortemme-Lab/klab | klab/benchmarking/analysis/ssm.py | get_std_xy_dataset_statistics | def get_std_xy_dataset_statistics(x_values, y_values, expect_negative_correlation = False, STDev_cutoff = 1.0):
'''Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab.stats.misc.'''
assert(len(x_values) == len(y_values))
csv_lines = ['ID,X,Y'] + [','.join(map(str, [c + 1, x_values[c], y_values[c]])) for c in xrange(len(x_values))]
data = parse_csv(csv_lines, expect_negative_correlation = expect_negative_correlation, STDev_cutoff = STDev_cutoff)
assert(len(data['predictions']) == 1)
assert(1 in data['predictions'])
assert(data['predictions'][1]['name'] == 'Y')
summary_data = data['predictions'][1]
stats = {}
for spair in field_name_mapper:
stats[spair[1]] = summary_data[spair[0]]
if stats['std_warnings']:
stats['std_warnings'] = '\n'.join(stats['std_warnings'])
else:
stats['std_warnings'] = None
return stats | python | def get_std_xy_dataset_statistics(x_values, y_values, expect_negative_correlation = False, STDev_cutoff = 1.0):
'''Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab.stats.misc.'''
assert(len(x_values) == len(y_values))
csv_lines = ['ID,X,Y'] + [','.join(map(str, [c + 1, x_values[c], y_values[c]])) for c in xrange(len(x_values))]
data = parse_csv(csv_lines, expect_negative_correlation = expect_negative_correlation, STDev_cutoff = STDev_cutoff)
assert(len(data['predictions']) == 1)
assert(1 in data['predictions'])
assert(data['predictions'][1]['name'] == 'Y')
summary_data = data['predictions'][1]
stats = {}
for spair in field_name_mapper:
stats[spair[1]] = summary_data[spair[0]]
if stats['std_warnings']:
stats['std_warnings'] = '\n'.join(stats['std_warnings'])
else:
stats['std_warnings'] = None
return stats | [
"def",
"get_std_xy_dataset_statistics",
"(",
"x_values",
",",
"y_values",
",",
"expect_negative_correlation",
"=",
"False",
",",
"STDev_cutoff",
"=",
"1.0",
")",
":",
"assert",
"(",
"len",
"(",
"x_values",
")",
"==",
"len",
"(",
"y_values",
")",
")",
"csv_lines",
"=",
"[",
"'ID,X,Y'",
"]",
"+",
"[",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"[",
"c",
"+",
"1",
",",
"x_values",
"[",
"c",
"]",
",",
"y_values",
"[",
"c",
"]",
"]",
")",
")",
"for",
"c",
"in",
"xrange",
"(",
"len",
"(",
"x_values",
")",
")",
"]",
"data",
"=",
"parse_csv",
"(",
"csv_lines",
",",
"expect_negative_correlation",
"=",
"expect_negative_correlation",
",",
"STDev_cutoff",
"=",
"STDev_cutoff",
")",
"assert",
"(",
"len",
"(",
"data",
"[",
"'predictions'",
"]",
")",
"==",
"1",
")",
"assert",
"(",
"1",
"in",
"data",
"[",
"'predictions'",
"]",
")",
"assert",
"(",
"data",
"[",
"'predictions'",
"]",
"[",
"1",
"]",
"[",
"'name'",
"]",
"==",
"'Y'",
")",
"summary_data",
"=",
"data",
"[",
"'predictions'",
"]",
"[",
"1",
"]",
"stats",
"=",
"{",
"}",
"for",
"spair",
"in",
"field_name_mapper",
":",
"stats",
"[",
"spair",
"[",
"1",
"]",
"]",
"=",
"summary_data",
"[",
"spair",
"[",
"0",
"]",
"]",
"if",
"stats",
"[",
"'std_warnings'",
"]",
":",
"stats",
"[",
"'std_warnings'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"stats",
"[",
"'std_warnings'",
"]",
")",
"else",
":",
"stats",
"[",
"'std_warnings'",
"]",
"=",
"None",
"return",
"stats"
]
| Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab.stats.misc. | [
"Calls",
"parse_csv",
"and",
"returns",
"the",
"analysis",
"in",
"a",
"format",
"similar",
"to",
"get_xy_dataset_statistics",
"in",
"klab",
".",
"stats",
".",
"misc",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ssm.py#L482-L500 | train |
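A hedged call sketch: the function builds an in-memory CSV with a single Y column and funnels it through parse_csv, so the keys of the returned dict come from field_name_mapper, which is defined elsewhere in the module and not listed here. Note the body uses xrange, so this runs under Python 2 like the rest of klab.

from klab.benchmarking.analysis.ssm import get_std_xy_dataset_statistics

x = [1.0, 2.0, 3.0, 4.0]
y = [1.1, 2.2, 2.9, 4.3]
stats = get_std_xy_dataset_statistics(x, y, expect_negative_correlation=False,
                                      STDev_cutoff=1.0)
print(sorted(stats))  # summary-statistic names mapped by field_name_mapper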
clement-alexandre/TotemBionet | totembionet/src/discrete_model/gene.py | Gene.active_multiplex | def active_multiplex(self, state: 'State') -> Tuple['Multiplex']:
"""
Return a tuple of all the active multiplexes in the given state.
"""
return tuple(multiplex for multiplex in self.multiplexes if multiplex.is_active(state)) | python | def active_multiplex(self, state: 'State') -> Tuple['Multiplex']:
"""
Return a tuple of all the active multiplexes in the given state.
"""
return tuple(multiplex for multiplex in self.multiplexes if multiplex.is_active(state)) | [
"def",
"active_multiplex",
"(",
"self",
",",
"state",
":",
"'State'",
")",
"->",
"Tuple",
"[",
"'Multiplex'",
"]",
":",
"return",
"tuple",
"(",
"multiplex",
"for",
"multiplex",
"in",
"self",
".",
"multiplexes",
"if",
"multiplex",
".",
"is_active",
"(",
"state",
")",
")"
]
| Return a tuple of all the active multiplexes in the given state. | [
"Return",
"a",
"tuple",
"of",
"all",
"the",
"active",
"multiplex",
"in",
"the",
"given",
"state",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/discrete_model/gene.py#L16-L20 | train |
assamite/creamas | creamas/core/agent.py | CreativeAgent.sanitized_name | def sanitized_name(self):
"""Sanitized name of the agent, used for file and directory creation.
"""
a = re.split("[:/]", self.name)
return "_".join([i for i in a if len(i) > 0]) | python | def sanitized_name(self):
"""Sanitized name of the agent, used for file and directory creation.
"""
a = re.split("[:/]", self.name)
return "_".join([i for i in a if len(i) > 0]) | [
"def",
"sanitized_name",
"(",
"self",
")",
":",
"a",
"=",
"re",
".",
"split",
"(",
"\"[:/]\"",
",",
"self",
".",
"name",
")",
"return",
"\"_\"",
".",
"join",
"(",
"[",
"i",
"for",
"i",
"in",
"a",
"if",
"len",
"(",
"i",
")",
">",
"0",
"]",
")"
]
| Sanitized name of the agent, used for file and directory creation. | [
"Sanitized",
"name",
"of",
"the",
"agent",
"used",
"for",
"file",
"and",
"directory",
"creation",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/agent.py#L93-L97 | train |
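The sanitisation itself is plain regex work, so it can be demonstrated standalone. The helper below mirrors the property's body; the sample address format is only illustrative.

import re

def sanitize(name):
    # Mirror of CreativeAgent.sanitized_name: split on ':' and '/',
    # drop empty pieces, rejoin with underscores.
    return "_".join(i for i in re.split("[:/]", name) if len(i) > 0)

print(sanitize("tcp://localhost:5555/0"))  # tcp_localhost_5555_0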
assamite/creamas | creamas/core/agent.py | CreativeAgent.get_connections | def get_connections(self, data=False):
"""Get agent's current connections.
:param bool data:
Also return the data dictionary for each connection.
:returns: A list of agent addresses or a dictionary
"""
if data:
return self._connections
return list(self._connections.keys()) | python | def get_connections(self, data=False):
"""Get agent's current connections.
:param bool data:
Also return the data dictionary for each connection.
:returns: A list of agent addresses or a dictionary
"""
if data:
return self._connections
return list(self._connections.keys()) | [
"def",
"get_connections",
"(",
"self",
",",
"data",
"=",
"False",
")",
":",
"if",
"data",
":",
"return",
"self",
".",
"_connections",
"return",
"list",
"(",
"self",
".",
"_connections",
".",
"keys",
"(",
")",
")"
]
| Get agent's current connections.
:param bool data:
Also return the data dictionary for each connection.
:returns: A list of agent addresses or a dictionary | [
"Get",
"agent",
"s",
"current",
"connections",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/agent.py#L229-L239 | train |
assamite/creamas | creamas/core/agent.py | CreativeAgent.publish | def publish(self, artifact):
"""Publish artifact to agent's environment.
:param artifact: artifact to be published
:type artifact: :py:class:`~creamas.core.artifact.Artifact`
"""
self.env.add_artifact(artifact)
self._log(logging.DEBUG, "Published {} to domain.".format(artifact)) | python | def publish(self, artifact):
"""Publish artifact to agent's environment.
:param artifact: artifact to be published
:type artifact: :py:class:`~creamas.core.artifact.Artifact`
"""
self.env.add_artifact(artifact)
self._log(logging.DEBUG, "Published {} to domain.".format(artifact)) | [
"def",
"publish",
"(",
"self",
",",
"artifact",
")",
":",
"self",
".",
"env",
".",
"add_artifact",
"(",
"artifact",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"Published {} to domain.\"",
".",
"format",
"(",
"artifact",
")",
")"
]
| Publish artifact to agent's environment.
:param artifact: artifact to be published
:type artifact: :py:class:`~creamas.core.artifact.Artifact` | [
"Publish",
"artifact",
"to",
"agent",
"s",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/agent.py#L259-L266 | train |
assamite/creamas | creamas/core/agent.py | CreativeAgent.ask_opinion | async def ask_opinion(self, addr, artifact):
"""Ask an agent's opinion about an artifact.
:param str addr: Address of the agent whose opinion is asked
:type addr: :py:class:`~creamas.core.agent.CreativeAgent`
:param object artifact: artifact to be evaluated
:returns: agent's evaluation of the artifact
:rtype: float
This is a shortcut to::
remote_agent = await self.env.connect(addr)
opinion = await remote_agent.evaluate(artifact)
.. note::
The artifact object should be serializable by the environment.
"""
remote_agent = await self.env.connect(addr)
return await remote_agent.evaluate(artifact) | python | async def ask_opinion(self, addr, artifact):
"""Ask an agent's opinion about an artifact.
:param str addr: Address of the agent whose opinion is asked
:type addr: :py:class:`~creamas.core.agent.CreativeAgent`
:param object artifact: artifact to be evaluated
:returns: agent's evaluation of the artifact
:rtype: float
This is a shortcut to::
remote_agent = await self.env.connect(addr)
opinion = await remote_agent.evaluate(artifact)
.. note::
The artifact object should be serializable by the environment.
"""
remote_agent = await self.env.connect(addr)
return await remote_agent.evaluate(artifact) | [
"async",
"def",
"ask_opinion",
"(",
"self",
",",
"addr",
",",
"artifact",
")",
":",
"remote_agent",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"addr",
")",
"return",
"await",
"remote_agent",
".",
"evaluate",
"(",
"artifact",
")"
]
| Ask an agent's opinion about an artifact.
:param str addr: Address of the agent whose opinion is asked
:type addr: :py:class:`~creamas.core.agent.CreativeAgent`
:param object artifact: artifact to be evaluated
:returns: agent's evaluation of the artifact
:rtype: float
This is a shortcut to::
remote_agent = await self.env.connect(addr)
opinion = await remote_agent.evaluate(artifact)
.. note::
The artifact object should be serializable by the environment. | [
"Ask",
"an",
"agent",
"s",
"opinion",
"about",
"an",
"artifact",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/agent.py#L280-L299 | train |
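A hedged sketch of how the last three helpers combine inside a CreativeAgent subclass. The act hook and the surrounding lifecycle are assumptions about creamas' agent API; only the get_connections, ask_opinion and publish calls come from the records above.

from creamas.core.agent import CreativeAgent

class OpinionatedAgent(CreativeAgent):

    async def act(self, artifact):
        # Poll every known peer, then publish if the average opinion is high.
        opinions = [await self.ask_opinion(addr, artifact)
                    for addr in self.get_connections()]
        if opinions and sum(opinions) / len(opinions) > 0.5:
            self.publish(artifact)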
projectshift/shift-boiler | boiler/feature/localization.py | localization_feature | def localization_feature(app):
"""
Localization feature
This will initialize support for translations and localization of values
such as numbers, money, dates and formatting timezones.
"""
# apply app default to babel
app.config['BABEL_DEFAULT_LOCALE'] = app.config['DEFAULT_LOCALE']
app.config['BABEL_DEFAULT_TIMEZONE'] = app.config['DEFAULT_TIMEZONE']
# init babel
babel = Babel()
babel.init_app(app) | python | def localization_feature(app):
"""
Localization feature
This will initialize support for translations and for localization of values
such as numbers, money, and dates, as well as timezone formatting.
"""
# apply app default to babel
app.config['BABEL_DEFAULT_LOCALE'] = app.config['DEFAULT_LOCALE']
app.config['BABEL_DEFAULT_TIMEZONE'] = app.config['DEFAULT_TIMEZONE']
# init babel
babel = Babel()
babel.init_app(app) | [
"def",
"localization_feature",
"(",
"app",
")",
":",
"# apply app default to babel",
"app",
".",
"config",
"[",
"'BABEL_DEFAULT_LOCALE'",
"]",
"=",
"app",
".",
"config",
"[",
"'DEFAULT_LOCALE'",
"]",
"app",
".",
"config",
"[",
"'BABEL_DEFAULT_TIMEZONE'",
"]",
"=",
"app",
".",
"config",
"[",
"'DEFAULT_TIMEZONE'",
"]",
"# init babel",
"babel",
"=",
"Babel",
"(",
")",
"babel",
".",
"init_app",
"(",
"app",
")"
]
| Localization feature
This will initialize support for translations and for localization of values
such as numbers, money, and dates, as well as timezone formatting. | [
"Localization",
"feature",
"This",
"will",
"initialize",
"support",
"for",
"translations",
"and",
"localization",
"of",
"values",
"such",
"as",
"numbers",
"money",
"dates",
"and",
"formatting",
"timezones",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/localization.py#L4-L17 | train |
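A minimal wiring sketch, assuming the boiler import path from the record and a plain Flask app; the two DEFAULT_* keys are the only inputs the feature reads before handing them to Babel.

from flask import Flask
from boiler.feature.localization import localization_feature

app = Flask(__name__)
app.config['DEFAULT_LOCALE'] = 'en'      # copied to BABEL_DEFAULT_LOCALE
app.config['DEFAULT_TIMEZONE'] = 'UTC'   # copied to BABEL_DEFAULT_TIMEZONE
localization_feature(app)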
adaptive-learning/proso-apps | proso/django/enrichment.py | enrich_json_objects_by_object_type | def enrich_json_objects_by_object_type(request, value):
"""
Take the given value and start enrichment by object_type. The value is first converted to its JSON representation.
Args:
request (django.http.request.HttpRequest): request which is currently processed
value (dict|list|django.db.models.Model):
in case of django.db.models.Model object (or list of these
objects), to_json method is invoked
Returns:
dict|list
"""
time_start_globally = time()
if isinstance(value, list):
json = [x.to_json() if hasattr(x, "to_json") else x for x in value]
else:
if isinstance(value, dict):
json = value
else:
json = value.to_json()
objects, nested = _collect_json_objects(json, by='object_type')
for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():
if len(enricher_info['object_types']) > 0:
enricher_objects = flatten([objects.get(object_type, []) for object_type in enricher_info['object_types']])
enricher_nested = any([nested.get(object_type, False) for object_type in enricher_info['object_types']])
else:
enricher_objects = flatten(objects.values())
enricher_nested = any(nested.values())
if len(enricher_objects) > 0:
time_start = time()
enricher_info['enricher'](request, enricher_objects, enricher_nested)
LOGGER.debug('enrichment "{}" took {} seconds'.format(enricher_info['enricher_name'], time() - time_start))
if not enricher_info['pure']:
# if the enricher modified object types we must collect objects
# again
objects, nested = _collect_json_objects(json, by='object_type')
LOGGER.debug('The whole enrichment of json objects by their object_type took {} seconds.'.format(time() - time_start_globally))
return json | python | def enrich_json_objects_by_object_type(request, value):
"""
Take the given value and start enrichment by object_type. The value is first converted to its JSON representation.
Args:
request (django.http.request.HttpRequest): request which is currently processed
value (dict|list|django.db.models.Model):
in case of django.db.models.Model object (or list of these
objects), to_json method is invoked
Returns:
dict|list
"""
time_start_globally = time()
if isinstance(value, list):
json = [x.to_json() if hasattr(x, "to_json") else x for x in value]
else:
if isinstance(value, dict):
json = value
else:
json = value.to_json()
objects, nested = _collect_json_objects(json, by='object_type')
for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():
if len(enricher_info['object_types']) > 0:
enricher_objects = flatten([objects.get(object_type, []) for object_type in enricher_info['object_types']])
enricher_nested = any([nested.get(object_type, False) for object_type in enricher_info['object_types']])
else:
enricher_objects = flatten(objects.values())
enricher_nested = any(nested.values())
if len(enricher_objects) > 0:
time_start = time()
enricher_info['enricher'](request, enricher_objects, enricher_nested)
LOGGER.debug('enrichment "{}" took {} seconds'.format(enricher_info['enricher_name'], time() - time_start))
if not enricher_info['pure']:
# if the enricher modified object types we must collect objects
# again
objects, nested = _collect_json_objects(json, by='object_type')
LOGGER.debug('The whole enrichment of json objects by their object_type took {} seconds.'.format(time() - time_start_globally))
return json | [
"def",
"enrich_json_objects_by_object_type",
"(",
"request",
",",
"value",
")",
":",
"time_start_globally",
"=",
"time",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"json",
"=",
"[",
"x",
".",
"to_json",
"(",
")",
"if",
"hasattr",
"(",
"x",
",",
"\"to_json\"",
")",
"else",
"x",
"for",
"x",
"in",
"value",
"]",
"else",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"json",
"=",
"value",
"else",
":",
"json",
"=",
"value",
".",
"to_json",
"(",
")",
"objects",
",",
"nested",
"=",
"_collect_json_objects",
"(",
"json",
",",
"by",
"=",
"'object_type'",
")",
"for",
"enricher_info",
"in",
"_get_OBJECT_TYPE_ENRICHER_ORDER",
"(",
")",
":",
"if",
"len",
"(",
"enricher_info",
"[",
"'object_types'",
"]",
")",
">",
"0",
":",
"enricher_objects",
"=",
"flatten",
"(",
"[",
"objects",
".",
"get",
"(",
"object_type",
",",
"[",
"]",
")",
"for",
"object_type",
"in",
"enricher_info",
"[",
"'object_types'",
"]",
"]",
")",
"enricher_nested",
"=",
"any",
"(",
"[",
"nested",
".",
"get",
"(",
"object_type",
",",
"False",
")",
"for",
"object_type",
"in",
"enricher_info",
"[",
"'object_types'",
"]",
"]",
")",
"else",
":",
"enricher_objects",
"=",
"flatten",
"(",
"objects",
".",
"values",
"(",
")",
")",
"enricher_nested",
"=",
"any",
"(",
"nested",
".",
"values",
"(",
")",
")",
"if",
"len",
"(",
"enricher_objects",
")",
">",
"0",
":",
"time_start",
"=",
"time",
"(",
")",
"enricher_info",
"[",
"'enricher'",
"]",
"(",
"request",
",",
"enricher_objects",
",",
"enricher_nested",
")",
"LOGGER",
".",
"debug",
"(",
"'enrichment \"{}\" took {} seconds'",
".",
"format",
"(",
"enricher_info",
"[",
"'enricher_name'",
"]",
",",
"time",
"(",
")",
"-",
"time_start",
")",
")",
"if",
"not",
"enricher_info",
"[",
"'pure'",
"]",
":",
"# if the enricher modified object types we must collect objects",
"# again",
"objects",
",",
"nested",
"=",
"_collect_json_objects",
"(",
"json",
",",
"by",
"=",
"'object_type'",
")",
"LOGGER",
".",
"debug",
"(",
"'The whole enrichment of json objects by their object_type took {} seconds.'",
".",
"format",
"(",
"time",
"(",
")",
"-",
"time_start_globally",
")",
")",
"return",
"json"
]
| Take the given value and start enrichment by object_type. The value is first converted to its JSON representation.
Args:
request (django.http.request.HttpRequest): request which is currently processed
value (dict|list|django.db.models.Model):
in case of django.db.models.Model object (or list of these
objects), to_json method is invoked
Returns:
dict|list | [
"Take",
"the",
"given",
"value",
"and",
"start",
"enrichment",
"by",
"object_type",
".",
"The",
"va"
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/django/enrichment.py#L70-L108 | train |
adaptive-learning/proso-apps | proso/django/enrichment.py | enrich_by_predicate | def enrich_by_predicate(request, json, fun, predicate, skip_nested=False, **kwargs):
"""
Take the JSON, find all its subparts satisfying the given condition and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_predicate
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_predicate(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
lambda x: True
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'},
{'enriched': True, 'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is applied on the found objects
predicate: function which is applied on all objects to determine which
objects should be processed further
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON
"""
time_start = time()
collected = []
memory = {'nested': False}
def _collect(json_inner, nested):
if nested and skip_nested:
return
if isinstance(json_inner, list):
list(map(lambda x: _collect(x, nested), json_inner))
elif isinstance(json_inner, dict):
if predicate(json_inner):
collected.append(json_inner)
if nested:
memory['nested'] = True
list(map(lambda x: _collect(x, True), list(json_inner.values())))
_collect(json, False)
if len(collected) > 0:
fun(request, collected, memory['nested'], **kwargs)
LOGGER.debug("enrichment of JSON by predicate by '%s' function took %s seconds", fun.__name__, (time() - time_start))
return json | python | def enrich_by_predicate(request, json, fun, predicate, skip_nested=False, **kwargs):
"""
Take the JSON, find all its subparts satisfying the given condition and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_predicate
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_predicate(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
lambda x: True
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'},
{'enriched': True, 'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is applied on the found objects
predicate: function which is applied on all objects to determine which
objects should be processed further
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON
"""
time_start = time()
collected = []
memory = {'nested': False}
def _collect(json_inner, nested):
if nested and skip_nested:
return
if isinstance(json_inner, list):
list(map(lambda x: _collect(x, nested), json_inner))
elif isinstance(json_inner, dict):
if predicate(json_inner):
collected.append(json_inner)
if nested:
memory['nested'] = True
list(map(lambda x: _collect(x, True), list(json_inner.values())))
_collect(json, False)
if len(collected) > 0:
fun(request, collected, memory['nested'], **kwargs)
LOGGER.debug("enrichment of JSON by predicate by '%s' function took %s seconds", fun.__name__, (time() - time_start))
return json | [
"def",
"enrich_by_predicate",
"(",
"request",
",",
"json",
",",
"fun",
",",
"predicate",
",",
"skip_nested",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"time_start",
"=",
"time",
"(",
")",
"collected",
"=",
"[",
"]",
"memory",
"=",
"{",
"'nested'",
":",
"False",
"}",
"def",
"_collect",
"(",
"json_inner",
",",
"nested",
")",
":",
"if",
"nested",
"and",
"skip_nested",
":",
"return",
"if",
"isinstance",
"(",
"json_inner",
",",
"list",
")",
":",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"_collect",
"(",
"x",
",",
"nested",
")",
",",
"json_inner",
")",
")",
"elif",
"isinstance",
"(",
"json_inner",
",",
"dict",
")",
":",
"if",
"predicate",
"(",
"json_inner",
")",
":",
"collected",
".",
"append",
"(",
"json_inner",
")",
"if",
"nested",
":",
"memory",
"[",
"'nested'",
"]",
"=",
"True",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"_collect",
"(",
"x",
",",
"True",
")",
",",
"list",
"(",
"json_inner",
".",
"values",
"(",
")",
")",
")",
")",
"_collect",
"(",
"json",
",",
"False",
")",
"if",
"len",
"(",
"collected",
")",
">",
"0",
":",
"fun",
"(",
"request",
",",
"collected",
",",
"memory",
"[",
"'nested'",
"]",
",",
"*",
"*",
"kwargs",
")",
"LOGGER",
".",
"debug",
"(",
"\"enrichment of JSON by predicate by '%s' function took %s seconds\"",
",",
"fun",
".",
"__name__",
",",
"(",
"time",
"(",
")",
"-",
"time_start",
")",
")",
"return",
"json"
]
| Take the JSON, find all its subparts satisfying the given condition and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_predicate
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_predicate(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
lambda x: True
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'},
{'enriched': True, 'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is applied on the found objects
predicate: function which is applied on all objects to determine which
objects should be processed further
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON | [
"Take",
"the",
"JSON",
"find",
"all",
"its",
"subparts",
"satisfying",
"the",
"given",
"condition",
"and",
"them",
"by",
"the",
"given",
"function",
".",
"Other",
"key",
"-",
"word",
"arguments",
"are",
"passed",
"to",
"the",
"function",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/django/enrichment.py#L139-L201 | train |
adaptive-learning/proso-apps | proso/django/enrichment.py | enrich_by_object_type | def enrich_by_object_type(request, json, fun, object_type, skip_nested=False, **kwargs):
"""
Take the JSON, find its subparts having the given object type and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_object_type
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_object_type(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
['example_1']
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'}, {'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is applied on the found objects
object_type(str|list): object type or list of object types
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON
"""
if not isinstance(object_type, list):
object_type = [object_type]
predicate = lambda x: 'object_type' in x and x['object_type'] in object_type
return enrich_by_predicate(request, json, fun, predicate, skip_nested=skip_nested, **kwargs) | python | def enrich_by_object_type(request, json, fun, object_type, skip_nested=False, **kwargs):
"""
Take the JSON, find its subparts having the given object type and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_object_type
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_object_type(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
['example_1']
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'}, {'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is applied on the found objects
object_type(str|list): object type or list of object types
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON
"""
if not isinstance(object_type, list):
object_type = [object_type]
predicate = lambda x: 'object_type' in x and x['object_type'] in object_type
return enrich_by_predicate(request, json, fun, predicate, skip_nested=skip_nested, **kwargs) | [
"def",
"enrich_by_object_type",
"(",
"request",
",",
"json",
",",
"fun",
",",
"object_type",
",",
"skip_nested",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"object_type",
",",
"list",
")",
":",
"object_type",
"=",
"[",
"object_type",
"]",
"predicate",
"=",
"lambda",
"x",
":",
"'object_type'",
"in",
"x",
"and",
"x",
"[",
"'object_type'",
"]",
"in",
"object_type",
"return",
"enrich_by_predicate",
"(",
"request",
",",
"json",
",",
"fun",
",",
"predicate",
",",
"skip_nested",
"=",
"skip_nested",
",",
"*",
"*",
"kwargs",
")"
]
| Take the JSON, find its subparts having the given object type and transform
them by the given function. Other key-word arguments are passed to the function.
.. testsetup::
from pprint import pprint
from proso.django.enrichment import enrich_by_object_type
request = None
.. testcode::
def enricher(request, json_list, nested):
for json_object in json_list:
json_object['enriched'] = True
enriched = enrich_by_object_type(
request,
[{'object_type': 'example_1'}, {'object_type': 'example_2'}],
enricher,
['example_1']
)
pprint(enriched)
.. testoutput::
[{'enriched': True, 'object_type': 'example_1'}, {'object_type': 'example_2'}]
Args:
request (django.http.request.HttpRequest): request which is currently processed
json (list|dict): in-memory representation of JSON
fun: function which is be applied on found objects
object_type(str|list): object type or list of object types
skip_nested: ignore nested objects
Returns:
list|dict: transformed JSON | [
"Take",
"the",
"JSON",
"find",
"its",
"subparts",
"having",
"the",
"given",
"object",
"part",
"and",
"transform",
"them",
"by",
"the",
"given",
"function",
".",
"Other",
"key",
"-",
"word",
"arguments",
"are",
"passed",
"to",
"the",
"function",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/django/enrichment.py#L204-L247 | train |
adaptive-learning/proso-apps | proso_flashcards/models.py | change_parent | def change_parent(sender, instance, **kwargs):
"""
When the given flashcard has changed, look at its term and context and update
the corresponding item relations.
"""
if instance.id is None:
return
if len({'term', 'term_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
parent = diff['term'][0] if 'term' in diff else diff['term_id'][0]
child_id = instance.item_id
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
ItemRelation.objects.get_or_create(parent_id=instance.term.item_id, child_id=child_id, visible=True)
if len({'term_secondary', 'term_secondary_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
child_id = instance.item_id
parent = diff['term_secondary'][0] if 'term_secondary' in diff else diff['term_secondary_id'][0]
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
if instance.term_secondary is not None or instance.term_secondary_id is not None:
ItemRelation.objects.get_or_create(parent_id=instance.term_secondary.item_id, child_id=child_id, visible=True)
if len({'context', 'context_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
parent = diff['context'][0] if 'context' in diff else diff['context_id'][0]
child_id = instance.item_id
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Context) else Context.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
ItemRelation.objects.get_or_create(parent_id=instance.context.item_id, child_id=child_id, visible=True) | python | def change_parent(sender, instance, **kwargs):
"""
When the given flashcard has changed, look at its term and context and update
the corresponding item relations.
"""
if instance.id is None:
return
if len({'term', 'term_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
parent = diff['term'][0] if 'term' in diff else diff['term_id'][0]
child_id = instance.item_id
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
ItemRelation.objects.get_or_create(parent_id=instance.term.item_id, child_id=child_id, visible=True)
if len({'term_secondary', 'term_secondary_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
child_id = instance.item_id
parent = diff['term_secondary'][0] if 'term_secondary' in diff else diff['term_secondary_id'][0]
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
if instance.term_secondary is not None or instance.term_secondary_id is not None:
ItemRelation.objects.get_or_create(parent_id=instance.term_secondary.item_id, child_id=child_id, visible=True)
if len({'context', 'context_id'} & set(instance.changed_fields)) != 0:
diff = instance.diff
parent = diff['context'][0] if 'context' in diff else diff['context_id'][0]
child_id = instance.item_id
if parent is not None:
parent_id = parent.item_id if isinstance(parent, Context) else Context.objects.get(pk=parent).item_id
ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
ItemRelation.objects.get_or_create(parent_id=instance.context.item_id, child_id=child_id, visible=True) | [
"def",
"change_parent",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"instance",
".",
"id",
"is",
"None",
":",
"return",
"if",
"len",
"(",
"{",
"'term'",
",",
"'term_id'",
"}",
"&",
"set",
"(",
"instance",
".",
"changed_fields",
")",
")",
"!=",
"0",
":",
"diff",
"=",
"instance",
".",
"diff",
"parent",
"=",
"diff",
"[",
"'term'",
"]",
"[",
"0",
"]",
"if",
"'term'",
"in",
"diff",
"else",
"diff",
"[",
"'term_id'",
"]",
"[",
"0",
"]",
"child_id",
"=",
"instance",
".",
"item_id",
"if",
"parent",
"is",
"not",
"None",
":",
"parent_id",
"=",
"parent",
".",
"item_id",
"if",
"isinstance",
"(",
"parent",
",",
"Term",
")",
"else",
"Term",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"parent",
")",
".",
"item_id",
"ItemRelation",
".",
"objects",
".",
"filter",
"(",
"parent_id",
"=",
"parent_id",
",",
"child_id",
"=",
"child_id",
")",
".",
"delete",
"(",
")",
"ItemRelation",
".",
"objects",
".",
"get_or_create",
"(",
"parent_id",
"=",
"instance",
".",
"term",
".",
"item_id",
",",
"child_id",
"=",
"child_id",
",",
"visible",
"=",
"True",
")",
"if",
"len",
"(",
"{",
"'term_secondary'",
",",
"'term_secondary_id'",
"}",
"&",
"set",
"(",
"instance",
".",
"changed_fields",
")",
")",
"!=",
"0",
":",
"diff",
"=",
"instance",
".",
"diff",
"child_id",
"=",
"instance",
".",
"item_id",
"parent",
"=",
"diff",
"[",
"'term_secondary'",
"]",
"[",
"0",
"]",
"if",
"'term_secondary'",
"in",
"diff",
"else",
"diff",
"[",
"'term_secondary_id'",
"]",
"[",
"0",
"]",
"if",
"parent",
"is",
"not",
"None",
":",
"parent_id",
"=",
"parent",
".",
"item_id",
"if",
"isinstance",
"(",
"parent",
",",
"Term",
")",
"else",
"Term",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"parent",
")",
".",
"item_id",
"ItemRelation",
".",
"objects",
".",
"filter",
"(",
"parent_id",
"=",
"parent_id",
",",
"child_id",
"=",
"child_id",
")",
".",
"delete",
"(",
")",
"if",
"instance",
".",
"term_secondary",
"is",
"not",
"None",
"or",
"instance",
".",
"term_secondary_id",
"is",
"not",
"None",
":",
"ItemRelation",
".",
"objects",
".",
"get_or_create",
"(",
"parent_id",
"=",
"instance",
".",
"term_secondary",
".",
"item_id",
",",
"child_id",
"=",
"child_id",
",",
"visible",
"=",
"True",
")",
"if",
"len",
"(",
"{",
"'context'",
",",
"'context_id'",
"}",
"&",
"set",
"(",
"instance",
".",
"changed_fields",
")",
")",
"!=",
"0",
":",
"diff",
"=",
"instance",
".",
"diff",
"parent",
"=",
"diff",
"[",
"'context'",
"]",
"[",
"0",
"]",
"if",
"'context'",
"in",
"diff",
"else",
"diff",
"[",
"'context_id'",
"]",
"[",
"0",
"]",
"child_id",
"=",
"instance",
".",
"item_id",
"if",
"parent",
"is",
"not",
"None",
":",
"parent_id",
"=",
"parent",
".",
"item_id",
"if",
"isinstance",
"(",
"parent",
",",
"Context",
")",
"else",
"Context",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"parent",
")",
".",
"item_id",
"ItemRelation",
".",
"objects",
".",
"filter",
"(",
"parent_id",
"=",
"parent_id",
",",
"child_id",
"=",
"child_id",
")",
".",
"delete",
"(",
")",
"ItemRelation",
".",
"objects",
".",
"get_or_create",
"(",
"parent_id",
"=",
"instance",
".",
"context",
".",
"item_id",
",",
"child_id",
"=",
"child_id",
",",
"visible",
"=",
"True",
")"
]
| When the given flashcard has changed, look at its term and context and update
the corresponding item relations. | [
"When",
"the",
"given",
"flashcard",
"has",
"changed",
".",
"Look",
"at",
"term",
"and",
"context",
"and",
"change",
"the",
"corresponding",
"item",
"relation",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_flashcards/models.py#L367-L398 | train |
Kortemme-Lab/klab | klab/bio/bonsai.py | example | def example():
'''This section gives examples of how to use the module.'''
# 1a8d is an example from the loops benchmark
# 1lfa contains hydrogens
b = Bonsai.retrieve('1lfa', cache_dir='/tmp')
search_radius = 10.0
atom_of_interest = b.get_atom(1095)
nearby_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius)
for na in nearby_atoms:
assert(na - atom_of_interest <= search_radius)
for fa in b.get_atom_set_complement(nearby_atoms):
assert(fa - atom_of_interest > search_radius)
# Get all heavy atoms within the radius (including HETATM)
nearby_heavy_atoms = b.find_heavy_atoms_near_atom(atom_of_interest, search_radius)
# Get all C-alpha atoms within the radius
nearby_ca_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius, atom_names_to_include = ['CA'])
# Get all carbon atoms within the radius
nearby_c_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius, atom_names_to_include = b.get_atom_names_by_group(['C'])) | python | def example():
'''This function gives examples of how to use the module.'''
# 1a8d is an example from the loops benchmark
# 1lfa contains hydrogens
b = Bonsai.retrieve('1lfa', cache_dir='/tmp')
search_radius = 10.0
atom_of_interest = b.get_atom(1095)
nearby_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius)
for na in nearby_atoms:
assert(na - atom_of_interest <= search_radius)
for fa in b.get_atom_set_complement(nearby_atoms):
assert(fa - atom_of_interest > search_radius)
# Get all heavy atoms within the radius (including HETATM)
nearby_heavy_atoms = b.find_heavy_atoms_near_atom(atom_of_interest, search_radius)
# Get all C-alpha atoms within the radius
nearby_ca_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius, atom_names_to_include = ['CA'])
# Get all carbon atoms within the radius
nearby_c_atoms = b.find_atoms_near_atom(atom_of_interest, search_radius, atom_names_to_include = b.get_atom_names_by_group(['C'])) | [
"def",
"example",
"(",
")",
":",
"# 1a8d is an example from the loops benchmark",
"# 1lfa contains hydrogens",
"b",
"=",
"Bonsai",
".",
"retrieve",
"(",
"'1lfa'",
",",
"cache_dir",
"=",
"'/tmp'",
")",
"search_radius",
"=",
"10.0",
"atom_of_interest",
"=",
"b",
".",
"get_atom",
"(",
"1095",
")",
"nearby_atoms",
"=",
"b",
".",
"find_atoms_near_atom",
"(",
"atom_of_interest",
",",
"search_radius",
")",
"for",
"na",
"in",
"nearby_atoms",
":",
"assert",
"(",
"na",
"-",
"atom_of_interest",
"<=",
"search_radius",
")",
"for",
"fa",
"in",
"b",
".",
"get_atom_set_complement",
"(",
"nearby_atoms",
")",
":",
"assert",
"(",
"fa",
"-",
"atom_of_interest",
">",
"search_radius",
")",
"# Get all heavy atoms within the radius (including HETATM)",
"nearby_heavy_atoms",
"=",
"b",
".",
"find_heavy_atoms_near_atom",
"(",
"atom_of_interest",
",",
"search_radius",
")",
"# Get all C-alpha atoms within the radius",
"nearby_ca_atoms",
"=",
"b",
".",
"find_atoms_near_atom",
"(",
"atom_of_interest",
",",
"search_radius",
",",
"atom_names_to_include",
"=",
"[",
"'CA'",
"]",
")",
"# Get all carbon atoms within the radius",
"nearby_c_atoms",
"=",
"b",
".",
"find_atoms_near_atom",
"(",
"atom_of_interest",
",",
"search_radius",
",",
"atom_names_to_include",
"=",
"b",
".",
"get_atom_names_by_group",
"(",
"[",
"'C'",
"]",
")",
")"
]
| This function gives examples of how to use the module. | [
"This",
"section",
"gives",
"examples",
"of",
"how",
"to",
"use",
"the",
"module",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L788-L810 | train |
Kortemme-Lab/klab | klab/bio/bonsai.py | PDBSection.from_non_aligned_residue_IDs | def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None):
'''A more forgiving method that does not care about the padding of the residue IDs.'''
return PDBSection(Chain, PDB.ResidueID2String(StartResidueID), PDB.ResidueID2String(EndResidueID), Sequence = Sequence) | python | def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None):
'''A more forgiving method that does not care about the padding of the residue IDs.'''
return PDBSection(Chain, PDB.ResidueID2String(StartResidueID), PDB.ResidueID2String(EndResidueID), Sequence = Sequence) | [
"def",
"from_non_aligned_residue_IDs",
"(",
"Chain",
",",
"StartResidueID",
",",
"EndResidueID",
",",
"Sequence",
"=",
"None",
")",
":",
"return",
"PDBSection",
"(",
"Chain",
",",
"PDB",
".",
"ResidueID2String",
"(",
"StartResidueID",
")",
",",
"PDB",
".",
"ResidueID2String",
"(",
"EndResidueID",
")",
",",
"Sequence",
"=",
"Sequence",
")"
]
| A more forgiving method that does not care about the padding of the residue IDs. | [
"A",
"more",
"forgiving",
"method",
"that",
"does",
"not",
"care",
"about",
"the",
"padding",
"of",
"the",
"residue",
"IDs",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L63-L65 | train |
Kortemme-Lab/klab | klab/bio/bonsai.py | ResidueIndexedPDBFile.bin_atoms | def bin_atoms(self):
'''This function bins the Atoms into fixed-size sections of the protein space in 3D.'''
# Create the atom bins
low_point = numpy.array([self.min_x, self.min_y, self.min_z])
high_point = numpy.array([self.max_x, self.max_y, self.max_z])
atom_bin_dimensions = numpy.ceil((high_point - low_point) / self.bin_size)
self.atom_bin_dimensions = (int(atom_bin_dimensions[0]) - 1, int(atom_bin_dimensions[1]) - 1, int(atom_bin_dimensions[2]) - 1)
atom_bins = []
for x in range(int(atom_bin_dimensions[0])):
atom_bins.append([])
for y in range(int(atom_bin_dimensions[1])):
atom_bins[x].append([])
for z in range(int(atom_bin_dimensions[2])):
atom_bins[x][y].append(Bin(x, y, z))
# Assign each Atom to a bin
for serial_number, atom in self.atoms.iteritems():
bin_location = numpy.trunc((atom.point - low_point) / self.bin_size)
bin = atom_bins[int(bin_location[0])][int(bin_location[1])][int(bin_location[2])]
bin.append(atom)
atom.set_bin(bin)
# Sanity check
if self.safe_mode:
num_atoms = 0
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
num_atoms += len(atom_bins[x][y][z])
assert(num_atoms == len(self.atoms))
# Snip empty sections (saves a little space after garbage collection - space savings increase with the number of empty arrays in the matrix)
blank_section = ()
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
if not atom_bins[x][y][z]:
atom_bins[x][y][z] = blank_section
self.atom_bins = atom_bins | python | def bin_atoms(self):
'''This function bins the Atoms into fixed-size sections of the protein space in 3D.'''
# Create the atom bins
low_point = numpy.array([self.min_x, self.min_y, self.min_z])
high_point = numpy.array([self.max_x, self.max_y, self.max_z])
atom_bin_dimensions = numpy.ceil((high_point - low_point) / self.bin_size)
self.atom_bin_dimensions = (int(atom_bin_dimensions[0]) - 1, int(atom_bin_dimensions[1]) - 1, int(atom_bin_dimensions[2]) - 1)
atom_bins = []
for x in range(int(atom_bin_dimensions[0])):
atom_bins.append([])
for y in range(int(atom_bin_dimensions[1])):
atom_bins[x].append([])
for z in range(int(atom_bin_dimensions[2])):
atom_bins[x][y].append(Bin(x, y, z))
# Assign each Atom to a bin
for serial_number, atom in self.atoms.iteritems():
bin_location = numpy.trunc((atom.point - low_point) / self.bin_size)
bin = atom_bins[int(bin_location[0])][int(bin_location[1])][int(bin_location[2])]
bin.append(atom)
atom.set_bin(bin)
# Sanity check
if self.safe_mode:
num_atoms = 0
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
num_atoms += len(atom_bins[x][y][z])
assert(num_atoms == len(self.atoms))
# Snip empty sections (saves a little space after garbage collection - space savings increase with the number of empty arrays in the matrix)
blank_section = ()
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
if not atom_bins[x][y][z]:
atom_bins[x][y][z] = blank_section
self.atom_bins = atom_bins | [
"def",
"bin_atoms",
"(",
"self",
")",
":",
"# Create the atom bins",
"low_point",
"=",
"numpy",
".",
"array",
"(",
"[",
"self",
".",
"min_x",
",",
"self",
".",
"min_y",
",",
"self",
".",
"min_z",
"]",
")",
"high_point",
"=",
"numpy",
".",
"array",
"(",
"[",
"self",
".",
"max_x",
",",
"self",
".",
"max_y",
",",
"self",
".",
"max_z",
"]",
")",
"atom_bin_dimensions",
"=",
"numpy",
".",
"ceil",
"(",
"(",
"high_point",
"-",
"low_point",
")",
"/",
"self",
".",
"bin_size",
")",
"self",
".",
"atom_bin_dimensions",
"=",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"0",
"]",
")",
"-",
"1",
",",
"int",
"(",
"atom_bin_dimensions",
"[",
"1",
"]",
")",
"-",
"1",
",",
"int",
"(",
"atom_bin_dimensions",
"[",
"2",
"]",
")",
"-",
"1",
")",
"atom_bins",
"=",
"[",
"]",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"0",
"]",
")",
")",
":",
"atom_bins",
".",
"append",
"(",
"[",
"]",
")",
"for",
"y",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"1",
"]",
")",
")",
":",
"atom_bins",
"[",
"x",
"]",
".",
"append",
"(",
"[",
"]",
")",
"for",
"z",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"2",
"]",
")",
")",
":",
"atom_bins",
"[",
"x",
"]",
"[",
"y",
"]",
".",
"append",
"(",
"Bin",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
"# Assign each Atom to a bin",
"for",
"serial_number",
",",
"atom",
"in",
"self",
".",
"atoms",
".",
"iteritems",
"(",
")",
":",
"bin_location",
"=",
"numpy",
".",
"trunc",
"(",
"(",
"atom",
".",
"point",
"-",
"low_point",
")",
"/",
"self",
".",
"bin_size",
")",
"bin",
"=",
"atom_bins",
"[",
"int",
"(",
"bin_location",
"[",
"0",
"]",
")",
"]",
"[",
"int",
"(",
"bin_location",
"[",
"1",
"]",
")",
"]",
"[",
"int",
"(",
"bin_location",
"[",
"2",
"]",
")",
"]",
"bin",
".",
"append",
"(",
"atom",
")",
"atom",
".",
"set_bin",
"(",
"bin",
")",
"# Sanity_check",
"if",
"self",
".",
"safe_mode",
":",
"num_atoms",
"=",
"0",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"0",
"]",
")",
")",
":",
"for",
"y",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"1",
"]",
")",
")",
":",
"for",
"z",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"2",
"]",
")",
")",
":",
"num_atoms",
"+=",
"len",
"(",
"atom_bins",
"[",
"x",
"]",
"[",
"y",
"]",
"[",
"z",
"]",
")",
"assert",
"(",
"num_atoms",
"==",
"len",
"(",
"self",
".",
"atoms",
")",
")",
"# Snip empty sections (saves a little space after garbage collection - space savings increase with the number of empty arrays in the matrix)",
"blank_section",
"=",
"(",
")",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"0",
"]",
")",
")",
":",
"for",
"y",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"1",
"]",
")",
")",
":",
"for",
"z",
"in",
"range",
"(",
"int",
"(",
"atom_bin_dimensions",
"[",
"2",
"]",
")",
")",
":",
"if",
"not",
"atom_bins",
"[",
"x",
"]",
"[",
"y",
"]",
"[",
"z",
"]",
":",
"atom_bins",
"[",
"x",
"]",
"[",
"y",
"]",
"[",
"z",
"]",
"=",
"blank_section",
"self",
".",
"atom_bins",
"=",
"atom_bins"
]
| This function bins the Atoms into fixed-size sections of the protein space in 3D. | [
"This",
"function",
"bins",
"the",
"Atoms",
"into",
"fixed",
"-",
"size",
"sections",
"of",
"the",
"protein",
"space",
"in",
"3D",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L308-L348 | train |
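A self-contained sketch of the same binning idea: shift each point by the bounding-box minimum, integer-divide by the bin size, and use the resulting (i, j, k) triple as the cell key. Storing cells in a dict keyed by that triple is an alternative to the dense nested-list matrix that sidesteps the empty-section snipping above:

```python
import numpy as np

def bin_points(points, bin_size):
    """Group 3-D points into cubic cells with edge length bin_size."""
    points = np.asarray(points, dtype=float)
    low = points.min(axis=0)  # plays the role of (min_x, min_y, min_z)
    cells = {}
    for index, point in enumerate(points):
        key = tuple(int(i) for i in np.trunc((point - low) / bin_size))
        cells.setdefault(key, []).append(index)
    return cells

cells = bin_points([[0.0, 0.0, 0.0], [1.2, 0.1, 0.3], [9.9, 9.9, 9.9]], bin_size=5.0)
print(cells)  # {(0, 0, 0): [0, 1], (1, 1, 1): [2]}
```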
Kortemme-Lab/klab | klab/bio/bonsai.py | Bonsai.find_heavy_atoms_near_atom | def find_heavy_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), restrict_to_CA = False):
'''atom_hit_cache is a set of atom serial numbers which have already been tested. We keep track of these to avoid recalculating the distance.
'''
#todo: Benchmark atom_hit_cache to see if it actually speeds up the search
non_heavy_atoms = self.get_atom_names_by_group(set(['H', 'D', 'T']))
return self.find_atoms_near_atom(source_atom, search_radius, atom_names_to_exclude = non_heavy_atoms, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA) | python | def find_heavy_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), restrict_to_CA = False):
'''atom_hit_cache is a set of atom serial numbers which have already been tested. We keep track of these to avoid recalculating the distance.
'''
#todo: Benchmark atom_hit_cache to see if it actually speeds up the search
non_heavy_atoms = self.get_atom_names_by_group(set(['H', 'D', 'T']))
return self.find_atoms_near_atom(source_atom, search_radius, atom_names_to_exclude = non_heavy_atoms, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA) | [
"def",
"find_heavy_atoms_near_atom",
"(",
"self",
",",
"source_atom",
",",
"search_radius",
",",
"atom_hit_cache",
"=",
"set",
"(",
")",
",",
"restrict_to_CA",
"=",
"False",
")",
":",
"#todo: Benchmark atom_hit_cache to see if it actually speeds up the search",
"non_heavy_atoms",
"=",
"self",
".",
"get_atom_names_by_group",
"(",
"set",
"(",
"[",
"'H'",
",",
"'D'",
",",
"'T'",
"]",
")",
")",
"return",
"self",
".",
"find_atoms_near_atom",
"(",
"source_atom",
",",
"search_radius",
",",
"atom_names_to_exclude",
"=",
"non_heavy_atoms",
",",
"atom_hit_cache",
"=",
"atom_hit_cache",
",",
"restrict_to_CA",
"=",
"restrict_to_CA",
")"
]
| atom_hit_cache is a set of atom serial numbers which have already been tested. We keep track of these to avoid recalculating the distance. | [
"atom_hit_cache",
"is",
"a",
"set",
"of",
"atom",
"serial",
"numbers",
"which",
"have",
"already",
"been",
"tested",
".",
"We",
"keep",
"track",
"of",
"these",
"to",
"avoid",
"recalculating",
"the",
"distance",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/bonsai.py#L414-L420 | train |
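The method itself is a thin wrapper: collect the atom names for the hydrogen isotopes (H, D, T) and pass them to the generic search as an exclusion list. A standalone sketch of that compose-by-filtering idea; the name table here is purely illustrative, not the module's actual element-to-name mapping:

```python
# Illustrative element -> PDB atom-name table (not the real, full mapping).
ELEMENT_NAMES = {
    'H': ['H', 'HA', 'HB1'],
    'D': ['D'],
    'T': ['T'],
    'C': ['C', 'CA', 'CB'],
}

def atom_names_by_group(groups):
    """Flatten the name lists for the requested element groups."""
    return {name for group in groups for name in ELEMENT_NAMES.get(group, [])}

def heavy_atoms_only(atoms):
    """Drop hydrogen-like atoms, the way find_heavy_atoms_near_atom does."""
    excluded = atom_names_by_group({'H', 'D', 'T'})
    return [atom for atom in atoms if atom['name'] not in excluded]

print(heavy_atoms_only([{'name': 'CA'}, {'name': 'HA'}]))  # [{'name': 'CA'}]
```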
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config.get | def get(self, attr_name, *args):
""" Get the most retrieval attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
:param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value:
"""
if not isinstance(attr_name, six.string_types):
raise TypeError('attr_name must be a str.')
# allow retrieval of data with alias or normalized name
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
parent_attr = self
attr = getattr(parent_attr, attr_name, None)
for arg in args:
if not isinstance(arg, six.string_types):
raise TypeError(
'each additional argument must be a string. {0} was not a string'.format(arg)
)
if hasattr(parent_attr, arg):
parent_attr = getattr(parent_attr, arg)
if hasattr(parent_attr, attr_name):
attr = getattr(parent_attr, attr_name)
else:
pass
return attr | python | def get(self, attr_name, *args):
""" Get the most retrieval attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
:param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value:
"""
if not isinstance(attr_name, six.string_types):
raise TypeError('attr_name must be a str.')
# allow retrieval of data with alias or normalized name
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
parent_attr = self
attr = getattr(parent_attr, attr_name, None)
for arg in args:
if not isinstance(arg, six.string_types):
raise TypeError(
'each additional argument must be a string. {0} was not a string'.format(arg)
)
if hasattr(parent_attr, arg):
parent_attr = getattr(parent_attr, arg)
if hasattr(parent_attr, attr_name):
attr = getattr(parent_attr, attr_name)
else:
pass
return attr | [
"def",
"get",
"(",
"self",
",",
"attr_name",
",",
"*",
"args",
")",
":",
"if",
"not",
"isinstance",
"(",
"attr_name",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'attr_name must be a str.'",
")",
"# allow retrieval of data with alias or normalized name",
"if",
"'-'",
"in",
"attr_name",
":",
"attr_name",
"=",
"attr_name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"parent_attr",
"=",
"self",
"attr",
"=",
"getattr",
"(",
"parent_attr",
",",
"attr_name",
",",
"None",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'each additional argument must be a string. {0} was not a string'",
".",
"format",
"(",
"arg",
")",
")",
"if",
"hasattr",
"(",
"parent_attr",
",",
"arg",
")",
":",
"parent_attr",
"=",
"getattr",
"(",
"parent_attr",
",",
"arg",
")",
"if",
"hasattr",
"(",
"parent_attr",
",",
"attr_name",
")",
":",
"attr",
"=",
"getattr",
"(",
"parent_attr",
",",
"attr_name",
")",
"else",
":",
"pass",
"return",
"attr"
]
| Get the most specific attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
:param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value: | [
"Get",
"the",
"most",
"retrieval",
"attribute",
"in",
"the",
"configuration",
"file",
".",
"This",
"method",
"will",
"recursively",
"look",
"through",
"the",
"configuration",
"file",
"for",
"the",
"attribute",
"specified",
"and",
"return",
"the",
"last",
"found",
"value",
"or",
"None",
".",
"The",
"values",
"can",
"be",
"referenced",
"by",
"the",
"key",
"name",
"provided",
"in",
"the",
"configuration",
"file",
"or",
"that",
"value",
"normalized",
"with",
"snake_casing",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L883-L924 | train |
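A standalone sketch of the cascading lookup the docstring describes: walk the named ancestors from the root, letting each level that defines the attribute override the one above it. SimpleNamespace stands in for the configuration node objects:

```python
from types import SimpleNamespace

def cascading_get(root, attr_name, *path):
    """Return the deepest value of attr_name found while walking path."""
    node = root
    value = getattr(node, attr_name, None)
    for name in path:
        node = getattr(node, name, None)
        if node is None:
            break
        if hasattr(node, attr_name):
            value = getattr(node, attr_name)  # deeper levels win
    return value

config = SimpleNamespace(
    thing='global',
    grandparent=SimpleNamespace(parent=SimpleNamespace(thing='local')),
)
print(cascading_get(config, 'thing', 'grandparent', 'parent'))  # 'local'
```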
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config.service_references | def service_references(self):
""" returns a list of service names
"""
services_blue_print = self._scheme_references.get('services')
if services_blue_print is None:
raise LookupError('unable to find any services in the config.')
# TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys
return {key.replace('-', '_'): key for key in services_blue_print['keys']} | python | def service_references(self):
""" returns a list of service names
"""
services_blue_print = self._scheme_references.get('services')
if services_blue_print is None:
raise LookupError('unable to find any services in the config.')
# TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys
return {key.replace('-', '_'): key for key in services_blue_print['keys']} | [
"def",
"service_references",
"(",
"self",
")",
":",
"services_blue_print",
"=",
"self",
".",
"_scheme_references",
".",
"get",
"(",
"'services'",
")",
"if",
"services_blue_print",
"is",
"None",
":",
"raise",
"LookupError",
"(",
"'unable to find any services in the config.'",
")",
"# TODO: this needs to be cleaned up and made solid. maybe when creating the blueprint ref normalize the damn keys",
"return",
"{",
"key",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
":",
"key",
"for",
"key",
"in",
"services_blue_print",
"[",
"'keys'",
"]",
"}"
]
| returns a dict of service names | [
"returns",
"a",
"list",
"of",
"service",
"names"
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L928-L936 | train |
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config.validate | def validate(self):
""" Validate the contents of the configuration file. Will return None if validation is successful or
raise an error if not.
"""
if not isinstance(self._data, dict):
raise TypeError('freight forwarder configuration file must be a dict.')
current_log_level = logger.get_level()
if self._verbose:
logger.set_level('DEBUG')
else:
logger.set_level('ERROR')
logger.info('Starting configuration validation', extra={"formatter": 'config-start'})
# copy config dict to allow config data to stay in its original state.
config_data = self._data.copy()
try:
self._walk_tree(config_data, ROOT_SCHEME)
except ConfigValidationException as e:
e.log_error()
raise
logger.info("Config validation passed.", extra={'formatter': 'config-success'})
logger.set_level(current_log_level) | python | def validate(self):
""" Validate the contents of the configuration file. Will return None if validation is successful or
raise an error if not.
"""
if not isinstance(self._data, dict):
raise TypeError('freight forwarder configuration file must be a dict.')
current_log_level = logger.get_level()
if self._verbose:
logger.set_level('DEBUG')
else:
logger.set_level('ERROR')
logger.info('Starting configuration validation', extra={"formatter": 'config-start'})
# copy config dict to allow config data to stay in its original state.
config_data = self._data.copy()
try:
self._walk_tree(config_data, ROOT_SCHEME)
except ConfigValidationException as e:
e.log_error()
raise
logger.info("Config validation passed.", extra={'formatter': 'config-success'})
logger.set_level(current_log_level) | [
"def",
"validate",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_data",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'freight forwarder configuration file must be a dict.'",
")",
"current_log_level",
"=",
"logger",
".",
"get_level",
"(",
")",
"if",
"self",
".",
"_verbose",
":",
"logger",
".",
"set_level",
"(",
"'DEBUG'",
")",
"else",
":",
"logger",
".",
"set_level",
"(",
"'ERROR'",
")",
"logger",
".",
"info",
"(",
"'Starting configuration validation'",
",",
"extra",
"=",
"{",
"\"formatter\"",
":",
"'config-start'",
"}",
")",
"# copy config dict to allow config data to stay in its original state.",
"config_data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"try",
":",
"self",
".",
"_walk_tree",
"(",
"config_data",
",",
"ROOT_SCHEME",
")",
"except",
"ConfigValidationException",
"as",
"e",
":",
"e",
".",
"log_error",
"(",
")",
"raise",
"logger",
".",
"info",
"(",
"\"Config validation passed.\"",
",",
"extra",
"=",
"{",
"'formatter'",
":",
"'config-success'",
"}",
")",
"logger",
".",
"set_level",
"(",
"current_log_level",
")"
]
| Validate the contents of the configuration file. Will return None if validation is successful or
raise an error if not. | [
"Validate",
"the",
"contents",
"of",
"the",
"configuration",
"file",
".",
"Will",
"return",
"None",
"if",
"validation",
"is",
"successful",
"or",
"raise",
"an",
"error",
"if",
"not",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L965-L992 | train |
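One design note on validate(): the log level is saved and restored by hand, so when a ConfigValidationException is re-raised the trailing set_level call never runs and the logger stays at ERROR (or DEBUG). Wrapping the switch in a try/finally context manager guarantees restoration; this sketch uses the stdlib logging module rather than the project's custom logger:

```python
import logging
from contextlib import contextmanager

@contextmanager
def temporary_level(logger, level):
    """Change the log level for a block and always restore it afterwards."""
    previous = logger.level
    logger.setLevel(level)
    try:
        yield logger
    finally:
        logger.setLevel(previous)  # runs even if validation raises

log = logging.getLogger('freight-forwarder')
with temporary_level(log, logging.DEBUG):
    log.debug('Starting configuration validation')
```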
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._load | def _load(self):
""" Load a configuration file. This method will be called when the Config class is instantiated. The
configuration file can be json or yaml.
"""
if os.path.isdir(self._path):
for file_ext in ('yml', 'yaml', 'json'):
test_path = os.path.join(self._path, 'freight-forwarder.{0}'.format(file_ext))
if os.path.isfile(test_path):
self._path = test_path
break
if os.path.isfile(self._path):
file_name, file_extension = os.path.splitext(self._path)
with open(self._path, 'r') as config_file:
if file_extension in ('.yaml', '.yml'):
self._load_yml_config(config_file.read())
elif file_extension == '.json':
try:
config_data = json.loads(config_file.read())
self._data = normalize_keys(config_data)
except Exception:
raise SyntaxError("There is a syntax error in your freight-forwarder config.")
else:
raise TypeError("Configuration file most be yaml or json.")
else:
raise LookupError("Was unable to find a freight-forwarder configuration file.") | python | def _load(self):
""" Load a configuration file. This method will be called when the Config class is instantiated. The
configuration file can be json or yaml.
"""
if os.path.isdir(self._path):
for file_ext in ('yml', 'yaml', 'json'):
test_path = os.path.join(self._path, 'freight-forwarder.{0}'.format(file_ext))
if os.path.isfile(test_path):
self._path = test_path
break
if os.path.isfile(self._path):
file_name, file_extension = os.path.splitext(self._path)
with open(self._path, 'r') as config_file:
if file_extension in ('.yaml', '.yml'):
self._load_yml_config(config_file.read())
elif file_extension == '.json':
try:
config_data = json.loads(config_file.read())
self._data = normalize_keys(config_data)
except Exception:
raise SyntaxError("There is a syntax error in your freight-forwarder config.")
else:
raise TypeError("Configuration file most be yaml or json.")
else:
raise LookupError("Was unable to find a freight-forwarder configuration file.") | [
"def",
"_load",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"_path",
")",
":",
"for",
"file_ext",
"in",
"(",
"'yml'",
",",
"'yaml'",
",",
"'json'",
")",
":",
"test_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"'freight-forwarder.{0}'",
".",
"format",
"(",
"file_ext",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"test_path",
")",
":",
"self",
".",
"_path",
"=",
"test_path",
"break",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_path",
")",
":",
"file_name",
",",
"file_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"_path",
")",
"with",
"open",
"(",
"self",
".",
"_path",
",",
"'r'",
")",
"as",
"config_file",
":",
"if",
"file_extension",
"in",
"(",
"'.yaml'",
",",
"'.yml'",
")",
":",
"self",
".",
"_load_yml_config",
"(",
"config_file",
".",
"read",
"(",
")",
")",
"elif",
"file_extension",
"==",
"'.json'",
":",
"try",
":",
"config_data",
"=",
"json",
".",
"loads",
"(",
"config_file",
".",
"read",
"(",
")",
")",
"self",
".",
"_data",
"=",
"normalize_keys",
"(",
"config_data",
")",
"except",
"Exception",
":",
"raise",
"SyntaxError",
"(",
"\"There is a syntax error in your freight-forwarder config.\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Configuration file most be yaml or json.\"",
")",
"else",
":",
"raise",
"LookupError",
"(",
"\"Was unable to find a freight-forwarder configuration file.\"",
")"
]
| Load a configuration file. This method will be called when the Config class is instantiated. The
configuration file can be json or yaml. | [
"Load",
"a",
"configuration",
"file",
".",
"This",
"method",
"will",
"be",
"called",
"when",
"the",
"Config",
"class",
"is",
"instantiated",
".",
"The",
"configuration",
"file",
"can",
"be",
"json",
"or",
"yaml",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L997-L1024 | train |
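The directory handling in _load reduces to: probe freight-forwarder.<ext> for each supported extension in preference order, then dispatch on the file suffix. A compact standalone sketch of just the resolution step:

```python
import os

def resolve_config_path(path):
    """If path is a directory, look inside it for freight-forwarder.{yml,yaml,json}."""
    if os.path.isdir(path):
        for ext in ('yml', 'yaml', 'json'):
            candidate = os.path.join(path, 'freight-forwarder.{0}'.format(ext))
            if os.path.isfile(candidate):
                return candidate
    if os.path.isfile(path):
        return path
    raise LookupError("Was unable to find a freight-forwarder configuration file.")
```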
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._load_yml_config | def _load_yml_config(self, config_file):
""" loads a yaml str, creates a few constructs for pyaml, serializes and normalized the config data. Then
assigns the config data to self._data.
:param config_file: A :string: loaded from a yaml file.
"""
if not isinstance(config_file, six.string_types):
raise TypeError('config_file must be a str.')
try:
def construct_yaml_int(self, node):
obj = SafeConstructor.construct_yaml_int(self, node)
data = ConfigInt(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_float(self, node):
obj, = SafeConstructor.construct_yaml_float(self, node)
data = ConfigFloat(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
obj = SafeConstructor.construct_scalar(self, node)
assert isinstance(obj, six.string_types)
data = ConfigUnicode(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_mapping(self, node):
obj, = SafeConstructor.construct_yaml_map(self, node)
data = ConfigDict(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_seq(self, node):
obj, = SafeConstructor.construct_yaml_seq(self, node)
data = ConfigSeq(
obj,
node.start_mark,
node.end_mark
)
return data
# SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', construct_yaml_bool)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:float', construct_yaml_float)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', construct_yaml_int)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', construct_yaml_mapping)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', construct_yaml_seq)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
data = SafeLoader(config_file).get_data()
if data is None:
raise AttributeError('The configuration file needs to have data in it.')
self._data = normalize_keys(data, snake_case=False)
except YAMLError as e:
if hasattr(e, 'problem_mark'):
mark = e.problem_mark
raise SyntaxError(
"There is a syntax error in your freight-forwarder config file line: {0} column: {1}".format(
mark.line + 1,
mark.column + 1
)
)
else:
raise SyntaxError("There is a syntax error in your freight-forwarder config.") | python | def _load_yml_config(self, config_file):
""" loads a yaml str, creates a few constructs for pyaml, serializes and normalized the config data. Then
assigns the config data to self._data.
:param config_file: A :string: loaded from a yaml file.
"""
if not isinstance(config_file, six.string_types):
raise TypeError('config_file must be a str.')
try:
def construct_yaml_int(self, node):
obj = SafeConstructor.construct_yaml_int(self, node)
data = ConfigInt(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_float(self, node):
obj, = SafeConstructor.construct_yaml_float(self, node)
data = ConfigFloat(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
obj = SafeConstructor.construct_scalar(self, node)
assert isinstance(obj, six.string_types)
data = ConfigUnicode(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_mapping(self, node):
obj, = SafeConstructor.construct_yaml_map(self, node)
data = ConfigDict(
obj,
node.start_mark,
node.end_mark
)
return data
def construct_yaml_seq(self, node):
obj, = SafeConstructor.construct_yaml_seq(self, node)
data = ConfigSeq(
obj,
node.start_mark,
node.end_mark
)
return data
# SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', construct_yaml_bool)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:float', construct_yaml_float)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', construct_yaml_int)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', construct_yaml_mapping)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', construct_yaml_seq)
SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
data = SafeLoader(config_file).get_data()
if data is None:
raise AttributeError('The configuration file needs to have data in it.')
self._data = normalize_keys(data, snake_case=False)
except YAMLError as e:
if hasattr(e, 'problem_mark'):
mark = e.problem_mark
raise SyntaxError(
"There is a syntax error in your freight-forwarder config file line: {0} column: {1}".format(
mark.line + 1,
mark.column + 1
)
)
else:
raise SyntaxError("There is a syntax error in your freight-forwarder config.") | [
"def",
"_load_yml_config",
"(",
"self",
",",
"config_file",
")",
":",
"if",
"not",
"isinstance",
"(",
"config_file",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'config_file must be a str.'",
")",
"try",
":",
"def",
"construct_yaml_int",
"(",
"self",
",",
"node",
")",
":",
"obj",
"=",
"SafeConstructor",
".",
"construct_yaml_int",
"(",
"self",
",",
"node",
")",
"data",
"=",
"ConfigInt",
"(",
"obj",
",",
"node",
".",
"start_mark",
",",
"node",
".",
"end_mark",
")",
"return",
"data",
"def",
"construct_yaml_float",
"(",
"self",
",",
"node",
")",
":",
"obj",
",",
"=",
"SafeConstructor",
".",
"construct_yaml_float",
"(",
"self",
",",
"node",
")",
"data",
"=",
"ConfigFloat",
"(",
"obj",
",",
"node",
".",
"start_mark",
",",
"node",
".",
"end_mark",
")",
"return",
"data",
"def",
"construct_yaml_str",
"(",
"self",
",",
"node",
")",
":",
"# Override the default string handling function",
"# to always return unicode objects",
"obj",
"=",
"SafeConstructor",
".",
"construct_scalar",
"(",
"self",
",",
"node",
")",
"assert",
"isinstance",
"(",
"obj",
",",
"six",
".",
"string_types",
")",
"data",
"=",
"ConfigUnicode",
"(",
"obj",
",",
"node",
".",
"start_mark",
",",
"node",
".",
"end_mark",
")",
"return",
"data",
"def",
"construct_yaml_mapping",
"(",
"self",
",",
"node",
")",
":",
"obj",
",",
"=",
"SafeConstructor",
".",
"construct_yaml_map",
"(",
"self",
",",
"node",
")",
"data",
"=",
"ConfigDict",
"(",
"obj",
",",
"node",
".",
"start_mark",
",",
"node",
".",
"end_mark",
")",
"return",
"data",
"def",
"construct_yaml_seq",
"(",
"self",
",",
"node",
")",
":",
"obj",
",",
"=",
"SafeConstructor",
".",
"construct_yaml_seq",
"(",
"self",
",",
"node",
")",
"data",
"=",
"ConfigSeq",
"(",
"obj",
",",
"node",
".",
"start_mark",
",",
"node",
".",
"end_mark",
")",
"return",
"data",
"# SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', construct_yaml_bool)",
"SafeConstructor",
".",
"add_constructor",
"(",
"u'tag:yaml.org,2002:float'",
",",
"construct_yaml_float",
")",
"SafeConstructor",
".",
"add_constructor",
"(",
"u'tag:yaml.org,2002:int'",
",",
"construct_yaml_int",
")",
"SafeConstructor",
".",
"add_constructor",
"(",
"u'tag:yaml.org,2002:map'",
",",
"construct_yaml_mapping",
")",
"SafeConstructor",
".",
"add_constructor",
"(",
"u'tag:yaml.org,2002:seq'",
",",
"construct_yaml_seq",
")",
"SafeConstructor",
".",
"add_constructor",
"(",
"u'tag:yaml.org,2002:str'",
",",
"construct_yaml_str",
")",
"data",
"=",
"SafeLoader",
"(",
"config_file",
")",
".",
"get_data",
"(",
")",
"if",
"data",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"'The configuration file needs to have data in it.'",
")",
"self",
".",
"_data",
"=",
"normalize_keys",
"(",
"data",
",",
"snake_case",
"=",
"False",
")",
"except",
"YAMLError",
"as",
"e",
":",
"if",
"hasattr",
"(",
"e",
",",
"'problem_mark'",
")",
":",
"mark",
"=",
"e",
".",
"problem_mark",
"raise",
"SyntaxError",
"(",
"\"There is a syntax error in your freight-forwarder config file line: {0} column: {1}\"",
".",
"format",
"(",
"mark",
".",
"line",
"+",
"1",
",",
"mark",
".",
"column",
"+",
"1",
")",
")",
"else",
":",
"raise",
"SyntaxError",
"(",
"\"There is a syntax error in your freight-forwarder config.\"",
")"
]
| loads a yaml str, creates a few constructors for PyYAML, deserializes and normalizes the config data. Then
assigns the config data to self._data.
:param config_file: A :string: loaded from a yaml file. | [
"loads",
"a",
"yaml",
"str",
"creates",
"a",
"few",
"constructs",
"for",
"pyaml",
"serializes",
"and",
"normalized",
"the",
"config",
"data",
".",
"Then",
"assigns",
"the",
"config",
"data",
"to",
"self",
".",
"_data",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1026-L1112 | train |
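The point of those constructor overrides is to keep PyYAML's source marks (line and column) attached to every parsed value, which is what lets validation errors point at the offending line later. A minimal self-contained version of the same trick for strings only, using a SafeLoader subclass instead of mutating SafeConstructor globally:

```python
import yaml

class MarkedStr(str):
    """A str that remembers its position in the YAML source."""
    def __new__(cls, value, mark):
        obj = str.__new__(cls, value)
        obj.mark = mark
        return obj

class MarkedLoader(yaml.SafeLoader):
    """SafeLoader subclass, so yaml.SafeLoader itself is left untouched."""

def construct_marked_str(loader, node):
    return MarkedStr(loader.construct_scalar(node), node.start_mark)

MarkedLoader.add_constructor('tag:yaml.org,2002:str', construct_marked_str)

data = yaml.load('registry: docker.example.com\n', Loader=MarkedLoader)
value = data['registry']
print(value, value.mark.line + 1, value.mark.column + 1)  # docker.example.com 1 11
```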
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._create_attr | def _create_attr(self, property_key, data, ancestors):
""" Dynamically Creates attributes on for a Config. Also adds name and alias to each Config object.
:param property_key: A :string: configuration property name.
:param data: The data the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
"""
if not isinstance(property_key, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_key)))
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
previous_element = self
normalized_key = normalize_value(property_key).replace('-', '_')
normalized_ancestor_key = None
# TODO: clean up and validation
if ancestors:
for ancestor_key, ancestors_value in six.iteritems(ancestors):
normalized_ancestor_key = normalize_value(ancestor_key).replace('-', '_')
if normalized_ancestor_key.lower() == 'root':
continue
if not hasattr(previous_element, normalized_ancestor_key):
config_attr = ConfigDict({}, ancestors_value.start_mark, ancestors_value.end_mark)
config_attr.name = normalized_ancestor_key
config_attr.alias = ancestor_key
setattr(
previous_element,
normalized_ancestor_key,
config_attr
)
previous_element = getattr(previous_element, normalized_ancestor_key)
if normalized_key == normalized_ancestor_key:
pass
else:
if isinstance(data, ConfigNode):
data.name = normalized_key
data.alias = property_key
setattr(previous_element, normalized_key, data) | python | def _create_attr(self, property_key, data, ancestors):
""" Dynamically Creates attributes on for a Config. Also adds name and alias to each Config object.
:param property_key: A :string: configuration property name.
:param data: The data the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
"""
if not isinstance(property_key, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_key)))
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
previous_element = self
normalized_key = normalize_value(property_key).replace('-', '_')
normalized_ancestor_key = None
# TODO: clean up and validation
if ancestors:
for ancestor_key, ancestors_value in six.iteritems(ancestors):
normalized_ancestor_key = normalize_value(ancestor_key).replace('-', '_')
if normalized_ancestor_key.lower() == 'root':
continue
if not hasattr(previous_element, normalized_ancestor_key):
config_attr = ConfigDict({}, ancestors_value.start_mark, ancestors_value.end_mark)
config_attr.name = normalized_ancestor_key
config_attr.alias = ancestor_key
setattr(
previous_element,
normalized_ancestor_key,
config_attr
)
previous_element = getattr(previous_element, normalized_ancestor_key)
if normalized_key == normalized_ancestor_key:
pass
else:
if isinstance(data, ConfigNode):
data.name = normalized_key
data.alias = property_key
setattr(previous_element, normalized_key, data) | [
"def",
"_create_attr",
"(",
"self",
",",
"property_key",
",",
"data",
",",
"ancestors",
")",
":",
"if",
"not",
"isinstance",
"(",
"property_key",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"property_key must be a string. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"property_key",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"raise",
"TypeError",
"(",
"\"ancestors must be an OrderedDict. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"previous_element",
"=",
"self",
"normalized_key",
"=",
"normalize_value",
"(",
"property_key",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"normalized_ancestor_key",
"=",
"None",
"# TODO: clean up and validation",
"if",
"ancestors",
":",
"for",
"ancestor_key",
",",
"ancestors_value",
"in",
"six",
".",
"iteritems",
"(",
"ancestors",
")",
":",
"normalized_ancestor_key",
"=",
"normalize_value",
"(",
"ancestor_key",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"if",
"normalized_ancestor_key",
".",
"lower",
"(",
")",
"==",
"'root'",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"previous_element",
",",
"normalized_ancestor_key",
")",
":",
"config_attr",
"=",
"ConfigDict",
"(",
"{",
"}",
",",
"ancestors_value",
".",
"start_mark",
",",
"ancestors_value",
".",
"end_mark",
")",
"config_attr",
".",
"name",
"=",
"normalized_ancestor_key",
"config_attr",
".",
"alias",
"=",
"ancestor_key",
"setattr",
"(",
"previous_element",
",",
"normalized_ancestor_key",
",",
"config_attr",
")",
"previous_element",
"=",
"getattr",
"(",
"previous_element",
",",
"normalized_ancestor_key",
")",
"if",
"normalized_key",
"==",
"normalized_ancestor_key",
":",
"pass",
"else",
":",
"if",
"isinstance",
"(",
"data",
",",
"ConfigNode",
")",
":",
"data",
".",
"name",
"=",
"normalized_key",
"data",
".",
"alias",
"=",
"property_key",
"setattr",
"(",
"previous_element",
",",
"normalized_key",
",",
"data",
")"
]
| Dynamically creates attributes on a Config. Also adds name and alias to each Config object.
:param property_key: A :string: configuration property name.
:param data: The data the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors. | [
"Dynamically",
"Creates",
"attributes",
"on",
"for",
"a",
"Config",
".",
"Also",
"adds",
"name",
"and",
"alias",
"to",
"each",
"Config",
"object",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1140-L1185 | train |
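Stripped of the ConfigDict bookkeeping, _create_attr boils down to: make sure a chain of namespace nodes exists for the ancestors, then set the leaf attribute under its snake_cased name. A sketch with SimpleNamespace standing in for the config node types:

```python
from types import SimpleNamespace

def set_nested(root, ancestors, name, value):
    """Create missing intermediate nodes along ancestors, then set the leaf."""
    node = root
    for part in ancestors:
        if not hasattr(node, part):
            setattr(node, part, SimpleNamespace())
        node = getattr(node, part)
    setattr(node, name.replace('-', '_'), value)

config = SimpleNamespace()
set_nested(config, ['environments', 'dev'], 'image-tag', 'latest')
print(config.environments.dev.image_tag)  # latest
```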
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._collect_unrecognized_values | def _collect_unrecognized_values(self, scheme, data, ancestors):
""" Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found.
:param scheme: A :dict:, The scheme defining the validations.
:param data: A :dict: the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A :dict: of unrecognized configuration properties.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))
unrecognized_values = {}
if isinstance(data, dict):
pruned_scheme = [key for key in scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]
for key, value in six.iteritems(data):
if key in pruned_scheme:
continue
unrecognized_values[key] = value
validations = scheme.get('is')
if validations and 'one_of' in validations:
for nested_scheme in validations['one_of']:
if isinstance(nested_scheme, dict):
updated_scheme = self._update_scheme(nested_scheme, ancestors)
pruned_scheme = [key for key in updated_scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]
for key in pruned_scheme:
if key in unrecognized_values:
del unrecognized_values[key]
else:
# TODO: maybe return an error?
pass
return unrecognized_values | python | def _collect_unrecognized_values(self, scheme, data, ancestors):
""" Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found.
:param scheme: A :dict:, The scheme defining the validations.
:param data: A :dict: the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A :dict: of unrecognized configuration properties.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))
unrecognized_values = {}
if isinstance(data, dict):
pruned_scheme = [key for key in scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]
for key, value in six.iteritems(data):
if key in pruned_scheme:
continue
unrecognized_values[key] = value
validations = scheme.get('is')
if validations and 'one_of' in validations:
for nested_scheme in validations['one_of']:
if isinstance(nested_scheme, dict):
updated_scheme = self._update_scheme(nested_scheme, ancestors)
pruned_scheme = [key for key in updated_scheme.keys() if key not in RESERVED_SCHEME_KEYS and key[0] not in RESERVED_SCHEME_KEYS]
for key in pruned_scheme:
if key in unrecognized_values:
del unrecognized_values[key]
else:
# TODO: maybe return an error?
pass
return unrecognized_values | [
"def",
"_collect_unrecognized_values",
"(",
"self",
",",
"scheme",
",",
"data",
",",
"ancestors",
")",
":",
"if",
"not",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"raise",
"TypeError",
"(",
"\"ancestors must be an OrderedDict. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"scheme",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'scheme must be a dict. type: {0} was passed'",
".",
"format",
"(",
"type",
"(",
"scheme",
")",
")",
")",
"unrecognized_values",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"pruned_scheme",
"=",
"[",
"key",
"for",
"key",
"in",
"scheme",
".",
"keys",
"(",
")",
"if",
"key",
"not",
"in",
"RESERVED_SCHEME_KEYS",
"and",
"key",
"[",
"0",
"]",
"not",
"in",
"RESERVED_SCHEME_KEYS",
"]",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"data",
")",
":",
"if",
"key",
"in",
"pruned_scheme",
":",
"continue",
"unrecognized_values",
"[",
"key",
"]",
"=",
"value",
"validations",
"=",
"scheme",
".",
"get",
"(",
"'is'",
")",
"if",
"validations",
"and",
"'one_of'",
"in",
"validations",
":",
"for",
"nested_scheme",
"in",
"validations",
"[",
"'one_of'",
"]",
":",
"if",
"isinstance",
"(",
"nested_scheme",
",",
"dict",
")",
":",
"updated_scheme",
"=",
"self",
".",
"_update_scheme",
"(",
"nested_scheme",
",",
"ancestors",
")",
"pruned_scheme",
"=",
"[",
"key",
"for",
"key",
"in",
"updated_scheme",
".",
"keys",
"(",
")",
"if",
"key",
"not",
"in",
"RESERVED_SCHEME_KEYS",
"and",
"key",
"[",
"0",
"]",
"not",
"in",
"RESERVED_SCHEME_KEYS",
"]",
"for",
"key",
"in",
"pruned_scheme",
":",
"if",
"key",
"in",
"unrecognized_values",
":",
"del",
"unrecognized_values",
"[",
"key",
"]",
"else",
":",
"# TODO: maybe return an error?",
"pass",
"return",
"unrecognized_values"
]
| Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found.
:param scheme: A :dict:, The scheme defining the validations.
:param data: A :dict: the user supplied for this specific property.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A :dict: of unrecognized configuration properties. | [
"Looks",
"for",
"values",
"that",
"aren",
"t",
"defined",
"in",
"the",
"scheme",
"and",
"returns",
"a",
"dict",
"with",
"any",
"unrecognized",
"values",
"found",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1187-L1225 | train |
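The first pass of the check is a set difference between the user's keys and the scheme's non-reserved keys (the one_of pass then forgives keys accepted by any alternative). A reduced sketch of that first pass; the reserved-key set here is illustrative, not the module's actual RESERVED_SCHEME_KEYS:

```python
RESERVED = {'is', 'inherit', 'cascading'}  # illustrative, not the real list

def unrecognized_values(scheme, data):
    """Return the data entries whose keys the scheme does not declare."""
    allowed = {key for key in scheme
               if key not in RESERVED and key[0] not in ('@', '~')}
    return {key: value for key, value in data.items() if key not in allowed}

scheme = {'image': {}, 'ports': {}, 'is': {'required': True}}
print(unrecognized_values(scheme, {'image': 'app', 'prots': [80]}))  # {'prots': [80]}
```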
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._update_scheme | def _update_scheme(self, scheme, ancestors):
""" Updates the current scheme based off special pre-defined keys and retruns a new updated scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A new :dict: with updated scheme values.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))
# TODO: what if we have more than one scheme :P need to fix this.
definitions = ROOT_SCHEME.get('_')
if 'inherit' in scheme:
scheme = self._scheme_propagation(scheme, definitions)
updated_scheme = {}
for scheme_key in six.iterkeys(scheme):
if not isinstance(scheme_key, six.string_types):
raise TypeError('scheme keys are required to be strings. type: {0} was passed.'.format(scheme_key))
if '@' in scheme_key:
ref = scheme_key[1:]
scheme_reference = self._scheme_references.get(ref)
if not scheme_reference:
raise ConfigValidationException(ancestors, ref, scheme_reference, 'required', scheme)
for reference_key in scheme_reference['keys']:
scheme_reference['scheme'].update(scheme[scheme_key])
updated_scheme[reference_key] = scheme_reference['scheme']
elif '~' in scheme_key:
ref = scheme_key[1:]
scheme_reference = self._scheme_references.get(ref)
if not scheme_reference:
raise LookupError("was unable to find {0} in scheme reference.".format(ref))
for reference_key in scheme_reference['keys']:
updated_scheme[reference_key] = scheme[scheme_key]
scheme.update(updated_scheme)
return scheme | python | def _update_scheme(self, scheme, ancestors):
""" Updates the current scheme based off special pre-defined keys and retruns a new updated scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A new :dict: with updated scheme values.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))
# TODO: what if we have more than one scheme :P need to fix this.
definitions = ROOT_SCHEME.get('_')
if 'inherit' in scheme:
scheme = self._scheme_propagation(scheme, definitions)
updated_scheme = {}
for scheme_key in six.iterkeys(scheme):
if not isinstance(scheme_key, six.string_types):
raise TypeError('scheme keys are required to be strings. type: {0} was passed.'.format(scheme_key))
if '@' in scheme_key:
ref = scheme_key[1:]
scheme_reference = self._scheme_references.get(ref)
if not scheme_reference:
raise ConfigValidationException(ancestors, ref, scheme_reference, 'required', scheme)
for reference_key in scheme_reference['keys']:
scheme_reference['scheme'].update(scheme[scheme_key])
updated_scheme[reference_key] = scheme_reference['scheme']
elif '~' in scheme_key:
ref = scheme_key[1:]
scheme_reference = self._scheme_references.get(ref)
if not scheme_reference:
raise LookupError("was unable to find {0} in scheme reference.".format(ref))
for reference_key in scheme_reference['keys']:
updated_scheme[reference_key] = scheme[scheme_key]
scheme.update(updated_scheme)
return scheme | [
"def",
"_update_scheme",
"(",
"self",
",",
"scheme",
",",
"ancestors",
")",
":",
"if",
"not",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"raise",
"TypeError",
"(",
"\"ancestors must be an OrderedDict. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"scheme",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'scheme must be a dict. type: {0} was passed'",
".",
"format",
"(",
"type",
"(",
"scheme",
")",
")",
")",
"# TODO: what if we have more than one scheme :P need to fix this.",
"definitions",
"=",
"ROOT_SCHEME",
".",
"get",
"(",
"'_'",
")",
"if",
"'inherit'",
"in",
"scheme",
":",
"scheme",
"=",
"self",
".",
"_scheme_propagation",
"(",
"scheme",
",",
"definitions",
")",
"updated_scheme",
"=",
"{",
"}",
"for",
"scheme_key",
"in",
"six",
".",
"iterkeys",
"(",
"scheme",
")",
":",
"if",
"not",
"isinstance",
"(",
"scheme_key",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'scheme keys are required to be strings. type: {0} was passed.'",
".",
"format",
"(",
"scheme_key",
")",
")",
"if",
"'@'",
"in",
"scheme_key",
":",
"ref",
"=",
"scheme_key",
"[",
"1",
":",
"]",
"scheme_reference",
"=",
"self",
".",
"_scheme_references",
".",
"get",
"(",
"ref",
")",
"if",
"not",
"scheme_reference",
":",
"raise",
"ConfigValidationException",
"(",
"ancestors",
",",
"ref",
",",
"scheme_reference",
",",
"'required'",
",",
"scheme",
")",
"for",
"reference_key",
"in",
"scheme_reference",
"[",
"'keys'",
"]",
":",
"scheme_reference",
"[",
"'scheme'",
"]",
".",
"update",
"(",
"scheme",
"[",
"scheme_key",
"]",
")",
"updated_scheme",
"[",
"reference_key",
"]",
"=",
"scheme_reference",
"[",
"'scheme'",
"]",
"elif",
"'~'",
"in",
"scheme_key",
":",
"ref",
"=",
"scheme_key",
"[",
"1",
":",
"]",
"scheme_reference",
"=",
"self",
".",
"_scheme_references",
".",
"get",
"(",
"ref",
")",
"if",
"not",
"scheme_reference",
":",
"raise",
"LookupError",
"(",
"\"was unable to find {0} in scheme reference.\"",
".",
"format",
"(",
"ref",
")",
")",
"for",
"reference_key",
"in",
"scheme_reference",
"[",
"'keys'",
"]",
":",
"updated_scheme",
"[",
"reference_key",
"]",
"=",
"scheme",
"[",
"scheme_key",
"]",
"scheme",
".",
"update",
"(",
"updated_scheme",
")",
"return",
"scheme"
]
| Updates the current scheme based off special pre-defined keys and returns a new updated scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param ancestors: A :OrderedDict: that provides a history of its ancestors.
:rtype: A new :dict: with updated scheme values. | [
"Updates",
"the",
"current",
"scheme",
"based",
"off",
"special",
"pre",
"-",
"defined",
"keys",
"and",
"retruns",
"a",
"new",
"updated",
"scheme",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1227-L1272 | train |
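The '@' and '~' prefixes are two kinds of indirection: '@name' merges the inline scheme into the referenced scheme and installs the result under every key the reference recorded, while '~name' reuses the inline scheme verbatim for each of those keys. A reduced sketch, with the reference layout mirroring self._scheme_references:

```python
def expand_scheme(scheme, references):
    """Resolve '@ref' (merge) and '~ref' (reuse) keys into concrete keys."""
    expanded = {key: value for key, value in scheme.items()
                if key[0] not in ('@', '~')}
    for key, value in scheme.items():
        if key[0] not in ('@', '~'):
            continue
        ref = references[key[1:]]
        for concrete in ref['keys']:
            if key[0] == '@':
                merged = dict(ref['scheme'])
                merged.update(value)        # inline scheme wins on conflicts
                expanded[concrete] = merged
            else:
                expanded[concrete] = value  # '~' reuses the inline scheme as-is
    return expanded

refs = {'services': {'keys': ['api', 'worker'], 'scheme': {'required': False}}}
print(expand_scheme({'@services': {'required': True}}, refs))
# {'api': {'required': True}, 'worker': {'required': True}}
```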
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._walk_tree | def _walk_tree(self, data, scheme, ancestors=None, property_name=None, prefix=None):
""" This function takes configuration data and a validation scheme
then walks the configuration tree, validating the configuration data against
the scheme provided. Raises an error on failure, otherwise returns None.
Usage::
>>> self._walk_tree(
>>> OrderedDict([('root', config_data)]),
>>> registries,
>>> REGISTRIES_SCHEME
>>> )
:param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
:param data: The data that needs to be validated against the scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param property_name: A :string:, This is the name of the data getting validated.
:param prefix:
:rtype: :None: will raise error if a validation fails.
"""
if property_name is None:
property_name = 'root'
# hack until i add this to references
# reorder validations, putting required first. If the data doesn't exist there is no need to continue.
order = ['registries'] + [key for key in scheme.keys() if key not in ('registries',)]
scheme = OrderedDict(sorted(scheme.items(), key=lambda x: order.index(x[0])))
if data is None:
return
elif not isinstance(property_name, six.string_types):
raise TypeError('property_name must be a string.')
ancestors = self._update_ancestors(data, property_name, ancestors)
if isinstance(ancestors, OrderedDict):
if list(ancestors)[0] != 'root':
raise LookupError('root must be the first item in ancestors.')
else:
raise TypeError('ancestors must be an OrderedDict. {0} was passed'.format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. {0} was passed.'.format(type(scheme)))
scheme = self._update_scheme(scheme, ancestors)
if property_name is not None and data:
data = self._get_cascading_attr(
property_name, *list(ancestors)[1:]
) if scheme.get('cascading', False) else data
for err in self.__execute_validations(scheme.get('is', {}), data, property_name, ancestors, prefix=prefix):
if err:
raise err
else:
self._create_attr(property_name, data, ancestors)
self.__validate_unrecognized_values(scheme, data, ancestors, prefix)
self.__populate_scheme_references(scheme, property_name)
self.__validate_config_properties(scheme, data, ancestors, prefix) | python | def _walk_tree(self, data, scheme, ancestors=None, property_name=None, prefix=None):
""" This function takes configuration data and a validation scheme
then walks the configuration tree, validating the configuration data against
the scheme provided. Raises an error on failure, otherwise returns None.
Usage::
>>> self._walk_tree(
>>> OrderedDict([('root', config_data)]),
>>> registries,
>>> REGISTRIES_SCHEME
>>> )
:param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
:param data: The data that needs to be validated against the scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param property_name: A :string:, This is the name of the data getting validated.
:param prefix:
:rtype: :None: will raise error if a validation fails.
"""
if property_name is None:
property_name = 'root'
# hack until i add this to references
# reorder validations, putting required first. If the data doesn't exist there is no need to continue.
order = ['registries'] + [key for key in scheme.keys() if key not in ('registries',)]
scheme = OrderedDict(sorted(scheme.items(), key=lambda x: order.index(x[0])))
if data is None:
return
elif not isinstance(property_name, six.string_types):
raise TypeError('property_name must be a string.')
ancestors = self._update_ancestors(data, property_name, ancestors)
if isinstance(ancestors, OrderedDict):
if list(ancestors)[0] != 'root':
raise LookupError('root must be the first item in ancestors.')
else:
raise TypeError('ancestors must be an OrderedDict. {0} was passed'.format(type(ancestors)))
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict. {0} was passed.'.format(type(scheme)))
scheme = self._update_scheme(scheme, ancestors)
if property_name is not None and data:
data = self._get_cascading_attr(
property_name, *list(ancestors)[1:]
) if scheme.get('cascading', False) else data
for err in self.__execute_validations(scheme.get('is', {}), data, property_name, ancestors, prefix=prefix):
if err:
raise err
else:
self._create_attr(property_name, data, ancestors)
self.__validate_unrecognized_values(scheme, data, ancestors, prefix)
self.__populate_scheme_references(scheme, property_name)
self.__validate_config_properties(scheme, data, ancestors, prefix) | [
"def",
"_walk_tree",
"(",
"self",
",",
"data",
",",
"scheme",
",",
"ancestors",
"=",
"None",
",",
"property_name",
"=",
"None",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"property_name",
"is",
"None",
":",
"property_name",
"=",
"'root'",
"# hack until i add this to references",
"# reorder validates putting required first. If the data doesn't exist there is no need to continue.",
"order",
"=",
"[",
"'registries'",
"]",
"+",
"[",
"key",
"for",
"key",
"in",
"scheme",
".",
"keys",
"(",
")",
"if",
"key",
"not",
"in",
"(",
"'registries'",
",",
")",
"]",
"scheme",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"scheme",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"order",
".",
"index",
"(",
"x",
"[",
"0",
"]",
")",
")",
")",
"if",
"data",
"is",
"None",
":",
"return",
"elif",
"not",
"isinstance",
"(",
"property_name",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'property_name must be a string.'",
")",
"ancestors",
"=",
"self",
".",
"_update_ancestors",
"(",
"data",
",",
"property_name",
",",
"ancestors",
")",
"if",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"if",
"list",
"(",
"ancestors",
")",
"[",
"0",
"]",
"!=",
"'root'",
":",
"raise",
"LookupError",
"(",
"'root must be the first item in ancestors.'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'ancestors must be an OrderedDict. {0} was passed'",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"scheme",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'scheme must be a dict. {0} was passed.'",
".",
"format",
"(",
"type",
"(",
"scheme",
")",
")",
")",
"scheme",
"=",
"self",
".",
"_update_scheme",
"(",
"scheme",
",",
"ancestors",
")",
"if",
"property_name",
"is",
"not",
"None",
"and",
"data",
":",
"data",
"=",
"self",
".",
"_get_cascading_attr",
"(",
"property_name",
",",
"*",
"list",
"(",
"ancestors",
")",
"[",
"1",
":",
"]",
")",
"if",
"scheme",
".",
"get",
"(",
"'cascading'",
",",
"False",
")",
"else",
"data",
"for",
"err",
"in",
"self",
".",
"__execute_validations",
"(",
"scheme",
".",
"get",
"(",
"'is'",
",",
"{",
"}",
")",
",",
"data",
",",
"property_name",
",",
"ancestors",
",",
"prefix",
"=",
"prefix",
")",
":",
"if",
"err",
":",
"raise",
"err",
"else",
":",
"self",
".",
"_create_attr",
"(",
"property_name",
",",
"data",
",",
"ancestors",
")",
"self",
".",
"__validate_unrecognized_values",
"(",
"scheme",
",",
"data",
",",
"ancestors",
",",
"prefix",
")",
"self",
".",
"__populate_scheme_references",
"(",
"scheme",
",",
"property_name",
")",
"self",
".",
"__validate_config_properties",
"(",
"scheme",
",",
"data",
",",
"ancestors",
",",
"prefix",
")"
]
| This function takes configuration data and a validation scheme
then walks the configuration tree validating the configuration data against
the scheme provided. Raises an error on failure, otherwise returns None.
Usage::
>>> self._walk_tree(
>>> OrderedDict([('root', config_data)]),
>>> registries,
>>> REGISTRIES_SCHEME
>>> )
:param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
:param data: The data that needs to be validated against the scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param property_name: A :string:, This is the name of the data getting validated.
:param prefix:
:rtype: :None: will raise an error if a validation fails. | [
"This",
"function",
"takes",
"configuration",
"data",
"and",
"a",
"validation",
"scheme",
"then",
"walk",
"the",
"configuration",
"tree",
"validating",
"the",
"configuraton",
"data",
"agenst",
"the",
"scheme",
"provided",
".",
"Will",
"raise",
"error",
"on",
"failure",
"otherwise",
"return",
"None",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1331-L1390 | train |
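To make the walk concrete, here is a minimal, self-contained sketch of the same scheme-driven validation idea; the `SCHEME` shape and all names below are illustrative assumptions, not freight_forwarder's actual API:

```python
# Hedged sketch only: a simplified scheme walker in the spirit of _walk_tree.
from collections import OrderedDict

SCHEME = {
    'registries': {'type': dict, 'required': True},
    'verbose': {'type': bool, 'required': False},
}

def walk_tree(data, scheme, ancestors=None):
    # the real implementation threads an OrderedDict of ancestors through the walk
    ancestors = ancestors if ancestors is not None else OrderedDict([('root', data)])
    for name, rules in scheme.items():
        value = data.get(name)
        if value is None:
            if rules.get('required'):
                raise LookupError('missing required property: {0}'.format(name))
            continue
        if not isinstance(value, rules['type']):
            raise TypeError('{0} must be a {1}'.format(name, rules['type'].__name__))

walk_tree({'registries': {'docker_hub': {}}, 'verbose': True}, SCHEME)
```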
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._update_ancestors | def _update_ancestors(self, config_data, property_name, ancestors=None):
""" Update ancestors for a specific property.
:param ancestors: A :OrderedDict:, representing the ancestors of a property.
:param config_data: The data that needs to be validated against the scheme.
:param property_name: A :string: of the property's name.
:rtype: A :OrderedDict: that has been updated with new parents.
"""
if not isinstance(property_name, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_name)))
if ancestors is None:
ancestors = OrderedDict([('root', config_data)])
elif not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
elif 'root' not in ancestors:
raise LookupError(
'root must be in ancestors. currently in the ancestors chain {0}'.format(', '.join(ancestors.keys()))
)
ancestors = ancestors.copy()
for previous_key in list(ancestors)[::-1]:
previous_item = ancestors[previous_key]
if isinstance(config_data, dict):
if property_name in previous_item:
ancestors[property_name] = config_data
break
return ancestors | python | def _update_ancestors(self, config_data, property_name, ancestors=None):
""" Update ancestors for a specific property.
:param ancestors: A :OrderedDict:, representing the ancestors of a property.
:param config_data: The data that needs to be validated against the scheme.
:param property_name: A :string: of the property's name.
:rtype: A :OrderedDict: that has been updated with new parents.
"""
if not isinstance(property_name, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_name)))
if ancestors is None:
ancestors = OrderedDict([('root', config_data)])
elif not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
elif 'root' not in ancestors:
raise LookupError(
'root must be in ancestors. currently in the ancestors chain {0}'.format(', '.join(ancestors.keys()))
)
ancestors = ancestors.copy()
for previous_key in list(ancestors)[::-1]:
previous_item = ancestors[previous_key]
if isinstance(config_data, dict):
if property_name in previous_item:
ancestors[property_name] = config_data
break
return ancestors | [
"def",
"_update_ancestors",
"(",
"self",
",",
"config_data",
",",
"property_name",
",",
"ancestors",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"property_name",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"property_key must be a string. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"property_name",
")",
")",
")",
"if",
"ancestors",
"is",
"None",
":",
"ancestors",
"=",
"OrderedDict",
"(",
"[",
"(",
"'root'",
",",
"config_data",
")",
"]",
")",
"elif",
"not",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"raise",
"TypeError",
"(",
"\"ancestors must be an OrderedDict. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"elif",
"'root'",
"not",
"in",
"ancestors",
":",
"raise",
"LookupError",
"(",
"'root must be in ancestors. currently in the ancestors chain {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"ancestors",
".",
"keys",
"(",
")",
")",
")",
")",
"ancestors",
"=",
"ancestors",
".",
"copy",
"(",
")",
"for",
"previous_key",
"in",
"list",
"(",
"ancestors",
")",
"[",
":",
":",
"-",
"1",
"]",
":",
"previous_item",
"=",
"ancestors",
"[",
"previous_key",
"]",
"if",
"isinstance",
"(",
"config_data",
",",
"dict",
")",
":",
"if",
"property_name",
"in",
"previous_item",
":",
"ancestors",
"[",
"property_name",
"]",
"=",
"config_data",
"break",
"return",
"ancestors"
]
| Update ancestors for a specific property.
:param ancestors: A :OrderedDict:, representing the ancestors of a property.
:param config_data: The data that needs to be validated against the scheme.
:param property_name: A :string: of the property's name.
:rtype: A :OrderedDict: that has been updated with new parents. | [
"Update",
"ancestors",
"for",
"a",
"specific",
"property",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1392-L1424 | train |
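A toy illustration of the ancestry bookkeeping above; the config values are made up:

```python
# Illustrative only: the ancestors chain is an OrderedDict whose first key
# must be 'root', with deeper properties appended as the walk descends.
from collections import OrderedDict

config = {'registries': {'docker_hub': {'address': 'https://index.docker.io'}}}
ancestors = OrderedDict([('root', config)])
ancestors['registries'] = config['registries']

assert list(ancestors)[0] == 'root'
print(list(ancestors))  # ['root', 'registries']
```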
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config._reference_keys | def _reference_keys(self, reference):
""" Returns a list of all of keys for a given reference.
:param reference: a :string:
:rtype: A :list: of reference keys.
"""
if not isinstance(reference, six.string_types):
raise TypeError(
'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.'.format(type(reference).__name__)
)
if '~' in reference:
reference = reference[1:]
scheme = self._scheme_references.get(reference)
if not scheme:
# TODO: need to create nice error here as well and print pretty message.
raise LookupError(
"Was unable to find {0} in the scheme references. "
"available references {1}".format(reference, ', '.join(self._scheme_references.keys()))
)
return scheme['keys']
else:
raise AttributeError('references must start with ~. Please update {0} and retry.'.format(reference)) | python | def _reference_keys(self, reference):
""" Returns a list of all of keys for a given reference.
:param reference: a :string:
:rtype: A :list: of reference keys.
"""
if not isinstance(reference, six.string_types):
raise TypeError(
'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.'.format(type(reference).__name__)
)
if '~' in reference:
reference = reference[1:]
scheme = self._scheme_references.get(reference)
if not scheme:
# TODO: need to create nice error here as well and print pretty message.
raise LookupError(
"Was unable to find {0} in the scheme references. "
"available references {1}".format(reference, ', '.join(self._scheme_references.keys()))
)
return scheme['keys']
else:
raise AttributeError('references must start with ~. Please update {0} and retry.'.format(reference)) | [
"def",
"_reference_keys",
"(",
"self",
",",
"reference",
")",
":",
"if",
"not",
"isinstance",
"(",
"reference",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.'",
".",
"format",
"(",
"type",
"(",
"reference",
")",
".",
"__name__",
")",
")",
"if",
"'~'",
"in",
"reference",
":",
"reference",
"=",
"reference",
"[",
"1",
":",
"]",
"scheme",
"=",
"self",
".",
"_scheme_references",
".",
"get",
"(",
"reference",
")",
"if",
"not",
"scheme",
":",
"# TODO: need to create nice error here as well and print pretty message.",
"raise",
"LookupError",
"(",
"\"Was unable to find {0} in the scheme references. \"",
"\"available references {1}\"",
".",
"format",
"(",
"reference",
",",
"', '",
".",
"join",
"(",
"self",
".",
"_scheme_references",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"scheme",
"[",
"'keys'",
"]",
"else",
":",
"raise",
"AttributeError",
"(",
"'references must start with ~. Please update {0} and retry.'",
".",
"format",
"(",
"reference",
")",
")"
]
| Returns a list of all keys for a given reference.
:param reference: a :string:
:rtype: A :list: of reference keys. | [
"Returns",
"a",
"list",
"of",
"all",
"of",
"keys",
"for",
"a",
"given",
"reference",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1426-L1450 | train |
TUNE-Archive/freight_forwarder | freight_forwarder/config.py | Config.__execute_validations | def __execute_validations(self, validations, data, property_name, ancestors, negation=False, prefix=None):
""" Validate the data for a specific configuration value. This method will look up all of the validations provided
and dynamically call any validation methods. If a validation fails an error will be thrown. If no errors are found
attributes will be dynamically created on the Config object for the configuration value.
:param validations: A :dict: with any required validations and expected values.
:param data: the data to validate.
:param property_name: A :string:, the properties name.
:param ancestors: A :OrderedDict:, representing the ancestors of a property.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(validations, dict):
raise TypeError('validations is required to be a dict. type: {0} was passed.'.format(type(validations)))
if not isinstance(property_name, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_name)))
# reorder validates putting required first. If the data doesn't exist there is no need to continue.
order = ['type', 'required'] + [key for key in validations.keys() if key not in ('required', 'type')]
ordered_validations = OrderedDict(sorted(validations.items(), key=lambda x: order.index(x[0])))
for validation, value in six.iteritems(ordered_validations):
if validation in VALIDATORS:
if validation == 'not':
# TODO: need to test to make sure this works
for err in self.__execute_validations(value, data, property_name, ancestors, negation, prefix):
yield err
continue
for err in getattr(self, '_{0}'.format(validation))(value, data, property_name, ancestors, negation, prefix):
yield err
else:
raise LookupError("{0} isn't a validator or reserved scheme key.".format(validation)) | python | def __execute_validations(self, validations, data, property_name, ancestors, negation=False, prefix=None):
""" Validate the data for a specific configuration value. This method will look up all of the validations provided
and dynamically call any validation methods. If a validation fails an error will be thrown. If no errors are found
attributes will be dynamically created on the Config object for the configuration value.
:param validations: A :dict: with any required validations and expected values.
:param data: the data to validate.
:param property_name: A :string:, the properties name.
:param ancestors: A :OrderedDict:, representing the ancestors of a property.
"""
if not isinstance(ancestors, OrderedDict):
raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))
if not isinstance(validations, dict):
raise TypeError('validations is required to be a dict. type: {0} was passed.'.format(type(validations)))
if not isinstance(property_name, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_name)))
# reorder validates putting required first. If the data doesn't exist there is no need to continue.
order = ['type', 'required'] + [key for key in validations.keys() if key not in ('required', 'type')]
ordered_validations = OrderedDict(sorted(validations.items(), key=lambda x: order.index(x[0])))
for validation, value in six.iteritems(ordered_validations):
if validation in VALIDATORS:
if validation == 'not':
# TODO: need to test to make sure this works
for err in self.__execute_validations(value, data, property_name, ancestors, negation, prefix):
yield err
continue
for err in getattr(self, '_{0}'.format(validation))(value, data, property_name, ancestors, negation, prefix):
yield err
else:
raise LookupError("{0} isn't a validator or reserved scheme key.".format(validation)) | [
"def",
"__execute_validations",
"(",
"self",
",",
"validations",
",",
"data",
",",
"property_name",
",",
"ancestors",
",",
"negation",
"=",
"False",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"ancestors",
",",
"OrderedDict",
")",
":",
"raise",
"TypeError",
"(",
"\"ancestors must be an OrderedDict. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"ancestors",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"validations",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'validations is required to be a dict. type: {1} was passed.'",
".",
"format",
"(",
"type",
"(",
"validations",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"property_name",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"\"property_key must be a string. type: {0} was passed.\"",
".",
"format",
"(",
"type",
"(",
"property_name",
")",
")",
")",
"# reorder validates putting required first. If the data doesn't exist there is no need to continue.",
"order",
"=",
"[",
"'type'",
",",
"'required'",
"]",
"+",
"[",
"key",
"for",
"key",
"in",
"validations",
".",
"keys",
"(",
")",
"if",
"key",
"not",
"in",
"(",
"'required'",
",",
"'type'",
")",
"]",
"ordered_validations",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"validations",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"order",
".",
"index",
"(",
"x",
"[",
"0",
"]",
")",
")",
")",
"for",
"validation",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"ordered_validations",
")",
":",
"if",
"validation",
"in",
"VALIDATORS",
":",
"if",
"validation",
"==",
"'not'",
":",
"# TODO: need to test to make sure this works",
"for",
"err",
"in",
"self",
".",
"__execute_validations",
"(",
"value",
",",
"data",
",",
"property_name",
",",
"ancestors",
",",
"negation",
",",
"prefix",
")",
":",
"yield",
"err",
"continue",
"for",
"err",
"in",
"getattr",
"(",
"self",
",",
"'_{0}'",
".",
"format",
"(",
"validation",
")",
")",
"(",
"value",
",",
"data",
",",
"property_name",
",",
"ancestors",
",",
"negation",
",",
"prefix",
")",
":",
"yield",
"err",
"else",
":",
"raise",
"LookupError",
"(",
"\"{0} isn't a validator or reserved scheme key.\"",
".",
"format",
"(",
"validation",
")",
")"
]
| Validate the data for a specific configuration value. This method will look up all of the validations provided
and dynamically call any validation methods. If a validation fails an error will be thrown. If no errors are found
attributes will be dynamically created on the Config object for the configuration value.
:param validations: A :dict: with any required validations and expected values.
:param data: the data to validate.
:param property_name: A :string:, the properties name.
:param ancestors: A :OrderedDict:, representing the ancestors of a property. | [
"Validate",
"the",
"data",
"for",
"a",
"specific",
"configuration",
"value",
".",
"This",
"method",
"will",
"look",
"up",
"all",
"of",
"the",
"validations",
"provided",
"and",
"dynamically",
"call",
"any",
"validation",
"methods",
".",
"If",
"a",
"validation",
"fails",
"a",
"error",
"will",
"be",
"thrown",
".",
"If",
"no",
"errors",
"are",
"found",
"a",
"attributes",
"will",
"be",
"dynamically",
"created",
"on",
"the",
"Config",
"object",
"for",
"the",
"configuration",
"value",
"."
]
| 6ea4a49f474ec04abb8bb81b175c774a16b5312f | https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1505-L1542 | train |
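A reduced sketch of the getattr-based validator dispatch used above; the `Validator` class and its two rules are assumptions for illustration, not the real Config internals:

```python
# Hedged sketch: dispatch named validators via getattr('_{name}'), each
# implemented as a generator that yields errors, mirroring the pattern above.
class Validator(object):
    def _required(self, required, data, name):
        if required and data is None:
            yield LookupError('{0} is required'.format(name))

    def _type(self, expected, data, name):
        if not isinstance(data, expected):
            yield TypeError('{0} must be a {1}'.format(name, expected.__name__))

    def run(self, validations, data, name):
        for validation, value in validations.items():
            for err in getattr(self, '_{0}'.format(validation))(value, data, name):
                raise err

Validator().run({'required': True, 'type': str}, 'docker', 'image')
```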
ZEDGR/pychal | challonge/tournaments.py | create | def create(name, url, tournament_type="single elimination", **params):
"""Create a new tournament."""
params.update({
"name": name,
"url": url,
"tournament_type": tournament_type,
})
return api.fetch_and_parse("POST", "tournaments", "tournament", **params) | python | def create(name, url, tournament_type="single elimination", **params):
"""Create a new tournament."""
params.update({
"name": name,
"url": url,
"tournament_type": tournament_type,
})
return api.fetch_and_parse("POST", "tournaments", "tournament", **params) | [
"def",
"create",
"(",
"name",
",",
"url",
",",
"tournament_type",
"=",
"\"single elimination\"",
",",
"*",
"*",
"params",
")",
":",
"params",
".",
"update",
"(",
"{",
"\"name\"",
":",
"name",
",",
"\"url\"",
":",
"url",
",",
"\"tournament_type\"",
":",
"tournament_type",
",",
"}",
")",
"return",
"api",
".",
"fetch_and_parse",
"(",
"\"POST\"",
",",
"\"tournaments\"",
",",
"\"tournament\"",
",",
"*",
"*",
"params",
")"
]
| Create a new tournament. | [
"Create",
"a",
"new",
"tournament",
"."
]
| 3600fa9e0557a2a14eb1ad0c0711d28dad3693d7 | https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/tournaments.py#L9-L17 | train |
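A possible call pattern for the wrapper above; the credentials and tournament details are placeholders, and the returned fields are assumptions based on Challonge's documented response:

```python
# Hypothetical usage; replace the credentials with real Challonge values.
import challonge

challonge.set_credentials('your_username', 'your_api_key')
tournament = challonge.tournaments.create(
    'Spring Invitational',
    'spring_invitational_2019',
    tournament_type='double elimination',
)
print(tournament['id'])  # assumes the parsed response includes an 'id' field
```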
projectshift/shift-boiler | boiler/feature/users.py | users_feature | def users_feature(app):
"""
Add users feature
Allows registering users and assigning groups, instantiates flask login, flask principal
and oauth integration
"""
# check we have jwt secret configured
if not app.config.get('USER_JWT_SECRET', None):
raise x.JwtSecretMissing('Please set USER_JWT_SECRET in config')
# use custom session interface
app.session_interface = BoilerSessionInterface()
# init user service
user_service.init(app)
# init login manager
login_manager.init_app(app)
login_manager.login_view = 'user.login'
login_manager.login_message = None
@login_manager.user_loader
def load_user(id):
return user_service.get(id)
# init OAuth
oauth.init_app(app)
registry = OauthProviders(app)
providers = registry.get_providers()
with app.app_context():
for provider in providers:
if provider not in oauth.remote_apps:
oauth.remote_app(provider, **providers[provider])
registry.register_token_getter(provider)
# init principal
principal.init_app(app)
@principal.identity_loader
def load_identity():
if current_user.is_authenticated:
return Identity(current_user.id)
session.pop('identity.name', None)
session.pop('identity.auth_type', None)
return AnonymousIdentity()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if not current_user.is_authenticated:
return
identity.provides.add(UserNeed(current_user.id))
for role in current_user.roles:
identity.provides.add(RoleNeed(role.handle)) | python | def users_feature(app):
"""
Add users feature
Allows registering users and assigning groups, instantiates flask login, flask principal
and oauth integration
"""
# check we have jwt secret configured
if not app.config.get('USER_JWT_SECRET', None):
raise x.JwtSecretMissing('Please set USER_JWT_SECRET in config')
# use custom session interface
app.session_interface = BoilerSessionInterface()
# init user service
user_service.init(app)
# init login manager
login_manager.init_app(app)
login_manager.login_view = 'user.login'
login_manager.login_message = None
@login_manager.user_loader
def load_user(id):
return user_service.get(id)
# init OAuth
oauth.init_app(app)
registry = OauthProviders(app)
providers = registry.get_providers()
with app.app_context():
for provider in providers:
if provider not in oauth.remote_apps:
oauth.remote_app(provider, **providers[provider])
registry.register_token_getter(provider)
# init principal
principal.init_app(app)
@principal.identity_loader
def load_identity():
if current_user.is_authenticated:
return Identity(current_user.id)
session.pop('identity.name', None)
session.pop('identity.auth_type', None)
return AnonymousIdentity()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
identity.user = current_user
if not current_user.is_authenticated:
return
identity.provides.add(UserNeed(current_user.id))
for role in current_user.roles:
identity.provides.add(RoleNeed(role.handle)) | [
"def",
"users_feature",
"(",
"app",
")",
":",
"# check we have jwt secret configures",
"if",
"not",
"app",
".",
"config",
".",
"get",
"(",
"'USER_JWT_SECRET'",
",",
"None",
")",
":",
"raise",
"x",
".",
"JwtSecretMissing",
"(",
"'Please set USER_JWT_SECRET in config'",
")",
"# use custom session interface",
"app",
".",
"session_interface",
"=",
"BoilerSessionInterface",
"(",
")",
"# init user service",
"user_service",
".",
"init",
"(",
"app",
")",
"# init login manager",
"login_manager",
".",
"init_app",
"(",
"app",
")",
"login_manager",
".",
"login_view",
"=",
"'user.login'",
"login_manager",
".",
"login_message",
"=",
"None",
"@",
"login_manager",
".",
"user_loader",
"def",
"load_user",
"(",
"id",
")",
":",
"return",
"user_service",
".",
"get",
"(",
"id",
")",
"# init OAuth",
"oauth",
".",
"init_app",
"(",
"app",
")",
"registry",
"=",
"OauthProviders",
"(",
"app",
")",
"providers",
"=",
"registry",
".",
"get_providers",
"(",
")",
"with",
"app",
".",
"app_context",
"(",
")",
":",
"for",
"provider",
"in",
"providers",
":",
"if",
"provider",
"not",
"in",
"oauth",
".",
"remote_apps",
":",
"oauth",
".",
"remote_app",
"(",
"provider",
",",
"*",
"*",
"providers",
"[",
"provider",
"]",
")",
"registry",
".",
"register_token_getter",
"(",
"provider",
")",
"# init principal",
"principal",
".",
"init_app",
"(",
"app",
")",
"@",
"principal",
".",
"identity_loader",
"def",
"load_identity",
"(",
")",
":",
"if",
"current_user",
".",
"is_authenticated",
":",
"return",
"Identity",
"(",
"current_user",
".",
"id",
")",
"session",
".",
"pop",
"(",
"'identity.name'",
",",
"None",
")",
"session",
".",
"pop",
"(",
"'identity.auth_type'",
",",
"None",
")",
"return",
"AnonymousIdentity",
"(",
")",
"@",
"identity_loaded",
".",
"connect_via",
"(",
"app",
")",
"def",
"on_identity_loaded",
"(",
"sender",
",",
"identity",
")",
":",
"identity",
".",
"user",
"=",
"current_user",
"if",
"not",
"current_user",
".",
"is_authenticated",
":",
"return",
"identity",
".",
"provides",
".",
"add",
"(",
"UserNeed",
"(",
"current_user",
".",
"id",
")",
")",
"for",
"role",
"in",
"current_user",
".",
"roles",
":",
"identity",
".",
"provides",
".",
"add",
"(",
"RoleNeed",
"(",
"role",
".",
"handle",
")",
")"
]
| Add users feature
Allows registering users and assigning groups, instantiates flask login, flask principal
and oauth integration | [
"Add",
"users",
"feature",
"Allows",
"to",
"register",
"users",
"and",
"assign",
"groups",
"instantiates",
"flask",
"login",
"flask",
"principal",
"and",
"oauth",
"integration"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/users.py#L14-L69 | train |
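A hedged sketch of wiring the feature into an application factory; only USER_JWT_SECRET is known to be required from the check above, everything else is illustrative:

```python
# Minimal factory sketch; the import path follows this record's module path.
from flask import Flask
from boiler.feature.users import users_feature

def create_app():
    app = Flask(__name__)
    app.config['USER_JWT_SECRET'] = 'change-me'  # required, see the check above
    users_feature(app)
    return app
```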
ethan92429/onshapepy | onshapepy/core/client.py | Client.rename_document | def rename_document(self, did, name):
'''
Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/documents/' + did, body=payload) | python | def rename_document(self, did, name):
'''
Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/documents/' + did, body=payload) | [
"def",
"rename_document",
"(",
"self",
",",
"did",
",",
"name",
")",
":",
"payload",
"=",
"{",
"'name'",
":",
"name",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/documents/'",
"+",
"did",
",",
"body",
"=",
"payload",
")"
]
| Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data | [
"Renames",
"the",
"specified",
"document",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L98-L114 | train |
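A possible call pattern; the document ID is a placeholder and the Client constructor arguments are an assumption:

```python
# Hypothetical usage of the onshapepy client; IDs are placeholders.
from onshapepy.core.client import Client

client = Client()  # construction/credential details may differ in practice
response = client.rename_document('0123456789abcdef01234567', 'Renamed via API')
print(response.status_code)
```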
ethan92429/onshapepy | onshapepy/core/client.py | Client.copy_workspace | def copy_workspace(self, uri, new_name):
'''
Copy the current workspace.
Args:
- uri (dict): the uri of the workspace being copied. Needs to have a did and wvm key.
- new_name (str): the new name of the copied workspace.
Returns:
- requests.Response: Onshape response data
'''
payload = {
'isPublic': True,
'newName': new_name
}
return self._api.request('post', '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy', body=payload) | python | def copy_workspace(self, uri, new_name):
'''
Copy the current workspace.
Args:
- uri (dict): the uri of the workspace being copied. Needs to have a did and wvm key.
- new_name (str): the new name of the copied workspace.
Returns:
- requests.Response: Onshape response data
'''
payload = {
'isPublic': True,
'newName': new_name
}
return self._api.request('post', '/api/documents/' + uri['did'] + '/workspaces/' + uri['wvm'] + '/copy', body=payload) | [
"def",
"copy_workspace",
"(",
"self",
",",
"uri",
",",
"new_name",
")",
":",
"payload",
"=",
"{",
"'isPublic'",
":",
"True",
",",
"'newName'",
":",
"new_name",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/documents/'",
"+",
"uri",
"[",
"'did'",
"]",
"+",
"'/workspaces/'",
"+",
"uri",
"[",
"'wvm'",
"]",
"+",
"'/copy'",
",",
"body",
"=",
"payload",
")"
]
| Copy the current workspace.
Args:
- uri (dict): the uri of the workspace being copied. Needs to have a did and wvm key.
- new_name (str): the new name of the copied workspace.
Returns:
- requests.Response: Onshape response data | [
"Copy",
"the",
"current",
"workspace",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L152-L169 | train |
ethan92429/onshapepy | onshapepy/core/client.py | Client.create_workspace | def create_workspace(self, did, name, version_id=None):
'''
Create a workspace in the specified document.
Args:
- did (str): the document id of where to create the new workspace
- name (str): the name of the new workspace.
- version_id (str): the ID of the version to be copied into a new workspace
Returns:
- requests.Response: Onshape response data
'''
payload = {
'isPublic': True,
'name': name,
}
if version_id:
payload['versionId'] = version_id
return self._api.request('post', '/api/documents/d/' + did + '/workspaces', body=payload) | python | def create_workspace(self, did, name, version_id=None):
'''
Create a workspace in the specified document.
Args:
- did (str): the document id of where to create the new workspace
- name (str): the name of the new workspace.
- version_id (str): the ID of the version to be copied into a new workspace
Returns:
- requests.Response: Onshape response data
'''
payload = {
'isPublic': True,
'name': name,
}
if version_id:
payload['versionId'] = version_id
return self._api.request('post', '/api/documents/d/' + did + '/workspaces', body=payload) | [
"def",
"create_workspace",
"(",
"self",
",",
"did",
",",
"name",
",",
"version_id",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'isPublic'",
":",
"True",
",",
"'name'",
":",
"name",
",",
"}",
"if",
"version_id",
":",
"payload",
"[",
"'versionId'",
"]",
"=",
"version_id",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/documents/d/'",
"+",
"did",
"+",
"'/workspaces'",
",",
"body",
"=",
"payload",
")"
]
| Create a workspace in the specified document.
Args:
- did (str): the document id of where to create the new workspace
- name (str): the name of the new workspace.
- version_id (str): the ID of the version to be copied into a new workspace
Returns:
- requests.Response: Onshape response data | [
"Create",
"a",
"workspace",
"in",
"the",
"specified",
"document",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L171-L192 | train |
ethan92429/onshapepy | onshapepy/core/client.py | Client.get_partstudio_tessellatededges | def get_partstudio_tessellatededges(self, did, wid, eid):
'''
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges') | python | def get_partstudio_tessellatededges(self, did, wid, eid):
'''
Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges') | [
"def",
"get_partstudio_tessellatededges",
"(",
"self",
",",
"did",
",",
"wid",
",",
"eid",
")",
":",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'get'",
",",
"'/api/partstudios/d/'",
"+",
"did",
"+",
"'/w/'",
"+",
"wid",
"+",
"'/e/'",
"+",
"eid",
"+",
"'/tessellatededges'",
")"
]
| Gets the tessellation of the edges of all parts in a part studio.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data | [
"Gets",
"the",
"tessellation",
"of",
"the",
"edges",
"of",
"all",
"parts",
"in",
"a",
"part",
"studio",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L228-L241 | train |
ethan92429/onshapepy | onshapepy/core/client.py | Client.upload_blob | def upload_blob(self, did, wid, filepath='./blob.json'):
'''
Uploads a file to a new blob element in the specified doc.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- filepath (str, default='./blob.json'): Blob element location
Returns:
- requests.Response: Onshape response data
'''
chars = string.ascii_letters + string.digits
boundary_key = ''.join(random.choice(chars) for i in range(8))
mimetype = mimetypes.guess_type(filepath)[0]
encoded_filename = os.path.basename(filepath)
file_content_length = str(os.path.getsize(filepath))
blob = open(filepath)
req_headers = {
'Content-Type': 'multipart/form-data; boundary="%s"' % boundary_key
}
# build request body
payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n'
payload += 'Content-Type: ' + mimetype + '\r\n\r\n'
payload += blob.read()
payload += '\r\n--' + boundary_key + '--'
return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid, headers=req_headers, body=payload) | python | def upload_blob(self, did, wid, filepath='./blob.json'):
'''
Uploads a file to a new blob element in the specified doc.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- filepath (str, default='./blob.json'): Blob element location
Returns:
- requests.Response: Onshape response data
'''
chars = string.ascii_letters + string.digits
boundary_key = ''.join(random.choice(chars) for i in range(8))
mimetype = mimetypes.guess_type(filepath)[0]
encoded_filename = os.path.basename(filepath)
file_content_length = str(os.path.getsize(filepath))
blob = open(filepath)
req_headers = {
'Content-Type': 'multipart/form-data; boundary="%s"' % boundary_key
}
# build request body
payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n'
payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n'
payload += 'Content-Type: ' + mimetype + '\r\n\r\n'
payload += blob.read()
payload += '\r\n--' + boundary_key + '--'
return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid, headers=req_headers, body=payload) | [
"def",
"upload_blob",
"(",
"self",
",",
"did",
",",
"wid",
",",
"filepath",
"=",
"'./blob.json'",
")",
":",
"chars",
"=",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"boundary_key",
"=",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"chars",
")",
"for",
"i",
"in",
"range",
"(",
"8",
")",
")",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"filepath",
")",
"[",
"0",
"]",
"encoded_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filepath",
")",
"file_content_length",
"=",
"str",
"(",
"os",
".",
"path",
".",
"getsize",
"(",
"filepath",
")",
")",
"blob",
"=",
"open",
"(",
"filepath",
")",
"req_headers",
"=",
"{",
"'Content-Type'",
":",
"'multipart/form-data; boundary=\"%s\"'",
"%",
"boundary_key",
"}",
"# build request body",
"payload",
"=",
"'--'",
"+",
"boundary_key",
"+",
"'\\r\\nContent-Disposition: form-data; name=\"encodedFilename\"\\r\\n\\r\\n'",
"+",
"encoded_filename",
"+",
"'\\r\\n'",
"payload",
"+=",
"'--'",
"+",
"boundary_key",
"+",
"'\\r\\nContent-Disposition: form-data; name=\"fileContentLength\"\\r\\n\\r\\n'",
"+",
"file_content_length",
"+",
"'\\r\\n'",
"payload",
"+=",
"'--'",
"+",
"boundary_key",
"+",
"'\\r\\nContent-Disposition: form-data; name=\"file\"; filename=\"'",
"+",
"encoded_filename",
"+",
"'\"\\r\\n'",
"payload",
"+=",
"'Content-Type: '",
"+",
"mimetype",
"+",
"'\\r\\n\\r\\n'",
"payload",
"+=",
"blob",
".",
"read",
"(",
")",
"payload",
"+=",
"'\\r\\n--'",
"+",
"boundary_key",
"+",
"'--'",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/blobelements/d/'",
"+",
"did",
"+",
"'/w/'",
"+",
"wid",
",",
"headers",
"=",
"req_headers",
",",
"body",
"=",
"payload",
")"
]
| Uploads a file to a new blob element in the specified doc.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- filepath (str, default='./blob.json'): Blob element location
Returns:
- requests.Response: Onshape response data | [
"Uploads",
"a",
"file",
"to",
"a",
"new",
"blob",
"element",
"in",
"the",
"specified",
"doc",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L243-L276 | train |
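A hedged usage sketch for the multipart upload above; the IDs are placeholders and the Client constructor is an assumption:

```python
# Hypothetical usage: write a small JSON file, then upload it as a blob element.
import json
from onshapepy.core.client import Client

with open('blob.json', 'w') as f:
    json.dump({'hello': 'onshape'}, f)

client = Client()  # construction/credential details may differ in practice
response = client.upload_blob('<document-id>', '<workspace-id>', filepath='./blob.json')
print(response.status_code)
```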
ethan92429/onshapepy | onshapepy/core/client.py | Client.part_studio_stl | def part_studio_stl(self, did, wid, eid):
'''
Exports an STL file from a part studio
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+octet-stream'
}
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl', headers=req_headers) | python | def part_studio_stl(self, did, wid, eid):
'''
Exports an STL file from a part studio
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+octet-stream'
}
return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl', headers=req_headers) | [
"def",
"part_studio_stl",
"(",
"self",
",",
"did",
",",
"wid",
",",
"eid",
")",
":",
"req_headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.onshape.v1+octet-stream'",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'get'",
",",
"'/api/partstudios/d/'",
"+",
"did",
"+",
"'/w/'",
"+",
"wid",
"+",
"'/e/'",
"+",
"eid",
"+",
"'/stl'",
",",
"headers",
"=",
"req_headers",
")"
]
| Exports an STL file from a part studio
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
Returns:
- requests.Response: Onshape response data | [
"Exports",
"STL",
"export",
"from",
"a",
"part",
"studio"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L278-L294 | train |
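Saving the export to disk could look like the sketch below; the IDs are placeholders, and the raw STL bytes are assumed to arrive in `response.content` given the octet-stream Accept header:

```python
# Hypothetical usage: request the STL export and write it to a local file.
from onshapepy.core.client import Client

client = Client()  # construction/credential details may differ in practice
response = client.part_studio_stl('<document-id>', '<workspace-id>', '<element-id>')
with open('part.stl', 'wb') as f:
    f.write(response.content)
```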
ethan92429/onshapepy | onshapepy/core/client.py | Client.create_assembly_instance | def create_assembly_instance(self, assembly_uri, part_uri, configuration):
'''
Insert a configurable part into an assembly.
Args:
- assembly_uri (dict): eid, wid, and did of the assembly into which the part will be inserted
- part_uri (dict): eid and did of the configurable part
- configuration (dict): the configuration
Returns:
- requests.Response: Onshape response data
'''
payload = {
"documentId": part_uri["did"],
"elementId": part_uri["eid"],
# could be added if needed:
# "partId": "String",
# "featureId": "String",
# "microversionId": "String",
"versionId": part_uri["wvm"],
# "microversionId": "String",
"isAssembly": False,
"isWholePartStudio": True,
"configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration)
}
return self._api.request('post', '/api/assemblies/d/' + assembly_uri["did"] + '/' + assembly_uri["wvm_type"] +
'/' + assembly_uri["wvm"] + '/e/' + assembly_uri["eid"] + '/instances', body=payload) | python | def create_assembly_instance(self, assembly_uri, part_uri, configuration):
'''
Insert a configurable part into an assembly.
Args:
- assembly_uri (dict): eid, wid, and did of the assembly into which the part will be inserted
- part_uri (dict): eid and did of the configurable part
- configuration (dict): the configuration
Returns:
- requests.Response: Onshape response data
'''
payload = {
"documentId": part_uri["did"],
"elementId": part_uri["eid"],
# could be added if needed:
# "partId": "String",
# "featureId": "String",
# "microversionId": "String",
"versionId": part_uri["wvm"],
# "microversionId": "String",
"isAssembly": False,
"isWholePartStudio": True,
"configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration)
}
return self._api.request('post', '/api/assemblies/d/' + assembly_uri["did"] + '/' + assembly_uri["wvm_type"] +
'/' + assembly_uri["wvm"] + '/e/' + assembly_uri["eid"] + '/instances', body=payload) | [
"def",
"create_assembly_instance",
"(",
"self",
",",
"assembly_uri",
",",
"part_uri",
",",
"configuration",
")",
":",
"payload",
"=",
"{",
"\"documentId\"",
":",
"part_uri",
"[",
"\"did\"",
"]",
",",
"\"elementId\"",
":",
"part_uri",
"[",
"\"eid\"",
"]",
",",
"# could be added if needed:",
"# \"partId\": \"String\",",
"# \"featureId\": \"String\",",
"# \"microversionId\": \"String\",",
"\"versionId\"",
":",
"part_uri",
"[",
"\"wvm\"",
"]",
",",
"# \"microversionId\": \"String\",",
"\"isAssembly\"",
":",
"False",
",",
"\"isWholePartStudio\"",
":",
"True",
",",
"\"configuration\"",
":",
"self",
".",
"encode_configuration",
"(",
"part_uri",
"[",
"\"did\"",
"]",
",",
"part_uri",
"[",
"\"eid\"",
"]",
",",
"configuration",
")",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/assemblies/d/'",
"+",
"assembly_uri",
"[",
"\"did\"",
"]",
"+",
"'/'",
"+",
"assembly_uri",
"[",
"\"wvm_type\"",
"]",
"+",
"'/'",
"+",
"assembly_uri",
"[",
"\"wvm\"",
"]",
"+",
"'/e/'",
"+",
"assembly_uri",
"[",
"\"eid\"",
"]",
"+",
"'/instances'",
",",
"body",
"=",
"payload",
")"
]
| Insert a configurable part into an assembly.
Args:
- assembly_uri (dict): eid, wid, and did of the assembly into which the part will be inserted
- part_uri (dict): eid and did of the configurable part
- configuration (dict): the configuration
Returns:
- requests.Response: Onshape response data | [
"Insert",
"a",
"configurable",
"part",
"into",
"an",
"assembly",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L298-L325 | train |
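A hedged example of the two URI dicts and configuration this method expects; every ID and parameter name is a placeholder:

```python
# Hypothetical usage: insert a configured part studio into an assembly.
from onshapepy.core.client import Client

assembly_uri = {'did': '<did>', 'wvm_type': 'w', 'wvm': '<workspace-id>', 'eid': '<assembly-eid>'}
part_uri = {'did': '<part-did>', 'wvm': '<version-id>', 'eid': '<part-eid>'}  # referenced by version, per the payload above
configuration = {'length': '0.5 meter'}  # parameter names depend on the part

client = Client()  # construction/credential details may differ in practice
client.create_assembly_instance(assembly_uri, part_uri, configuration)
```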
ethan92429/onshapepy | onshapepy/core/client.py | Client.encode_configuration | def encode_configuration(self, did, eid, parameters):
'''
Encode parameters as a URL-ready string
Args:
- did (str): Document ID
- eid (str): Element ID
- parameters (dict): key-value pairs of the parameters to be encoded
Returns:
- configuration (str): the url-ready configuration string.
'''
# change to the type of list the API is expecting
parameters = [{"parameterId": k, "parameterValue": v} for (k,v) in parameters.items()]
payload = {
'parameters':parameters
}
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
res = self._api.request('post', '/api/elements/d/' + did + '/e/' + eid + '/configurationencodings', body=payload, headers=req_headers)
return json.loads(res.content.decode("utf-8"))["encodedId"] | python | def encode_configuration(self, did, eid, parameters):
'''
Encode parameters as a URL-ready string
Args:
- did (str): Document ID
- eid (str): Element ID
- parameters (dict): key-value pairs of the parameters to be encoded
Returns:
- configuration (str): the url-ready configuration string.
'''
# change to the type of list the API is expecting
parameters = [{"parameterId": k, "parameterValue": v} for (k,v) in parameters.items()]
payload = {
'parameters':parameters
}
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
res = self._api.request('post', '/api/elements/d/' + did + '/e/' + eid + '/configurationencodings', body=payload, headers=req_headers)
return json.loads(res.content.decode("utf-8"))["encodedId"] | [
"def",
"encode_configuration",
"(",
"self",
",",
"did",
",",
"eid",
",",
"parameters",
")",
":",
"# change to the type of list the API is expecting",
"parameters",
"=",
"[",
"{",
"\"parameterId\"",
":",
"k",
",",
"\"parameterValue\"",
":",
"v",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"parameters",
".",
"items",
"(",
")",
"]",
"payload",
"=",
"{",
"'parameters'",
":",
"parameters",
"}",
"req_headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.onshape.v1+json'",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"res",
"=",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/elements/d/'",
"+",
"did",
"+",
"'/e/'",
"+",
"eid",
"+",
"'/configurationencodings'",
",",
"body",
"=",
"payload",
",",
"headers",
"=",
"req_headers",
")",
"return",
"json",
".",
"loads",
"(",
"res",
".",
"content",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"[",
"\"encodedId\"",
"]"
]
| Encode parameters as a URL-ready string
Args:
- did (str): Document ID
- eid (str): Element ID
- parameters (dict): key-value pairs of the parameters to be encoded
Returns:
- configuration (str): the url-ready configuration string. | [
"Encode",
"parameters",
"as",
"a",
"URL",
"-",
"ready",
"string"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L327-L352 | train |
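The parameter reshaping done above, shown in isolation; this transform is fully determined by the code in the record:

```python
# A plain dict becomes the list of {parameterId, parameterValue} pairs that
# the Onshape configurationencodings endpoint expects.
parameters = {'length': '0.5 meter', 'width': '0.1 meter'}
payload = {
    'parameters': [
        {'parameterId': k, 'parameterValue': v} for (k, v) in parameters.items()
    ]
}
print(payload)
```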
ethan92429/onshapepy | onshapepy/core/client.py | Client.get_configuration | def get_configuration(self, uri):
'''
get the configuration of a PartStudio
Args:
- uri (dict): points to a particular element
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
return self._api.request('get', '/api/partstudios/d/' + uri["did"] + '/' + uri["wvm_type"] + '/' + uri["wvm"] + '/e/' + uri["eid"] + '/configuration', headers=req_headers) | python | def get_configuration(self, uri):
'''
get the configuration of a PartStudio
Args:
- uri (dict): points to a particular element
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
return self._api.request('get', '/api/partstudios/d/' + uri["did"] + '/' + uri["wvm_type"] + '/' + uri["wvm"] + '/e/' + uri["eid"] + '/configuration', headers=req_headers) | [
"def",
"get_configuration",
"(",
"self",
",",
"uri",
")",
":",
"req_headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.onshape.v1+json'",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"return",
"self",
".",
"_api",
".",
"request",
"(",
"'get'",
",",
"'/api/partstudios/d/'",
"+",
"uri",
"[",
"\"did\"",
"]",
"+",
"'/'",
"+",
"uri",
"[",
"\"wvm_type\"",
"]",
"+",
"'/'",
"+",
"uri",
"[",
"\"wvm\"",
"]",
"+",
"'/e/'",
"+",
"uri",
"[",
"\"eid\"",
"]",
"+",
"'/configuration'",
",",
"headers",
"=",
"req_headers",
")"
]
| get the configuration of a PartStudio
Args:
- uri (dict): points to a particular element
Returns:
- requests.Response: Onshape response data | [
"get",
"the",
"configuration",
"of",
"a",
"PartStudio"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L355-L370 | train |
ethan92429/onshapepy | onshapepy/core/client.py | Client.update_configuration | def update_configuration(self, did, wid, eid, payload):
'''
Update the configuration specified in the payload
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
- payload (json): the request body
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
res = self._api.request('post', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration', body=payload, headers=req_headers)
return res | python | def update_configuration(self, did, wid, eid, payload):
'''
Update the configuration specified in the payload
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
- payload (json): the request body
Returns:
- requests.Response: Onshape response data
'''
req_headers = {
'Accept': 'application/vnd.onshape.v1+json',
'Content-Type': 'application/json'
}
res = self._api.request('post', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration', body=payload, headers=req_headers)
return res | [
"def",
"update_configuration",
"(",
"self",
",",
"did",
",",
"wid",
",",
"eid",
",",
"payload",
")",
":",
"req_headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.onshape.v1+json'",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"res",
"=",
"self",
".",
"_api",
".",
"request",
"(",
"'post'",
",",
"'/api/partstudios/d/'",
"+",
"did",
"+",
"'/w/'",
"+",
"wid",
"+",
"'/e/'",
"+",
"eid",
"+",
"'/configuration'",
",",
"body",
"=",
"payload",
",",
"headers",
"=",
"req_headers",
")",
"return",
"res"
]
| Update the configuration specified in the payload
Args:
- did (str): Document ID
- wid (str): Workspace ID
- eid (str): Element ID
- payload (json): the request body
Returns:
- requests.Response: Onshape response data | [
"Update",
"the",
"configuration",
"specified",
"in",
"the",
"payload"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/client.py#L375-L394 | train |
teitei-tk/Flask-REST-Controller | flask_rest_controller/routing.py | set_routing | def set_routing(app, view_data):
"""
apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application
"""
routing_modules = convert_routing_module(view_data)
for module in routing_modules:
view = import_string(module.import_path)
app.add_url_rule(module.url, view_func=view.as_view(module.endpoint)) | python | def set_routing(app, view_data):
"""
apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application
"""
routing_modules = convert_routing_module(view_data)
for module in routing_modules:
view = import_string(module.import_path)
app.add_url_rule(module.url, view_func=view.as_view(module.endpoint)) | [
"def",
"set_routing",
"(",
"app",
",",
"view_data",
")",
":",
"routing_modules",
"=",
"convert_routing_module",
"(",
"view_data",
")",
"for",
"module",
"in",
"routing_modules",
":",
"view",
"=",
"import_string",
"(",
"module",
".",
"import_path",
")",
"app",
".",
"add_url_rule",
"(",
"module",
".",
"url",
",",
"view_func",
"=",
"view",
".",
"as_view",
"(",
"module",
".",
"endpoint",
")",
")"
]
| apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application | [
"apply",
"the",
"routing",
"configuration",
"you",
"ve",
"described"
]
| b4386b523f3d2c6550051c95d5ba74e5ff459946 | https://github.com/teitei-tk/Flask-REST-Controller/blob/b4386b523f3d2c6550051c95d5ba74e5ff459946/flask_rest_controller/routing.py#L16-L34 | train |
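A possible wiring of set_routing into a Flask app; the controller import path is a placeholder and assumes an app.IndexController class exists in your project:

```python
# Hypothetical usage; 'app.IndexController' must resolve to a real view class.
from flask import Flask
from flask_rest_controller.routing import set_routing

app = Flask(__name__)
view_data = [
    ('/', 'app.IndexController', 'index'),
]
set_routing(app, view_data)
```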
aacanakin/glim | glim/command.py | CommandAdapter.retrieve_commands | def retrieve_commands(self, module):
"""
Function smartly imports Command type classes from the given module
Args
----
module (module):
The module which Command classes will be extracted from
Returns
-------
commands (list):
A list of Command instances
Note:
This function will not register any command class
named "Command" or "GlimCommand".
When extending Command class, be sure to have "Command"
string on your custom commands.
"""
commands = []
for name, obj in inspect.getmembers(module):
if name != 'Command' and 'Command' in name:
if name != 'GlimCommand':
cobject = getattr(module, name)
commands.append(cobject)
return commands | python | def retrieve_commands(self, module):
"""
Function smartly imports Command type classes from the given module
Args
----
module (module):
The module which Command classes will be extracted from
Returns
-------
commands (list):
A list of Command instances
Note:
This function will not register any command class
named "Command" or "GlimCommand".
When extending Command class, be sure to have "Command"
string on your custom commands.
"""
commands = []
for name, obj in inspect.getmembers(module):
if name != 'Command' and 'Command' in name:
if name != 'GlimCommand':
cobject = getattr(module, name)
commands.append(cobject)
return commands | [
"def",
"retrieve_commands",
"(",
"self",
",",
"module",
")",
":",
"commands",
"=",
"[",
"]",
"for",
"name",
",",
"obj",
"in",
"inspect",
".",
"getmembers",
"(",
"module",
")",
":",
"if",
"name",
"!=",
"'Command'",
"and",
"'Command'",
"in",
"name",
":",
"if",
"name",
"!=",
"'GlimCommand'",
":",
"cobject",
"=",
"getattr",
"(",
"module",
",",
"name",
")",
"commands",
".",
"append",
"(",
"cobject",
")",
"return",
"commands"
]
| Function smartly imports Command type classes from the given module
Args
----
module (module):
The module which Command classes will be extracted from
Returns
-------
commands (list):
A list of Command instances
Note:
This function will not register any command class
named "Command" or "GlimCommand".
When extending Command class, be sure to have "Command"
string on your custom commands. | [
"Function",
"smartly",
"imports",
"Command",
"type",
"classes",
"given",
"module"
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/command.py#L33-L62 | train |
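A reduced, self-contained sketch of the same inspect-based discovery; the demo module is built inline so nothing outside the snippet is assumed:

```python
# Collect classes whose names contain 'Command', skipping the base names,
# mirroring the filtering rule documented above.
import inspect
import types

module = types.ModuleType('demo_commands')
exec('class Command(object): pass\n'
     'class GlimCommand(Command): pass\n'
     'class GreetCommand(Command): name = "greet"\n', module.__dict__)

commands = [obj for name, obj in inspect.getmembers(module)
            if 'Command' in name and name not in ('Command', 'GlimCommand')]
print(commands)  # [<class 'demo_commands.GreetCommand'>]
```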
aacanakin/glim | glim/command.py | CommandAdapter.match | def match(self, args):
"""
Function selects the active command line utility.
Args
----
args (argparse.parse_args()):
The parsed arguments using parser.parse_args() function.
Returns
-------
command (glim.command.Command): the active command object.
"""
command = None
for c in self.commands:
if c.name == args.which:
c.args = args
command = c
break
return command | python | def match(self, args):
"""
Function selects the active command line utility.
Args
----
args (argparse.parse_args()):
The parsed arguments using parser.parse_args() function.
Returns
-------
command (glim.command.Command): the active command object.
"""
command = None
for c in self.commands:
if c.name == args.which:
c.args = args
command = c
break
return command | [
"def",
"match",
"(",
"self",
",",
"args",
")",
":",
"command",
"=",
"None",
"for",
"c",
"in",
"self",
".",
"commands",
":",
"if",
"c",
".",
"name",
"==",
"args",
".",
"which",
":",
"c",
".",
"args",
"=",
"args",
"command",
"=",
"c",
"break",
"return",
"command"
]
| Function dispatches the active command line utility.
Args
----
args (argparse.parse_args()):
The parsed arguments using parser.parse_args() function.
Returns
-------
command (glim.command.Command): the active command object. | [
"Function",
"dispatches",
"the",
"active",
"command",
"line",
"utility",
"."
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/command.py#L125-L144 | train |
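A hedged sketch of how `match` pairs parsed arguments with a command. The `which` attribute is assumed to be installed via argparse's `set_defaults(which=...)` subparser pattern; `EchoCommand` is an illustrative stand-in, not a glim class:

```python
import argparse

class EchoCommand:             # illustrative stand-in for a glim Command
    name = 'echo'
    args = None

commands = [EchoCommand()]

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
echo = subparsers.add_parser('echo')
echo.add_argument('text')
echo.set_defaults(which='echo')    # `which` is what match() compares against

args = parser.parse_args(['echo', 'hello'])
matched = next((c for c in commands if c.name == args.which), None)
if matched:
    matched.args = args            # match() also attaches the parsed args
print(matched.args.text)           # hello
```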
aacanakin/glim | glim/command.py | CommandAdapter.dispatch | def dispatch(self, command, app):
"""
Function runs the active command.
Args
----
command (glim.command.Command): the command object.
app (glim.app.App): the glim app object.
Note:
Exception handling should be done in the Command class
itself; otherwise, an unhandled exception may crash
the app!
"""
if self.is_glimcommand(command):
command.run(app)
else:
command.run() | python | def dispatch(self, command, app):
"""
Function runs the active command.
Args
----
command (glim.command.Command): the command object.
app (glim.app.App): the glim app object.
Note:
Exception handling should be done in the Command class
itself; otherwise, an unhandled exception may crash
the app!
"""
if self.is_glimcommand(command):
command.run(app)
else:
command.run() | [
"def",
"dispatch",
"(",
"self",
",",
"command",
",",
"app",
")",
":",
"if",
"self",
".",
"is_glimcommand",
"(",
"command",
")",
":",
"command",
".",
"run",
"(",
"app",
")",
"else",
":",
"command",
".",
"run",
"(",
")"
]
| Function runs the active command.
Args
----
command (glim.command.Command): the command object.
app (glim.app.App): the glim app object.
Note:
Exception handling should be done in the Command class
itself; otherwise, an unhandled exception may crash
the app! | [
"Function",
"runs",
"the",
"active",
"command",
"."
]
| 71a20ac149a1292c0d6c1dc7414985ea51854f7a | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/command.py#L160-L177 | train |
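A sketch of the dispatch branching, under the assumption that `is_glimcommand` (not shown in this excerpt) behaves like an `isinstance` check; the class bodies and the string app are illustrative stand-ins:

```python
class Command:
    def run(self):
        print('plain command, no app')

class GlimCommand(Command):
    def run(self, app):
        print('glim command, app =', app)

def dispatch(command, app):
    # Mirrors CommandAdapter.dispatch: only GlimCommands receive the app.
    if isinstance(command, GlimCommand):
        command.run(app)
    else:
        command.run()

dispatch(Command(), app='fake-app')       # plain command, no app
dispatch(GlimCommand(), app='fake-app')   # glim command, app = fake-app
```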
Kortemme-Lab/klab | klab/bio/fasta.py | FASTA.replace_sequence | def replace_sequence(self, pdb_ID, chain_id, replacement_sequence):
'''Replaces a sequence with another. Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match.'''
old_sequences = self.sequences
old_unique_sequences = self.unique_sequences
self.sequences = []
self.unique_sequences = {}
for s in old_sequences:
if s[0] == pdb_ID and s[1] == chain_id:
self._add_sequence(pdb_ID, chain_id, replacement_sequence)
else:
self._add_sequence(s[0], s[1], s[2])
self._find_identical_sequences() | python | def replace_sequence(self, pdb_ID, chain_id, replacement_sequence):
'''Replaces a sequence with another. Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match.'''
old_sequences = self.sequences
old_unique_sequences = self.unique_sequences
self.sequences = []
self.unique_sequences = {}
for s in old_sequences:
if s[0] == pdb_ID and s[1] == chain_id:
self._add_sequence(pdb_ID, chain_id, replacement_sequence)
else:
self._add_sequence(s[0], s[1], s[2])
self._find_identical_sequences() | [
"def",
"replace_sequence",
"(",
"self",
",",
"pdb_ID",
",",
"chain_id",
",",
"replacement_sequence",
")",
":",
"old_sequences",
"=",
"self",
".",
"sequences",
"old_unique_sequences",
"=",
"self",
".",
"unique_sequences",
"self",
".",
"sequences",
"=",
"[",
"]",
"self",
".",
"unique_sequences",
"=",
"{",
"}",
"for",
"s",
"in",
"old_sequences",
":",
"if",
"s",
"[",
"0",
"]",
"==",
"pdb_ID",
"and",
"s",
"[",
"1",
"]",
"==",
"chain_id",
":",
"self",
".",
"_add_sequence",
"(",
"pdb_ID",
",",
"chain_id",
",",
"replacement_sequence",
")",
"else",
":",
"self",
".",
"_add_sequence",
"(",
"s",
"[",
"0",
"]",
",",
"s",
"[",
"1",
"]",
",",
"s",
"[",
"2",
"]",
")",
"self",
".",
"_find_identical_sequences",
"(",
")"
]
| Replaces a sequence with another. Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match. | [
"Replaces",
"a",
"sequence",
"with",
"another",
".",
"Typically",
"not",
"useful",
"but",
"I",
"use",
"it",
"in",
"the",
"ResidueRelatrix",
"to",
"make",
"sure",
"that",
"the",
"FASTA",
"and",
"SEQRES",
"sequences",
"match",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fasta.py#L63-L75 | train |
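A stand-alone mimic of the rebuild-and-swap logic, using plain tuples in place of the internal `_add_sequence` bookkeeping; the real method also rebuilds the unique-sequence index via `_find_identical_sequences`, omitted here:

```python
# (pdb_id, chain_id, sequence) triples, mirroring what the method iterates over.
sequences = [('1ABC', 'A', 'MKTAYIAK'), ('1ABC', 'B', 'GSHMQIFV')]

def replace_sequence(sequences, pdb_id, chain_id, replacement):
    # Rebuild the list, swapping in the replacement for the matching chain.
    return [(p, c, replacement if (p, c) == (pdb_id, chain_id) else s)
            for p, c, s in sequences]

print(replace_sequence(sequences, '1ABC', 'B', 'GSHMQIFX'))
# [('1ABC', 'A', 'MKTAYIAK'), ('1ABC', 'B', 'GSHMQIFX')]
```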
Kortemme-Lab/klab | klab/bio/fasta.py | FASTA.retrieve | def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pdb_id = pdb_id.upper()
if bio_cache:
return FASTA(bio_cache.get_fasta_contents(pdb_id))
# Check to see whether we have a cached copy
if cache_dir:
filename = os.path.join(cache_dir, "%s.fasta" % pdb_id)
if os.path.exists(filename):
return FASTA(read_file(filename))
else:
filename += ".txt"
if os.path.exists(filename):
return FASTA(read_file(filename))
# Get a copy from the RCSB
contents = rcsb.retrieve_fasta(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.fasta" % pdb_id), contents)
# Return the object
return FASTA(contents) | python | def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pdb_id = pdb_id.upper()
if bio_cache:
return FASTA(bio_cache.get_fasta_contents(pdb_id))
# Check to see whether we have a cached copy
if cache_dir:
filename = os.path.join(cache_dir, "%s.fasta" % pdb_id)
if os.path.exists(filename):
return FASTA(read_file(filename))
else:
filename += ".txt"
if os.path.exists(filename):
return FASTA(read_file(filename))
# Get a copy from the RCSB
contents = rcsb.retrieve_fasta(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.fasta" % pdb_id), contents)
# Return the object
return FASTA(contents) | [
"def",
"retrieve",
"(",
"pdb_id",
",",
"cache_dir",
"=",
"None",
",",
"bio_cache",
"=",
"None",
")",
":",
"pdb_id",
"=",
"pdb_id",
".",
"upper",
"(",
")",
"if",
"bio_cache",
":",
"return",
"FASTA",
"(",
"bio_cache",
".",
"get_fasta_contents",
"(",
"pdb_id",
")",
")",
"# Check to see whether we have a cached copy",
"if",
"cache_dir",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_dir",
",",
"\"%s.fasta\"",
"%",
"pdb_id",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"FASTA",
"(",
"read_file",
"(",
"filename",
")",
")",
"else",
":",
"filename",
"+=",
"\".txt\"",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"FASTA",
"(",
"read_file",
"(",
"filename",
")",
")",
"# Get a copy from the RCSB",
"contents",
"=",
"rcsb",
".",
"retrieve_fasta",
"(",
"pdb_id",
")",
"# Create a cached copy if appropriate",
"if",
"cache_dir",
":",
"write_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"cache_dir",
",",
"\"%s.fasta\"",
"%",
"pdb_id",
")",
",",
"contents",
")",
"# Return the object",
"return",
"FASTA",
"(",
"contents",
")"
]
| Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB. | [
"Creates",
"a",
"FASTA",
"object",
"by",
"using",
"a",
"cached",
"copy",
"of",
"the",
"file",
"if",
"it",
"exists",
"or",
"by",
"retrieving",
"the",
"file",
"from",
"the",
"RCSB",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fasta.py#L91-L117 | train |
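A hypothetical usage sketch, assuming `klab` is installed, the cache directory already exists and is writable (the code writes into it but does not create it), and the RCSB is reachable; `1UBQ` is just an example PDB ID:

```python
from klab.bio.fasta import FASTA

# First call downloads 1UBQ from the RCSB and writes 1UBQ.fasta to the cache.
fasta = FASTA.retrieve('1UBQ', cache_dir='/tmp/pdb_cache')

# A second call finds /tmp/pdb_cache/1UBQ.fasta and skips the network entirely.
fasta_again = FASTA.retrieve('1UBQ', cache_dir='/tmp/pdb_cache')
```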