Python functions with docstrings (train partition). Each entry below gives repo | path | function name, followed by the function source and a Source link to the original file.
dbarsam/python-vsgen | vsgen/util/entrypoints.py | entrypoint

def entrypoint(section, option):
    """
    Returns the entry point object given a section, option pair.
    :param str section: The section name in the entry point collection
    :param str option: The option name in the entry point collection
    :return: The entry point object if available.
    """
    try:
        return entrypoints(section)[option]
    except KeyError:
        raise KeyError('Cannot resolve type "{}" to a recognised vsgen "{}" type.'.format(option, section))
"""
Returns the the entry point object given a section, option pair.
:param str section: The section name in the entry point collection
:param str option: The option name in the entry point collection
:return: The entry point object if available.
"""
try:
return entrypoints(section)[option]
except KeyError:
raise KeyError('Cannot resolve type "{}" to a recognised vsgen "{}" type.'.format(option, section)) | [
"def",
"entrypoint",
"(",
"section",
",",
"option",
")",
":",
"try",
":",
"return",
"entrypoints",
"(",
"section",
")",
"[",
"option",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'Cannot resolve type \"{}\" to a recognised vsgen \"{}\" type.'",
".",
"format",
"(",
"option",
",",
"section",
")",
")"
] | Returns the the entry point object given a section, option pair.
:param str section: The section name in the entry point collection
:param str option: The option name in the entry point collection
:return: The entry point object if available. | [
"Returns",
"the",
"the",
"entry",
"point",
"object",
"given",
"a",
"section",
"option",
"pair",
"."
Source: https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/entrypoints.py#L22-L33 (train split)
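A minimal usage sketch, not part of the original file: it assumes vsgen is installed, and the section and option names below are purely illustrative placeholders rather than names confirmed by the source above.

# Hypothetical lookup of a registered vsgen type through its entry points.
from vsgen.util.entrypoints import entrypoint

suite_cls = entrypoint('vsgen.suites', 'ptvs')  # placeholder section/option names
print(suite_cls)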
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/checkers/_declared.py | infer_declared

def infer_declared(ms, namespace=None):
    '''Retrieves any declared information from the given macaroons and returns
    it as a key-value map.
    Information is declared with a first party caveat as created by
    declared_caveat.
    If there are two caveats that declare the same key with different values,
    the information is omitted from the map. When the caveats are later
    checked, this will cause the check to fail.
    namespace is the Namespace used to retrieve the prefix associated to the
    uri, if None it will use the STD_NAMESPACE only.
    '''
    conditions = []
    for m in ms:
        for cav in m.caveats:
            if cav.location is None or cav.location == '':
                conditions.append(cav.caveat_id_bytes.decode('utf-8'))
    return infer_declared_from_conditions(conditions, namespace)
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_declared.py#L15-L32 (train split)
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/checkers/_declared.py | infer_declared_from_conditions

def infer_declared_from_conditions(conds, namespace=None):
    '''Like infer_declared except that it is passed a list of first party
    caveat condition strings rather than a set of macaroons.
    '''
    conflicts = []
    # If we can't resolve the standard namespace, then we'll look for
    # just bare "declared" caveats which will work OK for legacy
    # macaroons with no namespace.
    if namespace is None:
        namespace = Namespace()
    prefix = namespace.resolve(STD_NAMESPACE)
    if prefix is None:
        prefix = ''
    declared_cond = prefix + COND_DECLARED
    info = {}
    for cond in conds:
        try:
            name, rest = parse_caveat(cond)
        except ValueError:
            name, rest = '', ''
        if name != declared_cond:
            continue
        parts = rest.split(' ', 1)
        if len(parts) != 2:
            continue
        key, val = parts[0], parts[1]
        old_val = info.get(key)
        if old_val is not None and old_val != val:
            conflicts.append(key)
            continue
        info[key] = val
    for key in set(conflicts):
        del info[key]
    return info
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_declared.py#L35-L69 (train split)
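A self-contained sketch of the conflict rule implemented above (not from the original file). It assumes the legacy, no-namespace case in which the declared condition name is the bare literal 'declared', which is exactly what the empty-prefix fallback in the code handles:

# Three hypothetical first-party caveat conditions; 'username' is declared
# twice with different values, so it would be dropped from the result.
conditions = [
    'declared username alice',
    'declared domain example.com',
    'declared username bob',
]
# Under the assumption above, infer_declared_from_conditions(conditions)
# would return {'domain': 'example.com'}.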
useblocks/groundwork | groundwork/patterns/gw_base_pattern.py | GwBasePattern._pre_activate_injection

def _pre_activate_injection(self):
    """
    Injects functions before the activation routine of child classes gets called
    """
    # Let's be sure that this plugin's class is registered and available on application level under
    # application.plugins.classes. This allows reusing this class for *new* plugins.
    if not self.app.plugins.classes.exist(self.__class__.__name__):
        self.app.plugins.classes.register([self.__class__])
    self._load_needed_plugins()
    self.app.signals.send("plugin_activate_pre", self)
"""
Injects functions before the activation routine of child classes gets called
"""
# Let's be sure that this plugins class is registered and available on application level under
# application.plugins.classes. This allows to reuse this class for *new* plugins.
if not self.app.plugins.classes.exist(self.__class__.__name__):
self.app.plugins.classes.register([self.__class__])
self._load_needed_plugins()
self.app.signals.send("plugin_activate_pre", self) | [
"def",
"_pre_activate_injection",
"(",
"self",
")",
":",
"# Let's be sure that this plugins class is registered and available on application level under",
"# application.plugins.classes. This allows to reuse this class for *new* plugins.",
"if",
"not",
"self",
".",
"app",
".",
"plugins",
".",
"classes",
".",
"exist",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
":",
"self",
".",
"app",
".",
"plugins",
".",
"classes",
".",
"register",
"(",
"[",
"self",
".",
"__class__",
"]",
")",
"self",
".",
"_load_needed_plugins",
"(",
")",
"self",
".",
"app",
".",
"signals",
".",
"send",
"(",
"\"plugin_activate_pre\"",
",",
"self",
")"
] | Injects functions before the activation routine of child classes gets called | [
"Injects",
"functions",
"before",
"the",
"activation",
"routine",
"of",
"child",
"classes",
"gets",
"called"
Source: https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L135-L146 (train split)
useblocks/groundwork | groundwork/patterns/gw_base_pattern.py | SignalsPlugin.register

def register(self, signal, description):
    """
    Registers a new signal.
    Only registered signals are allowed to be sent.
    :param signal: Unique name of the signal
    :param description: Description of the reason or use case, why this signal is needed.
        Used for documentation.
    """
    return self.__app.signals.register(signal, self._plugin, description)
"""
Registers a new signal.
Only registered signals are allowed to be send.
:param signal: Unique name of the signal
:param description: Description of the reason or use case, why this signal is needed.
Used for documentation.
"""
return self.__app.signals.register(signal, self._plugin, description) | [
"def",
"register",
"(",
"self",
",",
"signal",
",",
"description",
")",
":",
"return",
"self",
".",
"__app",
".",
"signals",
".",
"register",
"(",
"signal",
",",
"self",
".",
"_plugin",
",",
"description",
")"
] | Registers a new signal.
Only registered signals are allowed to be send.
:param signal: Unique name of the signal
:param description: Description of the reason or use case, why this signal is needed.
Used for documentation. | [
"Registers",
"a",
"new",
"signal",
".",
"Only",
"registered",
"signals",
"are",
"allowed",
"to",
"be",
"send",
"."
Source: https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L248-L257 (train split)
useblocks/groundwork | groundwork/patterns/gw_base_pattern.py | SignalsPlugin.get

def get(self, signal=None):
    """
    Returns a single signal or a dictionary of signals for this plugin.
    """
    return self.__app.signals.get(signal, self._plugin)
"""
Returns a single signal or a dictionary of signals for this plugin.
"""
return self.__app.signals.get(signal, self._plugin) | [
"def",
"get",
"(",
"self",
",",
"signal",
"=",
"None",
")",
":",
"return",
"self",
".",
"__app",
".",
"signals",
".",
"get",
"(",
"signal",
",",
"self",
".",
"_plugin",
")"
] | Returns a single signal or a dictionary of signals for this plugin. | [
"Returns",
"a",
"single",
"signal",
"or",
"a",
"dictionary",
"of",
"signals",
"for",
"this",
"plugin",
"."
Source: https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L294-L298 (train split)
useblocks/groundwork | groundwork/patterns/gw_base_pattern.py | SignalsPlugin.get_receiver

def get_receiver(self, receiver=None):
    """
    Returns a single receiver or a dictionary of receivers for this plugin.
    """
    return self.__app.signals.get_receiver(receiver, self._plugin)
"""
Returns a single receiver or a dictionary of receivers for this plugin.
"""
return self.__app.signals.get_receiver(receiver, self._plugin) | [
"def",
"get_receiver",
"(",
"self",
",",
"receiver",
"=",
"None",
")",
":",
"return",
"self",
".",
"__app",
".",
"signals",
".",
"get_receiver",
"(",
"receiver",
",",
"self",
".",
"_plugin",
")"
] | Returns a single receiver or a dictionary of receivers for this plugin. | [
"Returns",
"a",
"single",
"receiver",
"or",
"a",
"dictionary",
"of",
"receivers",
"for",
"this",
"plugin",
"."
Source: https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L300-L304 (train split)
ets-labs/python-domain-models | domain_models/views.py | ContextViewMetaClass.validate

def validate(mcs, bases, attributes):
    """Check attributes."""
    if bases[0] is object:
        return None
    mcs.check_model_cls(attributes)
    mcs.check_include_exclude(attributes)
    mcs.check_properties(attributes)
"""Check attributes."""
if bases[0] is object:
return None
mcs.check_model_cls(attributes)
mcs.check_include_exclude(attributes)
mcs.check_properties(attributes) | [
"def",
"validate",
"(",
"mcs",
",",
"bases",
",",
"attributes",
")",
":",
"if",
"bases",
"[",
"0",
"]",
"is",
"object",
":",
"return",
"None",
"mcs",
".",
"check_model_cls",
"(",
"attributes",
")",
"mcs",
".",
"check_include_exclude",
"(",
"attributes",
")",
"mcs",
".",
"check_properties",
"(",
"attributes",
")"
] | Check attributes. | [
"Check",
"attributes",
"."
Source: https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/views.py#L18-L24 (train split)
ets-labs/python-domain-models | domain_models/views.py | ContextViewMetaClass.get_properties

def get_properties(attributes):
    """Return list of names of defined properties.
    :type attributes: dict
    :rtype: list
    """
    return [key for key, value in six.iteritems(attributes)
            if isinstance(value, property)]
"""Return tuple of names of defined properties.
:type attributes: dict
:rtype: list
"""
return [key for key, value in six.iteritems(attributes)
if isinstance(value, property)] | [
"def",
"get_properties",
"(",
"attributes",
")",
":",
"return",
"[",
"key",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"attributes",
")",
"if",
"isinstance",
"(",
"value",
",",
"property",
")",
"]"
] | Return tuple of names of defined properties.
:type attributes: dict
:rtype: list | [
"Return",
"tuple",
"of",
"names",
"of",
"defined",
"properties",
"."
Source: https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/views.py#L76-L83 (train split)
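A self-contained illustration (not from the original project) of how the isinstance(value, property) test above singles out property objects in a class attribute dictionary:

class Example(object):
    plain = 1

    @property
    def computed(self):
        return self.plain * 2

# Roughly what the metaclass sees as its `attributes` mapping:
attributes = dict(vars(Example))
names = [key for key, value in attributes.items()
         if isinstance(value, property)]
print(names)  # ['computed']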
ets-labs/python-domain-models | domain_models/views.py | ContextViewMetaClass.check_properties

def check_properties(mcs, attributes):
    """Check whether intersections exist.
    :type attributes: dict
    """
    include, exclude = mcs.get_prepared_include_exclude(attributes)
    properties = mcs.get_properties(attributes)
    intersections = list(
        set(properties).intersection(include if include else exclude))
    if not intersections:
        return None
    attr_name = '__include__' if include else '__exclude__'
    raise AttributeError(
        "It is not allowed to mention already defined properties: "
        "{0} in {1} attributes.".format(", ".join(intersections),
                                        attr_name))
"""Check whether intersections exist.
:type attributes: dict
"""
include, exclude = mcs.get_prepared_include_exclude(attributes)
properties = mcs.get_properties(attributes)
intersections = list(
set(properties).intersection(include if include else exclude))
if not intersections:
return None
attr_name = '__include__' if include else '__exclude__'
raise AttributeError(
"It is not allowed to mention already defined properties: "
"{0} in {1} attributes.".format(", ".join(intersections),
attr_name)) | [
"def",
"check_properties",
"(",
"mcs",
",",
"attributes",
")",
":",
"include",
",",
"exclude",
"=",
"mcs",
".",
"get_prepared_include_exclude",
"(",
"attributes",
")",
"properties",
"=",
"mcs",
".",
"get_properties",
"(",
"attributes",
")",
"intersections",
"=",
"list",
"(",
"set",
"(",
"properties",
")",
".",
"intersection",
"(",
"include",
"if",
"include",
"else",
"exclude",
")",
")",
"if",
"not",
"intersections",
":",
"return",
"None",
"attr_name",
"=",
"'__include__'",
"if",
"include",
"else",
"'__exclude__'",
"raise",
"AttributeError",
"(",
"\"It is not allowed to mention already defined properties: \"",
"\"{0} in {1} attributes.\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"intersections",
")",
",",
"attr_name",
")",
")"
] | Check whether intersections exist.
:type attributes: dict | [
"Check",
"whether",
"intersections",
"exist",
"."
Source: https://github.com/ets-labs/python-domain-models/blob/7de1816ba0338f20fdb3e0f57fad0ffd5bea13f9/domain_models/views.py#L86-L103 (train split)
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/filewatcher.py | FilesystemEventHandler.on_deleted

def on_deleted(self, event):
    """
    Event Handler when a file is deleted
    """
    key = 'filesystem:file_deleted'
    data = {
        'filepath': event.src_path,
        'is_directory': event.is_directory,
        'dirpath': os.path.dirname(event.src_path)
    }
    bmsg = BroadcastMessage(key=key, data=data)
    BroadcastManager.broadcast(bmsg)
"""
Event Handler when a file is deleted
"""
key = 'filesystem:file_deleted'
data = {
'filepath': event.src_path,
'is_directory': event.is_directory,
'dirpath': os.path.dirname(event.src_path)
}
bmsg = BroadcastMessage(key=key, data=data)
BroadcastManager.broadcast(bmsg) | [
"def",
"on_deleted",
"(",
"self",
",",
"event",
")",
":",
"key",
"=",
"'filesystem:file_deleted'",
"data",
"=",
"{",
"'filepath'",
":",
"event",
".",
"src_path",
",",
"'is_directory'",
":",
"event",
".",
"is_directory",
",",
"'dirpath'",
":",
"os",
".",
"path",
".",
"dirname",
"(",
"event",
".",
"src_path",
")",
"}",
"bmsg",
"=",
"BroadcastMessage",
"(",
"key",
"=",
"key",
",",
"data",
"=",
"data",
")",
"BroadcastManager",
".",
"broadcast",
"(",
"bmsg",
")"
] | Event Handler when a file is deleted | [
"Event",
"Handler",
"when",
"a",
"file",
"is",
"deleted"
Source: https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/filewatcher.py#L27-L39 (train split)
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/httpbakery/agent/_agent.py | read_auth_info

def read_auth_info(agent_file_content):
    '''Loads agent authentication information from the
    specified content string, as read from an agents file.
    The returned information is suitable for passing as an argument
    to the AgentInteractor constructor.
    @param agent_file_content The agent file content (str)
    @return AuthInfo The authentication information
    @raises AgentFileFormatError when the file format is bad.
    '''
    try:
        data = json.loads(agent_file_content)
        return AuthInfo(
            key=bakery.PrivateKey.deserialize(data['key']['private']),
            agents=list(
                Agent(url=a['url'], username=a['username'])
                for a in data.get('agents', [])
            ),
        )
    except (
        KeyError,
        ValueError,
        TypeError,
    ) as e:
        raise AgentFileFormatError('invalid agent file', e)
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/agent/_agent.py#L37-L60 (train split)
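The JSON layout this parser expects can be read off the key lookups above; a hedged sketch follows, with placeholder values rather than real key material or URLs:

agent_file_content = '''
{
    "key": {"private": "<base64-encoded private key>"},
    "agents": [
        {"url": "https://discharger.example.com", "username": "agent-bot"}
    ]
}
'''
# read_auth_info(agent_file_content) would return an AuthInfo built from this,
# assuming the "private" value is a valid serialized bakery private key
# (the placeholder above is not, so the snippet is only illustrative).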
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/httpbakery/agent/_agent.py | AgentInteractor.interact

def interact(self, client, location, interaction_required_err):
    '''Implement Interactor.interact by obtaining
    a macaroon from the discharger, discharging it with the
    local private key using the discharged macaroon as
    a discharge token'''
    p = interaction_required_err.interaction_method('agent',
                                                    InteractionInfo)
    if p.login_url is None or p.login_url == '':
        raise httpbakery.InteractionError(
            'no login-url field found in agent interaction method')
    agent = self._find_agent(location)
    if not location.endswith('/'):
        location += '/'
    login_url = urljoin(location, p.login_url)
    resp = requests.get(
        login_url, params={
            'username': agent.username,
            'public-key': str(self._auth_info.key.public_key)},
        auth=client.auth())
    if resp.status_code != 200:
        raise httpbakery.InteractionError(
            'cannot acquire agent macaroon: {} {}'.format(
                resp.status_code, resp.text)
        )
    m = resp.json().get('macaroon')
    if m is None:
        raise httpbakery.InteractionError('no macaroon in response')
    m = bakery.Macaroon.from_dict(m)
    ms = bakery.discharge_all(m, None, self._auth_info.key)
    b = bytearray()
    for m in ms:
        b.extend(utils.b64decode(m.serialize()))
    return httpbakery.DischargeToken(kind='agent', value=bytes(b))
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/agent/_agent.py#L98-L130 (train split)
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/httpbakery/agent/_agent.py | AgentInteractor.legacy_interact

def legacy_interact(self, client, location, visit_url):
    '''Implement LegacyInteractor.legacy_interact by obtaining
    the discharge macaroon using the client's private key
    '''
    agent = self._find_agent(location)
    # Shallow-copy the client so that we don't unexpectedly side-effect
    # it by changing the key. Another possibility might be to
    # set up agent authentication differently, in such a way that
    # we're sure that client.key is the same as self._auth_info.key.
    client = copy.copy(client)
    client.key = self._auth_info.key
    resp = client.request(
        method='POST',
        url=visit_url,
        json={
            'username': agent.username,
            'public_key': str(self._auth_info.key.public_key),
        },
    )
    if resp.status_code != 200:
        raise httpbakery.InteractionError(
            'cannot acquire agent macaroon from {}: {} (response body: {!r})'.format(visit_url, resp.status_code, resp.text))
    if not resp.json().get('agent_login', False):
        raise httpbakery.InteractionError('agent login failed')
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/httpbakery/agent/_agent.py#L143-L166 (train split)
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/checkers/_time.py | expiry_time

def expiry_time(ns, cavs):
    '''Returns the minimum time of any time-before caveats found
    in the given list, or None if no such caveats were found.
    :param ns: the Namespace used to determine the standard namespace prefix - if
        the standard namespace is not found, the empty prefix is assumed.
    :param cavs: a list of pymacaroons.Caveat
    :return: datetime.DateTime or None.
    '''
    prefix = ns.resolve(STD_NAMESPACE)
    time_before_cond = condition_with_prefix(
        prefix, COND_TIME_BEFORE)
    t = None
    for cav in cavs:
        if not cav.first_party():
            continue
        cav = cav.caveat_id_bytes.decode('utf-8')
        name, rest = parse_caveat(cav)
        if name != time_before_cond:
            continue
        try:
            et = pyrfc3339.parse(rest, utc=True).replace(tzinfo=None)
            if t is None or et < t:
                t = et
        except ValueError:
            continue
    return t
Source: https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_time.py#L40-L67 (train split)
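For orientation, a sketch (not from the original file) of the condition strings this function scans for inside first-party caveats; it assumes the standard condition name is the bare literal 'time-before' with no namespace prefix and RFC 3339 timestamps as parsed by pyrfc3339:

# Hypothetical conditions carried by two first-party caveats; when scanned by
# expiry_time(), the earlier timestamp (2025-01-01T00:00:00) would win.
conditions = [
    'time-before 2025-06-30T12:00:00Z',
    'time-before 2025-01-01T00:00:00Z',
]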
Phyks/libbmc | libbmc/tools.py | replace_all

def replace_all(text, replace_dict):
    """
    Replace multiple strings in a text.
    .. note::
        Replacements are made successively, without any warranty on the order \
        in which they are made.
    :param text: Text to replace in.
    :param replace_dict: Dictionary mapping strings to replace with their \
        substitution.
    :returns: Text after replacements.
    >>> replace_all("foo bar foo thing", {"foo": "oof", "bar": "rab"})
    'oof rab oof thing'
    """
    for i, j in replace_dict.items():
        text = text.replace(i, j)
    return text
"""
Replace multiple strings in a text.
.. note::
Replacements are made successively, without any warranty on the order \
in which they are made.
:param text: Text to replace in.
:param replace_dict: Dictionary mapping strings to replace with their \
substitution.
:returns: Text after replacements.
>>> replace_all("foo bar foo thing", {"foo": "oof", "bar": "rab"})
'oof rab oof thing'
"""
for i, j in replace_dict.items():
text = text.replace(i, j)
return text | [
"def",
"replace_all",
"(",
"text",
",",
"replace_dict",
")",
":",
"for",
"i",
",",
"j",
"in",
"replace_dict",
".",
"items",
"(",
")",
":",
"text",
"=",
"text",
".",
"replace",
"(",
"i",
",",
"j",
")",
"return",
"text"
] | Replace multiple strings in a text.
.. note::
Replacements are made successively, without any warranty on the order \
in which they are made.
:param text: Text to replace in.
:param replace_dict: Dictionary mapping strings to replace with their \
substitution.
:returns: Text after replacements.
>>> replace_all("foo bar foo thing", {"foo": "oof", "bar": "rab"})
'oof rab oof thing' | [
"Replace",
"multiple",
"strings",
"in",
"a",
"text",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/tools.py#L16-L36 (train split)
Phyks/libbmc | libbmc/tools.py | map_or_apply

def map_or_apply(function, param):
    """
    Map the function on ``param``, or apply it, depending whether ``param`` \
    is a list or an item.
    :param function: The function to apply.
    :param param: The parameter to feed the function with (list or item).
    :returns: The computed value or ``None``.
    """
    try:
        if isinstance(param, list):
            return [next(iter(function(i))) for i in param]
        else:
            return next(iter(function(param)))
    except StopIteration:
        return None
"""
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
"""
try:
if isinstance(param, list):
return [next(iter(function(i))) for i in param]
else:
return next(iter(function(param)))
except StopIteration:
return None | [
"def",
"map_or_apply",
"(",
"function",
",",
"param",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"param",
",",
"list",
")",
":",
"return",
"[",
"next",
"(",
"iter",
"(",
"function",
"(",
"i",
")",
")",
")",
"for",
"i",
"in",
"param",
"]",
"else",
":",
"return",
"next",
"(",
"iter",
"(",
"function",
"(",
"param",
")",
")",
")",
"except",
"StopIteration",
":",
"return",
"None"
] | Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``. | [
"Map",
"the",
"function",
"on",
"param",
"or",
"apply",
"it",
"depending",
"whether",
"param",
"\\",
"is",
"a",
"list",
"or",
"an",
"item",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/tools.py#L39-L54 (train split)
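A small usage sketch (not part of libbmc), relying only on the function above being in scope; note that the callable must return an iterable, since the helper keeps the first element of whatever it yields:

fetch = lambda x: [x * 2]   # toy stand-in for a lookup that returns a list

print(map_or_apply(fetch, [1, 2, 3]))   # [2, 4, 6]
print(map_or_apply(fetch, 7))           # 14
print(map_or_apply(lambda x: [], 7))    # None (empty result raises StopIteration)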
Phyks/libbmc | libbmc/tools.py | batch

def batch(iterable, size):
    """
    Get items from a sequence a batch at a time.
    .. note::
        Adapted from
        https://code.activestate.com/recipes/303279-getting-items-in-batches/.
    .. note::
        All batches must be exhausted immediately.
    :params iterable: An iterable to get batches from.
    :params size: Size of the batches.
    :returns: A new batch of the given size at each time.
    >>> [list(i) for i in batch([1, 2, 3, 4, 5], 2)]
    [[1, 2], [3, 4], [5]]
    """
    item = iter(iterable)
    while True:
        batch_iterator = islice(item, size)
        try:
            yield chain([next(batch_iterator)], batch_iterator)
        except StopIteration:
            return
"""
Get items from a sequence a batch at a time.
.. note:
Adapted from
https://code.activestate.com/recipes/303279-getting-items-in-batches/.
.. note:
All batches must be exhausted immediately.
:params iterable: An iterable to get batches from.
:params size: Size of the batches.
:returns: A new batch of the given size at each time.
>>> [list(i) for i in batch([1, 2, 3, 4, 5], 2)]
[[1, 2], [3, 4], [5]]
"""
item = iter(iterable)
while True:
batch_iterator = islice(item, size)
try:
yield chain([next(batch_iterator)], batch_iterator)
except StopIteration:
return | [
"def",
"batch",
"(",
"iterable",
",",
"size",
")",
":",
"item",
"=",
"iter",
"(",
"iterable",
")",
"while",
"True",
":",
"batch_iterator",
"=",
"islice",
"(",
"item",
",",
"size",
")",
"try",
":",
"yield",
"chain",
"(",
"[",
"next",
"(",
"batch_iterator",
")",
"]",
",",
"batch_iterator",
")",
"except",
"StopIteration",
":",
"return"
] | Get items from a sequence a batch at a time.
.. note:
Adapted from
https://code.activestate.com/recipes/303279-getting-items-in-batches/.
.. note:
All batches must be exhausted immediately.
:params iterable: An iterable to get batches from.
:params size: Size of the batches.
:returns: A new batch of the given size at each time.
>>> [list(i) for i in batch([1, 2, 3, 4, 5], 2)]
[[1, 2], [3, 4], [5]] | [
"Get",
"items",
"from",
"a",
"sequence",
"a",
"batch",
"at",
"a",
"time",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/tools.py#L87-L114 (train split)
Phyks/libbmc | libbmc/tools.py | slugify

def slugify(value):
    """
    Normalizes string, removes non-alpha characters,
    and converts spaces to underscores to have nice filenames.
    From Django's "django/template/defaultfilters.py".
    >>> slugify("El pingüino Wenceslao hizo kilómetros bajo exhaustiva lluvia y frío, añoraba a su querido cachorro. ortez ce vieux whisky au juge blond qui fume sur son île intérieure, à Γαζέες καὶ μυρτιὲς δὲν θὰ βρῶ πιὰ στὸ χρυσαφὶ ξέφωτο いろはにほへとちりぬるを Pchnąć w tę łódź jeża lub ośm skrzyń fig กว่าบรรดาฝูงสัตว์เดรัจฉาน")
    'El_pinguino_Wenceslao_hizo_kilometros_bajo_exhaustiva_lluvia_y_frio_anoraba_a_su_querido_cachorro_ortez_ce_vieux_whisky_au_juge_blond_qui_fume_sur_son_ile_interieure_a_Pchnac_w_te_odz_jeza_lub_osm_skrzyn_fig'
    """
    try:
        unicode_type = unicode
    except NameError:
        unicode_type = str
    if not isinstance(value, unicode_type):
        value = unicode_type(value)
    value = (unicodedata.normalize('NFKD', value).
             encode('ascii', 'ignore').decode('ascii'))
    value = unicode_type(_SLUGIFY_STRIP_RE.sub('', value).strip())
    return _SLUGIFY_HYPHENATE_RE.sub('_', value)
return _SLUGIFY_HYPHENATE_RE.sub('_', value) | python | def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens to have nice filenames.
From Django's "django/template/defaultfilters.py".
>>> slugify("El pingüino Wenceslao hizo kilómetros bajo exhaustiva lluvia y frío, añoraba a su querido cachorro. ortez ce vieux whisky au juge blond qui fume sur son île intérieure, à Γαζέες καὶ μυρτιὲς δὲν θὰ βρῶ πιὰ στὸ χρυσαφὶ ξέφωτο いろはにほへとちりぬるを Pchnąć w tę łódź jeża lub ośm skrzyń fig กว่าบรรดาฝูงสัตว์เดรัจฉาน")
'El_pinguino_Wenceslao_hizo_kilometros_bajo_exhaustiva_lluvia_y_frio_anoraba_a_su_querido_cachorro_ortez_ce_vieux_whisky_au_juge_blond_qui_fume_sur_son_ile_interieure_a_Pchnac_w_te_odz_jeza_lub_osm_skrzyn_fig'
"""
try:
unicode_type = unicode
except NameError:
unicode_type = str
if not isinstance(value, unicode_type):
value = unicode_type(value)
value = (unicodedata.normalize('NFKD', value).
encode('ascii', 'ignore').decode('ascii'))
value = unicode_type(_SLUGIFY_STRIP_RE.sub('', value).strip())
return _SLUGIFY_HYPHENATE_RE.sub('_', value) | [
"def",
"slugify",
"(",
"value",
")",
":",
"try",
":",
"unicode_type",
"=",
"unicode",
"except",
"NameError",
":",
"unicode_type",
"=",
"str",
"if",
"not",
"isinstance",
"(",
"value",
",",
"unicode_type",
")",
":",
"value",
"=",
"unicode_type",
"(",
"value",
")",
"value",
"=",
"(",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"value",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
".",
"decode",
"(",
"'ascii'",
")",
")",
"value",
"=",
"unicode_type",
"(",
"_SLUGIFY_STRIP_RE",
".",
"sub",
"(",
"''",
",",
"value",
")",
".",
"strip",
"(",
")",
")",
"return",
"_SLUGIFY_HYPHENATE_RE",
".",
"sub",
"(",
"'_'",
",",
"value",
")"
] | Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens to have nice filenames.
From Django's "django/template/defaultfilters.py".
>>> slugify("El pingüino Wenceslao hizo kilómetros bajo exhaustiva lluvia y frío, añoraba a su querido cachorro. ortez ce vieux whisky au juge blond qui fume sur son île intérieure, à Γαζέες καὶ μυρτιὲς δὲν θὰ βρῶ πιὰ στὸ χρυσαφὶ ξέφωτο いろはにほへとちりぬるを Pchnąć w tę łódź jeża lub ośm skrzyń fig กว่าบรรดาฝูงสัตว์เดรัจฉาน")
'El_pinguino_Wenceslao_hizo_kilometros_bajo_exhaustiva_lluvia_y_frio_anoraba_a_su_querido_cachorro_ortez_ce_vieux_whisky_au_juge_blond_qui_fume_sur_son_ile_interieure_a_Pchnac_w_te_odz_jeza_lub_osm_skrzyn_fig' | [
"Normalizes",
"string",
"converts",
"to",
"lowercase",
"removes",
"non",
"-",
"alpha",
"characters",
"and",
"converts",
"spaces",
"to",
"hyphens",
"to",
"have",
"nice",
"filenames",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/tools.py#L131-L150 (train split)
Phyks/libbmc | libbmc/citations/repositories/arxiv.py | get_plaintext_citations

def get_plaintext_citations(arxiv_id):
    """
    Get the citations of a given preprint, in plain text.
    .. note::
        Bulk download of sources from arXiv is not permitted by their API. \
        You should have a look at http://arxiv.org/help/bulk_data_s3.
    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
        a canonical form.
    :returns: A list of cleaned plaintext citations.
    """
    plaintext_citations = []
    # Get the list of bbl files for this preprint
    bbl_files = arxiv.get_bbl(arxiv_id)
    for bbl_file in bbl_files:
        # Fetch the plaintext citations for each of the bbl files
        plaintext_citations.extend(bbl.get_plaintext_citations(bbl_file))
    return plaintext_citations
"""
Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A list of cleaned plaintext citations.
"""
plaintext_citations = []
# Get the list of bbl files for this preprint
bbl_files = arxiv.get_bbl(arxiv_id)
for bbl_file in bbl_files:
# Fetch the cited DOIs for each of the bbl files
plaintext_citations.extend(bbl.get_plaintext_citations(bbl_file))
return plaintext_citations | [
"def",
"get_plaintext_citations",
"(",
"arxiv_id",
")",
":",
"plaintext_citations",
"=",
"[",
"]",
"# Get the list of bbl files for this preprint",
"bbl_files",
"=",
"arxiv",
".",
"get_bbl",
"(",
"arxiv_id",
")",
"for",
"bbl_file",
"in",
"bbl_files",
":",
"# Fetch the cited DOIs for each of the bbl files",
"plaintext_citations",
".",
"extend",
"(",
"bbl",
".",
"get_plaintext_citations",
"(",
"bbl_file",
")",
")",
"return",
"plaintext_citations"
] | Get the citations of a given preprint, in plain text.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A list of cleaned plaintext citations. | [
"Get",
"the",
"citations",
"of",
"a",
"given",
"preprint",
"in",
"plain",
"text",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/repositories/arxiv.py#L9-L28 (train split)
Phyks/libbmc | libbmc/citations/repositories/arxiv.py | get_cited_dois

def get_cited_dois(arxiv_id):
    """
    Get the DOIs of the papers cited in a given preprint.
    .. note::
        Bulk download of sources from arXiv is not permitted by their API. \
        You should have a look at http://arxiv.org/help/bulk_data_s3.
    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
        a canonical form.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    dois = {}
    # Get the list of bbl files for this preprint
    bbl_files = arxiv.get_bbl(arxiv_id)
    for bbl_file in bbl_files:
        # Fetch the cited DOIs for each of the bbl files
        dois.update(bbl.get_cited_dois(bbl_file))
    return dois
"""
Get the DOIs of the papers cited in a .bbl file.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A dict of cleaned plaintext citations and their associated DOI.
"""
dois = {}
# Get the list of bbl files for this preprint
bbl_files = arxiv.get_bbl(arxiv_id)
for bbl_file in bbl_files:
# Fetch the cited DOIs for each of the bbl files
dois.update(bbl.get_cited_dois(bbl_file))
return dois | [
"def",
"get_cited_dois",
"(",
"arxiv_id",
")",
":",
"dois",
"=",
"{",
"}",
"# Get the list of bbl files for this preprint",
"bbl_files",
"=",
"arxiv",
".",
"get_bbl",
"(",
"arxiv_id",
")",
"for",
"bbl_file",
"in",
"bbl_files",
":",
"# Fetch the cited DOIs for each of the bbl files",
"dois",
".",
"update",
"(",
"bbl",
".",
"get_cited_dois",
"(",
"bbl_file",
")",
")",
"return",
"dois"
] | Get the DOIs of the papers cited in a .bbl file.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
a canonical form.
:returns: A dict of cleaned plaintext citations and their associated DOI. | [
"Get",
"the",
"DOIs",
"of",
"the",
"papers",
"cited",
"in",
"a",
".",
"bbl",
"file",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/repositories/arxiv.py#L31-L50 (train split)
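A hedged usage sketch (not part of the library); it needs network access to arXiv and CrossRef, and reuses the arXiv id already quoted in the docstring above:

from libbmc.citations.repositories import arxiv as arxiv_citations

# Maps each plaintext citation of the preprint to a DOI, or None when no DOI
# could be resolved.
cited = arxiv_citations.get_cited_dois("1401.2910")
for citation, doi_value in cited.items():
    print(doi_value, citation)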
eonpatapon/contrail-api-cli | contrail_api_cli/main.py | get_subcommand_kwargs

def get_subcommand_kwargs(mgr, name, namespace):
    """Get subcommand options from global parsed
    arguments.
    """
    subcmd = mgr.get(name)
    subcmd_kwargs = {}
    for opt in list(subcmd.args.values()) + list(subcmd.options.values()):
        if hasattr(namespace, opt.dest):
            subcmd_kwargs[opt.dest] = getattr(namespace, opt.dest)
    return (subcmd, subcmd_kwargs)
"""Get subcommand options from global parsed
arguments.
"""
subcmd = mgr.get(name)
subcmd_kwargs = {}
for opt in list(subcmd.args.values()) + list(subcmd.options.values()):
if hasattr(namespace, opt.dest):
subcmd_kwargs[opt.dest] = getattr(namespace, opt.dest)
return (subcmd, subcmd_kwargs) | [
"def",
"get_subcommand_kwargs",
"(",
"mgr",
",",
"name",
",",
"namespace",
")",
":",
"subcmd",
"=",
"mgr",
".",
"get",
"(",
"name",
")",
"subcmd_kwargs",
"=",
"{",
"}",
"for",
"opt",
"in",
"list",
"(",
"subcmd",
".",
"args",
".",
"values",
"(",
")",
")",
"+",
"list",
"(",
"subcmd",
".",
"options",
".",
"values",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"namespace",
",",
"opt",
".",
"dest",
")",
":",
"subcmd_kwargs",
"[",
"opt",
".",
"dest",
"]",
"=",
"getattr",
"(",
"namespace",
",",
"opt",
".",
"dest",
")",
"return",
"(",
"subcmd",
",",
"subcmd_kwargs",
")"
] | Get subcommand options from global parsed
arguments. | [
"Get",
"subcommand",
"options",
"from",
"global",
"parsed",
"arguments",
"."
Source: https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/main.py#L23-L32 (train split)
Phyks/libbmc | libbmc/citations/plaintext.py | get_plaintext_citations

def get_plaintext_citations(file):
    """
    Parse a plaintext file to get a clean list of plaintext citations. The \
    file should have one citation per line.
    :param file: Either the path to the plaintext file or the content of a \
        plaintext file.
    :returns: A list of cleaned plaintext citations.
    """
    # Handle path or content
    if os.path.isfile(file):
        with open(file, 'r') as fh:
            content = fh.readlines()
    else:
        content = file.splitlines()
    # Clean every line to have plaintext
    cleaned_citations = [tools.clean_whitespaces(line) for line in content]
    return cleaned_citations
"""
Parse a plaintext file to get a clean list of plaintext citations. The \
file should have one citation per line.
:param file: Either the path to the plaintext file or the content of a \
plaintext file.
:returns: A list of cleaned plaintext citations.
"""
# Handle path or content
if os.path.isfile(file):
with open(file, 'r') as fh:
content = fh.readlines()
else:
content = file.splitlines()
# Clean every line to have plaintext
cleaned_citations = [tools.clean_whitespaces(line) for line in content]
return cleaned_citations | [
"def",
"get_plaintext_citations",
"(",
"file",
")",
":",
"# Handle path or content",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file",
")",
":",
"with",
"open",
"(",
"file",
",",
"'r'",
")",
"as",
"fh",
":",
"content",
"=",
"fh",
".",
"readlines",
"(",
")",
"else",
":",
"content",
"=",
"file",
".",
"splitlines",
"(",
")",
"# Clean every line to have plaintext",
"cleaned_citations",
"=",
"[",
"tools",
".",
"clean_whitespaces",
"(",
"line",
")",
"for",
"line",
"in",
"content",
"]",
"return",
"cleaned_citations"
] | Parse a plaintext file to get a clean list of plaintext citations. The \
file should have one citation per line.
:param file: Either the path to the plaintext file or the content of a \
plaintext file.
:returns: A list of cleaned plaintext citations. | [
"Parse",
"a",
"plaintext",
"file",
"to",
"get",
"a",
"clean",
"list",
"of",
"plaintext",
"citations",
".",
"The",
"\\",
"file",
"should",
"have",
"one",
"citation",
"per",
"line",
"."
Source: https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/plaintext.py#L20-L37 (train split)
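A quick usage sketch (not from the library); it passes raw text rather than a path, so nothing touches the filesystem, and it only assumes that tools.clean_whitespaces collapses runs of whitespace, as its name suggests:

from libbmc.citations import plaintext

raw = "A. Author, Some paper,   J. of Things 42 (2014).\nB. Writer, Another   paper, 2015."
print(plaintext.get_plaintext_citations(raw))
# Expected, under the assumption above:
# ['A. Author, Some paper, J. of Things 42 (2014).', 'B. Writer, Another paper, 2015.']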
Phyks/libbmc | libbmc/citations/plaintext.py | get_cited_dois

def get_cited_dois(file):
    """
    Get the DOIs of the papers cited in a plaintext file. The file should \
    have one citation per line.
    .. note::
        This function is also used as a backend tool by most of the other \
        citations processors, to factorize the code.
    :param file: Either the path to the plaintext file or the content of a \
        plaintext file. It can also be a parsed list of plaintext \
        citations and, in this case, no preprocessing is done.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    # If file is not a pre-processed list of plaintext citations
    if not isinstance(file, list):
        # It is either a path to a plaintext file or the content of a plaintext
        # file, we need some pre-processing to get a list of citations.
        plaintext_citations = get_plaintext_citations(file)
    else:
        # Else, we passed a list of plaintext citations.
        plaintext_citations = file
    dois = {}
    crossref_queue = []
    # Try to get the DOI directly from the citation
    for citation in plaintext_citations[:]:
        # Some citations already contain a DOI so try to match it directly
        matched_dois = doi.extract_from_text(citation)
        if len(matched_dois) > 0:
            # Add the DOI and go on
            dois[citation] = next(iter(matched_dois))
            continue
        # Same thing for arXiv id
        matched_arxiv = arxiv.extract_from_text(citation)
        if len(matched_arxiv) > 0:
            # Add the associated DOI and go on
            dois[citation] = arxiv.to_doi(next(iter(matched_arxiv)))
            continue
        # If no match found, stack it for next step
        # Note to remove URLs in the citation as the plaintext citations can
        # contain URLs and they are bad for the CrossRef API.
        crossref_queue.append(tools.remove_urls(citation))
    # Do batch with remaining papers, to prevent from the timeout of CrossRef
    for batch in tools.batch(crossref_queue, CROSSREF_MAX_BATCH_SIZE):
        batch = [i for i in batch]
        try:
            # Fetch results from CrossRef
            request = requests.post(CROSSREF_LINKS_API_URL, json=batch)
            for result in request.json()["results"]:
                # Try to get a DOI
                try:
                    dois[result["text"]] = result["doi"]
                except KeyError:
                    # Or set it to None
                    dois[result["text"]] = None
        except (RequestException, ValueError, KeyError):
            # If an exception occurred, set all the DOIs to None for the
            # current batch
            for i in batch:
                dois[i] = None
    return dois
"""
Get the DOIs of the papers cited in a plaintext file. The file should \
have one citation per line.
.. note::
This function is also used as a backend tool by most of the others \
citations processors, to factorize the code.
:param file: Either the path to the plaintext file or the content of a \
plaintext file. It can also be a parsed list of plaintext \
citations and, in this case, no preprocessing is done.
:returns: A dict of cleaned plaintext citations and their associated DOI.
"""
# If file is not a pre-processed list of plaintext citations
if not isinstance(file, list):
# It is either a path to a plaintext file or the content of a plaintext
# file, we need some pre-processing to get a list of citations.
plaintext_citations = get_plaintext_citations(file)
else:
# Else, we passed a list of plaintext citations.
plaintext_citations = file
dois = {}
crossref_queue = []
# Try to get the DOI directly from the citation
for citation in plaintext_citations[:]:
# Some citations already contain a DOI so try to match it directly
matched_dois = doi.extract_from_text(citation)
if len(matched_dois) > 0:
# Add the DOI and go on
dois[citation] = next(iter(matched_dois))
continue
# Same thing for arXiv id
matched_arxiv = arxiv.extract_from_text(citation)
if len(matched_arxiv) > 0:
# Add the associated DOI and go on
dois[citation] = arxiv.to_doi(next(iter(matched_arxiv)))
continue
# If no match found, stack it for next step
# Note to remove URLs in the citation as the plaintext citations can
# contain URLs and they are bad for the CrossRef API.
crossref_queue.append(tools.remove_urls(citation))
# Do batch with remaining papers, to prevent from the timeout of CrossRef
for batch in tools.batch(crossref_queue, CROSSREF_MAX_BATCH_SIZE):
batch = [i for i in batch]
try:
# Fetch results from CrossRef
request = requests.post(CROSSREF_LINKS_API_URL, json=batch)
for result in request.json()["results"]:
# Try to get a DOI
try:
dois[result["text"]] = result["doi"]
except KeyError:
# Or set it to None
dois[result["text"]] = None
except (RequestException, ValueError, KeyError):
# If an exception occurred, set all the DOIs to None for the
# current batch
for i in batch:
dois[i] = None
return dois | [
"def",
"get_cited_dois",
"(",
"file",
")",
":",
"# If file is not a pre-processed list of plaintext citations",
"if",
"not",
"isinstance",
"(",
"file",
",",
"list",
")",
":",
"# It is either a path to a plaintext file or the content of a plaintext",
"# file, we need some pre-processing to get a list of citations.",
"plaintext_citations",
"=",
"get_plaintext_citations",
"(",
"file",
")",
"else",
":",
"# Else, we passed a list of plaintext citations.",
"plaintext_citations",
"=",
"file",
"dois",
"=",
"{",
"}",
"crossref_queue",
"=",
"[",
"]",
"# Try to get the DOI directly from the citation",
"for",
"citation",
"in",
"plaintext_citations",
"[",
":",
"]",
":",
"# Some citations already contain a DOI so try to match it directly",
"matched_dois",
"=",
"doi",
".",
"extract_from_text",
"(",
"citation",
")",
"if",
"len",
"(",
"matched_dois",
")",
">",
"0",
":",
"# Add the DOI and go on",
"dois",
"[",
"citation",
"]",
"=",
"next",
"(",
"iter",
"(",
"matched_dois",
")",
")",
"continue",
"# Same thing for arXiv id",
"matched_arxiv",
"=",
"arxiv",
".",
"extract_from_text",
"(",
"citation",
")",
"if",
"len",
"(",
"matched_arxiv",
")",
">",
"0",
":",
"# Add the associated DOI and go on",
"dois",
"[",
"citation",
"]",
"=",
"arxiv",
".",
"to_doi",
"(",
"next",
"(",
"iter",
"(",
"matched_arxiv",
")",
")",
")",
"continue",
"# If no match found, stack it for next step",
"# Note to remove URLs in the citation as the plaintext citations can",
"# contain URLs and they are bad for the CrossRef API.",
"crossref_queue",
".",
"append",
"(",
"tools",
".",
"remove_urls",
"(",
"citation",
")",
")",
"# Do batch with remaining papers, to prevent from the timeout of CrossRef",
"for",
"batch",
"in",
"tools",
".",
"batch",
"(",
"crossref_queue",
",",
"CROSSREF_MAX_BATCH_SIZE",
")",
":",
"batch",
"=",
"[",
"i",
"for",
"i",
"in",
"batch",
"]",
"try",
":",
"# Fetch results from CrossRef",
"request",
"=",
"requests",
".",
"post",
"(",
"CROSSREF_LINKS_API_URL",
",",
"json",
"=",
"batch",
")",
"for",
"result",
"in",
"request",
".",
"json",
"(",
")",
"[",
"\"results\"",
"]",
":",
"# Try to get a DOI",
"try",
":",
"dois",
"[",
"result",
"[",
"\"text\"",
"]",
"]",
"=",
"result",
"[",
"\"doi\"",
"]",
"except",
"KeyError",
":",
"# Or set it to None",
"dois",
"[",
"result",
"[",
"\"text\"",
"]",
"]",
"=",
"None",
"except",
"(",
"RequestException",
",",
"ValueError",
",",
"KeyError",
")",
":",
"# If an exception occurred, set all the DOIs to None for the",
"# current batch",
"for",
"i",
"in",
"batch",
":",
"dois",
"[",
"i",
"]",
"=",
"None",
"return",
"dois"
] | Get the DOIs of the papers cited in a plaintext file. The file should \
have one citation per line.
.. note::
This function is also used as a backend tool by most of the others \
citations processors, to factorize the code.
:param file: Either the path to the plaintext file or the content of a \
plaintext file. It can also be a parsed list of plaintext \
citations and, in this case, no preprocessing is done.
:returns: A dict of cleaned plaintext citations and their associated DOI. | [
"Get",
"the",
"DOIs",
"of",
"the",
"papers",
"cited",
"in",
"a",
"plaintext",
"file",
".",
"The",
"file",
"should",
"\\",
"have",
"one",
"citation",
"per",
"line",
"."
] | 9ef1a29d2514157d1edd6c13ecbd61b07ae9315e | https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/plaintext.py#L40-L103 | train |
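
Usage sketch for get_cited_dois. It is network-dependent: anything without an inline DOI or arXiv id is sent to the CrossRef links API, so the second result below may come back as None:

from libbmc.citations import plaintext

citations = [
    "P. A. M. Dirac, The quantum theory of the electron, 1928. doi:10.1098/rspa.1928.0023",
    "An obscure citation that CrossRef may not resolve",
]
# A pre-parsed list is passed, so no plaintext preprocessing is done.
dois = plaintext.get_cited_dois(citations)
for text, doi_id in dois.items():
    print(doi_id, "<-", text)
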
Phyks/libbmc | libbmc/isbn.py | is_valid | def is_valid(isbn_id):
"""
Check that a given string is a valid ISBN.
:param isbn_id: the isbn to be checked.
:returns: boolean indicating whether the isbn is valid or not.
>>> is_valid("978-3-16-148410-0")
True
>>> is_valid("9783161484100")
True
>>> is_valid("9783161484100aa")
False
>>> is_valid("abcd")
False
>>> is_valid("0136091814")
True
>>> is_valid("0136091812")
False
>>> is_valid("9780136091817")
False
>>> is_valid("123456789X")
True
"""
return (
(not isbnlib.notisbn(isbn_id)) and (
isbnlib.get_canonical_isbn(isbn_id) == isbn_id or
isbnlib.mask(isbnlib.get_canonical_isbn(isbn_id)) == isbn_id)
) | python | def is_valid(isbn_id):
"""
Check that a given string is a valid ISBN.
:param isbn_id: the isbn to be checked.
:returns: boolean indicating whether the isbn is valid or not.
>>> is_valid("978-3-16-148410-0")
True
>>> is_valid("9783161484100")
True
>>> is_valid("9783161484100aa")
False
>>> is_valid("abcd")
False
>>> is_valid("0136091814")
True
>>> is_valid("0136091812")
False
>>> is_valid("9780136091817")
False
>>> is_valid("123456789X")
True
"""
return (
(not isbnlib.notisbn(isbn_id)) and (
isbnlib.get_canonical_isbn(isbn_id) == isbn_id or
isbnlib.mask(isbnlib.get_canonical_isbn(isbn_id)) == isbn_id)
) | [
"def",
"is_valid",
"(",
"isbn_id",
")",
":",
"return",
"(",
"(",
"not",
"isbnlib",
".",
"notisbn",
"(",
"isbn_id",
")",
")",
"and",
"(",
"isbnlib",
".",
"get_canonical_isbn",
"(",
"isbn_id",
")",
"==",
"isbn_id",
"or",
"isbnlib",
".",
"mask",
"(",
"isbnlib",
".",
"get_canonical_isbn",
"(",
"isbn_id",
")",
")",
"==",
"isbn_id",
")",
")"
] | Check that a given string is a valid ISBN.
:param isbn_id: the isbn to be checked.
:returns: boolean indicating whether the isbn is valid or not.
>>> is_valid("978-3-16-148410-0")
True
>>> is_valid("9783161484100")
True
>>> is_valid("9783161484100aa")
False
>>> is_valid("abcd")
False
>>> is_valid("0136091814")
True
>>> is_valid("0136091812")
False
>>> is_valid("9780136091817")
False
>>> is_valid("123456789X")
True | [
"Check",
"that",
"a",
"given",
"string",
"is",
"a",
"valid",
"ISBN",
"."
] | 9ef1a29d2514157d1edd6c13ecbd61b07ae9315e | https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/isbn.py#L14-L49 | train |
Phyks/libbmc | libbmc/isbn.py | extract_from_text | def extract_from_text(text):
"""
Extract ISBNs from a text.
:param text: Some text.
:returns: A list of canonical ISBNs found in the text.
>>> extract_from_text("978-3-16-148410-0 9783161484100 9783161484100aa abcd 0136091814 0136091812 9780136091817 123456789X")
['9783161484100', '9783161484100', '9783161484100', '0136091814', '123456789X']
"""
isbns = [isbnlib.get_canonical_isbn(isbn)
for isbn in isbnlib.get_isbnlike(text)]
return [i for i in isbns if i is not None] | python | def extract_from_text(text):
"""
Extract ISBNs from a text.
:param text: Some text.
:returns: A list of canonical ISBNs found in the text.
>>> extract_from_text("978-3-16-148410-0 9783161484100 9783161484100aa abcd 0136091814 0136091812 9780136091817 123456789X")
['9783161484100', '9783161484100', '9783161484100', '0136091814', '123456789X']
"""
isbns = [isbnlib.get_canonical_isbn(isbn)
for isbn in isbnlib.get_isbnlike(text)]
return [i for i in isbns if i is not None] | [
"def",
"extract_from_text",
"(",
"text",
")",
":",
"isbns",
"=",
"[",
"isbnlib",
".",
"get_canonical_isbn",
"(",
"isbn",
")",
"for",
"isbn",
"in",
"isbnlib",
".",
"get_isbnlike",
"(",
"text",
")",
"]",
"return",
"[",
"i",
"for",
"i",
"in",
"isbns",
"if",
"i",
"is",
"not",
"None",
"]"
] | Extract ISBNs from a text.
:param text: Some text.
:returns: A list of canonical ISBNs found in the text.
>>> extract_from_text("978-3-16-148410-0 9783161484100 9783161484100aa abcd 0136091814 0136091812 9780136091817 123456789X")
['9783161484100', '9783161484100', '9783161484100', '0136091814', '123456789X'] | [
"Extract",
"ISBNs",
"from",
"a",
"text",
"."
] | 9ef1a29d2514157d1edd6c13ecbd61b07ae9315e | https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/isbn.py#L52-L64 | train |
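
Quick sketch combining the two isbn helpers above (isbnlib does the actual parsing; the sample sentence is made up):

from libbmc import isbn

text = "See 978-3-16-148410-0 and the older 0-13-609181-4 printing."
found = isbn.extract_from_text(text)          # canonical forms only
print(found)                                  # e.g. ['9783161484100', '0136091814']
print(all(isbn.is_valid(i) for i in found))   # True
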
Phyks/libbmc | libbmc/isbn.py | get_bibtex | def get_bibtex(isbn_identifier):
"""
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
"""
# Try to find the BibTeX using associated DOIs
bibtex = doi.get_bibtex(to_doi(isbn_identifier))
if bibtex is None:
# In some cases, there are no DOIs for a given ISBN. In this case, try
# to fetch bibtex directly from the ISBN, using a combination of
# Google Books and worldcat.org results.
bibtex = isbnlib.registry.bibformatters['bibtex'](
isbnlib.meta(isbn_identifier, 'default'))
return bibtex | python | def get_bibtex(isbn_identifier):
"""
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
"""
# Try to find the BibTeX using associated DOIs
bibtex = doi.get_bibtex(to_doi(isbn_identifier))
if bibtex is None:
# In some cases, there are no DOIs for a given ISBN. In this case, try
# to fetch bibtex directly from the ISBN, using a combination of
# Google Books and worldcat.org results.
bibtex = isbnlib.registry.bibformatters['bibtex'](
isbnlib.meta(isbn_identifier, 'default'))
return bibtex | [
"def",
"get_bibtex",
"(",
"isbn_identifier",
")",
":",
"# Try to find the BibTeX using associated DOIs",
"bibtex",
"=",
"doi",
".",
"get_bibtex",
"(",
"to_doi",
"(",
"isbn_identifier",
")",
")",
"if",
"bibtex",
"is",
"None",
":",
"# In some cases, there are no DOIs for a given ISBN. In this case, try",
"# to fetch bibtex directly from the ISBN, using a combination of",
"# Google Books and worldcat.org results.",
"bibtex",
"=",
"isbnlib",
".",
"registry",
".",
"bibformatters",
"[",
"'bibtex'",
"]",
"(",
"isbnlib",
".",
"meta",
"(",
"isbn_identifier",
",",
"'default'",
")",
")",
"return",
"bibtex"
] | Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}' | [
"Get",
"a",
"BibTeX",
"string",
"for",
"the",
"given",
"ISBN",
"."
] | 9ef1a29d2514157d1edd6c13ecbd61b07ae9315e | https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/isbn.py#L67-L85 | train |
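
get_bibtex is network-bound (DOI lookup first, then a Google Books / worldcat fallback through isbnlib), so treat this as a sketch rather than a reproducible test:

from libbmc import isbn

bibtex = isbn.get_bibtex("9783161484100")
if bibtex is not None:
    print(bibtex.splitlines()[0])   # e.g. '@book{9783161484100,'
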
eonpatapon/contrail-api-cli | contrail_api_cli/parser.py | CommandParser.used_options | def used_options(self):
"""Return options already used in the
command line
rtype: command.Option generator
"""
for option_str in filter(lambda c: c.startswith('-'), self.words):
for option in list(self.cmd.options.values()):
if option_str in option.option_strings:
yield option | python | def used_options(self):
"""Return options already used in the
command line
rtype: command.Option generator
"""
for option_str in filter(lambda c: c.startswith('-'), self.words):
for option in list(self.cmd.options.values()):
if option_str in option.option_strings:
yield option | [
"def",
"used_options",
"(",
"self",
")",
":",
"for",
"option_str",
"in",
"filter",
"(",
"lambda",
"c",
":",
"c",
".",
"startswith",
"(",
"'-'",
")",
",",
"self",
".",
"words",
")",
":",
"for",
"option",
"in",
"list",
"(",
"self",
".",
"cmd",
".",
"options",
".",
"values",
"(",
")",
")",
":",
"if",
"option_str",
"in",
"option",
".",
"option_strings",
":",
"yield",
"option"
] | Return options already used in the
command line
rtype: command.Option generator | [
"Return",
"options",
"already",
"used",
"in",
"the",
"command",
"line"
] | 1571bf523fa054f3d6bf83dba43a224fea173a73 | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/parser.py#L40-L49 | train |
eonpatapon/contrail-api-cli | contrail_api_cli/parser.py | CommandParser.available_options | def available_options(self):
"""Return options that can be used given
the current cmd line
rtype: command.Option generator
"""
for option in list(self.cmd.options.values()):
if (option.is_multiple or
option not in list(self.used_options)):
yield option | python | def available_options(self):
"""Return options that can be used given
the current cmd line
rtype: command.Option generator
"""
for option in list(self.cmd.options.values()):
if (option.is_multiple or
option not in list(self.used_options)):
yield option | [
"def",
"available_options",
"(",
"self",
")",
":",
"for",
"option",
"in",
"list",
"(",
"self",
".",
"cmd",
".",
"options",
".",
"values",
"(",
")",
")",
":",
"if",
"(",
"option",
".",
"is_multiple",
"or",
"option",
"not",
"in",
"list",
"(",
"self",
".",
"used_options",
")",
")",
":",
"yield",
"option"
] | Return options that can be used given
the current cmd line
rtype: command.Option generator | [
"Return",
"options",
"that",
"can",
"be",
"used",
"given",
"the",
"current",
"cmd",
"line"
] | 1571bf523fa054f3d6bf83dba43a224fea173a73 | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/parser.py#L52-L61 | train |
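
The two generators above need a live command object and prompt-toolkit document, so here is only a self-contained stand-in that mimics the used/available split (the Option class below is a fake for illustration, not the real contrail-api-cli one):

class Option:
    def __init__(self, option_strings, is_multiple=False):
        self.option_strings = option_strings
        self.is_multiple = is_multiple

options = {"long": Option(["-l", "--long"]),
           "filter": Option(["-f", "--filter"], is_multiple=True)}
words = ["ls", "-l", "virtual-network"]

# Options named on the command line are "used"; multiple options never run out.
used = [o for w in words if w.startswith("-")
        for o in options.values() if w in o.option_strings]
available = [o for o in options.values() if o.is_multiple or o not in used]
print([o.option_strings for o in available])   # [['-f', '--filter']] -- '--long' is already used
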
eonpatapon/contrail-api-cli | contrail_api_cli/parser.py | CommandParser.used_args | def used_args(self):
"""Return args already used in the
command line
rtype: command.Arg generator
"""
# get all arguments values from the command line
values = []
for idx, c in enumerate(self.words[1:]):
if c.startswith('-'):
continue
option_str = self.words[1:][idx - 1]
option = self.get_option(option_str)
if option is None or not option.need_value:
values.append((c, c == self.document.get_word_before_cursor(WORD=True)))
logger.debug("Found args values %s" % values)
# consume values
for arg in self.cmd.args.values():
if not values:
raise StopIteration
if arg.is_multiple:
values = []
yield arg
elif type(arg.nargs) is int:
for _ in range(arg.nargs):
value = values.pop(0)
# not the current argument
if value[1] is False:
yield arg
if not values:
raise StopIteration | python | def used_args(self):
"""Return args already used in the
command line
rtype: command.Arg generator
"""
# get all arguments values from the command line
values = []
for idx, c in enumerate(self.words[1:]):
if c.startswith('-'):
continue
option_str = self.words[1:][idx - 1]
option = self.get_option(option_str)
if option is None or not option.need_value:
values.append((c, c == self.document.get_word_before_cursor(WORD=True)))
logger.debug("Found args values %s" % values)
# consume values
for arg in self.cmd.args.values():
if not values:
raise StopIteration
if arg.is_multiple:
values = []
yield arg
elif type(arg.nargs) is int:
for _ in range(arg.nargs):
value = values.pop(0)
# not the current argument
if value[1] is False:
yield arg
if not values:
raise StopIteration | [
"def",
"used_args",
"(",
"self",
")",
":",
"# get all arguments values from the command line",
"values",
"=",
"[",
"]",
"for",
"idx",
",",
"c",
"in",
"enumerate",
"(",
"self",
".",
"words",
"[",
"1",
":",
"]",
")",
":",
"if",
"c",
".",
"startswith",
"(",
"'-'",
")",
":",
"continue",
"option_str",
"=",
"self",
".",
"words",
"[",
"1",
":",
"]",
"[",
"idx",
"-",
"1",
"]",
"option",
"=",
"self",
".",
"get_option",
"(",
"option_str",
")",
"if",
"option",
"is",
"None",
"or",
"not",
"option",
".",
"need_value",
":",
"values",
".",
"append",
"(",
"(",
"c",
",",
"c",
"==",
"self",
".",
"document",
".",
"get_word_before_cursor",
"(",
"WORD",
"=",
"True",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Found args values %s\"",
"%",
"values",
")",
"# consume values",
"for",
"arg",
"in",
"self",
".",
"cmd",
".",
"args",
".",
"values",
"(",
")",
":",
"if",
"not",
"values",
":",
"raise",
"StopIteration",
"if",
"arg",
".",
"is_multiple",
":",
"values",
"=",
"[",
"]",
"yield",
"arg",
"elif",
"type",
"(",
"arg",
".",
"nargs",
")",
"is",
"int",
":",
"for",
"_",
"in",
"range",
"(",
"arg",
".",
"nargs",
")",
":",
"value",
"=",
"values",
".",
"pop",
"(",
"0",
")",
"# not the current argument",
"if",
"value",
"[",
"1",
"]",
"is",
"False",
":",
"yield",
"arg",
"if",
"not",
"values",
":",
"raise",
"StopIteration"
] | Return args already used in the
command line
rtype: command.Arg generator | [
"Return",
"args",
"already",
"used",
"in",
"the",
"command",
"line"
] | 1571bf523fa054f3d6bf83dba43a224fea173a73 | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/parser.py#L64-L94 | train |
eonpatapon/contrail-api-cli | contrail_api_cli/parser.py | CommandParser.available_args | def available_args(self):
"""Return args that can be used given
the current cmd line
rtype: command.Arg generator
"""
used = list(self.used_args)
logger.debug('Found used args: %s' % used)
for arg in list(self.cmd.args.values()):
if (arg.is_multiple or
arg not in used):
yield arg
elif (type(arg.nargs) is int and
arg.nargs > 1 and
not arg.nargs == used.count(arg)):
yield arg | python | def available_args(self):
"""Return args that can be used given
the current cmd line
rtype: command.Arg generator
"""
used = list(self.used_args)
logger.debug('Found used args: %s' % used)
for arg in list(self.cmd.args.values()):
if (arg.is_multiple or
arg not in used):
yield arg
elif (type(arg.nargs) is int and
arg.nargs > 1 and
not arg.nargs == used.count(arg)):
yield arg | [
"def",
"available_args",
"(",
"self",
")",
":",
"used",
"=",
"list",
"(",
"self",
".",
"used_args",
")",
"logger",
".",
"debug",
"(",
"'Found used args: %s'",
"%",
"used",
")",
"for",
"arg",
"in",
"list",
"(",
"self",
".",
"cmd",
".",
"args",
".",
"values",
"(",
")",
")",
":",
"if",
"(",
"arg",
".",
"is_multiple",
"or",
"arg",
"not",
"in",
"used",
")",
":",
"yield",
"arg",
"elif",
"(",
"type",
"(",
"arg",
".",
"nargs",
")",
"is",
"int",
"and",
"arg",
".",
"nargs",
">",
"1",
"and",
"not",
"arg",
".",
"nargs",
"==",
"used",
".",
"count",
"(",
"arg",
")",
")",
":",
"yield",
"arg"
] | Return args that can be used given
the current cmd line
rtype: command.Arg generator | [
"Return",
"args",
"that",
"can",
"be",
"used",
"given",
"the",
"current",
"cmd",
"line"
] | 1571bf523fa054f3d6bf83dba43a224fea173a73 | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/parser.py#L97-L112 | train |
ph4r05/monero-serialize | monero_serialize/core/erefs.py | is_elem_ref | def is_elem_ref(elem_ref):
"""
Returns true if the elem_ref is an element reference
:param elem_ref:
:return:
"""
return (
elem_ref
and isinstance(elem_ref, tuple)
and len(elem_ref) == 3
and (elem_ref[0] == ElemRefObj or elem_ref[0] == ElemRefArr)
) | python | def is_elem_ref(elem_ref):
"""
Returns true if the elem_ref is an element reference
:param elem_ref:
:return:
"""
return (
elem_ref
and isinstance(elem_ref, tuple)
and len(elem_ref) == 3
and (elem_ref[0] == ElemRefObj or elem_ref[0] == ElemRefArr)
) | [
"def",
"is_elem_ref",
"(",
"elem_ref",
")",
":",
"return",
"(",
"elem_ref",
"and",
"isinstance",
"(",
"elem_ref",
",",
"tuple",
")",
"and",
"len",
"(",
"elem_ref",
")",
"==",
"3",
"and",
"(",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefObj",
"or",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefArr",
")",
")"
] | Returns true if the elem_ref is an element reference
:param elem_ref:
:return: | [
"Returns",
"true",
"if",
"the",
"elem_ref",
"is",
"an",
"element",
"reference"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/core/erefs.py#L11-L23 | train |
ph4r05/monero-serialize | monero_serialize/core/erefs.py | get_elem | def get_elem(elem_ref, default=None):
"""
Gets the element referenced by elem_ref or returns the elem_ref directly if its not a reference.
:param elem_ref:
:param default:
:return:
"""
if not is_elem_ref(elem_ref):
return elem_ref
elif elem_ref[0] == ElemRefObj:
return getattr(elem_ref[1], elem_ref[2], default)
elif elem_ref[0] == ElemRefArr:
return elem_ref[1][elem_ref[2]] | python | def get_elem(elem_ref, default=None):
"""
Gets the element referenced by elem_ref or returns the elem_ref directly if its not a reference.
:param elem_ref:
:param default:
:return:
"""
if not is_elem_ref(elem_ref):
return elem_ref
elif elem_ref[0] == ElemRefObj:
return getattr(elem_ref[1], elem_ref[2], default)
elif elem_ref[0] == ElemRefArr:
return elem_ref[1][elem_ref[2]] | [
"def",
"get_elem",
"(",
"elem_ref",
",",
"default",
"=",
"None",
")",
":",
"if",
"not",
"is_elem_ref",
"(",
"elem_ref",
")",
":",
"return",
"elem_ref",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefObj",
":",
"return",
"getattr",
"(",
"elem_ref",
"[",
"1",
"]",
",",
"elem_ref",
"[",
"2",
"]",
",",
"default",
")",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefArr",
":",
"return",
"elem_ref",
"[",
"1",
"]",
"[",
"elem_ref",
"[",
"2",
"]",
"]"
] | Gets the element referenced by elem_ref or returns the elem_ref directly if its not a reference.
:param elem_ref:
:param default:
:return: | [
"Gets",
"the",
"element",
"referenced",
"by",
"elem_ref",
"or",
"returns",
"the",
"elem_ref",
"directly",
"if",
"its",
"not",
"a",
"reference",
"."
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/core/erefs.py#L40-L53 | train |
ph4r05/monero-serialize | monero_serialize/core/erefs.py | set_elem | def set_elem(elem_ref, elem):
"""
Sets element referenced by the elem_ref. Returns the elem.
:param elem_ref:
:param elem:
:return:
"""
if elem_ref is None or elem_ref == elem or not is_elem_ref(elem_ref):
return elem
elif elem_ref[0] == ElemRefObj:
setattr(elem_ref[1], elem_ref[2], elem)
return elem
elif elem_ref[0] == ElemRefArr:
elem_ref[1][elem_ref[2]] = elem
return elem | python | def set_elem(elem_ref, elem):
"""
Sets element referenced by the elem_ref. Returns the elem.
:param elem_ref:
:param elem:
:return:
"""
if elem_ref is None or elem_ref == elem or not is_elem_ref(elem_ref):
return elem
elif elem_ref[0] == ElemRefObj:
setattr(elem_ref[1], elem_ref[2], elem)
return elem
elif elem_ref[0] == ElemRefArr:
elem_ref[1][elem_ref[2]] = elem
return elem | [
"def",
"set_elem",
"(",
"elem_ref",
",",
"elem",
")",
":",
"if",
"elem_ref",
"is",
"None",
"or",
"elem_ref",
"==",
"elem",
"or",
"not",
"is_elem_ref",
"(",
"elem_ref",
")",
":",
"return",
"elem",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefObj",
":",
"setattr",
"(",
"elem_ref",
"[",
"1",
"]",
",",
"elem_ref",
"[",
"2",
"]",
",",
"elem",
")",
"return",
"elem",
"elif",
"elem_ref",
"[",
"0",
"]",
"==",
"ElemRefArr",
":",
"elem_ref",
"[",
"1",
"]",
"[",
"elem_ref",
"[",
"2",
"]",
"]",
"=",
"elem",
"return",
"elem"
] | Sets element referenced by the elem_ref. Returns the elem.
:param elem_ref:
:param elem:
:return: | [
"Sets",
"element",
"referenced",
"by",
"the",
"elem_ref",
".",
"Returns",
"the",
"elem",
"."
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/core/erefs.py#L56-L73 | train |
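
Usage sketch for the three eref helpers, assuming the module also exposes the ElemRefObj / ElemRefArr sentinels referenced in the code above:

from monero_serialize.core import erefs

class Tx:
    version = 1

obj_ref = (erefs.ElemRefObj, Tx, "version")   # reference to the attribute Tx.version
print(erefs.is_elem_ref(obj_ref))             # True
erefs.set_elem(obj_ref, 2)                    # writes through the reference
print(erefs.get_elem(obj_ref))                # 2

arr = [10, 20, 30]
arr_ref = (erefs.ElemRefArr, arr, 1)          # reference to arr[1]
erefs.set_elem(arr_ref, 99)
print(arr)                                    # [10, 99, 30]
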
crm416/semantic | semantic/solver.py | MathService._preprocess | def _preprocess(inp):
"""Revise wording to match canonical and expected forms."""
inp = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', inp)
inp = re.sub(r'to the (.*) power', r'to \g<1>', inp)
inp = re.sub(r'to the (.*?)(\b)', r'to \g<1>\g<2>', inp)
inp = re.sub(r'log of', r'log', inp)
inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)
inp = re.sub(r'squared', r'to two', inp)
inp = re.sub(r'cubed', r'to three', inp)
inp = re.sub(r'divided?( by)?', r'divide', inp)
inp = re.sub(r'(\b)over(\b)', r'\g<1>divide\g<2>', inp)
inp = re.sub(r'(\b)EE(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)E(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)pie(\b)', r'\g<1>pi\g<2>', inp)
inp = re.sub(r'(\b)PI(\b)', r'\g<1>pi\g<2>', inp)
def findImplicitMultiplications(inp):
"""Replace omitted 'times' references."""
def findConstantMultiplications(inp):
split = inp.split(' ')
revision = ""
converter = NumberService()
for i, w in enumerate(split):
if i > 0 and w in MathService.__constants__:
if converter.isValid(split[i - 1]):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
def findUnaryMultiplications(inp):
split = inp.split(' ')
revision = ""
for i, w in enumerate(split):
if i > 0 and w in MathService.__unaryOperators__:
last_op = split[i - 1]
binary = last_op in MathService.__binaryOperators__
unary = last_op in MathService.__unaryOperators__
if last_op and not (binary or unary):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
return findUnaryMultiplications(findConstantMultiplications(inp))
return findImplicitMultiplications(inp) | python | def _preprocess(inp):
"""Revise wording to match canonical and expected forms."""
inp = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', inp)
inp = re.sub(r'to the (.*) power', r'to \g<1>', inp)
inp = re.sub(r'to the (.*?)(\b)', r'to \g<1>\g<2>', inp)
inp = re.sub(r'log of', r'log', inp)
inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)
inp = re.sub(r'squared', r'to two', inp)
inp = re.sub(r'cubed', r'to three', inp)
inp = re.sub(r'divided?( by)?', r'divide', inp)
inp = re.sub(r'(\b)over(\b)', r'\g<1>divide\g<2>', inp)
inp = re.sub(r'(\b)EE(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)E(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)pie(\b)', r'\g<1>pi\g<2>', inp)
inp = re.sub(r'(\b)PI(\b)', r'\g<1>pi\g<2>', inp)
def findImplicitMultiplications(inp):
"""Replace omitted 'times' references."""
def findConstantMultiplications(inp):
split = inp.split(' ')
revision = ""
converter = NumberService()
for i, w in enumerate(split):
if i > 0 and w in MathService.__constants__:
if converter.isValid(split[i - 1]):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
def findUnaryMultiplications(inp):
split = inp.split(' ')
revision = ""
for i, w in enumerate(split):
if i > 0 and w in MathService.__unaryOperators__:
last_op = split[i - 1]
binary = last_op in MathService.__binaryOperators__
unary = last_op in MathService.__unaryOperators__
if last_op and not (binary or unary):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
return findUnaryMultiplications(findConstantMultiplications(inp))
return findImplicitMultiplications(inp) | [
"def",
"_preprocess",
"(",
"inp",
")",
":",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)a(\\b)'",
",",
"r'\\g<1>one\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'to the (.*) power'",
",",
"r'to \\g<1>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'to the (.*?)(\\b)'",
",",
"r'to \\g<1>\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'log of'",
",",
"r'log'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(square )?root( of)?'",
",",
"r'sqrt'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'squared'",
",",
"r'to two'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'cubed'",
",",
"r'to three'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'divided?( by)?'",
",",
"r'divide'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)over(\\b)'",
",",
"r'\\g<1>divide\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)EE(\\b)'",
",",
"r'\\g<1>e\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)E(\\b)'",
",",
"r'\\g<1>e\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)pie(\\b)'",
",",
"r'\\g<1>pi\\g<2>'",
",",
"inp",
")",
"inp",
"=",
"re",
".",
"sub",
"(",
"r'(\\b)PI(\\b)'",
",",
"r'\\g<1>pi\\g<2>'",
",",
"inp",
")",
"def",
"findImplicitMultiplications",
"(",
"inp",
")",
":",
"\"\"\"Replace omitted 'times' references.\"\"\"",
"def",
"findConstantMultiplications",
"(",
"inp",
")",
":",
"split",
"=",
"inp",
".",
"split",
"(",
"' '",
")",
"revision",
"=",
"\"\"",
"converter",
"=",
"NumberService",
"(",
")",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"split",
")",
":",
"if",
"i",
">",
"0",
"and",
"w",
"in",
"MathService",
".",
"__constants__",
":",
"if",
"converter",
".",
"isValid",
"(",
"split",
"[",
"i",
"-",
"1",
"]",
")",
":",
"revision",
"+=",
"\" times\"",
"if",
"not",
"revision",
":",
"revision",
"=",
"w",
"else",
":",
"revision",
"+=",
"\" \"",
"+",
"w",
"return",
"revision",
"def",
"findUnaryMultiplications",
"(",
"inp",
")",
":",
"split",
"=",
"inp",
".",
"split",
"(",
"' '",
")",
"revision",
"=",
"\"\"",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"split",
")",
":",
"if",
"i",
">",
"0",
"and",
"w",
"in",
"MathService",
".",
"__unaryOperators__",
":",
"last_op",
"=",
"split",
"[",
"i",
"-",
"1",
"]",
"binary",
"=",
"last_op",
"in",
"MathService",
".",
"__binaryOperators__",
"unary",
"=",
"last_op",
"in",
"MathService",
".",
"__unaryOperators__",
"if",
"last_op",
"and",
"not",
"(",
"binary",
"or",
"unary",
")",
":",
"revision",
"+=",
"\" times\"",
"if",
"not",
"revision",
":",
"revision",
"=",
"w",
"else",
":",
"revision",
"+=",
"\" \"",
"+",
"w",
"return",
"revision",
"return",
"findUnaryMultiplications",
"(",
"findConstantMultiplications",
"(",
"inp",
")",
")",
"return",
"findImplicitMultiplications",
"(",
"inp",
")"
] | Revise wording to match canonical and expected forms. | [
"Revise",
"wording",
"to",
"match",
"canonical",
"and",
"expected",
"forms",
"."
] | 46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/solver.py#L66-L123 | train |
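
Rough before/after of the wording rewrites done by _preprocess (a static method, so no instance is needed; assumes the semantic package is installed and the output shown is approximate):

from semantic.solver import MathService

print(MathService._preprocess("seven squared divided by two"))
# roughly: 'seven to two divide two'
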
crm416/semantic | semantic/solver.py | MathService._calculate | def _calculate(numbers, symbols):
"""Calculates a final value given a set of numbers and symbols."""
if len(numbers) is 1:
return numbers[0]
precedence = [[pow], [mul, div], [add, sub]]
# Find most important operation
for op_group in precedence:
for i, op in enumerate(symbols):
if op in op_group:
# Apply operation
a = numbers[i]
b = numbers[i + 1]
result = MathService._applyBinary(a, b, op)
new_numbers = numbers[:i] + [result] + numbers[i + 2:]
new_symbols = symbols[:i] + symbols[i + 1:]
return MathService._calculate(new_numbers, new_symbols) | python | def _calculate(numbers, symbols):
"""Calculates a final value given a set of numbers and symbols."""
if len(numbers) is 1:
return numbers[0]
precedence = [[pow], [mul, div], [add, sub]]
# Find most important operation
for op_group in precedence:
for i, op in enumerate(symbols):
if op in op_group:
# Apply operation
a = numbers[i]
b = numbers[i + 1]
result = MathService._applyBinary(a, b, op)
new_numbers = numbers[:i] + [result] + numbers[i + 2:]
new_symbols = symbols[:i] + symbols[i + 1:]
return MathService._calculate(new_numbers, new_symbols) | [
"def",
"_calculate",
"(",
"numbers",
",",
"symbols",
")",
":",
"if",
"len",
"(",
"numbers",
")",
"is",
"1",
":",
"return",
"numbers",
"[",
"0",
"]",
"precedence",
"=",
"[",
"[",
"pow",
"]",
",",
"[",
"mul",
",",
"div",
"]",
",",
"[",
"add",
",",
"sub",
"]",
"]",
"# Find most important operation",
"for",
"op_group",
"in",
"precedence",
":",
"for",
"i",
",",
"op",
"in",
"enumerate",
"(",
"symbols",
")",
":",
"if",
"op",
"in",
"op_group",
":",
"# Apply operation",
"a",
"=",
"numbers",
"[",
"i",
"]",
"b",
"=",
"numbers",
"[",
"i",
"+",
"1",
"]",
"result",
"=",
"MathService",
".",
"_applyBinary",
"(",
"a",
",",
"b",
",",
"op",
")",
"new_numbers",
"=",
"numbers",
"[",
":",
"i",
"]",
"+",
"[",
"result",
"]",
"+",
"numbers",
"[",
"i",
"+",
"2",
":",
"]",
"new_symbols",
"=",
"symbols",
"[",
":",
"i",
"]",
"+",
"symbols",
"[",
"i",
"+",
"1",
":",
"]",
"return",
"MathService",
".",
"_calculate",
"(",
"new_numbers",
",",
"new_symbols",
")"
] | Calculates a final value given a set of numbers and symbols. | [
"Calculates",
"a",
"final",
"value",
"given",
"a",
"set",
"of",
"numbers",
"and",
"symbols",
"."
] | 46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/solver.py#L126-L144 | train |
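
The recursion above is easiest to see in a stripped-down, self-contained re-implementation (same precedence table, none of the library's own code):

from operator import add, sub, mul, truediv as div, pow

def calculate(numbers, symbols):
    # Apply the highest-precedence operator found, then recurse on the shorter lists.
    if len(numbers) == 1:
        return numbers[0]
    for op_group in [[pow], [mul, div], [add, sub]]:
        for i, op in enumerate(symbols):
            if op in op_group:
                result = op(numbers[i], numbers[i + 1])
                return calculate(numbers[:i] + [result] + numbers[i + 2:],
                                 symbols[:i] + symbols[i + 1:])

print(calculate([2.0, 3.0, 2.0, 4.0], [add, pow, mul]))   # 38.0, i.e. 2 + 3**2 * 4
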
crm416/semantic | semantic/solver.py | MathService.parseEquation | def parseEquation(self, inp):
"""Solves the equation specified by the input string.
Args:
inp (str): An equation, specified in words, containing some
combination of numbers, binary, and unary operations.
Returns:
The floating-point result of carrying out the computation.
"""
inp = MathService._preprocess(inp)
split = inp.split(' ')
# Recursive call on unary operators
for i, w in enumerate(split):
if w in self.__unaryOperators__:
op = self.__unaryOperators__[w]
# Split equation into halves
eq1 = ' '.join(split[:i])
eq2 = ' '.join(split[i + 1:])
# Calculate second half
result = MathService._applyUnary(self.parseEquation(eq2), op)
return self.parseEquation(eq1 + " " + str(result))
def extractNumbersAndSymbols(inp):
numbers = []
symbols = []
# Divide into values (numbers), operators (symbols)
next_number = ""
for w in inp.split(' '):
if w in self.__binaryOperators__:
symbols.append(self.__binaryOperators__[w])
if next_number:
numbers.append(next_number)
next_number = ""
else:
if next_number:
next_number += " "
next_number += w
if next_number:
numbers.append(next_number)
# Cast numbers from words to integers
def convert(n):
if n in self.__constants__:
return self.__constants__[n]
converter = NumberService()
return converter.parse(n)
numbers = [convert(n) for n in numbers]
return numbers, symbols
numbers, symbols = extractNumbersAndSymbols(inp)
return MathService._calculate(numbers, symbols) | python | def parseEquation(self, inp):
"""Solves the equation specified by the input string.
Args:
inp (str): An equation, specified in words, containing some
combination of numbers, binary, and unary operations.
Returns:
The floating-point result of carrying out the computation.
"""
inp = MathService._preprocess(inp)
split = inp.split(' ')
# Recursive call on unary operators
for i, w in enumerate(split):
if w in self.__unaryOperators__:
op = self.__unaryOperators__[w]
# Split equation into halves
eq1 = ' '.join(split[:i])
eq2 = ' '.join(split[i + 1:])
# Calculate second half
result = MathService._applyUnary(self.parseEquation(eq2), op)
return self.parseEquation(eq1 + " " + str(result))
def extractNumbersAndSymbols(inp):
numbers = []
symbols = []
# Divide into values (numbers), operators (symbols)
next_number = ""
for w in inp.split(' '):
if w in self.__binaryOperators__:
symbols.append(self.__binaryOperators__[w])
if next_number:
numbers.append(next_number)
next_number = ""
else:
if next_number:
next_number += " "
next_number += w
if next_number:
numbers.append(next_number)
# Cast numbers from words to integers
def convert(n):
if n in self.__constants__:
return self.__constants__[n]
converter = NumberService()
return converter.parse(n)
numbers = [convert(n) for n in numbers]
return numbers, symbols
numbers, symbols = extractNumbersAndSymbols(inp)
return MathService._calculate(numbers, symbols) | [
"def",
"parseEquation",
"(",
"self",
",",
"inp",
")",
":",
"inp",
"=",
"MathService",
".",
"_preprocess",
"(",
"inp",
")",
"split",
"=",
"inp",
".",
"split",
"(",
"' '",
")",
"# Recursive call on unary operators",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"split",
")",
":",
"if",
"w",
"in",
"self",
".",
"__unaryOperators__",
":",
"op",
"=",
"self",
".",
"__unaryOperators__",
"[",
"w",
"]",
"# Split equation into halves",
"eq1",
"=",
"' '",
".",
"join",
"(",
"split",
"[",
":",
"i",
"]",
")",
"eq2",
"=",
"' '",
".",
"join",
"(",
"split",
"[",
"i",
"+",
"1",
":",
"]",
")",
"# Calculate second half",
"result",
"=",
"MathService",
".",
"_applyUnary",
"(",
"self",
".",
"parseEquation",
"(",
"eq2",
")",
",",
"op",
")",
"return",
"self",
".",
"parseEquation",
"(",
"eq1",
"+",
"\" \"",
"+",
"str",
"(",
"result",
")",
")",
"def",
"extractNumbersAndSymbols",
"(",
"inp",
")",
":",
"numbers",
"=",
"[",
"]",
"symbols",
"=",
"[",
"]",
"# Divide into values (numbers), operators (symbols)",
"next_number",
"=",
"\"\"",
"for",
"w",
"in",
"inp",
".",
"split",
"(",
"' '",
")",
":",
"if",
"w",
"in",
"self",
".",
"__binaryOperators__",
":",
"symbols",
".",
"append",
"(",
"self",
".",
"__binaryOperators__",
"[",
"w",
"]",
")",
"if",
"next_number",
":",
"numbers",
".",
"append",
"(",
"next_number",
")",
"next_number",
"=",
"\"\"",
"else",
":",
"if",
"next_number",
":",
"next_number",
"+=",
"\" \"",
"next_number",
"+=",
"w",
"if",
"next_number",
":",
"numbers",
".",
"append",
"(",
"next_number",
")",
"# Cast numbers from words to integers",
"def",
"convert",
"(",
"n",
")",
":",
"if",
"n",
"in",
"self",
".",
"__constants__",
":",
"return",
"self",
".",
"__constants__",
"[",
"n",
"]",
"converter",
"=",
"NumberService",
"(",
")",
"return",
"converter",
".",
"parse",
"(",
"n",
")",
"numbers",
"=",
"[",
"convert",
"(",
"n",
")",
"for",
"n",
"in",
"numbers",
"]",
"return",
"numbers",
",",
"symbols",
"numbers",
",",
"symbols",
"=",
"extractNumbersAndSymbols",
"(",
"inp",
")",
"return",
"MathService",
".",
"_calculate",
"(",
"numbers",
",",
"symbols",
")"
] | Solves the equation specified by the input string.
Args:
inp (str): An equation, specified in words, containing some
combination of numbers, binary, and unary operations.
Returns:
The floating-point result of carrying out the computation. | [
"Solves",
"the",
"equation",
"specified",
"by",
"the",
"input",
"string",
"."
] | 46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/solver.py#L146-L209 | train |
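
End-to-end sketch (assumes the semantic package and its NumberService dependency are installed; the exact operator words come from the class' __binaryOperators__ map, which is not part of this record, so they are an assumption here):

from semantic.solver import MathService

service = MathService()
print(service.parseEquation("two plus three times four"))   # 14.0 -- 'times' binds tighter than 'plus'
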
useblocks/groundwork | groundwork/patterns/gw_commands_pattern.py | CommandsListPlugin.register | def register(self, command, description, function, params=[]):
"""
Registers a new command for a plugin.
:param command: Name of the command
:param description: Description of the command. Is used as help message on cli
:param function: function reference, which gets invoked if command gets called.
:param params: list of click options and arguments
:return: command object
"""
return self.app.commands.register(command, description, function, params, self.plugin) | python | def register(self, command, description, function, params=[]):
"""
Registers a new command for a plugin.
:param command: Name of the command
:param description: Description of the command. Is used as help message on cli
:param function: function reference, which gets invoked if command gets called.
:param params: list of click options and arguments
:return: command object
"""
return self.app.commands.register(command, description, function, params, self.plugin) | [
"def",
"register",
"(",
"self",
",",
"command",
",",
"description",
",",
"function",
",",
"params",
"=",
"[",
"]",
")",
":",
"return",
"self",
".",
"app",
".",
"commands",
".",
"register",
"(",
"command",
",",
"description",
",",
"function",
",",
"params",
",",
"self",
".",
"plugin",
")"
] | Registers a new command for a plugin.
:param command: Name of the command
:param description: Description of the command. Is used as help message on cli
:param function: function reference, which gets invoked if command gets called.
:param params: list of click options and arguments
:return: command object | [
"Registers",
"a",
"new",
"command",
"for",
"a",
"plugin",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_commands_pattern.py#L80-L90 | train |
useblocks/groundwork | groundwork/patterns/gw_commands_pattern.py | CommandsListPlugin.get | def get(self, name=None):
"""
Returns commands, which can be filtered by name.
:param name: name of the command
:type name: str
:return: None, single command or dict of commands
"""
return self.app.commands.get(name, self.plugin) | python | def get(self, name=None):
"""
Returns commands, which can be filtered by name.
:param name: name of the command
:type name: str
:return: None, single command or dict of commands
"""
return self.app.commands.get(name, self.plugin) | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"return",
"self",
".",
"app",
".",
"commands",
".",
"get",
"(",
"name",
",",
"self",
".",
"plugin",
")"
] | Returns commands, which can be filtered by name.
:param name: name of the command
:type name: str
:return: None, single command or dict of commands | [
"Returns",
"commands",
"which",
"can",
"be",
"filtered",
"by",
"name",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_commands_pattern.py#L101-L109 | train |
useblocks/groundwork | groundwork/patterns/gw_commands_pattern.py | CommandsListApplication.get | def get(self, name=None, plugin=None):
"""
Returns commands, which can be filtered by name or plugin.
:param name: name of the command
:type name: str
:param plugin: plugin object, which registers the commands
:type plugin: instance of GwBasePattern
:return: None, single command or dict of commands
"""
if plugin is not None:
if name is None:
command_list = {}
for key in self._commands.keys():
if self._commands[key].plugin == plugin:
command_list[key] = self._commands[key]
return command_list
else:
if name in self._commands.keys():
if self._commands[name].plugin == plugin:
return self._commands[name]
else:
return None
else:
return None
else:
if name is None:
return self._commands
else:
if name in self._commands.keys():
return self._commands[name]
else:
return None | python | def get(self, name=None, plugin=None):
"""
Returns commands, which can be filtered by name or plugin.
:param name: name of the command
:type name: str
:param plugin: plugin object, which registers the commands
:type plugin: instance of GwBasePattern
:return: None, single command or dict of commands
"""
if plugin is not None:
if name is None:
command_list = {}
for key in self._commands.keys():
if self._commands[key].plugin == plugin:
command_list[key] = self._commands[key]
return command_list
else:
if name in self._commands.keys():
if self._commands[name].plugin == plugin:
return self._commands[name]
else:
return None
else:
return None
else:
if name is None:
return self._commands
else:
if name in self._commands.keys():
return self._commands[name]
else:
return None | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
",",
"plugin",
"=",
"None",
")",
":",
"if",
"plugin",
"is",
"not",
"None",
":",
"if",
"name",
"is",
"None",
":",
"command_list",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"_commands",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"_commands",
"[",
"key",
"]",
".",
"plugin",
"==",
"plugin",
":",
"command_list",
"[",
"key",
"]",
"=",
"self",
".",
"_commands",
"[",
"key",
"]",
"return",
"command_list",
"else",
":",
"if",
"name",
"in",
"self",
".",
"_commands",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"_commands",
"[",
"name",
"]",
".",
"plugin",
"==",
"plugin",
":",
"return",
"self",
".",
"_commands",
"[",
"name",
"]",
"else",
":",
"return",
"None",
"else",
":",
"return",
"None",
"else",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"_commands",
"else",
":",
"if",
"name",
"in",
"self",
".",
"_commands",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"_commands",
"[",
"name",
"]",
"else",
":",
"return",
"None"
] | Returns commands, which can be filtered by name or plugin.
:param name: name of the command
:type name: str
:param plugin: plugin object, which registers the commands
:type plugin: instance of GwBasePattern
:return: None, single command or dict of commands | [
"Returns",
"commands",
"which",
"can",
"be",
"filtered",
"by",
"name",
"or",
"plugin",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_commands_pattern.py#L130-L162 | train |
useblocks/groundwork | groundwork/patterns/gw_commands_pattern.py | CommandsListApplication.unregister | def unregister(self, command):
"""
Unregisters an existing command, so that this command is no longer available on the command line interface.
This function is mainly used during plugin deactivation.
:param command: Name of the command
"""
if command not in self._commands.keys():
self.log.warning("Can not unregister command %s" % command)
else:
# Click does not have any kind of a function to unregister/remove/deactivate already added commands.
# So we need to delete the related objects manually from the click internal commands dictionary for
# our root command.
del(self._click_root_command.commands[command])
# Finally lets delete the command from our internal dictionary too.
del(self._commands[command])
self.log.debug("Command %s got unregistered" % command) | python | def unregister(self, command):
"""
Unregisters an existing command, so that this command is no longer available on the command line interface.
This function is mainly used during plugin deactivation.
:param command: Name of the command
"""
if command not in self._commands.keys():
self.log.warning("Can not unregister command %s" % command)
else:
# Click does not have any kind of a function to unregister/remove/deactivate already added commands.
# So we need to delete the related objects manually from the click internal commands dictionary for
# our root command.
del(self._click_root_command.commands[command])
# Finally lets delete the command from our internal dictionary too.
del(self._commands[command])
self.log.debug("Command %s got unregistered" % command) | [
"def",
"unregister",
"(",
"self",
",",
"command",
")",
":",
"if",
"command",
"not",
"in",
"self",
".",
"_commands",
".",
"keys",
"(",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Can not unregister command %s\"",
"%",
"command",
")",
"else",
":",
"# Click does not have any kind of a function to unregister/remove/deactivate already added commands.",
"# So we need to delete the related objects manually from the click internal commands dictionary for",
"# our root command.",
"del",
"(",
"self",
".",
"_click_root_command",
".",
"commands",
"[",
"command",
"]",
")",
"# Finally lets delete the command from our internal dictionary too.",
"del",
"(",
"self",
".",
"_commands",
"[",
"command",
"]",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Command %s got unregistered\"",
"%",
"command",
")"
] | Unregisters an existing command, so that this command is no longer available on the command line interface.
This function is mainly used during plugin deactivation.
:param command: Name of the command | [
"Unregisters",
"an",
"existing",
"command",
"so",
"that",
"this",
"command",
"is",
"no",
"longer",
"available",
"on",
"the",
"command",
"line",
"interface",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_commands_pattern.py#L185-L202 | train |
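
Hedged sketch of a plugin exercising the commands API above; the GwCommandsPattern base class and the activate/deactivate hooks follow groundwork's usual plugin pattern and are assumptions here, not part of these records:

from groundwork.patterns import GwCommandsPattern

class HelloPlugin(GwCommandsPattern):
    def __init__(self, app, **kwargs):
        self.name = "HelloPlugin"
        super().__init__(app, **kwargs)

    def activate(self):
        # self.commands is the plugin-bound CommandsListPlugin shown above.
        self.commands.register("hello", "Prints a greeting", self._hello)

    def deactivate(self):
        # Application-level unregister, as defined in the record above.
        self.app.commands.unregister("hello")

    def _hello(self):
        print("Hello from groundwork")
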
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/checkers/_caveat.py | declared_caveat | def declared_caveat(key, value):
'''Returns a "declared" caveat asserting that the given key is
set to the given value.
If a macaroon has exactly one first party caveat asserting the value of a
particular key, then infer_declared will be able to infer the value, and
then the check will allow the declared value if it has the value
specified here.
If the key is empty or contains a space, it will return an error caveat.
'''
if key.find(' ') >= 0 or key == '':
return error_caveat('invalid caveat \'declared\' key "{}"'.format(key))
return _first_party(COND_DECLARED, key + ' ' + value) | python | def declared_caveat(key, value):
'''Returns a "declared" caveat asserting that the given key is
set to the given value.
If a macaroon has exactly one first party caveat asserting the value of a
particular key, then infer_declared will be able to infer the value, and
then the check will allow the declared value if it has the value
specified here.
If the key is empty or contains a space, it will return an error caveat.
'''
if key.find(' ') >= 0 or key == '':
return error_caveat('invalid caveat \'declared\' key "{}"'.format(key))
return _first_party(COND_DECLARED, key + ' ' + value) | [
"def",
"declared_caveat",
"(",
"key",
",",
"value",
")",
":",
"if",
"key",
".",
"find",
"(",
"' '",
")",
">=",
"0",
"or",
"key",
"==",
"''",
":",
"return",
"error_caveat",
"(",
"'invalid caveat \\'declared\\' key \"{}\"'",
".",
"format",
"(",
"key",
")",
")",
"return",
"_first_party",
"(",
"COND_DECLARED",
",",
"key",
"+",
"' '",
"+",
"value",
")"
] | Returns a "declared" caveat asserting that the given key is
set to the given value.
If a macaroon has exactly one first party caveat asserting the value of a
particular key, then infer_declared will be able to infer the value, and
then the check will allow the declared value if it has the value
specified here.
If the key is empty or contains a space, it will return an error caveat. | [
"Returns",
"a",
"declared",
"caveat",
"asserting",
"that",
"the",
"given",
"key",
"is",
"set",
"to",
"the",
"given",
"value",
"."
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_caveat.py#L33-L46 | train |
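
Short usage sketch through the public checkers namespace; the printed condition strings are what the implementation above should produce, not verified output:

from macaroonbakery import checkers

cav = checkers.declared_caveat("username", "bob")
print(cav.condition)                                  # 'declared username bob'
bad = checkers.declared_caveat("user name", "bob")    # key contains a space
print(bad.condition)                                  # an error caveat that always fails checking
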
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/checkers/_caveat.py | _operation_caveat | def _operation_caveat(cond, ops):
''' Helper for allow_caveat and deny_caveat.
It checks that all operation names are valid before creating the caveat.
'''
for op in ops:
if op.find(' ') != -1:
return error_caveat('invalid operation name "{}"'.format(op))
return _first_party(cond, ' '.join(ops)) | python | def _operation_caveat(cond, ops):
''' Helper for allow_caveat and deny_caveat.
It checks that all operation names are valid before creating the caveat.
'''
for op in ops:
if op.find(' ') != -1:
return error_caveat('invalid operation name "{}"'.format(op))
return _first_party(cond, ' '.join(ops)) | [
"def",
"_operation_caveat",
"(",
"cond",
",",
"ops",
")",
":",
"for",
"op",
"in",
"ops",
":",
"if",
"op",
".",
"find",
"(",
"' '",
")",
"!=",
"-",
"1",
":",
"return",
"error_caveat",
"(",
"'invalid operation name \"{}\"'",
".",
"format",
"(",
"op",
")",
")",
"return",
"_first_party",
"(",
"cond",
",",
"' '",
".",
"join",
"(",
"ops",
")",
")"
] | Helper for allow_caveat and deny_caveat.
It checks that all operation names are valid before creating the caveat. | [
"Helper",
"for",
"allow_caveat",
"and",
"deny_caveat",
"."
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/checkers/_caveat.py#L81-L89 | train |
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/_utils/__init__.py | to_bytes | def to_bytes(s):
'''Return s as a bytes type, using utf-8 encoding if necessary.
@param s string or bytes
@return bytes
'''
if isinstance(s, six.binary_type):
return s
if isinstance(s, six.string_types):
return s.encode('utf-8')
raise TypeError('want string or bytes, got {}', type(s)) | python | def to_bytes(s):
'''Return s as a bytes type, using utf-8 encoding if necessary.
@param s string or bytes
@return bytes
'''
if isinstance(s, six.binary_type):
return s
if isinstance(s, six.string_types):
return s.encode('utf-8')
raise TypeError('want string or bytes, got {}', type(s)) | [
"def",
"to_bytes",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"s",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"return",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
"raise",
"TypeError",
"(",
"'want string or bytes, got {}'",
",",
"type",
"(",
"s",
")",
")"
] | Return s as a bytes type, using utf-8 encoding if necessary.
@param s string or bytes
@return bytes | [
"Return",
"s",
"as",
"a",
"bytes",
"type",
"using",
"utf",
"-",
"8",
"encoding",
"if",
"necessary",
"."
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/_utils/__init__.py#L18-L27 | train |
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/_utils/__init__.py | b64decode | def b64decode(s):
'''Base64 decodes a base64-encoded string in URL-safe
or normal format, with or without padding.
The argument may be string or bytes.
@param s bytes decode
@return bytes decoded
@raises ValueError on failure
'''
# add padding if necessary.
s = to_bytes(s)
if not s.endswith(b'='):
s = s + b'=' * (-len(s) % 4)
try:
if '_' or '-' in s:
return base64.urlsafe_b64decode(s)
else:
return base64.b64decode(s)
except (TypeError, binascii.Error) as e:
raise ValueError(str(e)) | python | def b64decode(s):
'''Base64 decodes a base64-encoded string in URL-safe
or normal format, with or without padding.
The argument may be string or bytes.
@param s bytes decode
@return bytes decoded
@raises ValueError on failure
'''
# add padding if necessary.
s = to_bytes(s)
if not s.endswith(b'='):
s = s + b'=' * (-len(s) % 4)
try:
if '_' or '-' in s:
return base64.urlsafe_b64decode(s)
else:
return base64.b64decode(s)
except (TypeError, binascii.Error) as e:
raise ValueError(str(e)) | [
"def",
"b64decode",
"(",
"s",
")",
":",
"# add padding if necessary.",
"s",
"=",
"to_bytes",
"(",
"s",
")",
"if",
"not",
"s",
".",
"endswith",
"(",
"b'='",
")",
":",
"s",
"=",
"s",
"+",
"b'='",
"*",
"(",
"-",
"len",
"(",
"s",
")",
"%",
"4",
")",
"try",
":",
"if",
"'_'",
"or",
"'-'",
"in",
"s",
":",
"return",
"base64",
".",
"urlsafe_b64decode",
"(",
"s",
")",
"else",
":",
"return",
"base64",
".",
"b64decode",
"(",
"s",
")",
"except",
"(",
"TypeError",
",",
"binascii",
".",
"Error",
")",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"e",
")",
")"
] | Base64 decodes a base64-encoded string in URL-safe
or normal format, with or without padding.
The argument may be string or bytes.
@param s bytes decode
@return bytes decoded
@raises ValueError on failure | [
"Base64",
"decodes",
"a",
"base64",
"-",
"encoded",
"string",
"in",
"URL",
"-",
"safe",
"or",
"normal",
"format",
"with",
"or",
"without",
"padding",
".",
"The",
"argument",
"may",
"be",
"string",
"or",
"bytes",
"."
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/_utils/__init__.py#L79-L98 | train |
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/_utils/__init__.py | raw_urlsafe_b64encode | def raw_urlsafe_b64encode(b):
'''Base64 encode using URL-safe encoding with padding removed.
@param b bytes to decode
@return bytes decoded
'''
b = to_bytes(b)
b = base64.urlsafe_b64encode(b)
b = b.rstrip(b'=') # strip padding
return b | python | def raw_urlsafe_b64encode(b):
'''Base64 encode using URL-safe encoding with padding removed.
@param b bytes to decode
@return bytes decoded
'''
b = to_bytes(b)
b = base64.urlsafe_b64encode(b)
b = b.rstrip(b'=') # strip padding
return b | [
"def",
"raw_urlsafe_b64encode",
"(",
"b",
")",
":",
"b",
"=",
"to_bytes",
"(",
"b",
")",
"b",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"b",
")",
"b",
"=",
"b",
".",
"rstrip",
"(",
"b'='",
")",
"# strip padding",
"return",
"b"
] | Base64 encode using URL-safe encoding with padding removed.
@param b bytes to decode
@return bytes decoded | [
"Base64",
"encode",
"using",
"URL",
"-",
"safe",
"encoding",
"with",
"padding",
"removed",
"."
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/_utils/__init__.py#L101-L110 | train |
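Round-trip sketch pairing the unpadded URL-safe encoder above with b64decode (same inferred import path):
from macaroonbakery._utils import raw_urlsafe_b64encode, b64decode

enc = raw_urlsafe_b64encode(b'hello')   # b'aGVsbG8' with the trailing '=' stripped
assert b64decode(enc) == b'hello'       # decoding restores the missing padding first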
go-macaroon-bakery/py-macaroon-bakery | macaroonbakery/_utils/__init__.py | cookie | def cookie(
url,
name,
value,
expires=None):
'''Return a new Cookie using a slightly more
friendly API than that provided by six.moves.http_cookiejar
@param name The cookie name {str}
@param value The cookie value {str}
@param url The URL path of the cookie {str}
@param expires The expiry time of the cookie {datetime}. If provided,
it must be a naive timestamp in UTC.
'''
u = urlparse(url)
domain = u.hostname
if '.' not in domain and not _is_ip_addr(domain):
domain += ".local"
port = str(u.port) if u.port is not None else None
secure = u.scheme == 'https'
if expires is not None:
if expires.tzinfo is not None:
raise ValueError('Cookie expiration must be a naive datetime')
expires = (expires - datetime(1970, 1, 1)).total_seconds()
return http_cookiejar.Cookie(
version=0,
name=name,
value=value,
port=port,
port_specified=port is not None,
domain=domain,
domain_specified=True,
domain_initial_dot=False,
path=u.path,
path_specified=True,
secure=secure,
expires=expires,
discard=False,
comment=None,
comment_url=None,
rest=None,
rfc2109=False,
) | python | def cookie(
url,
name,
value,
expires=None):
'''Return a new Cookie using a slightly more
friendly API than that provided by six.moves.http_cookiejar
@param name The cookie name {str}
@param value The cookie value {str}
@param url The URL path of the cookie {str}
@param expires The expiry time of the cookie {datetime}. If provided,
it must be a naive timestamp in UTC.
'''
u = urlparse(url)
domain = u.hostname
if '.' not in domain and not _is_ip_addr(domain):
domain += ".local"
port = str(u.port) if u.port is not None else None
secure = u.scheme == 'https'
if expires is not None:
if expires.tzinfo is not None:
raise ValueError('Cookie expiration must be a naive datetime')
expires = (expires - datetime(1970, 1, 1)).total_seconds()
return http_cookiejar.Cookie(
version=0,
name=name,
value=value,
port=port,
port_specified=port is not None,
domain=domain,
domain_specified=True,
domain_initial_dot=False,
path=u.path,
path_specified=True,
secure=secure,
expires=expires,
discard=False,
comment=None,
comment_url=None,
rest=None,
rfc2109=False,
) | [
"def",
"cookie",
"(",
"url",
",",
"name",
",",
"value",
",",
"expires",
"=",
"None",
")",
":",
"u",
"=",
"urlparse",
"(",
"url",
")",
"domain",
"=",
"u",
".",
"hostname",
"if",
"'.'",
"not",
"in",
"domain",
"and",
"not",
"_is_ip_addr",
"(",
"domain",
")",
":",
"domain",
"+=",
"\".local\"",
"port",
"=",
"str",
"(",
"u",
".",
"port",
")",
"if",
"u",
".",
"port",
"is",
"not",
"None",
"else",
"None",
"secure",
"=",
"u",
".",
"scheme",
"==",
"'https'",
"if",
"expires",
"is",
"not",
"None",
":",
"if",
"expires",
".",
"tzinfo",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Cookie expiration must be a naive datetime'",
")",
"expires",
"=",
"(",
"expires",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
")",
".",
"total_seconds",
"(",
")",
"return",
"http_cookiejar",
".",
"Cookie",
"(",
"version",
"=",
"0",
",",
"name",
"=",
"name",
",",
"value",
"=",
"value",
",",
"port",
"=",
"port",
",",
"port_specified",
"=",
"port",
"is",
"not",
"None",
",",
"domain",
"=",
"domain",
",",
"domain_specified",
"=",
"True",
",",
"domain_initial_dot",
"=",
"False",
",",
"path",
"=",
"u",
".",
"path",
",",
"path_specified",
"=",
"True",
",",
"secure",
"=",
"secure",
",",
"expires",
"=",
"expires",
",",
"discard",
"=",
"False",
",",
"comment",
"=",
"None",
",",
"comment_url",
"=",
"None",
",",
"rest",
"=",
"None",
",",
"rfc2109",
"=",
"False",
",",
")"
] | Return a new Cookie using a slightly more
friendly API than that provided by six.moves.http_cookiejar
@param name The cookie name {str}
@param value The cookie value {str}
@param url The URL path of the cookie {str}
@param expires The expiry time of the cookie {datetime}. If provided,
it must be a naive timestamp in UTC. | [
"Return",
"a",
"new",
"Cookie",
"using",
"a",
"slightly",
"more",
"friendly",
"API",
"than",
"that",
"provided",
"by",
"six",
".",
"moves",
".",
"http_cookiejar"
] | 63ce1ef1dabe816eb8aaec48fbb46761c34ddf77 | https://github.com/go-macaroon-bakery/py-macaroon-bakery/blob/63ce1ef1dabe816eb8aaec48fbb46761c34ddf77/macaroonbakery/_utils/__init__.py#L123-L165 | train |
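Hedged usage sketch for the cookie helper above: the URL, name and value are made up, and the expiry is a naive UTC datetime as the docstring requires.
from datetime import datetime, timedelta
from six.moves import http_cookiejar
from macaroonbakery._utils import cookie

jar = http_cookiejar.CookieJar()
c = cookie(
    url='https://example.com/store',
    name='session',
    value='abc123',
    expires=datetime.utcnow() + timedelta(days=1),  # naive UTC timestamp, as required
)
jar.set_cookie(c)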
swevm/scaleio-py | scaleiopy/im.py | Im._login | def _login(self):
"""
LOGIN CAN ONLY BE DONE BY POSTING TO A HTTP FORM.
A COOKIE IS THEN USED FOR INTERACTING WITH THE API
"""
self.logger.debug("Logging into " + "{}/{}".format(self._im_api_url, "j_spring_security_check"))
self._im_session.headers.update({'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'})
#self._im_session.mount('https://', TLS1Adapter())
#self._im_verify_ssl = False
self.j_username = self._username
self.j_password = self._password
requests.packages.urllib3.disable_warnings() # Disable unverified connection warning.
payload = {'j_username': self.j_username, 'j_password': self.j_password, 'submit':'Login'}
# login to ScaleIO IM
r = self._im_session.post(
"{}/{}".format(self._im_api_url,"j_spring_security_check"),
verify=self._im_verify_ssl,
#headers = {'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'},
data=payload)
self.logger.debug("Login POST response: " + "{}".format(r.text))
self._im_logged_in = True
"""
ADD CODE:
Check if this is IM have existing configuration. If so populate ScaleIO_configurtion_object
""" | python | def _login(self):
"""
LOGIN CAN ONLY BE DONE BY POSTING TO A HTTP FORM.
A COOKIE IS THEN USED FOR INTERACTING WITH THE API
"""
self.logger.debug("Logging into " + "{}/{}".format(self._im_api_url, "j_spring_security_check"))
self._im_session.headers.update({'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'})
#self._im_session.mount('https://', TLS1Adapter())
#self._im_verify_ssl = False
self.j_username = self._username
self.j_password = self._password
requests.packages.urllib3.disable_warnings() # Disable unverified connection warning.
payload = {'j_username': self.j_username, 'j_password': self.j_password, 'submit':'Login'}
# login to ScaleIO IM
r = self._im_session.post(
"{}/{}".format(self._im_api_url,"j_spring_security_check"),
verify=self._im_verify_ssl,
#headers = {'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'},
data=payload)
self.logger.debug("Login POST response: " + "{}".format(r.text))
self._im_logged_in = True
"""
ADD CODE:
Check if this is IM have existing configuration. If so populate ScaleIO_configurtion_object
""" | [
"def",
"_login",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Logging into \"",
"+",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"_im_api_url",
",",
"\"j_spring_security_check\"",
")",
")",
"self",
".",
"_im_session",
".",
"headers",
".",
"update",
"(",
"{",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
",",
"'User-Agent'",
":",
"'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'",
"}",
")",
"#self._im_session.mount('https://', TLS1Adapter())",
"#self._im_verify_ssl = False",
"self",
".",
"j_username",
"=",
"self",
".",
"_username",
"self",
".",
"j_password",
"=",
"self",
".",
"_password",
"requests",
".",
"packages",
".",
"urllib3",
".",
"disable_warnings",
"(",
")",
"# Disable unverified connection warning.",
"payload",
"=",
"{",
"'j_username'",
":",
"self",
".",
"j_username",
",",
"'j_password'",
":",
"self",
".",
"j_password",
",",
"'submit'",
":",
"'Login'",
"}",
"# login to ScaleIO IM",
"r",
"=",
"self",
".",
"_im_session",
".",
"post",
"(",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"_im_api_url",
",",
"\"j_spring_security_check\"",
")",
",",
"verify",
"=",
"self",
".",
"_im_verify_ssl",
",",
"#headers = {'Content-Type':'application/x-www-form-urlencoded', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'},",
"data",
"=",
"payload",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Login POST response: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"r",
".",
"text",
")",
")",
"self",
".",
"_im_logged_in",
"=",
"True",
"\"\"\"\n ADD CODE:\n Check if this is IM have existing configuration. If so populate ScaleIO_configurtion_object\n \"\"\""
] | LOGIN CAN ONLY BE DONE BY POSTING TO A HTTP FORM.
A COOKIE IS THEN USED FOR INTERACTING WITH THE API | [
"LOGIN",
"CAN",
"ONLY",
"BE",
"DONE",
"BY",
"POSTING",
"TO",
"A",
"HTTP",
"FORM",
".",
"A",
"COOKIE",
"IS",
"THEN",
"USED",
"FOR",
"INTERACTING",
"WITH",
"THE",
"API"
] | d043a0137cb925987fd5c895a3210968ce1d9028 | https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/im.py#L107-L134 | train |
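The login above is a plain form POST against the gateway's Spring Security endpoint. A standalone sketch of the same flow with requests follows; the gateway URL and credentials are hypothetical, and on success the session keeps the authentication cookie for later calls.
import requests

session = requests.Session()
resp = session.post(
    'https://im-gateway.example/j_spring_security_check',   # hypothetical gateway URL
    data={'j_username': 'admin', 'j_password': 'secret', 'submit': 'Login'},
    verify=False,   # the class above also skips certificate verification
)
# A successful login sets a session cookie that subsequent API requests reuse.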
swevm/scaleio-py | scaleiopy/im.py | Im._do_get | def _do_get(self, uri, **kwargs):
"""
Convenient method for GET requests
Returns http request status value from a POST request
"""
#TODO:
# Add error handling. Check for HTTP status here would be much more conveinent than in each calling method
scaleioapi_get_headers = {'Content-type':'application/json','Version':'1.0'}
self.logger.debug("_do_get() " + "{}/{}".format(self._api_url,uri))
if kwargs:
for key, value in kwargs.iteritems():
if key == 'headers':
scaleio_get_headersvalue = value
try:
#response = self._im_session.get("{}/{}".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json()
response = self._im_session.get("{}/{}".format(self._api_url, uri), **kwargs).json()
#response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs)
if response.status_code == requests.codes.ok:
return response
else:
raise RuntimeError("_do_get() - HTTP response error" + response.status_code)
except:
raise RuntimeError("_do_get() - Communication error with ScaleIO gateway")
return response | python | def _do_get(self, uri, **kwargs):
"""
Convenient method for GET requests
Returns http request status value from a POST request
"""
#TODO:
# Add error handling. Check for HTTP status here would be much more conveinent than in each calling method
scaleioapi_get_headers = {'Content-type':'application/json','Version':'1.0'}
self.logger.debug("_do_get() " + "{}/{}".format(self._api_url,uri))
if kwargs:
for key, value in kwargs.iteritems():
if key == 'headers':
scaleio_get_headersvalue = value
try:
#response = self._im_session.get("{}/{}".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json()
response = self._im_session.get("{}/{}".format(self._api_url, uri), **kwargs).json()
#response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs)
if response.status_code == requests.codes.ok:
return response
else:
raise RuntimeError("_do_get() - HTTP response error" + response.status_code)
except:
raise RuntimeError("_do_get() - Communication error with ScaleIO gateway")
return response | [
"def",
"_do_get",
"(",
"self",
",",
"uri",
",",
"*",
"*",
"kwargs",
")",
":",
"#TODO:",
"# Add error handling. Check for HTTP status here would be much more conveinent than in each calling method",
"scaleioapi_get_headers",
"=",
"{",
"'Content-type'",
":",
"'application/json'",
",",
"'Version'",
":",
"'1.0'",
"}",
"self",
".",
"logger",
".",
"debug",
"(",
"\"_do_get() \"",
"+",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"_api_url",
",",
"uri",
")",
")",
"if",
"kwargs",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"key",
"==",
"'headers'",
":",
"scaleio_get_headersvalue",
"=",
"value",
"try",
":",
"#response = self._im_session.get(\"{}/{}\".format(self._api_url, uri), headers = scaleioapi_get_headers, payload = scaleio_payload).json()",
"response",
"=",
"self",
".",
"_im_session",
".",
"get",
"(",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"_api_url",
",",
"uri",
")",
",",
"*",
"*",
"kwargs",
")",
".",
"json",
"(",
")",
"#response = self._session.get(url, headers=scaleioapi_post_headers, **kwargs)",
"if",
"response",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"return",
"response",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"_do_get() - HTTP response error\"",
"+",
"response",
".",
"status_code",
")",
"except",
":",
"raise",
"RuntimeError",
"(",
"\"_do_get() - Communication error with ScaleIO gateway\"",
")",
"return",
"response"
] | Convenient method for GET requests
Returns http request status value from a POST request | [
"Convinient",
"method",
"for",
"GET",
"requests",
"Returns",
"http",
"request",
"status",
"value",
"from",
"a",
"POST",
"request"
] | d043a0137cb925987fd5c895a3210968ce1d9028 | https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/im.py#L144-L169 | train |
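In the code above, .json() is applied before status_code is read, so the status check runs against a dict, raises AttributeError and is masked by the bare except. A corrected sketch of the same helper (function and parameter names are illustrative, not part of scaleio-py):
import requests

def do_get(session, api_url, uri, **kwargs):
    response = session.get('{}/{}'.format(api_url, uri), **kwargs)
    if response.status_code != requests.codes.ok:
        raise RuntimeError('HTTP response error {}'.format(response.status_code))
    return response.json()   # decode only after the status check has passed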
swevm/scaleio-py | scaleiopy/im.py | Im.uploadFileToIM | def uploadFileToIM (self, directory, filename, title):
"""
Parameters as they look in the form for uploading packages to IM
"""
self.logger.debug("uploadFileToIM(" + "{},{},{})".format(directory, filename, title))
parameters = {'data-filename-placement':'inside',
'title':str(filename),
'filename':str(filename),
'type':'file',
'name':'files',
'id':'fileToUpload',
'multiple':''
}
file_dict = {'files':(str(filename), open(directory + filename, 'rb'), 'application/x-rpm')}
m = MultipartEncoder(fields=file_dict)
temp_username = self._username
temp_password = self._password
temp_im_api_url = self._im_api_url
temp_im_session = requests.Session()
temp_im_session.mount('https://', TLS1Adapter())
temp_im_verify_ssl = self._im_verify_ssl
resp = temp_im_session.post(
"{}/{}".format(temp_im_api_url,"types/InstallationPackage/instances/uploadPackage"),
auth=HTTPBasicAuth(temp_username, temp_password),
#headers = m.content_type,
files = file_dict,
verify = False,
data = parameters
)
self.logger.info("Uploaded: " + "{}".format(filename))
self.logger.debug("HTTP Response: " + "{}".format(resp.status_code)) | python | def uploadFileToIM (self, directory, filename, title):
"""
Parameters as they look in the form for uploading packages to IM
"""
self.logger.debug("uploadFileToIM(" + "{},{},{})".format(directory, filename, title))
parameters = {'data-filename-placement':'inside',
'title':str(filename),
'filename':str(filename),
'type':'file',
'name':'files',
'id':'fileToUpload',
'multiple':''
}
file_dict = {'files':(str(filename), open(directory + filename, 'rb'), 'application/x-rpm')}
m = MultipartEncoder(fields=file_dict)
temp_username = self._username
temp_password = self._password
temp_im_api_url = self._im_api_url
temp_im_session = requests.Session()
temp_im_session.mount('https://', TLS1Adapter())
temp_im_verify_ssl = self._im_verify_ssl
resp = temp_im_session.post(
"{}/{}".format(temp_im_api_url,"types/InstallationPackage/instances/uploadPackage"),
auth=HTTPBasicAuth(temp_username, temp_password),
#headers = m.content_type,
files = file_dict,
verify = False,
data = parameters
)
self.logger.info("Uploaded: " + "{}".format(filename))
self.logger.debug("HTTP Response: " + "{}".format(resp.status_code)) | [
"def",
"uploadFileToIM",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"title",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"uploadFileToIM(\"",
"+",
"\"{},{},{})\"",
".",
"format",
"(",
"directory",
",",
"filename",
",",
"title",
")",
")",
"parameters",
"=",
"{",
"'data-filename-placement'",
":",
"'inside'",
",",
"'title'",
":",
"str",
"(",
"filename",
")",
",",
"'filename'",
":",
"str",
"(",
"filename",
")",
",",
"'type'",
":",
"'file'",
",",
"'name'",
":",
"'files'",
",",
"'id'",
":",
"'fileToUpload'",
",",
"'multiple'",
":",
"''",
"}",
"file_dict",
"=",
"{",
"'files'",
":",
"(",
"str",
"(",
"filename",
")",
",",
"open",
"(",
"directory",
"+",
"filename",
",",
"'rb'",
")",
",",
"'application/x-rpm'",
")",
"}",
"m",
"=",
"MultipartEncoder",
"(",
"fields",
"=",
"file_dict",
")",
"temp_username",
"=",
"self",
".",
"_username",
"temp_password",
"=",
"self",
".",
"_password",
"temp_im_api_url",
"=",
"self",
".",
"_im_api_url",
"temp_im_session",
"=",
"requests",
".",
"Session",
"(",
")",
"temp_im_session",
".",
"mount",
"(",
"'https://'",
",",
"TLS1Adapter",
"(",
")",
")",
"temp_im_verify_ssl",
"=",
"self",
".",
"_im_verify_ssl",
"resp",
"=",
"temp_im_session",
".",
"post",
"(",
"\"{}/{}\"",
".",
"format",
"(",
"temp_im_api_url",
",",
"\"types/InstallationPackage/instances/uploadPackage\"",
")",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"temp_username",
",",
"temp_password",
")",
",",
"#headers = m.content_type,",
"files",
"=",
"file_dict",
",",
"verify",
"=",
"False",
",",
"data",
"=",
"parameters",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Uploaded: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"filename",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"HTTP Response: \"",
"+",
"\"{}\"",
".",
"format",
"(",
"resp",
".",
"status_code",
")",
")"
] | Parameters as they look in the form for uploading packages to IM | [
"Parameters",
"as",
"they",
"look",
"in",
"the",
"form",
"for",
"uploading",
"packages",
"to",
"IM"
] | d043a0137cb925987fd5c895a3210968ce1d9028 | https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/im.py#L670-L702 | train |
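Standalone sketch of the same multipart upload with requests; the gateway URL, credentials and package name are placeholders. The MultipartEncoder built above is never passed to the request, so a plain files= mapping is sufficient.
import requests
from requests.auth import HTTPBasicAuth

rpm = 'downloads/example-package.rpm'   # hypothetical package path
with open(rpm, 'rb') as fh:
    resp = requests.post(
        'https://im-gateway.example/types/InstallationPackage/instances/uploadPackage',
        auth=HTTPBasicAuth('admin', 'secret'),
        files={'files': ('example-package.rpm', fh, 'application/x-rpm')},
        verify=False,
    )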
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | dump_varint_t | async def dump_varint_t(writer, type_or, pv):
"""
Binary dump of the integer of given type
:param writer:
:param type_or:
:param pv:
:return:
"""
width = int_mark_to_size(type_or)
n = (pv << 2) | type_or
buffer = _UINT_BUFFER
for _ in range(width):
buffer[0] = n & 0xff
await writer.awrite(buffer)
n >>= 8
return width | python | async def dump_varint_t(writer, type_or, pv):
"""
Binary dump of the integer of given type
:param writer:
:param type_or:
:param pv:
:return:
"""
width = int_mark_to_size(type_or)
n = (pv << 2) | type_or
buffer = _UINT_BUFFER
for _ in range(width):
buffer[0] = n & 0xff
await writer.awrite(buffer)
n >>= 8
return width | [
"async",
"def",
"dump_varint_t",
"(",
"writer",
",",
"type_or",
",",
"pv",
")",
":",
"width",
"=",
"int_mark_to_size",
"(",
"type_or",
")",
"n",
"=",
"(",
"pv",
"<<",
"2",
")",
"|",
"type_or",
"buffer",
"=",
"_UINT_BUFFER",
"for",
"_",
"in",
"range",
"(",
"width",
")",
":",
"buffer",
"[",
"0",
"]",
"=",
"n",
"&",
"0xff",
"await",
"writer",
".",
"awrite",
"(",
"buffer",
")",
"n",
">>=",
"8",
"return",
"width"
] | Binary dump of the integer of given type
:param writer:
:param type_or:
:param pv:
:return: | [
"Binary",
"dump",
"of",
"the",
"integer",
"of",
"given",
"type"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L210-L228 | train |
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | dump_varint | async def dump_varint(writer, val):
"""
Binary dump of the variable size integer
:param writer:
:param val:
:return:
"""
if val <= 63:
return await dump_varint_t(writer, PortableRawSizeMark.BYTE, val)
elif val <= 16383:
return await dump_varint_t(writer, PortableRawSizeMark.WORD, val)
elif val <= 1073741823:
return await dump_varint_t(writer, PortableRawSizeMark.DWORD, val)
else:
if val > 4611686018427387903:
raise ValueError('Int too big')
return await dump_varint_t(writer, PortableRawSizeMark.INT64, val) | python | async def dump_varint(writer, val):
"""
Binary dump of the variable size integer
:param writer:
:param val:
:return:
"""
if val <= 63:
return await dump_varint_t(writer, PortableRawSizeMark.BYTE, val)
elif val <= 16383:
return await dump_varint_t(writer, PortableRawSizeMark.WORD, val)
elif val <= 1073741823:
return await dump_varint_t(writer, PortableRawSizeMark.DWORD, val)
else:
if val > 4611686018427387903:
raise ValueError('Int too big')
return await dump_varint_t(writer, PortableRawSizeMark.INT64, val) | [
"async",
"def",
"dump_varint",
"(",
"writer",
",",
"val",
")",
":",
"if",
"val",
"<=",
"63",
":",
"return",
"await",
"dump_varint_t",
"(",
"writer",
",",
"PortableRawSizeMark",
".",
"BYTE",
",",
"val",
")",
"elif",
"val",
"<=",
"16383",
":",
"return",
"await",
"dump_varint_t",
"(",
"writer",
",",
"PortableRawSizeMark",
".",
"WORD",
",",
"val",
")",
"elif",
"val",
"<=",
"1073741823",
":",
"return",
"await",
"dump_varint_t",
"(",
"writer",
",",
"PortableRawSizeMark",
".",
"DWORD",
",",
"val",
")",
"else",
":",
"if",
"val",
">",
"4611686018427387903",
":",
"raise",
"ValueError",
"(",
"'Int too big'",
")",
"return",
"await",
"dump_varint_t",
"(",
"writer",
",",
"PortableRawSizeMark",
".",
"INT64",
",",
"val",
")"
] | Binary dump of the variable size integer
:param writer:
:param val:
:return: | [
"Binary",
"dump",
"of",
"the",
"variable",
"size",
"integer"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L231-L248 | train |
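Self-contained illustration of the tag-in-low-two-bits varint scheme used by dump_varint_t, dump_varint and load_varint, assuming the usual epee marks (BYTE=0, WORD=1, DWORD=2, INT64=3) and a byte width of 1 << mark:
def encode_varint(v):
    for mark, limit in ((0, 63), (1, 16383), (2, 1073741823), (3, 4611686018427387903)):
        if v <= limit:
            return ((v << 2) | mark).to_bytes(1 << mark, 'little')
    raise ValueError('Int too big')

def decode_varint(raw):
    width = 1 << (raw[0] & 0x03)             # low two bits select the byte width
    return int.from_bytes(raw[:width], 'little') >> 2

assert len(encode_varint(42)) == 1 and decode_varint(encode_varint(42)) == 42
assert len(encode_varint(300)) == 2 and decode_varint(encode_varint(300)) == 300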
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | load_varint | async def load_varint(reader):
"""
Binary load of variable size integer serialized by dump_varint
:param reader:
:return:
"""
buffer = _UINT_BUFFER
await reader.areadinto(buffer)
width = int_mark_to_size(buffer[0] & PortableRawSizeMark.MASK)
result = buffer[0]
shift = 8
for _ in range(width-1):
await reader.areadinto(buffer)
result += buffer[0] << shift
shift += 8
return result >> 2 | python | async def load_varint(reader):
"""
Binary load of variable size integer serialized by dump_varint
:param reader:
:return:
"""
buffer = _UINT_BUFFER
await reader.areadinto(buffer)
width = int_mark_to_size(buffer[0] & PortableRawSizeMark.MASK)
result = buffer[0]
shift = 8
for _ in range(width-1):
await reader.areadinto(buffer)
result += buffer[0] << shift
shift += 8
return result >> 2 | [
"async",
"def",
"load_varint",
"(",
"reader",
")",
":",
"buffer",
"=",
"_UINT_BUFFER",
"await",
"reader",
".",
"areadinto",
"(",
"buffer",
")",
"width",
"=",
"int_mark_to_size",
"(",
"buffer",
"[",
"0",
"]",
"&",
"PortableRawSizeMark",
".",
"MASK",
")",
"result",
"=",
"buffer",
"[",
"0",
"]",
"shift",
"=",
"8",
"for",
"_",
"in",
"range",
"(",
"width",
"-",
"1",
")",
":",
"await",
"reader",
".",
"areadinto",
"(",
"buffer",
")",
"result",
"+=",
"buffer",
"[",
"0",
"]",
"<<",
"shift",
"shift",
"+=",
"8",
"return",
"result",
">>",
"2"
] | Binary load of variable size integer serialized by dump_varint
:param reader:
:return: | [
"Binary",
"load",
"of",
"variable",
"size",
"integer",
"serialized",
"by",
"dump_varint"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L251-L269 | train |
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | dump_string | async def dump_string(writer, val):
"""
Binary string dump
:param writer:
:param val:
:return:
"""
await dump_varint(writer, len(val))
await writer.awrite(val) | python | async def dump_string(writer, val):
"""
Binary string dump
:param writer:
:param val:
:return:
"""
await dump_varint(writer, len(val))
await writer.awrite(val) | [
"async",
"def",
"dump_string",
"(",
"writer",
",",
"val",
")",
":",
"await",
"dump_varint",
"(",
"writer",
",",
"len",
"(",
"val",
")",
")",
"await",
"writer",
".",
"awrite",
"(",
"val",
")"
] | Binary string dump
:param writer:
:param val:
:return: | [
"Binary",
"string",
"dump"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L272-L281 | train |
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | load_string | async def load_string(reader):
"""
Loads string from binary stream
:param reader:
:return:
"""
ivalue = await load_varint(reader)
fvalue = bytearray(ivalue)
await reader.areadinto(fvalue)
return bytes(fvalue) | python | async def load_string(reader):
"""
Loads string from binary stream
:param reader:
:return:
"""
ivalue = await load_varint(reader)
fvalue = bytearray(ivalue)
await reader.areadinto(fvalue)
return bytes(fvalue) | [
"async",
"def",
"load_string",
"(",
"reader",
")",
":",
"ivalue",
"=",
"await",
"load_varint",
"(",
"reader",
")",
"fvalue",
"=",
"bytearray",
"(",
"ivalue",
")",
"await",
"reader",
".",
"areadinto",
"(",
"fvalue",
")",
"return",
"bytes",
"(",
"fvalue",
")"
] | Loads string from binary stream
:param reader:
:return: | [
"Loads",
"string",
"from",
"binary",
"stream"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L284-L294 | train |
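Byte-level view of the framing used by dump_string and load_string: a varint length followed by the raw bytes. A short string fits the one-byte (BYTE mark) length form:
payload = b'monero'
encoded = bytes([len(payload) << 2]) + payload   # length 6, BYTE mark 0 -> single byte 0x18
assert encoded[0] >> 2 == len(payload)
assert encoded[1:] == payload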
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | dump_blob | async def dump_blob(writer, elem, elem_type, params=None):
"""
Dumps blob to a binary stream
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_is_blob = isinstance(elem, x.BlobType)
data = bytes(getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem)
await dump_varint(writer, len(elem))
await writer.awrite(data) | python | async def dump_blob(writer, elem, elem_type, params=None):
"""
Dumps blob to a binary stream
:param writer:
:param elem:
:param elem_type:
:param params:
:return:
"""
elem_is_blob = isinstance(elem, x.BlobType)
data = bytes(getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem)
await dump_varint(writer, len(elem))
await writer.awrite(data) | [
"async",
"def",
"dump_blob",
"(",
"writer",
",",
"elem",
",",
"elem_type",
",",
"params",
"=",
"None",
")",
":",
"elem_is_blob",
"=",
"isinstance",
"(",
"elem",
",",
"x",
".",
"BlobType",
")",
"data",
"=",
"bytes",
"(",
"getattr",
"(",
"elem",
",",
"x",
".",
"BlobType",
".",
"DATA_ATTR",
")",
"if",
"elem_is_blob",
"else",
"elem",
")",
"await",
"dump_varint",
"(",
"writer",
",",
"len",
"(",
"elem",
")",
")",
"await",
"writer",
".",
"awrite",
"(",
"data",
")"
] | Dumps blob to a binary stream
:param writer:
:param elem:
:param elem_type:
:param params:
:return: | [
"Dumps",
"blob",
"to",
"a",
"binary",
"stream"
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L297-L310 | train |
ph4r05/monero-serialize | monero_serialize/xmrrpc.py | Blobber.container_load | async def container_load(self, container_type, params=None, container=None, obj=None):
"""
Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
Blob array writer as in XMRRPC is serialized without size serialization.
:param container_type:
:param params:
:param container:
:param obj:
:return:
"""
elem_type = x.container_elem_type(container_type, params)
elem_size = await self.get_element_size(elem_type=elem_type, params=params)
# If container is of fixed size we know the size to load from the input.
# Otherwise we have to read to the end
data_left = len(self.iobj.buffer)
c_len = container_type.SIZE
if not container_type.FIX_SIZE:
if data_left == 0:
return None
if data_left % elem_size != 0:
raise helpers.ArchiveException('Container size mod elem size not 0')
c_len = data_left // elem_size
res = container if container else []
for i in range(c_len):
try:
self.tracker.push_index(i)
fvalue = await self._load_field(elem_type,
params[1:] if params else None,
x.eref(res, i) if container else None)
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not container:
res.append(fvalue)
return res | python | async def container_load(self, container_type, params=None, container=None, obj=None):
"""
Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
Blob array writer as in XMRRPC is serialized without size serialization.
:param container_type:
:param params:
:param container:
:param obj:
:return:
"""
elem_type = x.container_elem_type(container_type, params)
elem_size = await self.get_element_size(elem_type=elem_type, params=params)
# If container is of fixed size we know the size to load from the input.
# Otherwise we have to read to the end
data_left = len(self.iobj.buffer)
c_len = container_type.SIZE
if not container_type.FIX_SIZE:
if data_left == 0:
return None
if data_left % elem_size != 0:
raise helpers.ArchiveException('Container size mod elem size not 0')
c_len = data_left // elem_size
res = container if container else []
for i in range(c_len):
try:
self.tracker.push_index(i)
fvalue = await self._load_field(elem_type,
params[1:] if params else None,
x.eref(res, i) if container else None)
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not container:
res.append(fvalue)
return res | [
"async",
"def",
"container_load",
"(",
"self",
",",
"container_type",
",",
"params",
"=",
"None",
",",
"container",
"=",
"None",
",",
"obj",
"=",
"None",
")",
":",
"elem_type",
"=",
"x",
".",
"container_elem_type",
"(",
"container_type",
",",
"params",
")",
"elem_size",
"=",
"await",
"self",
".",
"get_element_size",
"(",
"elem_type",
"=",
"elem_type",
",",
"params",
"=",
"params",
")",
"# If container is of fixed size we know the size to load from the input.",
"# Otherwise we have to read to the end",
"data_left",
"=",
"len",
"(",
"self",
".",
"iobj",
".",
"buffer",
")",
"c_len",
"=",
"container_type",
".",
"SIZE",
"if",
"not",
"container_type",
".",
"FIX_SIZE",
":",
"if",
"data_left",
"==",
"0",
":",
"return",
"None",
"if",
"data_left",
"%",
"elem_size",
"!=",
"0",
":",
"raise",
"helpers",
".",
"ArchiveException",
"(",
"'Container size mod elem size not 0'",
")",
"c_len",
"=",
"data_left",
"//",
"elem_size",
"res",
"=",
"container",
"if",
"container",
"else",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"c_len",
")",
":",
"try",
":",
"self",
".",
"tracker",
".",
"push_index",
"(",
"i",
")",
"fvalue",
"=",
"await",
"self",
".",
"_load_field",
"(",
"elem_type",
",",
"params",
"[",
"1",
":",
"]",
"if",
"params",
"else",
"None",
",",
"x",
".",
"eref",
"(",
"res",
",",
"i",
")",
"if",
"container",
"else",
"None",
")",
"self",
".",
"tracker",
".",
"pop",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"helpers",
".",
"ArchiveException",
"(",
"e",
",",
"tracker",
"=",
"self",
".",
"tracker",
")",
"from",
"e",
"if",
"not",
"container",
":",
"res",
".",
"append",
"(",
"fvalue",
")",
"return",
"res"
] | Loads container of elements from the reader. Supports the container ref.
Returns loaded container.
Blob array writer as in XMRRPC is serialized without size serialization.
:param container_type:
:param params:
:param container:
:param obj:
:return: | [
"Loads",
"container",
"of",
"elements",
"from",
"the",
"reader",
".",
"Supports",
"the",
"container",
"ref",
".",
"Returns",
"loaded",
"container",
".",
"Blob",
"array",
"writer",
"as",
"in",
"XMRRPC",
"is",
"serialized",
"without",
"size",
"serialization",
"."
] | cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42 | https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrrpc.py#L682-L721 | train |
jenisys/parse_type | bin/make_localpi.py | make_index_for | def make_index_for(package, index_dir, verbose=True):
"""
Create an 'index.html' for one package.
:param package: Package object to use.
:param index_dir: Where 'index.html' should be created.
"""
index_template = """\
<html>
<head><title>{title}</title></head>
<body>
<h1>{title}</h1>
<ul>
{packages}
</ul>
</body>
</html>
"""
item_template = '<li><a href="{1}">{0}</a></li>'
index_filename = os.path.join(index_dir, "index.html")
if not os.path.isdir(index_dir):
os.makedirs(index_dir)
parts = []
for pkg_filename in package.files:
pkg_name = os.path.basename(pkg_filename)
if pkg_name == "index.html":
# -- ROOT-INDEX:
pkg_name = os.path.basename(os.path.dirname(pkg_filename))
else:
pkg_name = package.splitext(pkg_name)
pkg_relpath_to = os.path.relpath(pkg_filename, index_dir)
parts.append(item_template.format(pkg_name, pkg_relpath_to))
if not parts:
print("OOPS: Package %s has no files" % package.name)
return
if verbose:
root_index = not Package.isa(package.files[0])
if root_index:
info = "with %d package(s)" % len(package.files)
else:
package_versions = sorted(set(package.versions))
info = ", ".join(reversed(package_versions))
message = "%-30s %s" % (package.name, info)
print(message)
with open(index_filename, "w") as f:
packages = "\n".join(parts)
text = index_template.format(title=package.name, packages=packages)
f.write(text.strip())
f.close() | python | def make_index_for(package, index_dir, verbose=True):
"""
Create an 'index.html' for one package.
:param package: Package object to use.
:param index_dir: Where 'index.html' should be created.
"""
index_template = """\
<html>
<head><title>{title}</title></head>
<body>
<h1>{title}</h1>
<ul>
{packages}
</ul>
</body>
</html>
"""
item_template = '<li><a href="{1}">{0}</a></li>'
index_filename = os.path.join(index_dir, "index.html")
if not os.path.isdir(index_dir):
os.makedirs(index_dir)
parts = []
for pkg_filename in package.files:
pkg_name = os.path.basename(pkg_filename)
if pkg_name == "index.html":
# -- ROOT-INDEX:
pkg_name = os.path.basename(os.path.dirname(pkg_filename))
else:
pkg_name = package.splitext(pkg_name)
pkg_relpath_to = os.path.relpath(pkg_filename, index_dir)
parts.append(item_template.format(pkg_name, pkg_relpath_to))
if not parts:
print("OOPS: Package %s has no files" % package.name)
return
if verbose:
root_index = not Package.isa(package.files[0])
if root_index:
info = "with %d package(s)" % len(package.files)
else:
package_versions = sorted(set(package.versions))
info = ", ".join(reversed(package_versions))
message = "%-30s %s" % (package.name, info)
print(message)
with open(index_filename, "w") as f:
packages = "\n".join(parts)
text = index_template.format(title=package.name, packages=packages)
f.write(text.strip())
f.close() | [
"def",
"make_index_for",
"(",
"package",
",",
"index_dir",
",",
"verbose",
"=",
"True",
")",
":",
"index_template",
"=",
"\"\"\"\\\n<html>\n<head><title>{title}</title></head>\n<body>\n<h1>{title}</h1>\n<ul>\n{packages}\n</ul>\n</body>\n</html>\n\"\"\"",
"item_template",
"=",
"'<li><a href=\"{1}\">{0}</a></li>'",
"index_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"index_dir",
",",
"\"index.html\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"index_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"index_dir",
")",
"parts",
"=",
"[",
"]",
"for",
"pkg_filename",
"in",
"package",
".",
"files",
":",
"pkg_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"pkg_filename",
")",
"if",
"pkg_name",
"==",
"\"index.html\"",
":",
"# -- ROOT-INDEX:",
"pkg_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"pkg_filename",
")",
")",
"else",
":",
"pkg_name",
"=",
"package",
".",
"splitext",
"(",
"pkg_name",
")",
"pkg_relpath_to",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"pkg_filename",
",",
"index_dir",
")",
"parts",
".",
"append",
"(",
"item_template",
".",
"format",
"(",
"pkg_name",
",",
"pkg_relpath_to",
")",
")",
"if",
"not",
"parts",
":",
"print",
"(",
"\"OOPS: Package %s has no files\"",
"%",
"package",
".",
"name",
")",
"return",
"if",
"verbose",
":",
"root_index",
"=",
"not",
"Package",
".",
"isa",
"(",
"package",
".",
"files",
"[",
"0",
"]",
")",
"if",
"root_index",
":",
"info",
"=",
"\"with %d package(s)\"",
"%",
"len",
"(",
"package",
".",
"files",
")",
"else",
":",
"package_versions",
"=",
"sorted",
"(",
"set",
"(",
"package",
".",
"versions",
")",
")",
"info",
"=",
"\", \"",
".",
"join",
"(",
"reversed",
"(",
"package_versions",
")",
")",
"message",
"=",
"\"%-30s %s\"",
"%",
"(",
"package",
".",
"name",
",",
"info",
")",
"print",
"(",
"message",
")",
"with",
"open",
"(",
"index_filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"packages",
"=",
"\"\\n\"",
".",
"join",
"(",
"parts",
")",
"text",
"=",
"index_template",
".",
"format",
"(",
"title",
"=",
"package",
".",
"name",
",",
"packages",
"=",
"packages",
")",
"f",
".",
"write",
"(",
"text",
".",
"strip",
"(",
")",
")",
"f",
".",
"close",
"(",
")"
] | Create an 'index.html' for one package.
:param package: Package object to use.
:param index_dir: Where 'index.html' should be created. | [
"Create",
"an",
"index",
".",
"html",
"for",
"one",
"package",
"."
] | 7cad3a67a5ca725cb786da31f656fd473084289f | https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/bin/make_localpi.py#L118-L170 | train |
jenisys/parse_type | bin/make_localpi.py | make_package_index | def make_package_index(download_dir):
"""
Create a pypi server like file structure below download directory.
:param download_dir: Download directory with packages.
EXAMPLE BEFORE:
+-- downloads/
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2
EXAMPLE AFTERWARDS:
+-- downloads/
+-- simple/
| +-- alice/index.html --> ../../alice-*.*
| +-- bob/index.html --> ../../bob-*.*
| +-- charly/index.html --> ../../charly-*.*
| +-- index.html --> alice/index.html, bob/index.html, ...
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2
"""
if not os.path.isdir(download_dir):
raise ValueError("No such directory: %r" % download_dir)
pkg_rootdir = os.path.join(download_dir, "simple")
if os.path.isdir(pkg_rootdir):
shutil.rmtree(pkg_rootdir, ignore_errors=True)
os.mkdir(pkg_rootdir)
# -- STEP: Collect all packages.
package_map = {}
packages = []
for filename in sorted(os.listdir(download_dir)):
if not Package.isa(filename):
continue
pkg_filepath = os.path.join(download_dir, filename)
package_name = Package.get_pkgname(pkg_filepath)
package = package_map.get(package_name, None)
if not package:
# -- NEW PACKAGE DETECTED: Store/register package.
package = Package(pkg_filepath)
package_map[package.name] = package
packages.append(package)
else:
# -- SAME PACKAGE: Collect other variant/version.
package.files.append(pkg_filepath)
# -- STEP: Make local PYTHON PACKAGE INDEX.
root_package = Package(None, "Python Package Index")
root_package.files = [ os.path.join(pkg_rootdir, pkg.name, "index.html")
for pkg in packages ]
make_index_for(root_package, pkg_rootdir)
for package in packages:
index_dir = os.path.join(pkg_rootdir, package.name)
make_index_for(package, index_dir) | python | def make_package_index(download_dir):
"""
Create a pypi server like file structure below download directory.
:param download_dir: Download directory with packages.
EXAMPLE BEFORE:
+-- downloads/
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2
EXAMPLE AFTERWARDS:
+-- downloads/
+-- simple/
| +-- alice/index.html --> ../../alice-*.*
| +-- bob/index.html --> ../../bob-*.*
| +-- charly/index.html --> ../../charly-*.*
| +-- index.html --> alice/index.html, bob/index.html, ...
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2
"""
if not os.path.isdir(download_dir):
raise ValueError("No such directory: %r" % download_dir)
pkg_rootdir = os.path.join(download_dir, "simple")
if os.path.isdir(pkg_rootdir):
shutil.rmtree(pkg_rootdir, ignore_errors=True)
os.mkdir(pkg_rootdir)
# -- STEP: Collect all packages.
package_map = {}
packages = []
for filename in sorted(os.listdir(download_dir)):
if not Package.isa(filename):
continue
pkg_filepath = os.path.join(download_dir, filename)
package_name = Package.get_pkgname(pkg_filepath)
package = package_map.get(package_name, None)
if not package:
# -- NEW PACKAGE DETECTED: Store/register package.
package = Package(pkg_filepath)
package_map[package.name] = package
packages.append(package)
else:
# -- SAME PACKAGE: Collect other variant/version.
package.files.append(pkg_filepath)
# -- STEP: Make local PYTHON PACKAGE INDEX.
root_package = Package(None, "Python Package Index")
root_package.files = [ os.path.join(pkg_rootdir, pkg.name, "index.html")
for pkg in packages ]
make_index_for(root_package, pkg_rootdir)
for package in packages:
index_dir = os.path.join(pkg_rootdir, package.name)
make_index_for(package, index_dir) | [
"def",
"make_package_index",
"(",
"download_dir",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"download_dir",
")",
":",
"raise",
"ValueError",
"(",
"\"No such directory: %r\"",
"%",
"download_dir",
")",
"pkg_rootdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"download_dir",
",",
"\"simple\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"pkg_rootdir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"pkg_rootdir",
",",
"ignore_errors",
"=",
"True",
")",
"os",
".",
"mkdir",
"(",
"pkg_rootdir",
")",
"# -- STEP: Collect all packages.",
"package_map",
"=",
"{",
"}",
"packages",
"=",
"[",
"]",
"for",
"filename",
"in",
"sorted",
"(",
"os",
".",
"listdir",
"(",
"download_dir",
")",
")",
":",
"if",
"not",
"Package",
".",
"isa",
"(",
"filename",
")",
":",
"continue",
"pkg_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"download_dir",
",",
"filename",
")",
"package_name",
"=",
"Package",
".",
"get_pkgname",
"(",
"pkg_filepath",
")",
"package",
"=",
"package_map",
".",
"get",
"(",
"package_name",
",",
"None",
")",
"if",
"not",
"package",
":",
"# -- NEW PACKAGE DETECTED: Store/register package.",
"package",
"=",
"Package",
"(",
"pkg_filepath",
")",
"package_map",
"[",
"package",
".",
"name",
"]",
"=",
"package",
"packages",
".",
"append",
"(",
"package",
")",
"else",
":",
"# -- SAME PACKAGE: Collect other variant/version.",
"package",
".",
"files",
".",
"append",
"(",
"pkg_filepath",
")",
"# -- STEP: Make local PYTHON PACKAGE INDEX.",
"root_package",
"=",
"Package",
"(",
"None",
",",
"\"Python Package Index\"",
")",
"root_package",
".",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"pkg_rootdir",
",",
"pkg",
".",
"name",
",",
"\"index.html\"",
")",
"for",
"pkg",
"in",
"packages",
"]",
"make_index_for",
"(",
"root_package",
",",
"pkg_rootdir",
")",
"for",
"package",
"in",
"packages",
":",
"index_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pkg_rootdir",
",",
"package",
".",
"name",
")",
"make_index_for",
"(",
"package",
",",
"index_dir",
")"
] | Create a pypi server like file structure below download directory.
:param download_dir: Download directory with packages.
EXAMPLE BEFORE:
+-- downloads/
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2
EXAMPLE AFTERWARDS:
+-- downloads/
+-- simple/
| +-- alice/index.html --> ../../alice-*.*
| +-- bob/index.html --> ../../bob-*.*
| +-- charly/index.html --> ../../charly-*.*
| +-- index.html --> alice/index.html, bob/index.html, ...
+-- alice-1.0.zip
+-- alice-1.0.tar.gz
+-- bob-1.3.0.tar.gz
+-- bob-1.4.2.tar.gz
+-- charly-1.0.tar.bz2 | [
"Create",
"a",
"pypi",
"server",
"like",
"file",
"structure",
"below",
"download",
"directory",
"."
] | 7cad3a67a5ca725cb786da31f656fd473084289f | https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/bin/make_localpi.py#L173-L233 | train |
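Hypothetical end-to-end sketch: download distributions into downloads/, build the local "simple/" index with the function above, then install from it. The package name is only an example.
import subprocess, sys

subprocess.check_call([sys.executable, '-m', 'pip', 'download', '-d', 'downloads', 'parse_type'])
make_package_index('downloads')   # creates downloads/simple/<pkg>/index.html entries
# afterwards: pip install --index-url file:///abs/path/to/downloads/simple parse_type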
dbarsam/python-vsgen | vsgen/util/config.py | VSGConfigParser._convert_to_list | def _convert_to_list(self, value, delimiters):
"""
Return a list value translating from other types if necessary.
:param str value: The value to convert.
"""
if not value:
return []
if delimiters:
return [l.strip() for l in value.split(delimiters)]
return [l.strip() for l in value.split()] | python | def _convert_to_list(self, value, delimiters):
"""
Return a list value translating from other types if necessary.
:param str value: The value to convert.
"""
if not value:
return []
if delimiters:
return [l.strip() for l in value.split(delimiters)]
return [l.strip() for l in value.split()] | [
"def",
"_convert_to_list",
"(",
"self",
",",
"value",
",",
"delimiters",
")",
":",
"if",
"not",
"value",
":",
"return",
"[",
"]",
"if",
"delimiters",
":",
"return",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"value",
".",
"split",
"(",
"delimiters",
")",
"]",
"return",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"value",
".",
"split",
"(",
")",
"]"
] | Return a list value translating from other types if necessary.
:param str value: The value to convert. | [
"Return",
"a",
"list",
"value",
"translating",
"from",
"other",
"types",
"if",
"necessary",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/config.py#L30-L40 | train |
dbarsam/python-vsgen | vsgen/util/config.py | VSGConfigParser.getlist | def getlist(self, section, option, raw=False, vars=None, fallback=[], delimiters=','):
"""
A convenience method which coerces the option in the specified section to a list of strings.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
return self._convert_to_list(v, delimiters=delimiters) | python | def getlist(self, section, option, raw=False, vars=None, fallback=[], delimiters=','):
"""
A convenience method which coerces the option in the specified section to a list of strings.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
return self._convert_to_list(v, delimiters=delimiters) | [
"def",
"getlist",
"(",
"self",
",",
"section",
",",
"option",
",",
"raw",
"=",
"False",
",",
"vars",
"=",
"None",
",",
"fallback",
"=",
"[",
"]",
",",
"delimiters",
"=",
"','",
")",
":",
"v",
"=",
"self",
".",
"get",
"(",
"section",
",",
"option",
",",
"raw",
"=",
"raw",
",",
"vars",
"=",
"vars",
",",
"fallback",
"=",
"fallback",
")",
"return",
"self",
".",
"_convert_to_list",
"(",
"v",
",",
"delimiters",
"=",
"delimiters",
")"
] | A convenience method which coerces the option in the specified section to a list of strings. | [
"A",
"convenience",
"method",
"which",
"coerces",
"the",
"option",
"in",
"the",
"specified",
"section",
"to",
"a",
"list",
"of",
"strings",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/config.py#L50-L55 | train |
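Behaviour sketch for the list coercion above, assuming VSGConfigParser extends the standard configparser.ConfigParser so read_string is available; section and option names are made up.
parser = VSGConfigParser()
parser.read_string('[project]\ndirs = src, tests , docs\n')
assert parser.getlist('project', 'dirs') == ['src', 'tests', 'docs']
assert parser.getlist('project', 'missing') == []   # falls back to the default empty list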
dbarsam/python-vsgen | vsgen/util/config.py | VSGConfigParser.getfile | def getfile(self, section, option, raw=False, vars=None, fallback="", validate=False):
"""
A convenience method which coerces the option in the specified section to a file.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
v = self._convert_to_path(v)
return v if not validate or os.path.isfile(v) else fallback | python | def getfile(self, section, option, raw=False, vars=None, fallback="", validate=False):
"""
A convenience method which coerces the option in the specified section to a file.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
v = self._convert_to_path(v)
return v if not validate or os.path.isfile(v) else fallback | [
"def",
"getfile",
"(",
"self",
",",
"section",
",",
"option",
",",
"raw",
"=",
"False",
",",
"vars",
"=",
"None",
",",
"fallback",
"=",
"\"\"",
",",
"validate",
"=",
"False",
")",
":",
"v",
"=",
"self",
".",
"get",
"(",
"section",
",",
"option",
",",
"raw",
"=",
"raw",
",",
"vars",
"=",
"vars",
",",
"fallback",
"=",
"fallback",
")",
"v",
"=",
"self",
".",
"_convert_to_path",
"(",
"v",
")",
"return",
"v",
"if",
"not",
"validate",
"or",
"os",
".",
"path",
".",
"isfile",
"(",
"v",
")",
"else",
"fallback"
] | A convenience method which coerces the option in the specified section to a file. | [
"A",
"convenience",
"method",
"which",
"coerces",
"the",
"option",
"in",
"the",
"specified",
"section",
"to",
"a",
"file",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/config.py#L57-L63 | train |
dbarsam/python-vsgen | vsgen/util/config.py | VSGConfigParser.getdir | def getdir(self, section, option, raw=False, vars=None, fallback="", validate=False):
"""
A convenience method which coerces the option in the specified section to a directory.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
v = self._convert_to_path(v)
return v if not validate or os.path.isdir(v) else fallback | python | def getdir(self, section, option, raw=False, vars=None, fallback="", validate=False):
"""
A convenience method which coerces the option in the specified section to a directory.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
v = self._convert_to_path(v)
return v if not validate or os.path.isdir(v) else fallback | [
"def",
"getdir",
"(",
"self",
",",
"section",
",",
"option",
",",
"raw",
"=",
"False",
",",
"vars",
"=",
"None",
",",
"fallback",
"=",
"\"\"",
",",
"validate",
"=",
"False",
")",
":",
"v",
"=",
"self",
".",
"get",
"(",
"section",
",",
"option",
",",
"raw",
"=",
"raw",
",",
"vars",
"=",
"vars",
",",
"fallback",
"=",
"fallback",
")",
"v",
"=",
"self",
".",
"_convert_to_path",
"(",
"v",
")",
"return",
"v",
"if",
"not",
"validate",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"v",
")",
"else",
"fallback"
] | A convenience method which coerces the option in the specified section to a directory. | [
"A",
"convenience",
"method",
"which",
"coerces",
"the",
"option",
"in",
"the",
"specified",
"section",
"to",
"a",
"directory",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/config.py#L65-L71 | train |
dbarsam/python-vsgen | vsgen/util/config.py | VSGConfigParser.getdirs | def getdirs(self, section, option, raw=False, vars=None, fallback=[]):
"""
A convenience method which coerces the option in the specified section to a list of directories.
"""
globs = self.getlist(section, option, fallback=[])
return [f for g in globs for f in glob.glob(g) if os.path.isdir(f)] | python | def getdirs(self, section, option, raw=False, vars=None, fallback=[]):
"""
A convenience method which coerces the option in the specified section to a list of directories.
"""
globs = self.getlist(section, option, fallback=[])
return [f for g in globs for f in glob.glob(g) if os.path.isdir(f)] | [
"def",
"getdirs",
"(",
"self",
",",
"section",
",",
"option",
",",
"raw",
"=",
"False",
",",
"vars",
"=",
"None",
",",
"fallback",
"=",
"[",
"]",
")",
":",
"globs",
"=",
"self",
".",
"getlist",
"(",
"section",
",",
"option",
",",
"fallback",
"=",
"[",
"]",
")",
"return",
"[",
"f",
"for",
"g",
"in",
"globs",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"g",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"f",
")",
"]"
] | A convenience method which coerces the option in the specified section to a list of directories. | [
"A",
"convenience",
"method",
"which",
"coerces",
"the",
"option",
"in",
"the",
"specified",
"section",
"to",
"a",
"list",
"of",
"directories",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/util/config.py#L73-L78 | train |
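Similar sketch for the path-coercing getters; it additionally assumes _convert_to_path simply normalises the string into a filesystem path. Values are illustrative, and the validating calls only return them when the paths really exist.
parser = VSGConfigParser()
parser.read_string(
    '[paths]\n'
    'workdir = .\n'
    'setup = ./setup.py\n'
    'sources = src*, lib*\n'
)
parser.getdir('paths', 'workdir', validate=True)   # '.' exists, so it is returned
parser.getfile('paths', 'setup', validate=True)    # returns the fallback "" unless ./setup.py exists
parser.getdirs('paths', 'sources')                 # expands the globs to existing directories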
useblocks/groundwork | groundwork/patterns/gw_documents_pattern.py | DocumentsListPlugin.register | def register(self, name, content, description=None):
"""
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
"""
return self.__app.documents.register(name, content, self._plugin, description) | python | def register(self, name, content, description=None):
"""
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
"""
return self.__app.documents.register(name, content, self._plugin, description) | [
"def",
"register",
"(",
"self",
",",
"name",
",",
"content",
",",
"description",
"=",
"None",
")",
":",
"return",
"self",
".",
"__app",
".",
"documents",
".",
"register",
"(",
"name",
",",
"content",
",",
"self",
".",
"_plugin",
",",
"description",
")"
] | Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document | [
"Register",
"a",
"new",
"document",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_documents_pattern.py#L66-L75 | train |
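Hedged usage sketch from inside a groundwork plugin's activate(); the document name, content and description are illustrative.
def activate(self):
    self.documents.register(
        name='my_plugin_manual',
        content='My Plugin\n=========\n\nrst/Jinja content rendered by groundwork.',
        description='Short manual for my_plugin',
    )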
useblocks/groundwork | groundwork/patterns/gw_documents_pattern.py | DocumentsListApplication.unregister | def unregister(self, document):
"""
Unregisters an existing document, so that this document is no longer available.
This function is mainly used during plugin deactivation.
:param document: Name of the document
"""
if document not in self.documents.keys():
self.log.warning("Can not unregister document %s" % document)
else:
del (self.documents[document])
self.__log.debug("Document %s got unregistered" % document) | python | def unregister(self, document):
"""
Unregisters an existing document, so that this document is no longer available.
This function is mainly used during plugin deactivation.
:param document: Name of the document
"""
if document not in self.documents.keys():
self.log.warning("Can not unregister document %s" % document)
else:
del (self.documents[document])
self.__log.debug("Document %s got unregistered" % document) | [
"def",
"unregister",
"(",
"self",
",",
"document",
")",
":",
"if",
"document",
"not",
"in",
"self",
".",
"documents",
".",
"keys",
"(",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Can not unregister document %s\"",
"%",
"document",
")",
"else",
":",
"del",
"(",
"self",
".",
"documents",
"[",
"document",
"]",
")",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Document %s got unregistered\"",
"%",
"document",
")"
] | Unregisters an existing document, so that this document is no longer available.
This function is mainly used during plugin deactivation.
:param document: Name of the document | [
"Unregisters",
"an",
"existing",
"document",
"so",
"that",
"this",
"document",
"is",
"no",
"longer",
"available",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_documents_pattern.py#L116-L128 | train |
useblocks/groundwork | groundwork/patterns/gw_documents_pattern.py | DocumentsListApplication.get | def get(self, document=None, plugin=None):
"""
Get one or more documents.
:param document: Name of the document
:type document: str
:param plugin: Plugin object, under which the document was registered
:type plugin: GwBasePattern
"""
if plugin is not None:
if document is None:
documents_list = {}
for key in self.documents.keys():
if self.documents[key].plugin == plugin:
documents_list[key] = self.documents[key]
return documents_list
else:
if document in self.documents.keys():
if self.documents[document].plugin == plugin:
return self.documents[document]
else:
return None
else:
return None
else:
if document is None:
return self.documents
else:
if document in self.documents.keys():
return self.documents[document]
else:
return None | python | def get(self, document=None, plugin=None):
"""
Get one or more documents.
:param document: Name of the document
:type document: str
:param plugin: Plugin object, under which the document was registered
:type plugin: GwBasePattern
"""
if plugin is not None:
if document is None:
documents_list = {}
for key in self.documents.keys():
if self.documents[key].plugin == plugin:
documents_list[key] = self.documents[key]
return documents_list
else:
if document in self.documents.keys():
if self.documents[document].plugin == plugin:
return self.documents[document]
else:
return None
else:
return None
else:
if document is None:
return self.documents
else:
if document in self.documents.keys():
return self.documents[document]
else:
return None | [
"def",
"get",
"(",
"self",
",",
"document",
"=",
"None",
",",
"plugin",
"=",
"None",
")",
":",
"if",
"plugin",
"is",
"not",
"None",
":",
"if",
"document",
"is",
"None",
":",
"documents_list",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"documents",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"documents",
"[",
"key",
"]",
".",
"plugin",
"==",
"plugin",
":",
"documents_list",
"[",
"key",
"]",
"=",
"self",
".",
"documents",
"[",
"key",
"]",
"return",
"documents_list",
"else",
":",
"if",
"document",
"in",
"self",
".",
"documents",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"documents",
"[",
"document",
"]",
".",
"plugin",
"==",
"plugin",
":",
"return",
"self",
".",
"documents",
"[",
"document",
"]",
"else",
":",
"return",
"None",
"else",
":",
"return",
"None",
"else",
":",
"if",
"document",
"is",
"None",
":",
"return",
"self",
".",
"documents",
"else",
":",
"if",
"document",
"in",
"self",
".",
"documents",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"documents",
"[",
"document",
"]",
"else",
":",
"return",
"None"
] | Get one or more documents.
:param document: Name of the document
:type document: str
:param plugin: Plugin object, under which the document was registered
:type plugin: GwBasePattern | [
"Get",
"one",
"or",
"more",
"documents",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_documents_pattern.py#L130-L161 | train |
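A minimal usage sketch for the two lookup modes above. Only the get() signature comes from this record; the application object, the documents attribute on it, and the names are assumptions for illustration.

# 'app' is assumed to be a groundwork application exposing this list as app.documents.
all_docs = app.documents.get()                        # dict of every registered document
one_doc = app.documents.get("quickstart_doc")         # a single document, or None if unknown
plugin_docs = app.documents.get(plugin=my_plugin)     # only documents registered by my_plugin
if one_doc is None:
    print("Document 'quickstart_doc' is not registered")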
useblocks/groundwork | groundwork/pluginmanager.py | PluginManager.initialise_by_names | def initialise_by_names(self, plugins=None):
"""
Initialises given plugins, but does not activate them.
This is needed to import and configure libraries, which are imported by used patterns, like GwFlask.
After this action, all needed python modules are imported and configured.
Also the groundwork application object is ready and contains functions and objects, which were added
by patterns, like app.commands from GwCommandsPattern.
The class of a given plugin must already be registered in the :class:`.PluginClassManager`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
if plugins is None:
plugins = []
self._log.debug("Plugins Initialisation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to initialise: %s" % ", ".join(plugins))
plugin_initialised = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
plugin_class = self.classes.get(plugin_name)
self.initialise(plugin_class.clazz, plugin_name)
plugin_initialised.append(plugin_name)
self._log.info("Plugins initialised: %s" % ", ".join(plugin_initialised)) | python | def initialise_by_names(self, plugins=None):
"""
Initialises given plugins, but does not activate them.
This is needed to import and configure libraries, which are imported by used patterns, like GwFlask.
After this action, all needed python modules are imported and configured.
Also the groundwork application object is ready and contains functions and objects, which were added
by patterns, like app.commands from GwCommandsPattern.
The class of a given plugin must already be registered in the :class:`.PluginClassManager`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
if plugins is None:
plugins = []
self._log.debug("Plugins Initialisation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to initialise: %s" % ", ".join(plugins))
plugin_initialised = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
plugin_class = self.classes.get(plugin_name)
self.initialise(plugin_class.clazz, plugin_name)
plugin_initialised.append(plugin_name)
self._log.info("Plugins initialised: %s" % ", ".join(plugin_initialised)) | [
"def",
"initialise_by_names",
"(",
"self",
",",
"plugins",
"=",
"None",
")",
":",
"if",
"plugins",
"is",
"None",
":",
"plugins",
"=",
"[",
"]",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins Initialisation started\"",
")",
"if",
"not",
"isinstance",
"(",
"plugins",
",",
"list",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugins must be a list, not %s\"",
"%",
"type",
"(",
"plugins",
")",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins to initialise: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugins",
")",
")",
"plugin_initialised",
"=",
"[",
"]",
"for",
"plugin_name",
"in",
"plugins",
":",
"if",
"not",
"isinstance",
"(",
"plugin_name",
",",
"str",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugin name must be a str, not %s\"",
"%",
"type",
"(",
"plugin_name",
")",
")",
"plugin_class",
"=",
"self",
".",
"classes",
".",
"get",
"(",
"plugin_name",
")",
"self",
".",
"initialise",
"(",
"plugin_class",
".",
"clazz",
",",
"plugin_name",
")",
"plugin_initialised",
".",
"append",
"(",
"plugin_name",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Plugins initialised: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugin_initialised",
")",
")"
] | Initialises given plugins, but does not activate them.
This is needed to import and configure libraries, which are imported by used patterns, like GwFlask.
After this action, all needed python modules are imported and configured.
Also the groundwork application object is ready and contains functions and objects, which were added
by patterns, like app.commands from GwCommandsPattern.
The class of a given plugin must already be registered in the :class:`.PluginClassManager`.
:param plugins: List of plugin names
:type plugins: list of strings | [
"Initialises",
"given",
"plugins",
"but",
"does",
"not",
"activate",
"them",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L46-L79 | train |
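A hedged sketch of driving initialise_by_names(); the App constructor call and the plugin name are assumptions, while the manager call mirrors the signature in this record.

from groundwork import App

my_app = App()                                      # assumed constructor; configuration omitted
# 'MyPlugin' must already be registered in my_app.plugins.classes.
my_app.plugins.initialise_by_names(["MyPlugin"])    # imports and configures, does not activate
plugin = my_app.plugins.get("MyPlugin")             # initialised instance, still inactive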
useblocks/groundwork | groundwork/pluginmanager.py | PluginManager.activate | def activate(self, plugins=[]):
"""
Activates given plugins.
This calls mainly plugin.activate() and plugins register needed resources like commands, signals or
documents.
If given plugins have not been initialised, this is also done via :func:`_load`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
self._log.debug("Plugins Activation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to activate: %s" % ", ".join(plugins))
plugins_activated = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
if plugin_name not in self._plugins.keys() and plugin_name in self.classes._classes.keys():
self._log.debug("Initialisation needed before activation.")
try:
self.initialise_by_names([plugin_name])
except Exception as e:
self._log.error("Couldn't initialise plugin %s. Reason %s" % (plugin_name, e))
if self._app.strict:
error = "Couldn't initialise plugin %s" % plugin_name
if sys.version_info[0] < 3:
error += "Reason: %s" % e
raise_from(Exception(error), e)
else:
continue
if plugin_name in self._plugins.keys():
self._log.debug("Activating plugin %s" % plugin_name)
if not self._plugins[plugin_name].active:
try:
self._plugins[plugin_name].activate()
except Exception as e:
raise_from(
PluginNotActivatableException("Plugin %s could not be activated: %s" % (plugin_name,
e)), e)
else:
self._log.debug("Plugin %s activated" % plugin_name)
plugins_activated.append(plugin_name)
else:
self._log.warning("Plugin %s got already activated." % plugin_name)
if self._app.strict:
raise PluginNotInitialisableException()
self._log.info("Plugins activated: %s" % ", ".join(plugins_activated)) | python | def activate(self, plugins=[]):
"""
Activates given plugins.
This calls mainly plugin.activate() and plugins register needed resources like commands, signals or
documents.
If given plugins have not been initialised, this is also done via :func:`_load`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
self._log.debug("Plugins Activation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to activate: %s" % ", ".join(plugins))
plugins_activated = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
if plugin_name not in self._plugins.keys() and plugin_name in self.classes._classes.keys():
self._log.debug("Initialisation needed before activation.")
try:
self.initialise_by_names([plugin_name])
except Exception as e:
self._log.error("Couldn't initialise plugin %s. Reason %s" % (plugin_name, e))
if self._app.strict:
error = "Couldn't initialise plugin %s" % plugin_name
if sys.version_info[0] < 3:
error += "Reason: %s" % e
raise_from(Exception(error), e)
else:
continue
if plugin_name in self._plugins.keys():
self._log.debug("Activating plugin %s" % plugin_name)
if not self._plugins[plugin_name].active:
try:
self._plugins[plugin_name].activate()
except Exception as e:
raise_from(
PluginNotActivatableException("Plugin %s could not be activated: %s" % (plugin_name,
e)), e)
else:
self._log.debug("Plugin %s activated" % plugin_name)
plugins_activated.append(plugin_name)
else:
self._log.warning("Plugin %s got already activated." % plugin_name)
if self._app.strict:
raise PluginNotInitialisableException()
self._log.info("Plugins activated: %s" % ", ".join(plugins_activated)) | [
"def",
"activate",
"(",
"self",
",",
"plugins",
"=",
"[",
"]",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins Activation started\"",
")",
"if",
"not",
"isinstance",
"(",
"plugins",
",",
"list",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugins must be a list, not %s\"",
"%",
"type",
"(",
"plugins",
")",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins to activate: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugins",
")",
")",
"plugins_activated",
"=",
"[",
"]",
"for",
"plugin_name",
"in",
"plugins",
":",
"if",
"not",
"isinstance",
"(",
"plugin_name",
",",
"str",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugin name must be a str, not %s\"",
"%",
"type",
"(",
"plugin_name",
")",
")",
"if",
"plugin_name",
"not",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
"and",
"plugin_name",
"in",
"self",
".",
"classes",
".",
"_classes",
".",
"keys",
"(",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Initialisation needed before activation.\"",
")",
"try",
":",
"self",
".",
"initialise_by_names",
"(",
"[",
"plugin_name",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"_log",
".",
"error",
"(",
"\"Couldn't initialise plugin %s. Reason %s\"",
"%",
"(",
"plugin_name",
",",
"e",
")",
")",
"if",
"self",
".",
"_app",
".",
"strict",
":",
"error",
"=",
"\"Couldn't initialise plugin %s\"",
"%",
"plugin_name",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
":",
"error",
"+=",
"\"Reason: %s\"",
"%",
"e",
"raise_from",
"(",
"Exception",
"(",
"error",
")",
",",
"e",
")",
"else",
":",
"continue",
"if",
"plugin_name",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Activating plugin %s\"",
"%",
"plugin_name",
")",
"if",
"not",
"self",
".",
"_plugins",
"[",
"plugin_name",
"]",
".",
"active",
":",
"try",
":",
"self",
".",
"_plugins",
"[",
"plugin_name",
"]",
".",
"activate",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise_from",
"(",
"PluginNotActivatableException",
"(",
"\"Plugin %s could not be activated: %s\"",
"%",
"(",
"plugin_name",
",",
"e",
")",
")",
",",
"e",
")",
"else",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugin %s activated\"",
"%",
"plugin_name",
")",
"plugins_activated",
".",
"append",
"(",
"plugin_name",
")",
"else",
":",
"self",
".",
"_log",
".",
"warning",
"(",
"\"Plugin %s got already activated.\"",
"%",
"plugin_name",
")",
"if",
"self",
".",
"_app",
".",
"strict",
":",
"raise",
"PluginNotInitialisableException",
"(",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Plugins activated: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugins_activated",
")",
")"
] | Activates given plugins.
This calls mainly plugin.activate() and plugins register needed resources like commands, signals or
documents.
If given plugins have not been initialised, this is also done via :func:`_load`.
:param plugins: List of plugin names
:type plugins: list of strings | [
"Activates",
"given",
"plugins",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L129-L183 | train |
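Because the loop above initialises missing plugins on demand, a single activate() call is normally enough. A short sketch with placeholder names:

# Initialises (if needed) and activates both plugins.
my_app.plugins.activate(["MyPlugin", "OtherPlugin"])

# Anything other than a list raises AttributeError, as the type check above shows.
try:
    my_app.plugins.activate("MyPlugin")
except AttributeError as error:
    print(error)   # plugins must be a list, not <class 'str'>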
useblocks/groundwork | groundwork/pluginmanager.py | PluginManager.deactivate | def deactivate(self, plugins=[]):
"""
Deactivates given plugins.
A given plugin must be activated, otherwise it is ignored and no action takes place (no signals are fired,
no deactivate functions are called.)
A deactivated plugin is still loaded and initialised and can be reactivated by calling :func:`activate` again.
It is also still registered in the :class:`.PluginManager` and can be requested via :func:`get`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
self._log.debug("Plugins Deactivation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to deactivate: %s" % ", ".join(plugins))
plugins_deactivated = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
if plugin_name not in self._plugins.keys():
self._log.info("Unknown activated plugin %s" % plugin_name)
continue
else:
self._log.debug("Deactivating plugin %s" % plugin_name)
if not self._plugins[plugin_name].active:
self._log.warning("Plugin %s seems to be already deactivated" % plugin_name)
else:
try:
self._plugins[plugin_name].deactivate()
except Exception as e:
raise_from(
PluginNotDeactivatableException("Plugin %s could not be deactivated" % plugin_name), e)
else:
self._log.debug("Plugin %s deactivated" % plugin_name)
plugins_deactivated.append(plugin_name)
self._log.info("Plugins deactivated: %s" % ", ".join(plugins_deactivated)) | python | def deactivate(self, plugins=[]):
"""
Deactivates given plugins.
A given plugin must be activated, otherwise it is ignored and no action takes place (no signals are fired,
no deactivate functions are called.)
A deactivated plugin is still loaded and initialised and can be reactivated by calling :func:`activate` again.
It is also still registered in the :class:`.PluginManager` and can be requested via :func:`get`.
:param plugins: List of plugin names
:type plugins: list of strings
"""
self._log.debug("Plugins Deactivation started")
if not isinstance(plugins, list):
raise AttributeError("plugins must be a list, not %s" % type(plugins))
self._log.debug("Plugins to deactivate: %s" % ", ".join(plugins))
plugins_deactivated = []
for plugin_name in plugins:
if not isinstance(plugin_name, str):
raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
if plugin_name not in self._plugins.keys():
self._log.info("Unknown activated plugin %s" % plugin_name)
continue
else:
self._log.debug("Deactivating plugin %s" % plugin_name)
if not self._plugins[plugin_name].active:
self._log.warning("Plugin %s seems to be already deactivated" % plugin_name)
else:
try:
self._plugins[plugin_name].deactivate()
except Exception as e:
raise_from(
PluginNotDeactivatableException("Plugin %s could not be deactivated" % plugin_name), e)
else:
self._log.debug("Plugin %s deactivated" % plugin_name)
plugins_deactivated.append(plugin_name)
self._log.info("Plugins deactivated: %s" % ", ".join(plugins_deactivated)) | [
"def",
"deactivate",
"(",
"self",
",",
"plugins",
"=",
"[",
"]",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins Deactivation started\"",
")",
"if",
"not",
"isinstance",
"(",
"plugins",
",",
"list",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugins must be a list, not %s\"",
"%",
"type",
"(",
"plugins",
")",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugins to deactivate: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugins",
")",
")",
"plugins_deactivated",
"=",
"[",
"]",
"for",
"plugin_name",
"in",
"plugins",
":",
"if",
"not",
"isinstance",
"(",
"plugin_name",
",",
"str",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugin name must be a str, not %s\"",
"%",
"type",
"(",
"plugin_name",
")",
")",
"if",
"plugin_name",
"not",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
":",
"self",
".",
"_log",
".",
"info",
"(",
"\"Unknown activated plugin %s\"",
"%",
"plugin_name",
")",
"continue",
"else",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Deactivating plugin %s\"",
"%",
"plugin_name",
")",
"if",
"not",
"self",
".",
"_plugins",
"[",
"plugin_name",
"]",
".",
"active",
":",
"self",
".",
"_log",
".",
"warning",
"(",
"\"Plugin %s seems to be already deactivated\"",
"%",
"plugin_name",
")",
"else",
":",
"try",
":",
"self",
".",
"_plugins",
"[",
"plugin_name",
"]",
".",
"deactivate",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise_from",
"(",
"PluginNotDeactivatableException",
"(",
"\"Plugin %s could not be deactivated\"",
"%",
"plugin_name",
")",
",",
"e",
")",
"else",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugin %s deactivated\"",
"%",
"plugin_name",
")",
"plugins_deactivated",
".",
"append",
"(",
"plugin_name",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Plugins deactivated: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugins_deactivated",
")",
")"
] | Deactivates given plugins.
A given plugin must be activated, otherwise it is ignored and no action takes place (no signals are fired,
no deactivate functions are called.)
A deactivated plugin is still loaded and initialised and can be reactivated by calling :func:`activate` again.
It is also still registered in the :class:`.PluginManager` and can be requested via :func:`get`.
:param plugins: List of plugin names
:type plugins: list of strings | [
"Deactivates",
"given",
"plugins",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L185-L227 | train |
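Deactivation leaves the instance registered, so it can be toggled back on later. A sketch under the same assumptions as the previous examples:

my_app.plugins.deactivate(["MyPlugin"])              # runs the plugin's deactivate() hooks
assert my_app.plugins.get("MyPlugin") is not None    # still registered and initialised
my_app.plugins.activate(["MyPlugin"])                # reactivation is allowed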
useblocks/groundwork | groundwork/pluginmanager.py | PluginManager.get | def get(self, name=None):
"""
Returns the plugin object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins
"""
if name is None:
return self._plugins
else:
if name not in self._plugins.keys():
return None
else:
return self._plugins[name] | python | def get(self, name=None):
"""
Returns the plugin object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins
"""
if name is None:
return self._plugins
else:
if name not in self._plugins.keys():
return None
else:
return self._plugins[name] | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"_plugins",
"else",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_plugins",
"[",
"name",
"]"
] | Returns the plugin object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins | [
"Returns",
"the",
"plugin",
"object",
"with",
"the",
"given",
"name",
".",
"Or",
"if",
"a",
"name",
"is",
"not",
"given",
"the",
"complete",
"plugin",
"dictionary",
"is",
"returned",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L229-L243 | train |
useblocks/groundwork | groundwork/pluginmanager.py | PluginManager.is_active | def is_active(self, name):
"""
Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None
"""
if name in self._plugins.keys():
return self._plugins[name].active
return None | python | def is_active(self, name):
"""
Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None
"""
if name in self._plugins.keys():
return self._plugins[name].active
return None | [
"def",
"is_active",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"_plugins",
".",
"keys",
"(",
")",
":",
"return",
"self",
".",
"_plugins",
"[",
"\"name\"",
"]",
".",
"active",
"return",
"None"
] | Returns True if plugin exists and is active.
If plugin does not exist, it returns None
:param name: plugin name
:return: boolean or None | [
"Returns",
"True",
"if",
"plugin",
"exists",
"and",
"is",
"active",
".",
"If",
"plugin",
"does",
"not",
"exist",
"it",
"returns",
"None"
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L255-L265 | train |
useblocks/groundwork | groundwork/pluginmanager.py | PluginClassManager.register | def register(self, classes=[]):
"""
Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list
"""
if not isinstance(classes, list):
raise AttributeError("plugins must be a list, not %s." % type(classes))
plugin_registered = []
for plugin_class in classes:
plugin_name = plugin_class.__name__
self.register_class(plugin_class, plugin_name)
self._log.debug("Plugin %s registered" % plugin_name)
plugin_registered.append(plugin_name)
self._log.info("Plugins registered: %s" % ", ".join(plugin_registered)) | python | def register(self, classes=[]):
"""
Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list
"""
if not isinstance(classes, list):
raise AttributeError("plugins must be a list, not %s." % type(classes))
plugin_registered = []
for plugin_class in classes:
plugin_name = plugin_class.__name__
self.register_class(plugin_class, plugin_name)
self._log.debug("Plugin %s registered" % plugin_name)
plugin_registered.append(plugin_name)
self._log.info("Plugins registered: %s" % ", ".join(plugin_registered)) | [
"def",
"register",
"(",
"self",
",",
"classes",
"=",
"[",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"classes",
",",
"list",
")",
":",
"raise",
"AttributeError",
"(",
"\"plugins must be a list, not %s.\"",
"%",
"type",
"(",
"classes",
")",
")",
"plugin_registered",
"=",
"[",
"]",
"for",
"plugin_class",
"in",
"classes",
":",
"plugin_name",
"=",
"plugin_class",
".",
"__name__",
"self",
".",
"register_class",
"(",
"plugin_class",
",",
"plugin_name",
")",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Plugin %s registered\"",
"%",
"plugin_name",
")",
"plugin_registered",
".",
"append",
"(",
"plugin_name",
")",
"self",
".",
"_log",
".",
"info",
"(",
"\"Plugins registered: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"plugin_registered",
")",
")"
] | Registers new plugins.
The registration only creates a new entry for a plugin inside the _classes dictionary.
It does not activate or even initialise the plugin.
A plugin must be a class, which inherits directly or indirectly from GwBasePattern.
:param classes: List of plugin classes
:type classes: list | [
"Registers",
"new",
"plugins",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L331-L354 | train |
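A sketch of the register-then-activate split. The GwBasePattern import path and the plugin body are assumptions based on the groundwork documentation, not on this record.

from groundwork.patterns import GwBasePattern

class MyPlugin(GwBasePattern):
    def __init__(self, app, **kwargs):
        self.name = "MyPlugin"
        super().__init__(app, **kwargs)

    def activate(self):
        pass

    def deactivate(self):
        pass

my_app.plugins.classes.register([MyPlugin])   # stored under MyPlugin.__name__
my_app.plugins.activate(["MyPlugin"])         # initialise and activate by name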
useblocks/groundwork | groundwork/pluginmanager.py | PluginClassManager.get | def get(self, name=None):
"""
Returns the plugin class object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins
"""
if name is None:
return self._classes
else:
if name not in self._classes.keys():
return None
else:
return self._classes[name] | python | def get(self, name=None):
"""
Returns the plugin class object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins
"""
if name is None:
return self._classes
else:
if name not in self._classes.keys():
return None
else:
return self._classes[name] | [
"def",
"get",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"_classes",
"else",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_classes",
".",
"keys",
"(",
")",
":",
"return",
"None",
"else",
":",
"return",
"self",
".",
"_classes",
"[",
"name",
"]"
] | Returns the plugin class object with the given name.
Or if a name is not given, the complete plugin dictionary is returned.
:param name: Name of a plugin
:return: None, single plugin or dictionary of plugins | [
"Returns",
"the",
"plugin",
"class",
"object",
"with",
"the",
"given",
"name",
".",
"Or",
"if",
"a",
"name",
"is",
"not",
"given",
"the",
"complete",
"plugin",
"dictionary",
"is",
"returned",
"."
] | d34fce43f54246ca4db0f7b89e450dcdc847c68c | https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/pluginmanager.py#L390-L404 | train |
dbarsam/python-vsgen | vsgen/solution.py | VSGSolution.write | def write(self):
"""
Writes the ``.sln`` file to disk.
"""
filters = {
'MSGUID': lambda x: ('{%s}' % x).upper(),
'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))
}
context = {
'sln': self
}
return self.render(self.__jinja_template__, self.FileName, context, filters) | python | def write(self):
"""
Writes the ``.sln`` file to disk.
"""
filters = {
'MSGUID': lambda x: ('{%s}' % x).upper(),
'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))
}
context = {
'sln': self
}
return self.render(self.__jinja_template__, self.FileName, context, filters) | [
"def",
"write",
"(",
"self",
")",
":",
"filters",
"=",
"{",
"'MSGUID'",
":",
"lambda",
"x",
":",
"(",
"'{%s}'",
"%",
"x",
")",
".",
"upper",
"(",
")",
",",
"'relslnfile'",
":",
"lambda",
"x",
":",
"os",
".",
"path",
".",
"relpath",
"(",
"x",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"FileName",
")",
")",
"}",
"context",
"=",
"{",
"'sln'",
":",
"self",
"}",
"return",
"self",
".",
"render",
"(",
"self",
".",
"__jinja_template__",
",",
"self",
".",
"FileName",
",",
"context",
",",
"filters",
")"
] | Writes the ``.sln`` file to disk. | [
"Writes",
"the",
".",
"sln",
"file",
"to",
"disk",
"."
] | 640191bb018a1ff7d7b7a4982e0d3c1a423ba878 | https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/solution.py#L49-L60 | train |
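A short sketch of producing a solution file. Only write() and the FileName attribute used by the filters above come from this record; the keyword-argument constructor and the project-population step are assumptions about the rest of vsgen's API.

from vsgen.solution import VSGSolution

sln = VSGSolution(FileName=r"C:\build\demo.sln")   # consumed by the 'relslnfile' filter
# ...populate the solution with projects here (not shown in this record)...
sln.write()                                        # renders the Jinja template to demo.sln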
atarashansky/self-assembling-manifold | SAM.py | SAM.load_annotations | def load_annotations(self, aname, sep=','):
"""Loads cell annotations.
Loads the cell annotations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations.
"""
ann = pd.read_csv(aname)
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
if(ann.shape[1] > 1):
ann = pd.read_csv(aname, index_col=0, sep=sep)
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
else:
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, header=None, sep=sep)
ann.index = np.array(list(ann.index.astype('<U100')))
ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
ann2 = np.array(list(ann.values.flatten()))
self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
self.adata.obs['annotations'] = pd.Categorical(ann1) | python | def load_annotations(self, aname, sep=','):
"""Loads cell annotations.
Loads the cell annotations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations.
"""
ann = pd.read_csv(aname)
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
if(ann.shape[1] > 1):
ann = pd.read_csv(aname, index_col=0, sep=sep)
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, index_col=0, header=None, sep=sep)
else:
if(ann.shape[0] != all_cell_names.size):
ann = pd.read_csv(aname, header=None, sep=sep)
ann.index = np.array(list(ann.index.astype('<U100')))
ann1 = np.array(list(ann.T[cell_names].T.values.flatten()))
ann2 = np.array(list(ann.values.flatten()))
self.adata_raw.obs['annotations'] = pd.Categorical(ann2)
self.adata.obs['annotations'] = pd.Categorical(ann1) | [
"def",
"load_annotations",
"(",
"self",
",",
"aname",
",",
"sep",
"=",
"','",
")",
":",
"ann",
"=",
"pd",
".",
"read_csv",
"(",
"aname",
")",
"cell_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"obs_names",
")",
")",
"all_cell_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata_raw",
".",
"obs_names",
")",
")",
"if",
"(",
"ann",
".",
"shape",
"[",
"1",
"]",
">",
"1",
")",
":",
"ann",
"=",
"pd",
".",
"read_csv",
"(",
"aname",
",",
"index_col",
"=",
"0",
",",
"sep",
"=",
"sep",
")",
"if",
"(",
"ann",
".",
"shape",
"[",
"0",
"]",
"!=",
"all_cell_names",
".",
"size",
")",
":",
"ann",
"=",
"pd",
".",
"read_csv",
"(",
"aname",
",",
"index_col",
"=",
"0",
",",
"header",
"=",
"None",
",",
"sep",
"=",
"sep",
")",
"else",
":",
"if",
"(",
"ann",
".",
"shape",
"[",
"0",
"]",
"!=",
"all_cell_names",
".",
"size",
")",
":",
"ann",
"=",
"pd",
".",
"read_csv",
"(",
"aname",
",",
"header",
"=",
"None",
",",
"sep",
"=",
"sep",
")",
"ann",
".",
"index",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"ann",
".",
"index",
".",
"astype",
"(",
"'<U100'",
")",
")",
")",
"ann1",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"ann",
".",
"T",
"[",
"cell_names",
"]",
".",
"T",
".",
"values",
".",
"flatten",
"(",
")",
")",
")",
"ann2",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"ann",
".",
"values",
".",
"flatten",
"(",
")",
")",
")",
"self",
".",
"adata_raw",
".",
"obs",
"[",
"'annotations'",
"]",
"=",
"pd",
".",
"Categorical",
"(",
"ann2",
")",
"self",
".",
"adata",
".",
"obs",
"[",
"'annotations'",
"]",
"=",
"pd",
".",
"Categorical",
"(",
"ann1",
")"
] | Loads cell annotations.
Loads the cell annotations specified by the 'aname' path.
Parameters
----------
aname - string
The path to the annotations file. First column should be cell IDs
and second column should be the desired annotations. | [
"Loads",
"cell",
"annotations",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L460-L489 | train |
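A sketch of the expected annotation file and call. It assumes a SAM object whose expression data is already loaded; only load_annotations() and the obs key it writes come from this record.

# annotations.csv -- first column cell IDs, second column labels:
#   cell_1,neuron
#   cell_2,muscle
sam.load_annotations('annotations.csv', sep=',')
labels = sam.adata.obs['annotations']      # labels restricted to the filtered cells
sam.scatter(c='annotations')               # colour an existing projection by the labels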
atarashansky/self-assembling-manifold | SAM.py | SAM.dispersion_ranking_NN | def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
"""Computes the spatial dispersion factors for each gene.
Parameters
----------
nnm - scipy.sparse, float
Square cell-to-cell nearest-neighbor matrix.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This ensures
that outlier genes do not significantly skew the weight
distribution.
Returns:
-------
weights - ndarray, float
The vector of gene weights.
"""
self.knn_avg(nnm)
D_avg = self.adata.layers['X_knn_avg']
mu, var = sf.mean_variance_axis(D_avg, axis=0)
dispersions = np.zeros(var.size)
dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
self.adata.var['spatial_dispersions'] = dispersions.copy()
ma = np.sort(dispersions)[-num_norm_avg:].mean()
dispersions[dispersions >= ma] = ma
weights = ((dispersions / dispersions.max())**0.5).flatten()
self.adata.var['weights'] = weights
return weights | python | def dispersion_ranking_NN(self, nnm, num_norm_avg=50):
"""Computes the spatial dispersion factors for each gene.
Parameters
----------
nnm - scipy.sparse, float
Square cell-to-cell nearest-neighbor matrix.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This ensures
that outlier genes do not significantly skew the weight
distribution.
Returns:
-------
weights - ndarray, float
The vector of gene weights.
"""
self.knn_avg(nnm)
D_avg = self.adata.layers['X_knn_avg']
mu, var = sf.mean_variance_axis(D_avg, axis=0)
dispersions = np.zeros(var.size)
dispersions[mu > 0] = var[mu > 0] / mu[mu > 0]
self.adata.var['spatial_dispersions'] = dispersions.copy()
ma = np.sort(dispersions)[-num_norm_avg:].mean()
dispersions[dispersions >= ma] = ma
weights = ((dispersions / dispersions.max())**0.5).flatten()
self.adata.var['weights'] = weights
return weights | [
"def",
"dispersion_ranking_NN",
"(",
"self",
",",
"nnm",
",",
"num_norm_avg",
"=",
"50",
")",
":",
"self",
".",
"knn_avg",
"(",
"nnm",
")",
"D_avg",
"=",
"self",
".",
"adata",
".",
"layers",
"[",
"'X_knn_avg'",
"]",
"mu",
",",
"var",
"=",
"sf",
".",
"mean_variance_axis",
"(",
"D_avg",
",",
"axis",
"=",
"0",
")",
"dispersions",
"=",
"np",
".",
"zeros",
"(",
"var",
".",
"size",
")",
"dispersions",
"[",
"mu",
">",
"0",
"]",
"=",
"var",
"[",
"mu",
">",
"0",
"]",
"/",
"mu",
"[",
"mu",
">",
"0",
"]",
"self",
".",
"adata",
".",
"var",
"[",
"'spatial_dispersions'",
"]",
"=",
"dispersions",
".",
"copy",
"(",
")",
"ma",
"=",
"np",
".",
"sort",
"(",
"dispersions",
")",
"[",
"-",
"num_norm_avg",
":",
"]",
".",
"mean",
"(",
")",
"dispersions",
"[",
"dispersions",
">=",
"ma",
"]",
"=",
"ma",
"weights",
"=",
"(",
"(",
"dispersions",
"/",
"dispersions",
".",
"max",
"(",
")",
")",
"**",
"0.5",
")",
".",
"flatten",
"(",
")",
"self",
".",
"adata",
".",
"var",
"[",
"'weights'",
"]",
"=",
"weights",
"return",
"weights"
] | Computes the spatial dispersion factors for each gene.
Parameters
----------
nnm - scipy.sparse, float
Square cell-to-cell nearest-neighbor matrix.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This ensures
that outlier genes do not significantly skew the weight
distribution.
Returns:
-------
weights - ndarray, float
The vector of gene weights. | [
"Computes",
"the",
"spatial",
"dispersion",
"factors",
"for",
"each",
"gene",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L491-L532 | train |
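The returned weights can be turned back into a ranked gene list; a sketch assuming 'nnm' is a cell-by-cell sparse nearest-neighbor matrix that SAM has computed elsewhere.

import numpy as np

weights = sam.dispersion_ranking_NN(nnm, num_norm_avg=50)
gene_names = np.array(list(sam.adata.var_names))
top_genes = gene_names[np.argsort(-weights)][:20]   # 20 highest-weight genes
print(top_genes)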
atarashansky/self-assembling-manifold | SAM.py | SAM.plot_correlated_groups | def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs) | python | def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
"""Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
geneID_groups = self.adata.uns['gene_groups']
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs) | [
"def",
"plot_correlated_groups",
"(",
"self",
",",
"group",
"=",
"None",
",",
"n_genes",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"geneID_groups",
"=",
"self",
".",
"adata",
".",
"uns",
"[",
"'gene_groups'",
"]",
"if",
"(",
"group",
"is",
"None",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"geneID_groups",
")",
")",
":",
"self",
".",
"show_gene_expression",
"(",
"geneID_groups",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"n_genes",
")",
":",
"self",
".",
"show_gene_expression",
"(",
"geneID_groups",
"[",
"group",
"]",
"[",
"i",
"]",
",",
"*",
"*",
"kwargs",
")"
] | Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible. | [
"Plots",
"orthogonal",
"expression",
"patterns",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L857-L885 | train |
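A usage sketch; it assumes the correlated gene groups have already been computed and stored in sam.adata.uns['gene_groups'], as the method body requires, and that a UMAP projection exists.

sam.plot_correlated_groups(projection='X_umap')                       # one representative gene per group
sam.plot_correlated_groups(group=0, n_genes=3, projection='X_umap')   # top 3 genes of group 0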
atarashansky/self-assembling-manifold | SAM.py | SAM.plot_correlated_genes | def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:] | python | def plot_correlated_genes(
self,
name,
n_genes=5,
number_of_features=1000,
**kwargs):
"""Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
if((all_gene_names == name).sum() == 0):
print(
"Gene not found in the filtered dataset. Note that genes "
"are case sensitive.")
return
sds = self.corr_bin_genes(
input_gene=name,
number_of_features=number_of_features)
if (n_genes + 1 > sds.size):
x = sds.size
else:
x = n_genes + 1
for i in range(1, x):
self.show_gene_expression(sds[i], **kwargs)
return sds[1:] | [
"def",
"plot_correlated_genes",
"(",
"self",
",",
"name",
",",
"n_genes",
"=",
"5",
",",
"number_of_features",
"=",
"1000",
",",
"*",
"*",
"kwargs",
")",
":",
"all_gene_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"var_names",
")",
")",
"if",
"(",
"(",
"all_gene_names",
"==",
"name",
")",
".",
"sum",
"(",
")",
"==",
"0",
")",
":",
"print",
"(",
"\"Gene not found in the filtered dataset. Note that genes \"",
"\"are case sensitive.\"",
")",
"return",
"sds",
"=",
"self",
".",
"corr_bin_genes",
"(",
"input_gene",
"=",
"name",
",",
"number_of_features",
"=",
"number_of_features",
")",
"if",
"(",
"n_genes",
"+",
"1",
">",
"sds",
".",
"size",
")",
":",
"x",
"=",
"sds",
".",
"size",
"else",
":",
"x",
"=",
"n_genes",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"x",
")",
":",
"self",
".",
"show_gene_expression",
"(",
"sds",
"[",
"i",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"sds",
"[",
"1",
":",
"]"
] | Plots gene expression patterns correlated with the input gene.
Parameters
----------
name - string
The name of the gene with respect to which correlated gene
expression patterns will be displayed.
n_genes - int, optional, default 5
The number of top ranked correlated genes to display.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible. | [
"Plots",
"gene",
"expression",
"patterns",
"correlated",
"with",
"the",
"input",
"gene",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L887-L924 | train |
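A sketch of pulling genes correlated with a marker of interest; the gene name is a placeholder and must match the filtered dataset's case-sensitive IDs.

correlated = sam.plot_correlated_genes('GENE_OF_INTEREST', n_genes=5,
                                       projection='X_umap')
print(correlated)   # the gene IDs most correlated with the query gene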
atarashansky/self-assembling-manifold | SAM.py | SAM.run_tsne | def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' defaults to 'correlation' here, and to the
SAM distance metric when no input matrix is given (a precomputed distance
matrix is therefore not expected by default).
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d | python | def run_tsne(self, X=None, metric='correlation', **kwargs):
"""Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' defaults to 'correlation' here, and to the
SAM distance metric when no input matrix is given (a precomputed distance
matrix is therefore not expected by default).
"""
if(X is not None):
dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
return dt
else:
dt = man.TSNE(metric=self.distance,
**kwargs).fit_transform(self.adata.obsm['X_pca'])
tsne2d = dt
self.adata.obsm['X_tsne'] = tsne2d | [
"def",
"run_tsne",
"(",
"self",
",",
"X",
"=",
"None",
",",
"metric",
"=",
"'correlation'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"X",
"is",
"not",
"None",
")",
":",
"dt",
"=",
"man",
".",
"TSNE",
"(",
"metric",
"=",
"metric",
",",
"*",
"*",
"kwargs",
")",
".",
"fit_transform",
"(",
"X",
")",
"return",
"dt",
"else",
":",
"dt",
"=",
"man",
".",
"TSNE",
"(",
"metric",
"=",
"self",
".",
"distance",
",",
"*",
"*",
"kwargs",
")",
".",
"fit_transform",
"(",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_pca'",
"]",
")",
"tsne2d",
"=",
"dt",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_tsne'",
"]",
"=",
"tsne2d"
] | Wrapper for sklearn's t-SNE implementation.
See sklearn for the t-SNE documentation. All arguments are the same
with the exception that 'metric' defaults to 'correlation' here, and to the
SAM distance metric when no input matrix is given (a precomputed distance
matrix is therefore not expected by default). | [
"Wrapper",
"for",
"sklearn",
"s",
"t",
"-",
"SNE",
"implementation",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1011-L1026 | train |
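When no matrix is passed, the wrapper embeds the stored PCA and writes the result into adata.obsm; a short sketch (my_matrix is a placeholder for any precomputed feature matrix). run_umap in the next record behaves the same way, writing to adata.obsm['X_umap'].

sam.run_tsne()                            # stores the layout in sam.adata.obsm['X_tsne']
coords = sam.adata.obsm['X_tsne']         # (n_cells, 2) array
layout = sam.run_tsne(X=my_matrix)        # with explicit input the coordinates are returned instead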
atarashansky/self-assembling-manifold | SAM.py | SAM.run_umap | def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d | python | def run_umap(self, X=None, metric=None, **kwargs):
"""Wrapper for umap-learn.
See https://github.com/lmcinnes/umap for the documentation
and source code.
"""
import umap as umap
if metric is None:
metric = self.distance
if(X is not None):
umap_obj = umap.UMAP(metric=metric, **kwargs)
dt = umap_obj.fit_transform(X)
return dt
else:
umap_obj = umap.UMAP(metric=metric, **kwargs)
umap2d = umap_obj.fit_transform(self.adata.obsm['X_pca'])
self.adata.obsm['X_umap'] = umap2d | [
"def",
"run_umap",
"(",
"self",
",",
"X",
"=",
"None",
",",
"metric",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"umap",
"as",
"umap",
"if",
"metric",
"is",
"None",
":",
"metric",
"=",
"self",
".",
"distance",
"if",
"(",
"X",
"is",
"not",
"None",
")",
":",
"umap_obj",
"=",
"umap",
".",
"UMAP",
"(",
"metric",
"=",
"metric",
",",
"*",
"*",
"kwargs",
")",
"dt",
"=",
"umap_obj",
".",
"fit_transform",
"(",
"X",
")",
"return",
"dt",
"else",
":",
"umap_obj",
"=",
"umap",
".",
"UMAP",
"(",
"metric",
"=",
"metric",
",",
"*",
"*",
"kwargs",
")",
"umap2d",
"=",
"umap_obj",
".",
"fit_transform",
"(",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_pca'",
"]",
")",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_umap'",
"]",
"=",
"umap2d"
] | Wrapper for umap-learn.
See https://github.com/lmcinnes/umap for the documentation
and source code. | [
"Wrapper",
"for",
"umap",
"-",
"learn",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1028-L1048 | train |
atarashansky/self-assembling-manifold | SAM.py | SAM.scatter | def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (UMAP if present, otherwise t-SNE). Can take on
the adata.obsm keys 'X_umap' or 'X_tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or '
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes) | python | def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (UMAP if present, otherwise t-SNE). Can take on
the adata.obsm keys 'X_umap' or 'X_tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or '
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes) | [
"def",
"scatter",
"(",
"self",
",",
"projection",
"=",
"None",
",",
"c",
"=",
"None",
",",
"cmap",
"=",
"'rainbow'",
",",
"linewidth",
"=",
"0.0",
",",
"edgecolor",
"=",
"'k'",
",",
"axes",
"=",
"None",
",",
"colorbar",
"=",
"True",
",",
"s",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"not",
"PLOTTING",
")",
":",
"print",
"(",
"\"matplotlib not installed!\"",
")",
"else",
":",
"if",
"(",
"isinstance",
"(",
"projection",
",",
"str",
")",
")",
":",
"try",
":",
"dt",
"=",
"self",
".",
"adata",
".",
"obsm",
"[",
"projection",
"]",
"except",
"KeyError",
":",
"print",
"(",
"'Please create a projection first using run_umap or'",
"'run_tsne'",
")",
"elif",
"(",
"projection",
"is",
"None",
")",
":",
"try",
":",
"dt",
"=",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_umap'",
"]",
"except",
"KeyError",
":",
"try",
":",
"dt",
"=",
"self",
".",
"adata",
".",
"obsm",
"[",
"'X_tsne'",
"]",
"except",
"KeyError",
":",
"print",
"(",
"\"Please create either a t-SNE or UMAP projection\"",
"\"first.\"",
")",
"return",
"else",
":",
"dt",
"=",
"projection",
"if",
"(",
"axes",
"is",
"None",
")",
":",
"plt",
".",
"figure",
"(",
")",
"axes",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"(",
"c",
"is",
"None",
")",
":",
"plt",
".",
"scatter",
"(",
"dt",
"[",
":",
",",
"0",
"]",
",",
"dt",
"[",
":",
",",
"1",
"]",
",",
"s",
"=",
"s",
",",
"linewidth",
"=",
"linewidth",
",",
"edgecolor",
"=",
"edgecolor",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"if",
"isinstance",
"(",
"c",
",",
"str",
")",
":",
"try",
":",
"c",
"=",
"self",
".",
"adata",
".",
"obs",
"[",
"c",
"]",
".",
"get_values",
"(",
")",
"except",
"KeyError",
":",
"0",
"# do nothing",
"if",
"(",
"(",
"isinstance",
"(",
"c",
"[",
"0",
"]",
",",
"str",
")",
"or",
"isinstance",
"(",
"c",
"[",
"0",
"]",
",",
"np",
".",
"str_",
")",
")",
"and",
"(",
"isinstance",
"(",
"c",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"c",
",",
"list",
")",
")",
")",
":",
"i",
"=",
"ut",
".",
"convert_annotations",
"(",
"c",
")",
"ui",
",",
"ai",
"=",
"np",
".",
"unique",
"(",
"i",
",",
"return_index",
"=",
"True",
")",
"cax",
"=",
"axes",
".",
"scatter",
"(",
"dt",
"[",
":",
",",
"0",
"]",
",",
"dt",
"[",
":",
",",
"1",
"]",
",",
"c",
"=",
"i",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"s",
",",
"linewidth",
"=",
"linewidth",
",",
"edgecolor",
"=",
"edgecolor",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"colorbar",
")",
":",
"cbar",
"=",
"plt",
".",
"colorbar",
"(",
"cax",
",",
"ax",
"=",
"axes",
",",
"ticks",
"=",
"ui",
")",
"cbar",
".",
"ax",
".",
"set_yticklabels",
"(",
"c",
"[",
"ai",
"]",
")",
"else",
":",
"if",
"not",
"(",
"isinstance",
"(",
"c",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"c",
",",
"list",
")",
")",
":",
"colorbar",
"=",
"False",
"i",
"=",
"c",
"cax",
"=",
"axes",
".",
"scatter",
"(",
"dt",
"[",
":",
",",
"0",
"]",
",",
"dt",
"[",
":",
",",
"1",
"]",
",",
"c",
"=",
"i",
",",
"cmap",
"=",
"cmap",
",",
"s",
"=",
"s",
",",
"linewidth",
"=",
"linewidth",
",",
"edgecolor",
"=",
"edgecolor",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"colorbar",
")",
":",
"plt",
".",
"colorbar",
"(",
"cax",
",",
"ax",
"=",
"axes",
")"
] | Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (UMAP if present, otherwise t-SNE). Can take on
the adata.obsm keys 'X_umap' or 'X_tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used. | [
"Display",
"a",
"scatter",
"plot",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1077-L1175 | train |
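A few representative scatter() calls; 'annotations' assumes load_annotations() has been run, and score_per_cell is a placeholder for any float vector of length n_cells.

sam.run_umap()                            # make sure a projection exists
sam.scatter()                             # plain scatter on the UMAP layout
sam.scatter(c='annotations', s=15)        # categorical colouring from sam.adata.obs
sam.scatter(c=score_per_cell, cmap='viridis', colorbar=True)   # continuous colouring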
atarashansky/self-assembling-manifold | SAM.py | SAM.show_gene_expression | def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improve
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name) | python | def show_gene_expression(self, gene, avg=True, axes=None, **kwargs):
"""Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improve
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible.
"""
all_gene_names = np.array(list(self.adata.var_names))
cell_names = np.array(list(self.adata.obs_names))
all_cell_names = np.array(list(self.adata_raw.obs_names))
idx = np.where(all_gene_names == gene)[0]
name = gene
if(idx.size == 0):
print(
"Gene note found in the filtered dataset. Note that genes "
"are case sensitive.")
return
if(avg):
a = self.adata.layers['X_knn_avg'][:, idx].toarray().flatten()
if a.sum() == 0:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
else:
a = np.log2(self.adata_raw.X[np.in1d(
all_cell_names, cell_names), :][:,
idx].toarray().flatten() + 1)
if axes is None:
plt.figure()
axes = plt.gca()
self.scatter(c=a, axes=axes, **kwargs)
axes.set_title(name) | [
"def",
"show_gene_expression",
"(",
"self",
",",
"gene",
",",
"avg",
"=",
"True",
",",
"axes",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"all_gene_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"var_names",
")",
")",
"cell_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"obs_names",
")",
")",
"all_cell_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata_raw",
".",
"obs_names",
")",
")",
"idx",
"=",
"np",
".",
"where",
"(",
"all_gene_names",
"==",
"gene",
")",
"[",
"0",
"]",
"name",
"=",
"gene",
"if",
"(",
"idx",
".",
"size",
"==",
"0",
")",
":",
"print",
"(",
"\"Gene note found in the filtered dataset. Note that genes \"",
"\"are case sensitive.\"",
")",
"return",
"if",
"(",
"avg",
")",
":",
"a",
"=",
"self",
".",
"adata",
".",
"layers",
"[",
"'X_knn_avg'",
"]",
"[",
":",
",",
"idx",
"]",
".",
"toarray",
"(",
")",
".",
"flatten",
"(",
")",
"if",
"a",
".",
"sum",
"(",
")",
"==",
"0",
":",
"a",
"=",
"np",
".",
"log2",
"(",
"self",
".",
"adata_raw",
".",
"X",
"[",
"np",
".",
"in1d",
"(",
"all_cell_names",
",",
"cell_names",
")",
",",
":",
"]",
"[",
":",
",",
"idx",
"]",
".",
"toarray",
"(",
")",
".",
"flatten",
"(",
")",
"+",
"1",
")",
"else",
":",
"a",
"=",
"np",
".",
"log2",
"(",
"self",
".",
"adata_raw",
".",
"X",
"[",
"np",
".",
"in1d",
"(",
"all_cell_names",
",",
"cell_names",
")",
",",
":",
"]",
"[",
":",
",",
"idx",
"]",
".",
"toarray",
"(",
")",
".",
"flatten",
"(",
")",
"+",
"1",
")",
"if",
"axes",
"is",
"None",
":",
"plt",
".",
"figure",
"(",
")",
"axes",
"=",
"plt",
".",
"gca",
"(",
")",
"self",
".",
"scatter",
"(",
"c",
"=",
"a",
",",
"axes",
"=",
"axes",
",",
"*",
"*",
"kwargs",
")",
"axes",
".",
"set_title",
"(",
"name",
")"
] | Display a gene's expressions.
Displays a scatter plot using the SAM projection or another input
projection with a particular gene's expressions overlaid.
Parameters
----------
gene - string
a case-sensitive string indicating the gene expression pattern
to display.
avg - bool, optional, default True
If True, the plots use the k-nearest-neighbor-averaged expression
values to smooth out noisy expression patterns and improve
visualization.
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
**kwargs - all keyword arguments in 'SAM.scatter' are eligible. | [
"Display",
"a",
"gene",
"s",
"expressions",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1177-L1231 | train |
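A usage sketch for show_gene_expression; the gene symbol is a placeholder that must exist in the filtered dataset, and `sam` is again an assumed SAM instance rather than something constructed in this record.

    # Sketch only: 'Actb' is an illustrative gene name, not taken from the source.
    import matplotlib.pyplot as plt

    def plot_gene(sam, gene='Actb'):
        fig, ax = plt.subplots()
        # avg=True overlays k-nearest-neighbor-averaged expression on the embedding;
        # remaining keyword arguments are passed through to SAM.scatter.
        sam.show_gene_expression(gene, avg=True, axes=ax, projection='umap')
        plt.show()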
atarashansky/self-assembling-manifold | SAM.py | SAM.louvain_clustering | def louvain_clustering(self, X=None, res=1, method='modularity'):
"""Runs Louvain clustering using the vtraag implementation. Assumes
that 'louvain' optional dependency is installed.
Parameters
----------
res - float, optional, default 1
The resolution parameter which tunes the number of clusters Louvain
finds.
method - str, optional, default 'modularity'
Can be 'modularity' or 'significance', which are two different
optimizing functions in the Louvain algorithm.
"""
if X is None:
X = self.adata.uns['neighbors']['connectivities']
save = True
else:
if not sp.isspmatrix_csr(X):
X = sp.csr_matrix(X)
save = False
import igraph as ig
import louvain
adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except BaseException:
pass
if method == 'significance':
cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
else:
cl = louvain.find_partition(
g,
louvain.RBConfigurationVertexPartition,
resolution_parameter=res)
if save:
self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
else:
return np.array(cl.membership) | python | def louvain_clustering(self, X=None, res=1, method='modularity'):
"""Runs Louvain clustering using the vtraag implementation. Assumes
that 'louvain' optional dependency is installed.
Parameters
----------
res - float, optional, default 1
The resolution parameter which tunes the number of clusters Louvain
finds.
method - str, optional, default 'modularity'
Can be 'modularity' or 'significance', which are two different
optimizing functions in the Louvain algorithm.
"""
if X is None:
X = self.adata.uns['neighbors']['connectivities']
save = True
else:
if not sp.isspmatrix_csr(X):
X = sp.csr_matrix(X)
save = False
import igraph as ig
import louvain
adjacency = sparse_knn(X.dot(X.T) / self.k, self.k).tocsr()
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=True)
g.add_vertices(adjacency.shape[0])
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except BaseException:
pass
if method == 'significance':
cl = louvain.find_partition(g, louvain.SignificanceVertexPartition)
else:
cl = louvain.find_partition(
g,
louvain.RBConfigurationVertexPartition,
resolution_parameter=res)
if save:
self.adata.obs['louvain_clusters'] = pd.Categorical(np.array(cl.membership))
else:
return np.array(cl.membership) | [
"def",
"louvain_clustering",
"(",
"self",
",",
"X",
"=",
"None",
",",
"res",
"=",
"1",
",",
"method",
"=",
"'modularity'",
")",
":",
"if",
"X",
"is",
"None",
":",
"X",
"=",
"self",
".",
"adata",
".",
"uns",
"[",
"'neighbors'",
"]",
"[",
"'connectivities'",
"]",
"save",
"=",
"True",
"else",
":",
"if",
"not",
"sp",
".",
"isspmatrix_csr",
"(",
"X",
")",
":",
"X",
"=",
"sp",
".",
"csr_matrix",
"(",
"X",
")",
"save",
"=",
"False",
"import",
"igraph",
"as",
"ig",
"import",
"louvain",
"adjacency",
"=",
"sparse_knn",
"(",
"X",
".",
"dot",
"(",
"X",
".",
"T",
")",
"/",
"self",
".",
"k",
",",
"self",
".",
"k",
")",
".",
"tocsr",
"(",
")",
"sources",
",",
"targets",
"=",
"adjacency",
".",
"nonzero",
"(",
")",
"weights",
"=",
"adjacency",
"[",
"sources",
",",
"targets",
"]",
"if",
"isinstance",
"(",
"weights",
",",
"np",
".",
"matrix",
")",
":",
"weights",
"=",
"weights",
".",
"A1",
"g",
"=",
"ig",
".",
"Graph",
"(",
"directed",
"=",
"True",
")",
"g",
".",
"add_vertices",
"(",
"adjacency",
".",
"shape",
"[",
"0",
"]",
")",
"g",
".",
"add_edges",
"(",
"list",
"(",
"zip",
"(",
"sources",
",",
"targets",
")",
")",
")",
"try",
":",
"g",
".",
"es",
"[",
"'weight'",
"]",
"=",
"weights",
"except",
"BaseException",
":",
"pass",
"if",
"method",
"==",
"'significance'",
":",
"cl",
"=",
"louvain",
".",
"find_partition",
"(",
"g",
",",
"louvain",
".",
"SignificanceVertexPartition",
")",
"else",
":",
"cl",
"=",
"louvain",
".",
"find_partition",
"(",
"g",
",",
"louvain",
".",
"RBConfigurationVertexPartition",
",",
"resolution_parameter",
"=",
"res",
")",
"if",
"save",
":",
"self",
".",
"adata",
".",
"obs",
"[",
"'louvain_clusters'",
"]",
"=",
"pd",
".",
"Categorical",
"(",
"np",
".",
"array",
"(",
"cl",
".",
"membership",
")",
")",
"else",
":",
"return",
"np",
".",
"array",
"(",
"cl",
".",
"membership",
")"
] | Runs Louvain clustering using the vtraag implementation. Assumes
that 'louvain' optional dependency is installed.
Parameters
----------
res - float, optional, default 1
The resolution parameter which tunes the number of clusters Louvain
finds.
method - str, optional, default 'modularity'
Can be 'modularity' or 'significance', which are two different
optimizing functions in the Louvain algorithm. | [
"Runs",
"Louvain",
"clustering",
"using",
"the",
"vtraag",
"implementation",
".",
"Assumes",
"that",
"louvain",
"optional",
"dependency",
"is",
"installed",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1265-L1316 | train |
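A sketch of calling the clustering method above. It assumes the optional 'louvain' and 'python-igraph' dependencies are installed and that the SAM neighbor graph ('connectivities') has already been computed; the resolution value is arbitrary.

    # Sketch only: `sam` is an assumed SAM instance with a computed neighbor graph.
    def cluster_cells(sam, resolution=1.5):
        sam.louvain_clustering(res=resolution)        # labels stored in adata.obs
        labels = sam.adata.obs['louvain_clusters']
        print(labels.value_counts())
        return labels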
atarashansky/self-assembling-manifold | SAM.py | SAM.kmeans_clustering | def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as input for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl | python | def kmeans_clustering(self, numc, X=None, npcs=15):
"""Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as input for k-means
clustering.
"""
from sklearn.cluster import KMeans
if X is None:
D_sub = self.adata.uns['X_processed']
X = (
D_sub -
D_sub.mean(0)).dot(
self.adata.uns['pca_obj'].components_[
:npcs,
:].T)
save = True
else:
save = False
cl = KMeans(n_clusters=numc).fit_predict(Normalizer().fit_transform(X))
if save:
self.adata.obs['kmeans_clusters'] = pd.Categorical(cl)
else:
return cl | [
"def",
"kmeans_clustering",
"(",
"self",
",",
"numc",
",",
"X",
"=",
"None",
",",
"npcs",
"=",
"15",
")",
":",
"from",
"sklearn",
".",
"cluster",
"import",
"KMeans",
"if",
"X",
"is",
"None",
":",
"D_sub",
"=",
"self",
".",
"adata",
".",
"uns",
"[",
"'X_processed'",
"]",
"X",
"=",
"(",
"D_sub",
"-",
"D_sub",
".",
"mean",
"(",
"0",
")",
")",
".",
"dot",
"(",
"self",
".",
"adata",
".",
"uns",
"[",
"'pca_obj'",
"]",
".",
"components_",
"[",
":",
"npcs",
",",
":",
"]",
".",
"T",
")",
"save",
"=",
"True",
"else",
":",
"save",
"=",
"False",
"cl",
"=",
"KMeans",
"(",
"n_clusters",
"=",
"numc",
")",
".",
"fit_predict",
"(",
"Normalizer",
"(",
")",
".",
"fit_transform",
"(",
"X",
")",
")",
"if",
"save",
":",
"self",
".",
"adata",
".",
"obs",
"[",
"'kmeans_clusters'",
"]",
"=",
"pd",
".",
"Categorical",
"(",
"cl",
")",
"else",
":",
"return",
"cl"
] | Performs k-means clustering.
Parameters
----------
numc - int
Number of clusters
npcs - int, optional, default 15
Number of principal components to use as input for k-means
clustering. | [
"Performs",
"k",
"-",
"means",
"clustering",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1318-L1350 | train |
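A sketch for the k-means wrapper; the cluster count is arbitrary, and the call assumes the PCA results ('X_processed' and 'pca_obj') are already present in adata.uns from earlier SAM processing, with scikit-learn installed.

    # Sketch only: `sam` is an assumed, already-processed SAM instance.
    def cluster_kmeans(sam, n_clusters=8):
        sam.kmeans_clustering(n_clusters, npcs=15)    # labels stored in adata.obs
        return sam.adata.obs['kmeans_clusters']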
atarashansky/self-assembling-manifold | SAM.py | SAM.identify_marker_genes_rf | def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers | python | def identify_marker_genes_rf(self, labels=None, clusters=None,
n_genes=4000):
"""
Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
from sklearn.ensemble import RandomForestClassifier
markers = {}
if clusters == None:
lblsu = np.unique(lbls)
else:
lblsu = np.unique(clusters)
indices = np.argsort(-self.adata.var['weights'].values)
X = self.adata.layers['X_disp'][:, indices[:n_genes]].toarray()
for K in range(lblsu.size):
print(K)
y = np.zeros(lbls.size)
y[lbls == lblsu[K]] = 1
clf = RandomForestClassifier(n_estimators=100, max_depth=None,
random_state=0)
clf.fit(X, y)
idx = np.argsort(-clf.feature_importances_)
markers[lblsu[K]] = self.adata.uns['ranked_genes'][idx]
if clusters is None:
self.adata.uns['marker_genes_rf'] = markers
return markers | [
"def",
"identify_marker_genes_rf",
"(",
"self",
",",
"labels",
"=",
"None",
",",
"clusters",
"=",
"None",
",",
"n_genes",
"=",
"4000",
")",
":",
"if",
"(",
"labels",
"is",
"None",
")",
":",
"try",
":",
"keys",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"obs_keys",
"(",
")",
")",
")",
"lbls",
"=",
"self",
".",
"adata",
".",
"obs",
"[",
"ut",
".",
"search_string",
"(",
"keys",
",",
"'_clusters'",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
".",
"get_values",
"(",
")",
"except",
"KeyError",
":",
"print",
"(",
"\"Please generate cluster labels first or set the \"",
"\"'labels' keyword argument.\"",
")",
"return",
"elif",
"isinstance",
"(",
"labels",
",",
"str",
")",
":",
"lbls",
"=",
"self",
".",
"adata",
".",
"obs",
"[",
"labels",
"]",
".",
"get_values",
"(",
")",
".",
"flatten",
"(",
")",
"else",
":",
"lbls",
"=",
"labels",
"from",
"sklearn",
".",
"ensemble",
"import",
"RandomForestClassifier",
"markers",
"=",
"{",
"}",
"if",
"clusters",
"==",
"None",
":",
"lblsu",
"=",
"np",
".",
"unique",
"(",
"lbls",
")",
"else",
":",
"lblsu",
"=",
"np",
".",
"unique",
"(",
"clusters",
")",
"indices",
"=",
"np",
".",
"argsort",
"(",
"-",
"self",
".",
"adata",
".",
"var",
"[",
"'weights'",
"]",
".",
"values",
")",
"X",
"=",
"self",
".",
"adata",
".",
"layers",
"[",
"'X_disp'",
"]",
"[",
":",
",",
"indices",
"[",
":",
"n_genes",
"]",
"]",
".",
"toarray",
"(",
")",
"for",
"K",
"in",
"range",
"(",
"lblsu",
".",
"size",
")",
":",
"print",
"(",
"K",
")",
"y",
"=",
"np",
".",
"zeros",
"(",
"lbls",
".",
"size",
")",
"y",
"[",
"lbls",
"==",
"lblsu",
"[",
"K",
"]",
"]",
"=",
"1",
"clf",
"=",
"RandomForestClassifier",
"(",
"n_estimators",
"=",
"100",
",",
"max_depth",
"=",
"None",
",",
"random_state",
"=",
"0",
")",
"clf",
".",
"fit",
"(",
"X",
",",
"y",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
"-",
"clf",
".",
"feature_importances_",
")",
"markers",
"[",
"lblsu",
"[",
"K",
"]",
"]",
"=",
"self",
".",
"adata",
".",
"uns",
"[",
"'ranked_genes'",
"]",
"[",
"idx",
"]",
"if",
"clusters",
"is",
"None",
":",
"self",
".",
"adata",
".",
"uns",
"[",
"'marker_genes_rf'",
"]",
"=",
"markers",
"return",
"markers"
] | Ranks marker genes for each cluster using a random forest
classification approach.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
clusters - int or array-like, default None
A number or vector corresponding to the specific cluster ID(s)
for which marker genes will be calculated. If None, marker genes
will be computed for all clusters.
n_genes - int, optional, default 4000
By default, trains the classifier on the top 4000 SAM-weighted
genes. | [
"Ranks",
"marker",
"genes",
"for",
"each",
"cluster",
"using",
"a",
"random",
"forest",
"classification",
"approach",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1404-L1471 | train |
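A sketch for the random-forest marker ranking above; it assumes scikit-learn is installed and that a clustering step has already produced a 'louvain_clusters' column (the label key and gene count below are illustrative).

    # Sketch only: `sam` is an assumed SAM instance with cluster labels computed.
    def top_markers_rf(sam, n_top=10):
        markers = sam.identify_marker_genes_rf(labels='louvain_clusters',
                                               n_genes=2000)
        # markers maps each cluster label to genes ranked by feature importance;
        # with clusters=None the dict is also stored in adata.uns['marker_genes_rf'].
        return {cl: genes[:n_top] for cl, genes in markers.items()}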
atarashansky/self-assembling-manifold | SAM.py | SAM.identify_marker_genes_corr | def identify_marker_genes_corr(self, labels=None, n_genes=4000):
"""
Ranking marker genes based on their respective magnitudes in the
correlation dot products with cluster-specific reference expression
profiles.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
n_genes - int, optional, default 4000
By default, computes correlations on the top 4000 SAM-weighted genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
w=self.adata.var['weights'].values
s = StandardScaler()
idxg = np.argsort(-w)[:n_genes]
y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
all_gene_names = np.array(list(self.adata.var_names))[idxg]
markers = {}
lblsu=np.unique(lbls)
for i in lblsu:
Gcells = np.array(list(self.adata.obs_names[lbls==i]))
z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
ref = z1.mean(0)
ref = (ref-ref.mean())/ref.std()
g2 = (m1*ref).mean(0)
markers[i] = all_gene_names[np.argsort(-g2)]
self.adata.uns['marker_genes_corr'] = markers
return markers | python | def identify_marker_genes_corr(self, labels=None, n_genes=4000):
"""
Ranking marker genes based on their respective magnitudes in the
correlation dot products with cluster-specific reference expression
profiles.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
n_genes - int, optional, default 4000
By default, computes correlations on the top 4000 SAM-weighted genes.
"""
if(labels is None):
try:
keys = np.array(list(self.adata.obs_keys()))
lbls = self.adata.obs[ut.search_string(
keys, '_clusters')[0][0]].get_values()
except KeyError:
print("Please generate cluster labels first or set the "
"'labels' keyword argument.")
return
elif isinstance(labels, str):
lbls = self.adata.obs[labels].get_values().flatten()
else:
lbls = labels
w=self.adata.var['weights'].values
s = StandardScaler()
idxg = np.argsort(-w)[:n_genes]
y1=s.fit_transform(self.adata.layers['X_disp'][:,idxg].A)*w[idxg]
all_gene_names = np.array(list(self.adata.var_names))[idxg]
markers = {}
lblsu=np.unique(lbls)
for i in lblsu:
Gcells = np.array(list(self.adata.obs_names[lbls==i]))
z1 = y1[np.in1d(self.adata.obs_names,Gcells),:]
m1 = (z1 - z1.mean(1)[:,None])/z1.std(1)[:,None]
ref = z1.mean(0)
ref = (ref-ref.mean())/ref.std()
g2 = (m1*ref).mean(0)
markers[i] = all_gene_names[np.argsort(-g2)]
self.adata.uns['marker_genes_corr'] = markers
return markers | [
"def",
"identify_marker_genes_corr",
"(",
"self",
",",
"labels",
"=",
"None",
",",
"n_genes",
"=",
"4000",
")",
":",
"if",
"(",
"labels",
"is",
"None",
")",
":",
"try",
":",
"keys",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"obs_keys",
"(",
")",
")",
")",
"lbls",
"=",
"self",
".",
"adata",
".",
"obs",
"[",
"ut",
".",
"search_string",
"(",
"keys",
",",
"'_clusters'",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
".",
"get_values",
"(",
")",
"except",
"KeyError",
":",
"print",
"(",
"\"Please generate cluster labels first or set the \"",
"\"'labels' keyword argument.\"",
")",
"return",
"elif",
"isinstance",
"(",
"labels",
",",
"str",
")",
":",
"lbls",
"=",
"self",
".",
"adata",
".",
"obs",
"[",
"labels",
"]",
".",
"get_values",
"(",
")",
".",
"flatten",
"(",
")",
"else",
":",
"lbls",
"=",
"labels",
"w",
"=",
"self",
".",
"adata",
".",
"var",
"[",
"'weights'",
"]",
".",
"values",
"s",
"=",
"StandardScaler",
"(",
")",
"idxg",
"=",
"np",
".",
"argsort",
"(",
"-",
"w",
")",
"[",
":",
"n_genes",
"]",
"y1",
"=",
"s",
".",
"fit_transform",
"(",
"self",
".",
"adata",
".",
"layers",
"[",
"'X_disp'",
"]",
"[",
":",
",",
"idxg",
"]",
".",
"A",
")",
"*",
"w",
"[",
"idxg",
"]",
"all_gene_names",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"var_names",
")",
")",
"[",
"idxg",
"]",
"markers",
"=",
"{",
"}",
"lblsu",
"=",
"np",
".",
"unique",
"(",
"lbls",
")",
"for",
"i",
"in",
"lblsu",
":",
"Gcells",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"self",
".",
"adata",
".",
"obs_names",
"[",
"lbls",
"==",
"i",
"]",
")",
")",
"z1",
"=",
"y1",
"[",
"np",
".",
"in1d",
"(",
"self",
".",
"adata",
".",
"obs_names",
",",
"Gcells",
")",
",",
":",
"]",
"m1",
"=",
"(",
"z1",
"-",
"z1",
".",
"mean",
"(",
"1",
")",
"[",
":",
",",
"None",
"]",
")",
"/",
"z1",
".",
"std",
"(",
"1",
")",
"[",
":",
",",
"None",
"]",
"ref",
"=",
"z1",
".",
"mean",
"(",
"0",
")",
"ref",
"=",
"(",
"ref",
"-",
"ref",
".",
"mean",
"(",
")",
")",
"/",
"ref",
".",
"std",
"(",
")",
"g2",
"=",
"(",
"m1",
"*",
"ref",
")",
".",
"mean",
"(",
"0",
")",
"markers",
"[",
"i",
"]",
"=",
"all_gene_names",
"[",
"np",
".",
"argsort",
"(",
"-",
"g2",
")",
"]",
"self",
".",
"adata",
".",
"uns",
"[",
"'marker_genes_corr'",
"]",
"=",
"markers",
"return",
"markers"
] | Ranking marker genes based on their respective magnitudes in the
correlation dot products with cluster-specific reference expression
profiles.
Parameters
----------
labels - numpy.array or str, optional, default None
Cluster labels to use for marker gene identification. If None,
assumes that one of SAM's clustering algorithms has been run. Can
be a string (i.e. 'louvain_clusters', 'kmeans_clusters', etc) to
specify specific cluster labels in adata.obs.
n_genes - int, optional, default 4000
By default, computes correlations on the top 4000 SAM-weighted genes. | [
"Ranking",
"marker",
"genes",
"based",
"on",
"their",
"respective",
"magnitudes",
"in",
"the",
"correlation",
"dot",
"products",
"with",
"cluster",
"-",
"specific",
"reference",
"expression",
"profiles",
"."
] | 4db4793f65af62047492327716932ba81a67f679 | https://github.com/atarashansky/self-assembling-manifold/blob/4db4793f65af62047492327716932ba81a67f679/SAM.py#L1521-L1575 | train |
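A sketch for the correlation-based marker ranking; the same assumptions as in the previous sketches apply (an already-processed `sam` with cluster labels), and the label key is illustrative.

    # Sketch only.
    def top_markers_corr(sam, n_top=5):
        markers = sam.identify_marker_genes_corr(labels='louvain_clusters')
        for cluster, genes in markers.items():
            print(cluster, list(genes[:n_top]))   # highest-correlation markers per cluster
        return markers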
cocoakekeyu/cancan | cancan/ability.py | Ability.add | def add(self, action=None, subject=None, **conditions):
"""
Add an ability that is allowed, using up to three arguments.
The first one is the action you're setting the permission for,
the second one is the class of object you're setting it on.
the third one is the conditions the subject must match, or a function
to be tested.
self.add('update', Article)
self.add('update', Article, user_id=1)
self.add('update', Article, user_id=1, title='hello')
self.add('update', Article, function=test_title)
"""
self.add_rule(Rule(True, action, subject, **conditions)) | python | def add(self, action=None, subject=None, **conditions):
"""
Add an ability that is allowed, using up to three arguments.
The first one is the action you're setting the permission for,
the second one is the class of object you're setting it on.
the third one is the conditions the subject must match, or a function
to be tested.
self.add('update', Article)
self.add('update', Article, user_id=1)
self.add('update', Article, user_id=1, title='hello')
self.add('update', Article, function=test_title)
"""
self.add_rule(Rule(True, action, subject, **conditions)) | [
"def",
"add",
"(",
"self",
",",
"action",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"*",
"*",
"conditions",
")",
":",
"self",
".",
"add_rule",
"(",
"Rule",
"(",
"True",
",",
"action",
",",
"subject",
",",
"*",
"*",
"conditions",
")",
")"
] | Add an ability that is allowed, using up to three arguments.
The first one is the action you're setting the permission for,
the second one is the class of object you're setting it on.
the third one is the conditions the subject must match, or a function
to be tested.
self.add('update', Article)
self.add('update', Article, user_id=1)
self.add('update', Article, user_id=1, title='hello')
self.add('update', Article, function=test_title) | [
"Add",
"ability",
"are",
"allowed",
"using",
"two",
"arguments",
"."
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L21-L35 | train |
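A sketch of defining rules with add (and the addnot variant documented in the next record). `Article` and `user` are illustrative stand-ins, and the function receives an already-constructed ability object, since the Ability constructor is not part of these records.

    # Illustrative only: Article and user are assumed application objects.
    class Article(object):
        def __init__(self, user_id, title):
            self.user_id = user_id
            self.title = title

    def define_abilities(ability, user):
        ability.add('read', Article)                      # anyone may read articles
        ability.add('update', Article, user_id=user.id)   # update only own articles
        ability.addnot('delete', Article)                 # deleting is never allowed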
cocoakekeyu/cancan | cancan/ability.py | Ability.addnot | def addnot(self, action=None, subject=None, **conditions):
"""
Defines an ability which cannot be done.
"""
self.add_rule(Rule(False, action, subject, **conditions)) | python | def addnot(self, action=None, subject=None, **conditions):
"""
Defines an ability which cannot be done.
"""
self.add_rule(Rule(False, action, subject, **conditions)) | [
"def",
"addnot",
"(",
"self",
",",
"action",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"*",
"*",
"conditions",
")",
":",
"self",
".",
"add_rule",
"(",
"Rule",
"(",
"False",
",",
"action",
",",
"subject",
",",
"*",
"*",
"conditions",
")",
")"
] | Defines an ability which cannot be done. | [
"Defines",
"an",
"ability",
"which",
"cannot",
"be",
"done",
"."
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L37-L41 | train |
cocoakekeyu/cancan | cancan/ability.py | Ability.can | def can(self, action, subject, **conditions):
"""
Check if the user has permission to perform a given action on an object
"""
for rule in self.relevant_rules_for_match(action, subject):
if rule.matches_conditions(action, subject, **conditions):
return rule.base_behavior
return False | python | def can(self, action, subject, **conditions):
"""
Check if the user has permission to perform a given action on an object
"""
for rule in self.relevant_rules_for_match(action, subject):
if rule.matches_conditions(action, subject, **conditions):
return rule.base_behavior
return False | [
"def",
"can",
"(",
"self",
",",
"action",
",",
"subject",
",",
"*",
"*",
"conditions",
")",
":",
"for",
"rule",
"in",
"self",
".",
"relevant_rules_for_match",
"(",
"action",
",",
"subject",
")",
":",
"if",
"rule",
".",
"matches_conditions",
"(",
"action",
",",
"subject",
",",
"*",
"*",
"conditions",
")",
":",
"return",
"rule",
".",
"base_behavior",
"return",
"False"
] | Check if the user has permission to perform a given action on an object | [
"Check",
"if",
"the",
"user",
"has",
"permission",
"to",
"perform",
"a",
"given",
"action",
"on",
"an",
"object"
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L46-L53 | train |
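A sketch of querying permissions with can, continuing the stand-in `Article` class and configured `ability` from the earlier sketch; the conditions mirror the ones used when the rules were added.

    # Illustrative only: `ability` is assumed to be configured as sketched earlier.
    def check(ability):
        print(ability.can('read', Article))               # True for everyone
        print(ability.can('update', Article, user_id=1))  # True only if a matching rule applies
        print(ability.can('delete', Article))             # expected False if the addnot rule applies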
cocoakekeyu/cancan | cancan/ability.py | Ability.relevant_rules_for_match | def relevant_rules_for_match(self, action, subject):
"""retrive match action and subject"""
matches = []
for rule in self.rules:
rule.expanded_actions = self.expand_actions(rule.actions)
if rule.is_relevant(action, subject):
matches.append(rule)
return self.optimize(matches[::-1]) | python | def relevant_rules_for_match(self, action, subject):
"""retrive match action and subject"""
matches = []
for rule in self.rules:
rule.expanded_actions = self.expand_actions(rule.actions)
if rule.is_relevant(action, subject):
matches.append(rule)
return self.optimize(matches[::-1]) | [
"def",
"relevant_rules_for_match",
"(",
"self",
",",
"action",
",",
"subject",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"rule",
"in",
"self",
".",
"rules",
":",
"rule",
".",
"expanded_actions",
"=",
"self",
".",
"expand_actions",
"(",
"rule",
".",
"actions",
")",
"if",
"rule",
".",
"is_relevant",
"(",
"action",
",",
"subject",
")",
":",
"matches",
".",
"append",
"(",
"rule",
")",
"return",
"self",
".",
"optimize",
"(",
"matches",
"[",
":",
":",
"-",
"1",
"]",
")"
] | retrieve rules matching the given action and subject | [
"retrive",
"match",
"action",
"and",
"subject"
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L62-L70 | train |
cocoakekeyu/cancan | cancan/ability.py | Ability.expand_actions | def expand_actions(self, actions):
"""
Accepts an array of actions and returns an array of actions which match
"""
r = []
for action in actions:
r.append(action)
if action in self.aliased_actions:
r.extend(self.aliased_actions[action])
return r | python | def expand_actions(self, actions):
"""
Accepts an array of actions and returns an array of actions which match
"""
r = []
for action in actions:
r.append(action)
if action in self.aliased_actions:
r.extend(self.aliased_actions[action])
return r | [
"def",
"expand_actions",
"(",
"self",
",",
"actions",
")",
":",
"r",
"=",
"[",
"]",
"for",
"action",
"in",
"actions",
":",
"r",
".",
"append",
"(",
"action",
")",
"if",
"action",
"in",
"self",
".",
"aliased_actions",
":",
"r",
".",
"extend",
"(",
"self",
".",
"aliased_actions",
"[",
"action",
"]",
")",
"return",
"r"
] | Accepts an array of actions and returns an array of actions which match | [
"Accepts",
"an",
"array",
"of",
"actions",
"and",
"returns",
"an",
"array",
"of",
"actions",
"which",
"match"
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L81-L90 | train |
cocoakekeyu/cancan | cancan/ability.py | Ability.alias_action | def alias_action(self, *args, **kwargs):
"""
Alias one or more actions into another one.
self.alias_action('create', 'read', 'update', 'delete', to='crud')
"""
to = kwargs.pop('to', None)
if not to:
return
error_message = ("You can't specify target ({}) as alias "
"because it is real action name".format(to)
)
if to in list(itertools.chain(*self.aliased_actions.values())):
raise Exception(error_message)
self.aliased_actions.setdefault(to, []).extend(args) | python | def alias_action(self, *args, **kwargs):
"""
Alias one or more actions into another one.
self.alias_action('create', 'read', 'update', 'delete', to='crud')
"""
to = kwargs.pop('to', None)
if not to:
return
error_message = ("You can't specify target ({}) as alias "
"because it is real action name".format(to)
)
if to in list(itertools.chain(*self.aliased_actions.values())):
raise Exception(error_message)
self.aliased_actions.setdefault(to, []).extend(args) | [
"def",
"alias_action",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"to",
"=",
"kwargs",
".",
"pop",
"(",
"'to'",
",",
"None",
")",
"if",
"not",
"to",
":",
"return",
"error_message",
"=",
"(",
"\"You can't specify target ({}) as alias \"",
"\"because it is real action name\"",
".",
"format",
"(",
"to",
")",
")",
"if",
"to",
"in",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"self",
".",
"aliased_actions",
".",
"values",
"(",
")",
")",
")",
":",
"raise",
"Exception",
"(",
"error_message",
")",
"self",
".",
"aliased_actions",
".",
"setdefault",
"(",
"to",
",",
"[",
"]",
")",
".",
"extend",
"(",
"args",
")"
] | Alias one or more actions into another one.
self.alias_action('create', 'read', 'update', 'delete', to='crud') | [
"Alias",
"one",
"or",
"more",
"actions",
"into",
"another",
"one",
"."
] | f198d560e6e008e6c5580ba55581a939a5d544ed | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L92-L108 | train |
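A sketch of action aliasing based on the docstring example; once 'crud' is registered as an alias, a single rule on 'crud' is expanded to cover the individual actions when rules are matched. `Article` is the same assumed stand-in model.

    # Illustrative only.
    def define_crud(ability):
        ability.alias_action('create', 'read', 'update', 'delete', to='crud')
        ability.add('crud', Article)         # one rule covering all four actions
        return ability.can('read', Article)  # expected True via the 'crud' alias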
suurjaak/InputScope | inputscope/db.py | fetch | def fetch(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT and fetch all."""
return select(table, cols, where, group, order, limit, **kwargs).fetchall() | python | def fetch(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT and fetch all."""
return select(table, cols, where, group, order, limit, **kwargs).fetchall() | [
"def",
"fetch",
"(",
"table",
",",
"cols",
"=",
"\"*\"",
",",
"where",
"=",
"(",
")",
",",
"group",
"=",
"\"\"",
",",
"order",
"=",
"(",
")",
",",
"limit",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"select",
"(",
"table",
",",
"cols",
",",
"where",
",",
"group",
",",
"order",
",",
"limit",
",",
"*",
"*",
"kwargs",
")",
".",
"fetchall",
"(",
")"
] | Convenience wrapper for database SELECT and fetch all. | [
"Convenience",
"wrapper",
"for",
"database",
"SELECT",
"and",
"fetch",
"all",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L24-L26 | train |
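A usage sketch for the fetch/fetchone wrappers. These records do not show how the module's connection is configured, so the calls below assume that has been done elsewhere; the table and column names are purely illustrative.

    # Hypothetical usage: 'moves' and 'day' are made-up table/column names.
    from inputscope import db

    rows = db.fetch('moves', day='2024-01-01')        # all matching rows
    first = db.fetchone('moves', day='2024-01-01')    # a single row or None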
suurjaak/InputScope | inputscope/db.py | fetchone | def fetchone(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT and fetch one."""
return select(table, cols, where, group, order, limit, **kwargs).fetchone() | python | def fetchone(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT and fetch one."""
return select(table, cols, where, group, order, limit, **kwargs).fetchone() | [
"def",
"fetchone",
"(",
"table",
",",
"cols",
"=",
"\"*\"",
",",
"where",
"=",
"(",
")",
",",
"group",
"=",
"\"\"",
",",
"order",
"=",
"(",
")",
",",
"limit",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"select",
"(",
"table",
",",
"cols",
",",
"where",
",",
"group",
",",
"order",
",",
"limit",
",",
"*",
"*",
"kwargs",
")",
".",
"fetchone",
"(",
")"
] | Convenience wrapper for database SELECT and fetch one. | [
"Convenience",
"wrapper",
"for",
"database",
"SELECT",
"and",
"fetch",
"one",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L29-L31 | train |
suurjaak/InputScope | inputscope/db.py | insert | def insert(table, values=(), **kwargs):
"""Convenience wrapper for database INSERT."""
values = dict(values, **kwargs).items()
sql, args = makeSQL("INSERT", table, values=values)
return execute(sql, args).lastrowid | python | def insert(table, values=(), **kwargs):
"""Convenience wrapper for database INSERT."""
values = dict(values, **kwargs).items()
sql, args = makeSQL("INSERT", table, values=values)
return execute(sql, args).lastrowid | [
"def",
"insert",
"(",
"table",
",",
"values",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"values",
"=",
"dict",
"(",
"values",
",",
"*",
"*",
"kwargs",
")",
".",
"items",
"(",
")",
"sql",
",",
"args",
"=",
"makeSQL",
"(",
"\"INSERT\"",
",",
"table",
",",
"values",
"=",
"values",
")",
"return",
"execute",
"(",
"sql",
",",
"args",
")",
".",
"lastrowid"
] | Convenience wrapper for database INSERT. | [
"Convenience",
"wrapper",
"for",
"database",
"INSERT",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L34-L38 | train |
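A sketch for insert: keyword arguments are merged into the value dict and the new row id is returned. The table and column names remain illustrative, with the same configuration assumption as above.

    # Hypothetical usage.
    from inputscope import db

    new_id = db.insert('moves', day='2024-01-01', x=100, y=200)
    print(new_id)    # lastrowid of the inserted row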
suurjaak/InputScope | inputscope/db.py | select | def select(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("SELECT", table, cols, where, group, order, limit)
return execute(sql, args) | python | def select(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("SELECT", table, cols, where, group, order, limit)
return execute(sql, args) | [
"def",
"select",
"(",
"table",
",",
"cols",
"=",
"\"*\"",
",",
"where",
"=",
"(",
")",
",",
"group",
"=",
"\"\"",
",",
"order",
"=",
"(",
")",
",",
"limit",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"where",
"=",
"dict",
"(",
"where",
",",
"*",
"*",
"kwargs",
")",
".",
"items",
"(",
")",
"sql",
",",
"args",
"=",
"makeSQL",
"(",
"\"SELECT\"",
",",
"table",
",",
"cols",
",",
"where",
",",
"group",
",",
"order",
",",
"limit",
")",
"return",
"execute",
"(",
"sql",
",",
"args",
")"
] | Convenience wrapper for database SELECT. | [
"Convenience",
"wrapper",
"for",
"database",
"SELECT",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L41-L45 | train |
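A sketch for select, which returns the cursor rather than fetched rows, so results can be iterated lazily. The aggregate column string and dict-style rows are assumptions: they presume makeSQL accepts a raw column string and that execution goes through a cursor created by make_cursor (shown further below).

    # Hypothetical usage.
    from inputscope import db

    cursor = db.select('moves', cols='day, COUNT(*) AS cnt', group='day')
    for row in cursor:                    # rows come back as dicts via the row factory
        print(row['day'], row['cnt'])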
suurjaak/InputScope | inputscope/db.py | update | def update(table, values, where=(), **kwargs):
"""Convenience wrapper for database UPDATE."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("UPDATE", table, values=values, where=where)
return execute(sql, args).rowcount | python | def update(table, values, where=(), **kwargs):
"""Convenience wrapper for database UPDATE."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("UPDATE", table, values=values, where=where)
return execute(sql, args).rowcount | [
"def",
"update",
"(",
"table",
",",
"values",
",",
"where",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"where",
"=",
"dict",
"(",
"where",
",",
"*",
"*",
"kwargs",
")",
".",
"items",
"(",
")",
"sql",
",",
"args",
"=",
"makeSQL",
"(",
"\"UPDATE\"",
",",
"table",
",",
"values",
"=",
"values",
",",
"where",
"=",
"where",
")",
"return",
"execute",
"(",
"sql",
",",
"args",
")",
".",
"rowcount"
] | Convenience wrapper for database UPDATE. | [
"Convenience",
"wrapper",
"for",
"database",
"UPDATE",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L48-L52 | train |
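A sketch for update (and the delete wrapper documented in the next record); both return the number of affected rows. Table, column, and id values are illustrative.

    # Hypothetical usage.
    from inputscope import db

    changed = db.update('app_events', {'type': 'stop'}, id=42)
    removed = db.delete('app_events', id=42)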
suurjaak/InputScope | inputscope/db.py | delete | def delete(table, where=(), **kwargs):
"""Convenience wrapper for database DELETE."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("DELETE", table, where=where)
return execute(sql, args).rowcount | python | def delete(table, where=(), **kwargs):
"""Convenience wrapper for database DELETE."""
where = dict(where, **kwargs).items()
sql, args = makeSQL("DELETE", table, where=where)
return execute(sql, args).rowcount | [
"def",
"delete",
"(",
"table",
",",
"where",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"where",
"=",
"dict",
"(",
"where",
",",
"*",
"*",
"kwargs",
")",
".",
"items",
"(",
")",
"sql",
",",
"args",
"=",
"makeSQL",
"(",
"\"DELETE\"",
",",
"table",
",",
"where",
"=",
"where",
")",
"return",
"execute",
"(",
"sql",
",",
"args",
")",
".",
"rowcount"
] | Convenience wrapper for database DELETE. | [
"Convenience",
"wrapper",
"for",
"database",
"DELETE",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L55-L59 | train |
suurjaak/InputScope | inputscope/db.py | make_cursor | def make_cursor(path, init_statements=(), _connectioncache={}):
"""Returns a cursor to the database, making new connection if not cached."""
connection = _connectioncache.get(path)
if not connection:
is_new = not os.path.exists(path) or not os.path.getsize(path)
try: is_new and os.makedirs(os.path.dirname(path))
except OSError: pass
connection = sqlite3.connect(path, isolation_level=None,
check_same_thread=False, detect_types=sqlite3.PARSE_DECLTYPES)
for x in init_statements or (): connection.execute(x)
try: is_new and ":memory:" not in path.lower() and os.chmod(path, 0707)
except OSError: pass
connection.row_factory = lambda cur, row: dict(sqlite3.Row(cur, row))
_connectioncache[path] = connection
return connection.cursor() | python | def make_cursor(path, init_statements=(), _connectioncache={}):
"""Returns a cursor to the database, making new connection if not cached."""
connection = _connectioncache.get(path)
if not connection:
is_new = not os.path.exists(path) or not os.path.getsize(path)
try: is_new and os.makedirs(os.path.dirname(path))
except OSError: pass
connection = sqlite3.connect(path, isolation_level=None,
check_same_thread=False, detect_types=sqlite3.PARSE_DECLTYPES)
for x in init_statements or (): connection.execute(x)
try: is_new and ":memory:" not in path.lower() and os.chmod(path, 0707)
except OSError: pass
connection.row_factory = lambda cur, row: dict(sqlite3.Row(cur, row))
_connectioncache[path] = connection
return connection.cursor() | [
"def",
"make_cursor",
"(",
"path",
",",
"init_statements",
"=",
"(",
")",
",",
"_connectioncache",
"=",
"{",
"}",
")",
":",
"connection",
"=",
"_connectioncache",
".",
"get",
"(",
"path",
")",
"if",
"not",
"connection",
":",
"is_new",
"=",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"or",
"not",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"try",
":",
"is_new",
"and",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
"except",
"OSError",
":",
"pass",
"connection",
"=",
"sqlite3",
".",
"connect",
"(",
"path",
",",
"isolation_level",
"=",
"None",
",",
"check_same_thread",
"=",
"False",
",",
"detect_types",
"=",
"sqlite3",
".",
"PARSE_DECLTYPES",
")",
"for",
"x",
"in",
"init_statements",
"or",
"(",
")",
":",
"connection",
".",
"execute",
"(",
"x",
")",
"try",
":",
"is_new",
"and",
"\":memory:\"",
"not",
"in",
"path",
".",
"lower",
"(",
")",
"and",
"os",
".",
"chmod",
"(",
"path",
",",
"0707",
")",
"except",
"OSError",
":",
"pass",
"connection",
".",
"row_factory",
"=",
"lambda",
"cur",
",",
"row",
":",
"dict",
"(",
"sqlite3",
".",
"Row",
"(",
"cur",
",",
"row",
")",
")",
"_connectioncache",
"[",
"path",
"]",
"=",
"connection",
"return",
"connection",
".",
"cursor",
"(",
")"
] | Returns a cursor to the database, making new connection if not cached. | [
"Returns",
"a",
"cursor",
"to",
"the",
"database",
"making",
"new",
"connection",
"if",
"not",
"cached",
"."
] | 245ff045163a1995e8cd5ac558d0a93024eb86eb | https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/db.py#L73-L87 | train |
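A sketch of using make_cursor directly; the database path and schema are illustrative. Connections are cached per path, and the dict row factory makes fetched rows plain dicts.

    # Hypothetical usage.
    from inputscope import db

    cursor = db.make_cursor('/tmp/example.db', init_statements=[
        'CREATE TABLE IF NOT EXISTS moves (id INTEGER PRIMARY KEY, day TEXT)'])
    cursor.execute('INSERT INTO moves (day) VALUES (?)', ('2024-01-01',))
    print(cursor.execute('SELECT * FROM moves').fetchone())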