Dataset schema (single configuration; language class: python; partition class: train):

Column | Type | Notes
---|---|---
repo | string | lengths 7-55
path | string | lengths 4-127
func_name | string | lengths 1-88
original_string | string | lengths 75-19.8k
language | string | 1 class (value: python)
code | string | lengths 75-19.8k; identical to original_string in these rows
code_tokens | list | token list of code
docstring | string | lengths 3-17.3k
docstring_tokens | list | token list of the docstring's first paragraph
sha | string | length 40
url | string | lengths 87-242
partition | string | 1 class (value: train)

Each record below shows repo | path | func_name | language | partition, then sha | url, then the function source.
romanz/trezor-agent | libagent/util.py | ExpiringCache.get | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L271-L275

def get(self):
    """Returns existing value, or None if deadline has expired."""
    if self.timer() > self.deadline:
        self.value = None
    return self.value

romanz/trezor-agent | libagent/util.py | ExpiringCache.set | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L277-L280

def set(self, value):
    """Set new value and reset the deadline for expiration."""
    self.deadline = self.timer() + self.duration
    self.value = value

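A usage sketch for the two ExpiringCache methods above. The class constructor is not part of this dump, so the duration/timer initialization below is an assumption (main() further down passes a cache-expiry duration in seconds):

```python
import time

# Hypothetical stand-in mirroring the two methods shown above; only
# the __init__ defaults are assumed, get()/set() match the rows.
class ExpiringCache:
    def __init__(self, seconds, timer=time.monotonic):  # assumed signature
        self.duration = seconds
        self.timer = timer
        self.deadline = 0
        self.value = None

    def set(self, value):
        self.deadline = self.timer() + self.duration
        self.value = value

    def get(self):
        if self.timer() > self.deadline:
            self.value = None
        return self.value

cache = ExpiringCache(seconds=60)
cache.set('passphrase-ack')
assert cache.get() == 'passphrase-ack'  # still valid within 60 seconds
```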
romanz/trezor-agent | libagent/gpg/agent.py | sig_encode | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L24-L28

def sig_encode(r, s):
    """Serialize ECDSA signature data into GPG S-expression."""
    r = util.assuan_serialize(util.num2bytes(r, 32))
    s = util.assuan_serialize(util.num2bytes(s, 32))
    return b'(7:sig-val(5:ecdsa(1:r32:' + r + b')(1:s32:' + s + b')))'

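A worked example of the S-expression layout produced by sig_encode(). util.num2bytes and util.assuan_serialize are not in this dump; the big-endian stand-in below is an assumption, and Assuan escaping is omitted:

```python
# Assumed stand-in for util.num2bytes: fixed-width big-endian encoding.
def num2bytes(value, width):
    return value.to_bytes(width, 'big')

r, s = 7, 11  # toy signature values
blob = (b'(7:sig-val(5:ecdsa(1:r32:' + num2bytes(r, 32) +
        b')(1:s32:' + num2bytes(s, 32) + b')))')
# Each "N:" prefix is the canonical S-expression length of the token that
# follows: "7:sig-val", "5:ecdsa", "1:r" + "32:<32 bytes>", "1:s" + "32:<32 bytes>".
```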
romanz/trezor-agent | libagent/gpg/agent.py | parse_ecdh | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L37-L49

def parse_ecdh(line):
    """Parse ECDH request and return remote public key."""
    prefix, line = line.split(b' ', 1)
    assert prefix == b'D'
    exp, leftover = keyring.parse(keyring.unescape(line))
    log.debug('ECDH s-exp: %r', exp)
    assert not leftover
    label, exp = exp
    assert label == b'enc-val'
    assert exp[0] == b'ecdh'
    items = exp[1:]
    log.debug('ECDH parameters: %r', items)
    return dict(items)[b'e']

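A sketch of the parsed s-expression shape that parse_ecdh() expects from keyring.parse(), which is not included in this dump. The nested-tuple representation and the sample field values are assumptions:

```python
# Assumed parser output: a 2-element (label, body) pair, where the body is
# (b'ecdh', (name, value), ...) and b'e' holds the ephemeral public key.
exp = (b'enc-val', (b'ecdh', (b's', b'...'), (b'e', b'\x04' + b'\x00' * 64)))

label, exp = exp
assert label == b'enc-val'
assert exp[0] == b'ecdh'
items = exp[1:]
remote_pubkey = dict(items)[b'e']  # same lookup as the row above
```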
romanz/trezor-agent | libagent/gpg/agent.py | Handler.handle_getinfo | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L129-L143

def handle_getinfo(self, conn, args):
    """Handle some of the GETINFO messages."""
    result = None
    if args[0] == b'version':
        result = self.version
    elif args[0] == b's2k_count':
        # Use highest number of S2K iterations.
        # https://www.gnupg.org/documentation/manuals/gnupg/OpenPGP-Options.html
        # https://tools.ietf.org/html/rfc4880#section-3.7.1.3
        result = '{}'.format(64 << 20).encode('ascii')
    else:
        log.warning('Unknown GETINFO command: %s', args)

    if result:
        keyring.sendline(conn, b'D ' + result)

romanz/trezor-agent | libagent/gpg/agent.py | Handler.handle_scd | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L145-L152

def handle_scd(self, conn, args):
    """No support for smart-card device protocol."""
    reply = {
        (b'GETINFO', b'version'): self.version,
    }.get(args)
    if reply is None:
        raise AgentError(b'ERR 100696144 No such device <SCD>')
    keyring.sendline(conn, b'D ' + reply)

romanz/trezor-agent | libagent/gpg/agent.py | Handler.get_identity | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L155-L176

def get_identity(self, keygrip):
    """
    Returns device.interface.Identity that matches specified keygrip.

    In case of missing keygrip, KeyError will be raised.
    """
    keygrip_bytes = binascii.unhexlify(keygrip)
    pubkey_dict, user_ids = decode.load_by_keygrip(
        pubkey_bytes=self.pubkey_bytes, keygrip=keygrip_bytes)
    # We assume the first user ID is used to generate TREZOR-based GPG keys.
    user_id = user_ids[0]['value'].decode('utf-8')
    curve_name = protocol.get_curve_name_by_oid(pubkey_dict['curve_oid'])
    ecdh = (pubkey_dict['algo'] == protocol.ECDH_ALGO_ID)

    identity = client.create_identity(user_id=user_id, curve_name=curve_name)
    verifying_key = self.client.pubkey(identity=identity, ecdh=ecdh)
    pubkey = protocol.PublicKey(
        curve_name=curve_name, created=pubkey_dict['created'],
        verifying_key=verifying_key, ecdh=ecdh)
    assert pubkey.key_id() == pubkey_dict['key_id']
    assert pubkey.keygrip() == keygrip_bytes
    return identity

romanz/trezor-agent | libagent/gpg/agent.py | Handler.pksign | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L178-L186

def pksign(self, conn):
    """Sign a message digest using a private EC key."""
    log.debug('signing %r digest (algo #%s)', self.digest, self.algo)
    identity = self.get_identity(keygrip=self.keygrip)
    r, s = self.client.sign(identity=identity,
                            digest=binascii.unhexlify(self.digest))
    result = sig_encode(r, s)
    log.debug('result: %r', result)
    keyring.sendline(conn, b'D ' + result)

romanz/trezor-agent | libagent/gpg/agent.py | Handler.pkdecrypt | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L188-L199

def pkdecrypt(self, conn):
    """Handle decryption using ECDH."""
    for msg in [b'S INQUIRE_MAXLEN 4096', b'INQUIRE CIPHERTEXT']:
        keyring.sendline(conn, msg)

    line = keyring.recvline(conn)
    assert keyring.recvline(conn) == b'END'
    remote_pubkey = parse_ecdh(line)
    identity = self.get_identity(keygrip=self.keygrip)
    ec_point = self.client.ecdh(identity=identity, pubkey=remote_pubkey)

    keyring.sendline(conn, b'D ' + _serialize_point(ec_point))

romanz/trezor-agent | libagent/gpg/agent.py | Handler.have_key | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L201-L210

def have_key(self, *keygrips):
    """Check if any keygrip corresponds to a TREZOR-based key."""
    for keygrip in keygrips:
        try:
            self.get_identity(keygrip=keygrip)
            break
        except KeyError as e:
            log.warning('HAVEKEY(%s) failed: %s', keygrip, e)
    else:
        raise AgentError(b'ERR 67108881 No secret key <GPG Agent>')

romanz/trezor-agent | libagent/gpg/agent.py | Handler.set_hash | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L216-L219

def set_hash(self, algo, digest):
    """Set algorithm ID and hexadecimal digest for next operation."""
    self.algo = algo
    self.digest = digest

romanz/trezor-agent | libagent/gpg/agent.py | Handler.handle | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L221-L247

def handle(self, conn):
    """Handle connection from GPG binary using the ASSUAN protocol."""
    keyring.sendline(conn, b'OK')
    for line in keyring.iterlines(conn):
        parts = line.split(b' ')
        command = parts[0]
        args = tuple(parts[1:])

        if command == b'BYE':
            return
        elif command == b'KILLAGENT':
            keyring.sendline(conn, b'OK')
            raise AgentStop()

        if command not in self.handlers:
            log.error('unknown request: %r', line)
            continue

        handler = self.handlers[command]
        if handler:
            try:
                handler(conn, args)
            except AgentError as e:
                msg, = e.args
                keyring.sendline(conn, msg)
                continue
        keyring.sendline(conn, b'OK')

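A minimal sketch of the line-based framing that handle() relies on. keyring.sendline() and keyring.iterlines() are not part of this dump, so these stand-ins assume newline-terminated byte lines over a socket-like connection:

```python
# Assumed framing helpers (hypothetical; the real ones live in
# libagent/gpg/keyring.py, which is outside this dump).
def sendline(conn, line):
    conn.sendall(line + b'\n')

def iterlines(conn):
    buf = b''
    while True:
        chunk = conn.recv(4096)
        if not chunk:          # peer closed the connection
            return
        buf += chunk
        while b'\n' in buf:    # yield each complete line
            line, buf = buf.split(b'\n', 1)
            yield line
```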
romanz/trezor-agent | libagent/device/fake_device.py | FakeDevice.connect | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/fake_device.py#L29-L40

def connect(self):
    """Return "dummy" connection."""
    log.critical('NEVER USE THIS CODE FOR REAL-LIFE USE-CASES!!!')
    log.critical('ONLY FOR DEBUGGING AND TESTING!!!')
    # The code below uses HARD-CODED secret key - and should be used ONLY
    # for GnuPG integration tests (e.g. when no real device is available).
    # pylint: disable=attribute-defined-outside-init
    self.secexp = 1
    self.sk = ecdsa.SigningKey.from_secret_exponent(
        secexp=self.secexp, curve=ecdsa.curves.NIST256p, hashfunc=hashlib.sha256)
    self.vk = self.sk.get_verifying_key()
    return self

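The hard-coded test key above can be reproduced directly with the ecdsa package:

```python
import hashlib
import ecdsa

# Same construction as FakeDevice.connect() above:
sk = ecdsa.SigningKey.from_secret_exponent(
    secexp=1, curve=ecdsa.curves.NIST256p, hashfunc=hashlib.sha256)
vk = sk.get_verifying_key()
# With secexp=1 the public key is the NIST P-256 generator point itself,
# so this "device" offers no security whatsoever - test use only.
print(vk.to_string().hex())
```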
romanz/trezor-agent | libagent/gpg/client.py | create_identity | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/client.py#L11-L15

def create_identity(user_id, curve_name):
    """Create GPG identity for hardware device."""
    result = interface.Identity(identity_str='gpg://', curve_name=curve_name)
    result.identity_dict['host'] = user_id
    return result

romanz/trezor-agent | libagent/gpg/client.py | Client.pubkey | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/client.py#L25-L30

def pubkey(self, identity, ecdh=False):
    """Return public key as VerifyingKey object."""
    with self.device:
        pubkey = self.device.pubkey(ecdh=ecdh, identity=identity)
        return formats.decompress_pubkey(
            pubkey=pubkey, curve_name=identity.curve_name)

romanz/trezor-agent | libagent/gpg/client.py | Client.sign | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/client.py#L32-L41

def sign(self, identity, digest):
    """Sign the digest and return a serialized signature."""
    log.info('please confirm GPG signature on %s for "%s"...',
             self.device, identity.to_string())
    if identity.curve_name == formats.CURVE_NIST256:
        digest = digest[:32]  # sign the first 256 bits
    log.debug('signing digest: %s', util.hexlify(digest))
    with self.device:
        sig = self.device.sign(blob=digest, identity=identity)
    return (util.bytes2num(sig[:32]), util.bytes2num(sig[32:]))

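How the 64-byte device signature splits into (r, s). util.bytes2num is not in this dump and is assumed here to be a plain big-endian conversion:

```python
# Assumed stand-in for util.bytes2num:
def bytes2num(blob):
    return int.from_bytes(blob, 'big')

sig = bytes(range(64))  # stand-in for the 64-byte blob from self.device.sign(...)
r, s = bytes2num(sig[:32]), bytes2num(sig[32:])
# r comes from the first 32 bytes, s from the last 32 - the same split
# that sig_encode(r, s) above re-serializes into the GPG S-expression.
```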
romanz/trezor-agent | libagent/gpg/client.py | Client.ecdh | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/client.py#L43-L48

def ecdh(self, identity, pubkey):
    """Derive shared secret using ECDH from remote public key."""
    log.info('please confirm GPG decryption on %s for "%s"...',
             self.device, identity.to_string())
    with self.device:
        return self.device.ecdh(pubkey=pubkey, identity=identity)

romanz/trezor-agent | libagent/device/trezor.py | Trezor.connect | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/trezor.py#L47-L69

def connect(self):
    """Enumerate and connect to the first available interface."""
    transport = self._defs.find_device()
    if not transport:
        raise interface.NotFoundError('{} not connected'.format(self))

    log.debug('using transport: %s', transport)
    for _ in range(5):  # Retry a few times in case of PIN failures
        connection = self._defs.Client(transport=transport,
                                       ui=self.ui,
                                       state=self.__class__.cached_state)
        self._verify_version(connection)

        try:
            connection.ping(msg='', pin_protection=True)  # unlock PIN
            return connection
        except (self._defs.PinException, ValueError) as e:
            log.error('Invalid PIN: %s, retrying...', e)
            continue
        except Exception as e:
            log.exception('ping failed: %s', e)
            connection.close()  # so the next HID open() will succeed
            raise

romanz/trezor-agent | libagent/device/interface.py | string_to_identity | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L26-L31

def string_to_identity(identity_str):
    """Parse string into Identity dictionary."""
    m = _identity_regexp.match(identity_str)
    result = m.groupdict()
    log.debug('parsed identity: %s', result)
    return {k: v for k, v in result.items() if v}

romanz/trezor-agent | libagent/device/interface.py | identity_to_string | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L34-L47

def identity_to_string(identity_dict):
    """Dump Identity dictionary into its string representation."""
    result = []
    if identity_dict.get('proto'):
        result.append(identity_dict['proto'] + '://')
    if identity_dict.get('user'):
        result.append(identity_dict['user'] + '@')
    result.append(identity_dict['host'])
    if identity_dict.get('port'):
        result.append(':' + identity_dict['port'])
    if identity_dict.get('path'):
        result.append(identity_dict['path'])
    log.debug('identity parts: %s', result)
    return ''.join(result)

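Since identity_to_string() is a pure function over the dict fields, its output can be checked directly (the field values below are made-up examples); string_to_identity() above performs the inverse mapping via _identity_regexp:

```python
# Round-trip example for the function above (assumes identity_to_string
# is importable from libagent.device.interface):
identity_dict = {'proto': 'ssh', 'user': 'roman', 'host': 'example.com',
                 'port': '2222', 'path': '/src'}
assert identity_to_string(identity_dict) == 'ssh://roman@example.com:2222/src'
```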
romanz/trezor-agent | libagent/device/interface.py | Identity.items | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L70-L73

def items(self):
    """Return a copy of identity_dict items."""
    return [(k, unidecode.unidecode(v))
            for k, v in self.identity_dict.items()]

romanz/trezor-agent | libagent/device/interface.py | Identity.to_bytes | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L75-L78

def to_bytes(self):
    """Transliterate Unicode into ASCII."""
    s = identity_to_string(self.identity_dict)
    return unidecode.unidecode(s).encode('ascii')

romanz/trezor-agent | libagent/device/interface.py | Identity.get_curve_name | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/interface.py#L97-L102

def get_curve_name(self, ecdh=False):
    """Return correct curve name for device operations."""
    if ecdh:
        return formats.get_ecdh_curve_name(self.curve_name)
    else:
        return self.curve_name

romanz/trezor-agent | libagent/ssh/__init__.py | serve | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L124-L150

def serve(handler, sock_path, timeout=UNIX_SOCKET_TIMEOUT):
    """
    Start the ssh-agent server on a UNIX-domain socket.

    If no connection is made during the specified timeout,
    retry until the context is over.
    """
    ssh_version = subprocess.check_output(['ssh', '-V'],
                                          stderr=subprocess.STDOUT)
    log.debug('local SSH version: %r', ssh_version)
    environ = {'SSH_AUTH_SOCK': sock_path, 'SSH_AGENT_PID': str(os.getpid())}
    device_mutex = threading.Lock()
    with server.unix_domain_socket_server(sock_path) as sock:
        sock.settimeout(timeout)
        quit_event = threading.Event()
        handle_conn = functools.partial(server.handle_connection,
                                        handler=handler,
                                        mutex=device_mutex)
        kwargs = dict(sock=sock,
                      handle_conn=handle_conn,
                      quit_event=quit_event)
        with server.spawn(server.server_thread, kwargs):
            try:
                yield environ
            finally:
                log.debug('closing server')
                quit_event.set()

romanz/trezor-agent | libagent/ssh/__init__.py | run_server | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L153-L166

def run_server(conn, command, sock_path, debug, timeout):
    """Common code for run_agent and run_git below."""
    ret = 0
    try:
        handler = protocol.Handler(conn=conn, debug=debug)
        with serve(handler=handler, sock_path=sock_path,
                   timeout=timeout) as env:
            if command:
                ret = server.run_process(command=command, environ=env)
            else:
                signal.pause()  # wait for signal (e.g. SIGINT)
    except KeyboardInterrupt:
        log.info('server stopped')
    return ret

romanz/trezor-agent | libagent/ssh/__init__.py | handle_connection_error | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L169-L178

def handle_connection_error(func):
    """Fail with non-zero exit code."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except device.interface.NotFoundError as e:
            log.error('Connection error (try unplugging and replugging your device): %s', e)
            return 1
    return wrapper

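A generic sketch of the decorator pattern used above; exit_code_on and its names are hypothetical stand-ins (the real decorator is hard-wired to device.interface.NotFoundError):

```python
import functools

# Generic form: convert a specific exception into a non-zero exit code,
# preserving the wrapped function's metadata via functools.wraps.
def exit_code_on(exc_type, code=1):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exc_type as e:
                print('error:', e)
                return code
        return wrapper
    return decorator

@exit_code_on(FileNotFoundError)
def read_config():
    return open('/nonexistent').read()

assert read_config() == 1  # exception mapped to exit code
```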
romanz/trezor-agent | libagent/ssh/__init__.py | parse_config | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L181-L185

def parse_config(contents):
    """Parse config file into a list of Identity objects."""
    for identity_str, curve_name in re.findall(r'\<(.*?)\|(.*?)\>', contents):
        yield device.interface.Identity(identity_str=identity_str,
                                        curve_name=curve_name)

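A self-contained demonstration of the regular expression used by parse_config(); each identity is written as <identity_string|curve_name>, and the values below are made-up examples:

```python
import re

contents = '<ssh://roman@example.com|nist256p1> <git@github.com|ed25519>'
print(re.findall(r'\<(.*?)\|(.*?)\>', contents))
# [('ssh://roman@example.com', 'nist256p1'), ('git@github.com', 'ed25519')]
```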
romanz/trezor-agent | libagent/ssh/__init__.py | main | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L255-L313

def main(device_type):
    """Run ssh-agent using given hardware client factory."""
    args = create_agent_parser(device_type=device_type).parse_args()
    util.setup_logging(verbosity=args.verbose, filename=args.log_file)

    public_keys = None
    filename = None
    if args.identity.startswith('/'):
        filename = args.identity
        contents = open(filename, 'rb').read().decode('utf-8')
        # Allow loading previously exported SSH public keys
        if filename.endswith('.pub'):
            public_keys = list(import_public_keys(contents))
        identities = list(parse_config(contents))
    else:
        identities = [device.interface.Identity(
            identity_str=args.identity, curve_name=args.ecdsa_curve_name)]
    for index, identity in enumerate(identities):
        identity.identity_dict['proto'] = u'ssh'
        log.info('identity #%d: %s', index, identity.to_string())

    # override default PIN/passphrase entry tools (relevant for TREZOR/Keepkey):
    device_type.ui = device.ui.UI(device_type=device_type, config=vars(args))
    device_type.ui.cached_passphrase_ack = util.ExpiringCache(
        args.cache_expiry_seconds)

    conn = JustInTimeConnection(
        conn_factory=lambda: client.Client(device_type()),
        identities=identities, public_keys=public_keys)

    sock_path = _get_sock_path(args)
    command = args.command
    context = _dummy_context()
    if args.connect:
        command = ['ssh'] + ssh_args(conn) + args.command
    elif args.mosh:
        command = ['mosh'] + mosh_args(conn) + args.command
    elif args.daemonize:
        out = 'SSH_AUTH_SOCK={0}; export SSH_AUTH_SOCK;\n'.format(sock_path)
        sys.stdout.write(out)
        sys.stdout.flush()
        context = daemon.DaemonContext()
        log.info('running the agent as a daemon on %s', sock_path)
    elif args.foreground:
        log.info('running the agent on %s', sock_path)

    use_shell = bool(args.shell)
    if use_shell:
        command = os.environ['SHELL']
        sys.stdin.close()

    if command or args.daemonize or args.foreground:
        with context:
            return run_server(conn=conn, command=command, sock_path=sock_path,
                              debug=args.debug, timeout=args.timeout)
    else:
        for pk in conn.public_keys():
            sys.stdout.write(pk)
        return 0

romanz/trezor-agent | libagent/ssh/__init__.py | JustInTimeConnection.parse_public_keys | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L213-L219

def parse_public_keys(self):
    """Parse SSH public keys into dictionaries."""
    public_keys = [formats.import_public_key(pk)
                   for pk in self.public_keys()]
    for pk, identity in zip(public_keys, self.identities):
        pk['identity'] = identity
    return public_keys

romanz/trezor-agent | libagent/ssh/__init__.py | JustInTimeConnection.public_keys_as_files | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L221-L230

def public_keys_as_files(self):
    """Store public keys as temporary SSH identity files."""
    if not self.public_keys_tempfiles:
        for pk in self.public_keys():
            f = tempfile.NamedTemporaryFile(prefix='trezor-ssh-pubkey-', mode='w')
            f.write(pk)
            f.flush()
            self.public_keys_tempfiles.append(f)

    return self.public_keys_tempfiles

romanz/trezor-agent | libagent/ssh/__init__.py | JustInTimeConnection.sign | python | train
513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L232-L235

def sign(self, blob, identity):
    """Sign a given blob using the specified identity on the device."""
    conn = self.conn_factory()
    return conn.sign_ssh_challenge(blob=blob, identity=identity)

romanz/trezor-agent | libagent/gpg/protocol.py | packet | def packet(tag, blob):
"""Create small GPG packet."""
assert len(blob) < 2**32
if len(blob) < 2**8:
length_type = 0
elif len(blob) < 2**16:
length_type = 1
else:
length_type = 2
fmt = ['>B', '>H', '>L'][length_type]
leading_byte = 0x80 | (tag << 2) | (length_type)
return struct.pack('>B', leading_byte) + util.prefix_len(fmt, blob) | python | def packet(tag, blob):
"""Create small GPG packet."""
assert len(blob) < 2**32
if len(blob) < 2**8:
length_type = 0
elif len(blob) < 2**16:
length_type = 1
else:
length_type = 2
fmt = ['>B', '>H', '>L'][length_type]
leading_byte = 0x80 | (tag << 2) | (length_type)
return struct.pack('>B', leading_byte) + util.prefix_len(fmt, blob) | [
"def",
"packet",
"(",
"tag",
",",
"blob",
")",
":",
"assert",
"len",
"(",
"blob",
")",
"<",
"2",
"**",
"32",
"if",
"len",
"(",
"blob",
")",
"<",
"2",
"**",
"8",
":",
"length_type",
"=",
"0",
"elif",
"len",
"(",
"blob",
")",
"<",
"2",
"**",
"16",
":",
"length_type",
"=",
"1",
"else",
":",
"length_type",
"=",
"2",
"fmt",
"=",
"[",
"'>B'",
",",
"'>H'",
",",
"'>L'",
"]",
"[",
"length_type",
"]",
"leading_byte",
"=",
"0x80",
"|",
"(",
"tag",
"<<",
"2",
")",
"|",
"(",
"length_type",
")",
"return",
"struct",
".",
"pack",
"(",
"'>B'",
",",
"leading_byte",
")",
"+",
"util",
".",
"prefix_len",
"(",
"fmt",
",",
"blob",
")"
] | Create small GPG packet. | [
"Create",
"small",
"GPG",
"packet",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L13-L26 | train |
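The leading byte assembled above is the RFC 4880 old packet format: bit 7 set, the tag in bits 5..2, and a two-bit length type selecting a one-, two- or four-byte length field. A self-contained check of that encoding:

import struct

def old_format_packet(tag, blob):
    # length_type: 0 -> 1-byte, 1 -> 2-byte, 2 -> 4-byte length field
    length_type = 0 if len(blob) < 2**8 else 1 if len(blob) < 2**16 else 2
    fmt = ['>B', '>H', '>L'][length_type]
    leading_byte = 0x80 | (tag << 2) | length_type
    return struct.pack('>B', leading_byte) + struct.pack(fmt, len(blob)) + blob

# tag 2 (signature) with a 3-byte body: header 0x88, length 0x03
assert old_format_packet(2, b'abc') == b'\x88\x03abc'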
romanz/trezor-agent | libagent/gpg/protocol.py | subpacket | def subpacket(subpacket_type, fmt, *values):
"""Create GPG subpacket."""
blob = struct.pack(fmt, *values) if values else fmt
return struct.pack('>B', subpacket_type) + blob | python | def subpacket(subpacket_type, fmt, *values):
"""Create GPG subpacket."""
blob = struct.pack(fmt, *values) if values else fmt
return struct.pack('>B', subpacket_type) + blob | [
"def",
"subpacket",
"(",
"subpacket_type",
",",
"fmt",
",",
"*",
"values",
")",
":",
"blob",
"=",
"struct",
".",
"pack",
"(",
"fmt",
",",
"*",
"values",
")",
"if",
"values",
"else",
"fmt",
"return",
"struct",
".",
"pack",
"(",
"'>B'",
",",
"subpacket_type",
")",
"+",
"blob"
] | Create GPG subpacket. | [
"Create",
"GPG",
"subpacket",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L29-L32 | train |
romanz/trezor-agent | libagent/gpg/protocol.py | subpacket_prefix_len | def subpacket_prefix_len(item):
"""Prefix subpacket length according to RFC 4880 section-5.2.3.1."""
n = len(item)
if n >= 8384:
prefix = b'\xFF' + struct.pack('>L', n)
elif n >= 192:
n = n - 192
prefix = struct.pack('BB', (n // 256) + 192, n % 256)
else:
prefix = struct.pack('B', n)
return prefix + item | python | def subpacket_prefix_len(item):
"""Prefix subpacket length according to RFC 4880 section-5.2.3.1."""
n = len(item)
if n >= 8384:
prefix = b'\xFF' + struct.pack('>L', n)
elif n >= 192:
n = n - 192
prefix = struct.pack('BB', (n // 256) + 192, n % 256)
else:
prefix = struct.pack('B', n)
return prefix + item | [
"def",
"subpacket_prefix_len",
"(",
"item",
")",
":",
"n",
"=",
"len",
"(",
"item",
")",
"if",
"n",
">=",
"8384",
":",
"prefix",
"=",
"b'\\xFF'",
"+",
"struct",
".",
"pack",
"(",
"'>L'",
",",
"n",
")",
"elif",
"n",
">=",
"192",
":",
"n",
"=",
"n",
"-",
"192",
"prefix",
"=",
"struct",
".",
"pack",
"(",
"'BB'",
",",
"(",
"n",
"//",
"256",
")",
"+",
"192",
",",
"n",
"%",
"256",
")",
"else",
":",
"prefix",
"=",
"struct",
".",
"pack",
"(",
"'B'",
",",
"n",
")",
"return",
"prefix",
"+",
"item"
] | Prefix subpacket length according to RFC 4880 section-5.2.3.1. | [
"Prefix",
"subpacket",
"length",
"according",
"to",
"RFC",
"4880",
"section",
"-",
"5",
".",
"2",
".",
"3",
".",
"1",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L55-L65 | train |
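The three branches above implement the one-, two- and five-octet subpacket length encodings from RFC 4880 section 5.2.3.1; a self-contained check:

import struct

def subpacket_len(n):
    if n >= 8384:
        return b'\xFF' + struct.pack('>L', n)  # five-octet form
    if n >= 192:
        n -= 192
        return struct.pack('BB', (n // 256) + 192, n % 256)  # two-octet form
    return struct.pack('B', n)  # one-octet form

assert subpacket_len(100) == b'\x64'
assert subpacket_len(1000) == b'\xc3\x28'
assert subpacket_len(100000) == b'\xff\x00\x01\x86\xa0'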
romanz/trezor-agent | libagent/gpg/protocol.py | subpackets | def subpackets(*items):
"""Serialize several GPG subpackets."""
prefixed = [subpacket_prefix_len(item) for item in items]
return util.prefix_len('>H', b''.join(prefixed)) | python | def subpackets(*items):
"""Serialize several GPG subpackets."""
prefixed = [subpacket_prefix_len(item) for item in items]
return util.prefix_len('>H', b''.join(prefixed)) | [
"def",
"subpackets",
"(",
"*",
"items",
")",
":",
"prefixed",
"=",
"[",
"subpacket_prefix_len",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"]",
"return",
"util",
".",
"prefix_len",
"(",
"'>H'",
",",
"b''",
".",
"join",
"(",
"prefixed",
")",
")"
] | Serialize several GPG subpackets. | [
"Serialize",
"several",
"GPG",
"subpackets",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L68-L71 | train |
romanz/trezor-agent | libagent/gpg/protocol.py | mpi | def mpi(value):
"""Serialize multipresicion integer using GPG format."""
bits = value.bit_length()
data_size = (bits + 7) // 8
data_bytes = bytearray(data_size)
for i in range(data_size):
data_bytes[i] = value & 0xFF
value = value >> 8
data_bytes.reverse()
return struct.pack('>H', bits) + bytes(data_bytes) | python | def mpi(value):
"""Serialize multipresicion integer using GPG format."""
bits = value.bit_length()
data_size = (bits + 7) // 8
data_bytes = bytearray(data_size)
for i in range(data_size):
data_bytes[i] = value & 0xFF
value = value >> 8
data_bytes.reverse()
return struct.pack('>H', bits) + bytes(data_bytes) | [
"def",
"mpi",
"(",
"value",
")",
":",
"bits",
"=",
"value",
".",
"bit_length",
"(",
")",
"data_size",
"=",
"(",
"bits",
"+",
"7",
")",
"//",
"8",
"data_bytes",
"=",
"bytearray",
"(",
"data_size",
")",
"for",
"i",
"in",
"range",
"(",
"data_size",
")",
":",
"data_bytes",
"[",
"i",
"]",
"=",
"value",
"&",
"0xFF",
"value",
"=",
"value",
">>",
"8",
"data_bytes",
".",
"reverse",
"(",
")",
"return",
"struct",
".",
"pack",
"(",
"'>H'",
",",
"bits",
")",
"+",
"bytes",
"(",
"data_bytes",
")"
] | Serialize multiprecision integer using GPG format. | [
"Serialize",
"multipresicion",
"integer",
"using",
"GPG",
"format",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L74-L84 | train |
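An MPI is the value's bit length as a big-endian uint16 followed by the minimal big-endian byte string; int.to_bytes produces the same payload as the manual reversal loop above:

import struct

def mpi(value):
    bits = value.bit_length()
    return struct.pack('>H', bits) + value.to_bytes((bits + 7) // 8, 'big')

assert mpi(511) == b'\x00\x09\x01\xff'  # 9 bits, payload 0x01 0xFF
assert mpi(1) == b'\x00\x01\x01'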
romanz/trezor-agent | libagent/gpg/protocol.py | keygrip_nist256 | def keygrip_nist256(vk):
"""Compute keygrip for NIST256 curve public keys."""
curve = vk.curve.curve
gen = vk.curve.generator
g = (4 << 512) | (gen.x() << 256) | gen.y()
point = vk.pubkey.point
q = (4 << 512) | (point.x() << 256) | point.y()
return _compute_keygrip([
['p', util.num2bytes(curve.p(), size=32)],
['a', util.num2bytes(curve.a() % curve.p(), size=32)],
['b', util.num2bytes(curve.b() % curve.p(), size=32)],
['g', util.num2bytes(g, size=65)],
['n', util.num2bytes(vk.curve.order, size=32)],
['q', util.num2bytes(q, size=65)],
]) | python | def keygrip_nist256(vk):
"""Compute keygrip for NIST256 curve public keys."""
curve = vk.curve.curve
gen = vk.curve.generator
g = (4 << 512) | (gen.x() << 256) | gen.y()
point = vk.pubkey.point
q = (4 << 512) | (point.x() << 256) | point.y()
return _compute_keygrip([
['p', util.num2bytes(curve.p(), size=32)],
['a', util.num2bytes(curve.a() % curve.p(), size=32)],
['b', util.num2bytes(curve.b() % curve.p(), size=32)],
['g', util.num2bytes(g, size=65)],
['n', util.num2bytes(vk.curve.order, size=32)],
['q', util.num2bytes(q, size=65)],
]) | [
"def",
"keygrip_nist256",
"(",
"vk",
")",
":",
"curve",
"=",
"vk",
".",
"curve",
".",
"curve",
"gen",
"=",
"vk",
".",
"curve",
".",
"generator",
"g",
"=",
"(",
"4",
"<<",
"512",
")",
"|",
"(",
"gen",
".",
"x",
"(",
")",
"<<",
"256",
")",
"|",
"gen",
".",
"y",
"(",
")",
"point",
"=",
"vk",
".",
"pubkey",
".",
"point",
"q",
"=",
"(",
"4",
"<<",
"512",
")",
"|",
"(",
"point",
".",
"x",
"(",
")",
"<<",
"256",
")",
"|",
"point",
".",
"y",
"(",
")",
"return",
"_compute_keygrip",
"(",
"[",
"[",
"'p'",
",",
"util",
".",
"num2bytes",
"(",
"curve",
".",
"p",
"(",
")",
",",
"size",
"=",
"32",
")",
"]",
",",
"[",
"'a'",
",",
"util",
".",
"num2bytes",
"(",
"curve",
".",
"a",
"(",
")",
"%",
"curve",
".",
"p",
"(",
")",
",",
"size",
"=",
"32",
")",
"]",
",",
"[",
"'b'",
",",
"util",
".",
"num2bytes",
"(",
"curve",
".",
"b",
"(",
")",
"%",
"curve",
".",
"p",
"(",
")",
",",
"size",
"=",
"32",
")",
"]",
",",
"[",
"'g'",
",",
"util",
".",
"num2bytes",
"(",
"g",
",",
"size",
"=",
"65",
")",
"]",
",",
"[",
"'n'",
",",
"util",
".",
"num2bytes",
"(",
"vk",
".",
"curve",
".",
"order",
",",
"size",
"=",
"32",
")",
"]",
",",
"[",
"'q'",
",",
"util",
".",
"num2bytes",
"(",
"q",
",",
"size",
"=",
"65",
")",
"]",
",",
"]",
")"
] | Compute keygrip for NIST256 curve public keys. | [
"Compute",
"keygrip",
"for",
"NIST256",
"curve",
"public",
"keys",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L107-L122 | train |
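The `(4 << 512) | (x << 256) | y` expressions pack an uncompressed SEC1 point (a 0x04 prefix followed by the 32-byte x and y coordinates) into one integer, which num2bytes then serializes as 65 bytes. A small illustration with toy coordinates:

def sec1_uncompressed(x, y):
    return (4 << 512) | (x << 256) | y

blob = sec1_uncompressed(1, 2).to_bytes(65, 'big')
assert blob[0] == 4
assert blob[1:33] == (1).to_bytes(32, 'big')
assert blob[33:] == (2).to_bytes(32, 'big')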
romanz/trezor-agent | libagent/gpg/protocol.py | keygrip_ed25519 | def keygrip_ed25519(vk):
"""Compute keygrip for Ed25519 public keys."""
# pylint: disable=line-too-long
return _compute_keygrip([
['p', util.num2bytes(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED, size=32)], # nopep8
['a', b'\x01'],
['b', util.num2bytes(0x2DFC9311D490018C7338BF8688861767FF8FF5B2BEBE27548A14B235ECA6874A, size=32)], # nopep8
['g', util.num2bytes(0x04216936D3CD6E53FEC0A4E231FDD6DC5C692CC7609525A7B2C9562D608F25D51A6666666666666666666666666666666666666666666666666666666666666658, size=65)], # nopep8
['n', util.num2bytes(0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED, size=32)], # nopep8
['q', vk.to_bytes()],
]) | python | def keygrip_ed25519(vk):
"""Compute keygrip for Ed25519 public keys."""
# pylint: disable=line-too-long
return _compute_keygrip([
['p', util.num2bytes(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED, size=32)], # nopep8
['a', b'\x01'],
['b', util.num2bytes(0x2DFC9311D490018C7338BF8688861767FF8FF5B2BEBE27548A14B235ECA6874A, size=32)], # nopep8
['g', util.num2bytes(0x04216936D3CD6E53FEC0A4E231FDD6DC5C692CC7609525A7B2C9562D608F25D51A6666666666666666666666666666666666666666666666666666666666666658, size=65)], # nopep8
['n', util.num2bytes(0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED, size=32)], # nopep8
['q', vk.to_bytes()],
]) | [
"def",
"keygrip_ed25519",
"(",
"vk",
")",
":",
"# pylint: disable=line-too-long",
"return",
"_compute_keygrip",
"(",
"[",
"[",
"'p'",
",",
"util",
".",
"num2bytes",
"(",
"0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
",",
"size",
"=",
"32",
")",
"]",
",",
"# nopep8",
"[",
"'a'",
",",
"b'\\x01'",
"]",
",",
"[",
"'b'",
",",
"util",
".",
"num2bytes",
"(",
"0x2DFC9311D490018C7338BF8688861767FF8FF5B2BEBE27548A14B235ECA6874A",
",",
"size",
"=",
"32",
")",
"]",
",",
"# nopep8",
"[",
"'g'",
",",
"util",
".",
"num2bytes",
"(",
"0x04216936D3CD6E53FEC0A4E231FDD6DC5C692CC7609525A7B2C9562D608F25D51A6666666666666666666666666666666666666666666666666666666666666658",
",",
"size",
"=",
"65",
")",
"]",
",",
"# nopep8",
"[",
"'n'",
",",
"util",
".",
"num2bytes",
"(",
"0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED",
",",
"size",
"=",
"32",
")",
"]",
",",
"# nopep8",
"[",
"'q'",
",",
"vk",
".",
"to_bytes",
"(",
")",
"]",
",",
"]",
")"
] | Compute keygrip for Ed25519 public keys. | [
"Compute",
"keygrip",
"for",
"Ed25519",
"public",
"keys",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L125-L135 | train |
romanz/trezor-agent | libagent/gpg/protocol.py | keygrip_curve25519 | def keygrip_curve25519(vk):
"""Compute keygrip for Curve25519 public keys."""
# pylint: disable=line-too-long
return _compute_keygrip([
['p', util.num2bytes(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED, size=32)], # nopep8
['a', b'\x01\xDB\x41'],
['b', b'\x01'],
['g', util.num2bytes(0x04000000000000000000000000000000000000000000000000000000000000000920ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9, size=65)], # nopep8
['n', util.num2bytes(0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED, size=32)], # nopep8
['q', vk.to_bytes()],
]) | python | def keygrip_curve25519(vk):
"""Compute keygrip for Curve25519 public keys."""
# pylint: disable=line-too-long
return _compute_keygrip([
['p', util.num2bytes(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED, size=32)], # nopep8
['a', b'\x01\xDB\x41'],
['b', b'\x01'],
['g', util.num2bytes(0x04000000000000000000000000000000000000000000000000000000000000000920ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9, size=65)], # nopep8
['n', util.num2bytes(0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED, size=32)], # nopep8
['q', vk.to_bytes()],
]) | [
"def",
"keygrip_curve25519",
"(",
"vk",
")",
":",
"# pylint: disable=line-too-long",
"return",
"_compute_keygrip",
"(",
"[",
"[",
"'p'",
",",
"util",
".",
"num2bytes",
"(",
"0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
",",
"size",
"=",
"32",
")",
"]",
",",
"# nopep8",
"[",
"'a'",
",",
"b'\\x01\\xDB\\x41'",
"]",
",",
"[",
"'b'",
",",
"b'\\x01'",
"]",
",",
"[",
"'g'",
",",
"util",
".",
"num2bytes",
"(",
"0x04000000000000000000000000000000000000000000000000000000000000000920ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9",
",",
"size",
"=",
"65",
")",
"]",
",",
"# nopep8",
"[",
"'n'",
",",
"util",
".",
"num2bytes",
"(",
"0x1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED",
",",
"size",
"=",
"32",
")",
"]",
",",
"# nopep8",
"[",
"'q'",
",",
"vk",
".",
"to_bytes",
"(",
")",
"]",
",",
"]",
")"
] | Compute keygrip for Curve25519 public keys. | [
"Compute",
"keygrip",
"for",
"Curve25519",
"public",
"keys",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L138-L148 | train |
romanz/trezor-agent | libagent/gpg/protocol.py | get_curve_name_by_oid | def get_curve_name_by_oid(oid):
"""Return curve name matching specified OID, or raise KeyError."""
for curve_name, info in SUPPORTED_CURVES.items():
if info['oid'] == oid:
return curve_name
raise KeyError('Unknown OID: {!r}'.format(oid)) | python | def get_curve_name_by_oid(oid):
"""Return curve name matching specified OID, or raise KeyError."""
for curve_name, info in SUPPORTED_CURVES.items():
if info['oid'] == oid:
return curve_name
raise KeyError('Unknown OID: {!r}'.format(oid)) | [
"def",
"get_curve_name_by_oid",
"(",
"oid",
")",
":",
"for",
"curve_name",
",",
"info",
"in",
"SUPPORTED_CURVES",
".",
"items",
"(",
")",
":",
"if",
"info",
"[",
"'oid'",
"]",
"==",
"oid",
":",
"return",
"curve_name",
"raise",
"KeyError",
"(",
"'Unknown OID: {!r}'",
".",
"format",
"(",
"oid",
")",
")"
] | Return curve name matching specified OID, or raise KeyError. | [
"Return",
"curve",
"name",
"matching",
"specified",
"OID",
"or",
"raise",
"KeyError",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L180-L185 | train |
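The lookup is a plain linear scan over the SUPPORTED_CURVES mapping; the same pattern with a one-entry stand-in table (the byte string is the DER content encoding of the NIST P-256 OID 1.2.840.10045.3.1.7):

TABLE = {'nist256p1': {'oid': b'\x2a\x86\x48\xce\x3d\x03\x01\x07'}}

def curve_name_by_oid(oid, table=TABLE):
    for name, info in table.items():
        if info['oid'] == oid:
            return name
    raise KeyError('Unknown OID: {!r}'.format(oid))

assert curve_name_by_oid(b'\x2a\x86\x48\xce\x3d\x03\x01\x07') == 'nist256p1'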
romanz/trezor-agent | libagent/gpg/protocol.py | make_signature | def make_signature(signer_func, data_to_sign, public_algo,
hashed_subpackets, unhashed_subpackets, sig_type=0):
"""Create new GPG signature."""
# pylint: disable=too-many-arguments
header = struct.pack('>BBBB',
4, # version
sig_type, # rfc4880 (section-5.2.1)
public_algo,
8) # hash_alg (SHA256)
hashed = subpackets(*hashed_subpackets)
unhashed = subpackets(*unhashed_subpackets)
tail = b'\x04\xff' + struct.pack('>L', len(header) + len(hashed))
data_to_hash = data_to_sign + header + hashed + tail
log.debug('hashing %d bytes', len(data_to_hash))
digest = hashlib.sha256(data_to_hash).digest()
log.debug('signing digest: %s', util.hexlify(digest))
params = signer_func(digest=digest)
sig = b''.join(mpi(p) for p in params)
return bytes(header + hashed + unhashed +
digest[:2] + # used for decoder's sanity check
sig) | python | def make_signature(signer_func, data_to_sign, public_algo,
hashed_subpackets, unhashed_subpackets, sig_type=0):
"""Create new GPG signature."""
# pylint: disable=too-many-arguments
header = struct.pack('>BBBB',
4, # version
sig_type, # rfc4880 (section-5.2.1)
public_algo,
8) # hash_alg (SHA256)
hashed = subpackets(*hashed_subpackets)
unhashed = subpackets(*unhashed_subpackets)
tail = b'\x04\xff' + struct.pack('>L', len(header) + len(hashed))
data_to_hash = data_to_sign + header + hashed + tail
log.debug('hashing %d bytes', len(data_to_hash))
digest = hashlib.sha256(data_to_hash).digest()
log.debug('signing digest: %s', util.hexlify(digest))
params = signer_func(digest=digest)
sig = b''.join(mpi(p) for p in params)
return bytes(header + hashed + unhashed +
digest[:2] + # used for decoder's sanity check
sig) | [
"def",
"make_signature",
"(",
"signer_func",
",",
"data_to_sign",
",",
"public_algo",
",",
"hashed_subpackets",
",",
"unhashed_subpackets",
",",
"sig_type",
"=",
"0",
")",
":",
"# pylint: disable=too-many-arguments",
"header",
"=",
"struct",
".",
"pack",
"(",
"'>BBBB'",
",",
"4",
",",
"# version",
"sig_type",
",",
"# rfc4880 (section-5.2.1)",
"public_algo",
",",
"8",
")",
"# hash_alg (SHA256)",
"hashed",
"=",
"subpackets",
"(",
"*",
"hashed_subpackets",
")",
"unhashed",
"=",
"subpackets",
"(",
"*",
"unhashed_subpackets",
")",
"tail",
"=",
"b'\\x04\\xff'",
"+",
"struct",
".",
"pack",
"(",
"'>L'",
",",
"len",
"(",
"header",
")",
"+",
"len",
"(",
"hashed",
")",
")",
"data_to_hash",
"=",
"data_to_sign",
"+",
"header",
"+",
"hashed",
"+",
"tail",
"log",
".",
"debug",
"(",
"'hashing %d bytes'",
",",
"len",
"(",
"data_to_hash",
")",
")",
"digest",
"=",
"hashlib",
".",
"sha256",
"(",
"data_to_hash",
")",
".",
"digest",
"(",
")",
"log",
".",
"debug",
"(",
"'signing digest: %s'",
",",
"util",
".",
"hexlify",
"(",
"digest",
")",
")",
"params",
"=",
"signer_func",
"(",
"digest",
"=",
"digest",
")",
"sig",
"=",
"b''",
".",
"join",
"(",
"mpi",
"(",
"p",
")",
"for",
"p",
"in",
"params",
")",
"return",
"bytes",
"(",
"header",
"+",
"hashed",
"+",
"unhashed",
"+",
"digest",
"[",
":",
"2",
"]",
"+",
"# used for decoder's sanity check",
"sig",
")"
] | Create new GPG signature. | [
"Create",
"new",
"GPG",
"signature",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L254-L276 | train |
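The tail bytes appended before hashing are the RFC 4880 v4 signature trailer: version 0x04, a 0xFF sentinel, and the length of the hashed signature data as a big-endian uint32. A sketch of just the digest step, using a hypothetical literal header (version 4, sig type 0, algorithm 19 = ECDSA, hash 8 = SHA-256):

import hashlib
import struct

def v4_digest(data_to_sign, header, hashed_subpackets):
    tail = b'\x04\xff' + struct.pack('>L', len(header) + len(hashed_subpackets))
    return hashlib.sha256(data_to_sign + header + hashed_subpackets + tail).digest()

digest = v4_digest(b'message', b'\x04\x00\x13\x08', b'')
print(digest.hex())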
romanz/trezor-agent | libagent/gpg/protocol.py | PublicKey.data | def data(self):
"""Data for packet creation."""
header = struct.pack('>BLB',
4, # version
self.created, # creation
self.algo_id) # public key algorithm ID
oid = util.prefix_len('>B', self.curve_info['oid'])
blob = self.curve_info['serialize'](self.verifying_key)
return header + oid + blob + self.ecdh_packet | python | def data(self):
"""Data for packet creation."""
header = struct.pack('>BLB',
4, # version
self.created, # creation
self.algo_id) # public key algorithm ID
oid = util.prefix_len('>B', self.curve_info['oid'])
blob = self.curve_info['serialize'](self.verifying_key)
return header + oid + blob + self.ecdh_packet | [
"def",
"data",
"(",
"self",
")",
":",
"header",
"=",
"struct",
".",
"pack",
"(",
"'>BLB'",
",",
"4",
",",
"# version",
"self",
".",
"created",
",",
"# creation",
"self",
".",
"algo_id",
")",
"# public key algorithm ID",
"oid",
"=",
"util",
".",
"prefix_len",
"(",
"'>B'",
",",
"self",
".",
"curve_info",
"[",
"'oid'",
"]",
")",
"blob",
"=",
"self",
".",
"curve_info",
"[",
"'serialize'",
"]",
"(",
"self",
".",
"verifying_key",
")",
"return",
"header",
"+",
"oid",
"+",
"blob",
"+",
"self",
".",
"ecdh_packet"
] | Data for packet creation. | [
"Data",
"for",
"packet",
"creation",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/protocol.py#L209-L217 | train |
romanz/trezor-agent | libagent/gpg/encode.py | create_subkey | def create_subkey(primary_bytes, subkey, signer_func, secret_bytes=b''):
"""Export new subkey to GPG primary key."""
subkey_packet = protocol.packet(tag=(7 if secret_bytes else 14),
blob=(subkey.data() + secret_bytes))
packets = list(decode.parse_packets(io.BytesIO(primary_bytes)))
primary, user_id, signature = packets[:3]
data_to_sign = primary['_to_hash'] + subkey.data_to_hash()
if subkey.ecdh:
embedded_sig = None
else:
# Primary Key Binding Signature
hashed_subpackets = [
protocol.subpacket_time(subkey.created)] # signature time
unhashed_subpackets = [
protocol.subpacket(16, subkey.key_id())] # issuer key id
embedded_sig = protocol.make_signature(
signer_func=signer_func,
data_to_sign=data_to_sign,
public_algo=subkey.algo_id,
sig_type=0x19,
hashed_subpackets=hashed_subpackets,
unhashed_subpackets=unhashed_subpackets)
# Subkey Binding Signature
# Key flags: https://tools.ietf.org/html/rfc4880#section-5.2.3.21
# (certify & sign) (encrypt)
flags = (2) if (not subkey.ecdh) else (4 | 8)
hashed_subpackets = [
protocol.subpacket_time(subkey.created), # signature time
protocol.subpacket_byte(0x1B, flags)]
unhashed_subpackets = []
unhashed_subpackets.append(protocol.subpacket(16, primary['key_id']))
if embedded_sig is not None:
unhashed_subpackets.append(protocol.subpacket(32, embedded_sig))
unhashed_subpackets.append(protocol.CUSTOM_SUBPACKET)
if not decode.has_custom_subpacket(signature):
signer_func = keyring.create_agent_signer(user_id['value'])
signature = protocol.make_signature(
signer_func=signer_func,
data_to_sign=data_to_sign,
public_algo=primary['algo'],
sig_type=0x18,
hashed_subpackets=hashed_subpackets,
unhashed_subpackets=unhashed_subpackets)
sign_packet = protocol.packet(tag=2, blob=signature)
return primary_bytes + subkey_packet + sign_packet | python | def create_subkey(primary_bytes, subkey, signer_func, secret_bytes=b''):
"""Export new subkey to GPG primary key."""
subkey_packet = protocol.packet(tag=(7 if secret_bytes else 14),
blob=(subkey.data() + secret_bytes))
packets = list(decode.parse_packets(io.BytesIO(primary_bytes)))
primary, user_id, signature = packets[:3]
data_to_sign = primary['_to_hash'] + subkey.data_to_hash()
if subkey.ecdh:
embedded_sig = None
else:
# Primary Key Binding Signature
hashed_subpackets = [
protocol.subpacket_time(subkey.created)] # signature time
unhashed_subpackets = [
protocol.subpacket(16, subkey.key_id())] # issuer key id
embedded_sig = protocol.make_signature(
signer_func=signer_func,
data_to_sign=data_to_sign,
public_algo=subkey.algo_id,
sig_type=0x19,
hashed_subpackets=hashed_subpackets,
unhashed_subpackets=unhashed_subpackets)
# Subkey Binding Signature
# Key flags: https://tools.ietf.org/html/rfc4880#section-5.2.3.21
# (certify & sign) (encrypt)
flags = (2) if (not subkey.ecdh) else (4 | 8)
hashed_subpackets = [
protocol.subpacket_time(subkey.created), # signature time
protocol.subpacket_byte(0x1B, flags)]
unhashed_subpackets = []
unhashed_subpackets.append(protocol.subpacket(16, primary['key_id']))
if embedded_sig is not None:
unhashed_subpackets.append(protocol.subpacket(32, embedded_sig))
unhashed_subpackets.append(protocol.CUSTOM_SUBPACKET)
if not decode.has_custom_subpacket(signature):
signer_func = keyring.create_agent_signer(user_id['value'])
signature = protocol.make_signature(
signer_func=signer_func,
data_to_sign=data_to_sign,
public_algo=primary['algo'],
sig_type=0x18,
hashed_subpackets=hashed_subpackets,
unhashed_subpackets=unhashed_subpackets)
sign_packet = protocol.packet(tag=2, blob=signature)
return primary_bytes + subkey_packet + sign_packet | [
"def",
"create_subkey",
"(",
"primary_bytes",
",",
"subkey",
",",
"signer_func",
",",
"secret_bytes",
"=",
"b''",
")",
":",
"subkey_packet",
"=",
"protocol",
".",
"packet",
"(",
"tag",
"=",
"(",
"7",
"if",
"secret_bytes",
"else",
"14",
")",
",",
"blob",
"=",
"(",
"subkey",
".",
"data",
"(",
")",
"+",
"secret_bytes",
")",
")",
"packets",
"=",
"list",
"(",
"decode",
".",
"parse_packets",
"(",
"io",
".",
"BytesIO",
"(",
"primary_bytes",
")",
")",
")",
"primary",
",",
"user_id",
",",
"signature",
"=",
"packets",
"[",
":",
"3",
"]",
"data_to_sign",
"=",
"primary",
"[",
"'_to_hash'",
"]",
"+",
"subkey",
".",
"data_to_hash",
"(",
")",
"if",
"subkey",
".",
"ecdh",
":",
"embedded_sig",
"=",
"None",
"else",
":",
"# Primary Key Binding Signature",
"hashed_subpackets",
"=",
"[",
"protocol",
".",
"subpacket_time",
"(",
"subkey",
".",
"created",
")",
"]",
"# signature time",
"unhashed_subpackets",
"=",
"[",
"protocol",
".",
"subpacket",
"(",
"16",
",",
"subkey",
".",
"key_id",
"(",
")",
")",
"]",
"# issuer key id",
"embedded_sig",
"=",
"protocol",
".",
"make_signature",
"(",
"signer_func",
"=",
"signer_func",
",",
"data_to_sign",
"=",
"data_to_sign",
",",
"public_algo",
"=",
"subkey",
".",
"algo_id",
",",
"sig_type",
"=",
"0x19",
",",
"hashed_subpackets",
"=",
"hashed_subpackets",
",",
"unhashed_subpackets",
"=",
"unhashed_subpackets",
")",
"# Subkey Binding Signature",
"# Key flags: https://tools.ietf.org/html/rfc4880#section-5.2.3.21",
"# (certify & sign) (encrypt)",
"flags",
"=",
"(",
"2",
")",
"if",
"(",
"not",
"subkey",
".",
"ecdh",
")",
"else",
"(",
"4",
"|",
"8",
")",
"hashed_subpackets",
"=",
"[",
"protocol",
".",
"subpacket_time",
"(",
"subkey",
".",
"created",
")",
",",
"# signature time",
"protocol",
".",
"subpacket_byte",
"(",
"0x1B",
",",
"flags",
")",
"]",
"unhashed_subpackets",
"=",
"[",
"]",
"unhashed_subpackets",
".",
"append",
"(",
"protocol",
".",
"subpacket",
"(",
"16",
",",
"primary",
"[",
"'key_id'",
"]",
")",
")",
"if",
"embedded_sig",
"is",
"not",
"None",
":",
"unhashed_subpackets",
".",
"append",
"(",
"protocol",
".",
"subpacket",
"(",
"32",
",",
"embedded_sig",
")",
")",
"unhashed_subpackets",
".",
"append",
"(",
"protocol",
".",
"CUSTOM_SUBPACKET",
")",
"if",
"not",
"decode",
".",
"has_custom_subpacket",
"(",
"signature",
")",
":",
"signer_func",
"=",
"keyring",
".",
"create_agent_signer",
"(",
"user_id",
"[",
"'value'",
"]",
")",
"signature",
"=",
"protocol",
".",
"make_signature",
"(",
"signer_func",
"=",
"signer_func",
",",
"data_to_sign",
"=",
"data_to_sign",
",",
"public_algo",
"=",
"primary",
"[",
"'algo'",
"]",
",",
"sig_type",
"=",
"0x18",
",",
"hashed_subpackets",
"=",
"hashed_subpackets",
",",
"unhashed_subpackets",
"=",
"unhashed_subpackets",
")",
"sign_packet",
"=",
"protocol",
".",
"packet",
"(",
"tag",
"=",
"2",
",",
"blob",
"=",
"signature",
")",
"return",
"primary_bytes",
"+",
"subkey_packet",
"+",
"sign_packet"
] | Export new subkey to GPG primary key. | [
"Export",
"new",
"subkey",
"to",
"GPG",
"primary",
"key",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/encode.py#L51-L103 | train |
romanz/trezor-agent | libagent/gpg/__init__.py | verify_gpg_version | def verify_gpg_version():
"""Make sure that the installed GnuPG is not too old."""
existing_gpg = keyring.gpg_version().decode('ascii')
required_gpg = '>=2.1.11'
msg = 'Existing GnuPG has version "{}" ({} required)'.format(existing_gpg,
required_gpg)
if not semver.match(existing_gpg, required_gpg):
log.error(msg) | python | def verify_gpg_version():
"""Make sure that the installed GnuPG is not too old."""
existing_gpg = keyring.gpg_version().decode('ascii')
required_gpg = '>=2.1.11'
msg = 'Existing GnuPG has version "{}" ({} required)'.format(existing_gpg,
required_gpg)
if not semver.match(existing_gpg, required_gpg):
log.error(msg) | [
"def",
"verify_gpg_version",
"(",
")",
":",
"existing_gpg",
"=",
"keyring",
".",
"gpg_version",
"(",
")",
".",
"decode",
"(",
"'ascii'",
")",
"required_gpg",
"=",
"'>=2.1.11'",
"msg",
"=",
"'Existing GnuPG has version \"{}\" ({} required)'",
".",
"format",
"(",
"existing_gpg",
",",
"required_gpg",
")",
"if",
"not",
"semver",
".",
"match",
"(",
"existing_gpg",
",",
"required_gpg",
")",
":",
"log",
".",
"error",
"(",
"msg",
")"
] | Make sure that the installed GnuPG is not too old. | [
"Make",
"sure",
"that",
"the",
"installed",
"GnuPG",
"is",
"not",
"too",
"old",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/__init__.py#L83-L90 | train |
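The comparison goes through the `semver` package's module-level match() helper (the 2.x API; newer semver releases move matching onto the Version class). Assuming that API is available:

import semver  # assumes the semver 2.x module-level match() helper

existing = '2.2.27'
required = '>=2.1.11'
if not semver.match(existing, required):
    print('GnuPG too old: {} ({} required)'.format(existing, required))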
romanz/trezor-agent | libagent/gpg/__init__.py | check_output | def check_output(args):
"""Runs command and returns the output as string."""
log.debug('run: %s', args)
out = subprocess.check_output(args=args).decode('utf-8')
log.debug('out: %r', out)
return out | python | def check_output(args):
"""Runs command and returns the output as string."""
log.debug('run: %s', args)
out = subprocess.check_output(args=args).decode('utf-8')
log.debug('out: %r', out)
return out | [
"def",
"check_output",
"(",
"args",
")",
":",
"log",
".",
"debug",
"(",
"'run: %s'",
",",
"args",
")",
"out",
"=",
"subprocess",
".",
"check_output",
"(",
"args",
"=",
"args",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"log",
".",
"debug",
"(",
"'out: %r'",
",",
"out",
")",
"return",
"out"
] | Runs command and returns the output as string. | [
"Runs",
"command",
"and",
"returns",
"the",
"output",
"as",
"string",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/__init__.py#L93-L98 | train |
romanz/trezor-agent | libagent/gpg/__init__.py | check_call | def check_call(args, stdin=None, env=None):
"""Runs command and verifies its success."""
log.debug('run: %s%s', args, ' {}'.format(env) if env else '')
subprocess.check_call(args=args, stdin=stdin, env=env) | python | def check_call(args, stdin=None, env=None):
"""Runs command and verifies its success."""
log.debug('run: %s%s', args, ' {}'.format(env) if env else '')
subprocess.check_call(args=args, stdin=stdin, env=env) | [
"def",
"check_call",
"(",
"args",
",",
"stdin",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"'run: %s%s'",
",",
"args",
",",
"' {}'",
".",
"format",
"(",
"env",
")",
"if",
"env",
"else",
"''",
")",
"subprocess",
".",
"check_call",
"(",
"args",
"=",
"args",
",",
"stdin",
"=",
"stdin",
",",
"env",
"=",
"env",
")"
] | Runs command and verifies its success. | [
"Runs",
"command",
"and",
"verifies",
"its",
"success",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/__init__.py#L101-L104 | train |
romanz/trezor-agent | libagent/gpg/__init__.py | write_file | def write_file(path, data):
"""Writes data to specified path."""
with open(path, 'w') as f:
log.debug('setting %s contents:\n%s', path, data)
f.write(data)
return f | python | def write_file(path, data):
"""Writes data to specified path."""
with open(path, 'w') as f:
log.debug('setting %s contents:\n%s', path, data)
f.write(data)
return f | [
"def",
"write_file",
"(",
"path",
",",
"data",
")",
":",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"log",
".",
"debug",
"(",
"'setting %s contents:\\n%s'",
",",
"path",
",",
"data",
")",
"f",
".",
"write",
"(",
"data",
")",
"return",
"f"
] | Writes data to specified path. | [
"Writes",
"data",
"to",
"specified",
"path",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/__init__.py#L107-L112 | train |
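Note that the `with` block closes the handle before it is returned, so callers receive a file object they can no longer write to (only attributes such as .name remain useful). A variant that sidesteps the closed-handle question by returning the path instead — a sketch, not the recorded API:

def write_file(path, data):
    """Write data to the specified path and return the path."""
    with open(path, 'w') as f:
        f.write(data)
    return path  # the handle is already closed at this point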
romanz/trezor-agent | libagent/gpg/__init__.py | run_agent | def run_agent(device_type):
"""Run a simple GPG-agent server."""
p = argparse.ArgumentParser()
p.add_argument('--homedir', default=os.environ.get('GNUPGHOME'))
p.add_argument('-v', '--verbose', default=0, action='count')
p.add_argument('--server', default=False, action='store_true',
help='Use stdin/stdout for communication with GPG.')
p.add_argument('--pin-entry-binary', type=str, default='pinentry',
help='Path to PIN entry UI helper.')
p.add_argument('--passphrase-entry-binary', type=str, default='pinentry',
help='Path to passphrase entry UI helper.')
p.add_argument('--cache-expiry-seconds', type=float, default=float('inf'),
help='Expire passphrase from cache after this duration.')
args, _ = p.parse_known_args()
assert args.homedir
log_file = os.path.join(args.homedir, 'gpg-agent.log')
util.setup_logging(verbosity=args.verbose, filename=log_file)
log.debug('sys.argv: %s', sys.argv)
log.debug('os.environ: %s', os.environ)
log.debug('pid: %d, parent pid: %d', os.getpid(), os.getppid())
try:
env = {'GNUPGHOME': args.homedir, 'PATH': os.environ['PATH']}
pubkey_bytes = keyring.export_public_keys(env=env)
device_type.ui = device.ui.UI(device_type=device_type,
config=vars(args))
device_type.ui.cached_passphrase_ack = util.ExpiringCache(
seconds=float(args.cache_expiry_seconds))
handler = agent.Handler(device=device_type(),
pubkey_bytes=pubkey_bytes)
sock_server = _server_from_assuan_fd(os.environ)
if sock_server is None:
sock_server = _server_from_sock_path(env)
with sock_server as sock:
for conn in agent.yield_connections(sock):
with contextlib.closing(conn):
try:
handler.handle(conn)
except agent.AgentStop:
log.info('stopping gpg-agent')
return
except IOError as e:
log.info('connection closed: %s', e)
return
except Exception as e: # pylint: disable=broad-except
log.exception('handler failed: %s', e)
except Exception as e: # pylint: disable=broad-except
log.exception('gpg-agent failed: %s', e) | python | def run_agent(device_type):
"""Run a simple GPG-agent server."""
p = argparse.ArgumentParser()
p.add_argument('--homedir', default=os.environ.get('GNUPGHOME'))
p.add_argument('-v', '--verbose', default=0, action='count')
p.add_argument('--server', default=False, action='store_true',
help='Use stdin/stdout for communication with GPG.')
p.add_argument('--pin-entry-binary', type=str, default='pinentry',
help='Path to PIN entry UI helper.')
p.add_argument('--passphrase-entry-binary', type=str, default='pinentry',
help='Path to passphrase entry UI helper.')
p.add_argument('--cache-expiry-seconds', type=float, default=float('inf'),
help='Expire passphrase from cache after this duration.')
args, _ = p.parse_known_args()
assert args.homedir
log_file = os.path.join(args.homedir, 'gpg-agent.log')
util.setup_logging(verbosity=args.verbose, filename=log_file)
log.debug('sys.argv: %s', sys.argv)
log.debug('os.environ: %s', os.environ)
log.debug('pid: %d, parent pid: %d', os.getpid(), os.getppid())
try:
env = {'GNUPGHOME': args.homedir, 'PATH': os.environ['PATH']}
pubkey_bytes = keyring.export_public_keys(env=env)
device_type.ui = device.ui.UI(device_type=device_type,
config=vars(args))
device_type.ui.cached_passphrase_ack = util.ExpiringCache(
seconds=float(args.cache_expiry_seconds))
handler = agent.Handler(device=device_type(),
pubkey_bytes=pubkey_bytes)
sock_server = _server_from_assuan_fd(os.environ)
if sock_server is None:
sock_server = _server_from_sock_path(env)
with sock_server as sock:
for conn in agent.yield_connections(sock):
with contextlib.closing(conn):
try:
handler.handle(conn)
except agent.AgentStop:
log.info('stopping gpg-agent')
return
except IOError as e:
log.info('connection closed: %s', e)
return
except Exception as e: # pylint: disable=broad-except
log.exception('handler failed: %s', e)
except Exception as e: # pylint: disable=broad-except
log.exception('gpg-agent failed: %s', e) | [
"def",
"run_agent",
"(",
"device_type",
")",
":",
"p",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"p",
".",
"add_argument",
"(",
"'--homedir'",
",",
"default",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'GNUPGHOME'",
")",
")",
"p",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"default",
"=",
"0",
",",
"action",
"=",
"'count'",
")",
"p",
".",
"add_argument",
"(",
"'--server'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Use stdin/stdout for communication with GPG.'",
")",
"p",
".",
"add_argument",
"(",
"'--pin-entry-binary'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'pinentry'",
",",
"help",
"=",
"'Path to PIN entry UI helper.'",
")",
"p",
".",
"add_argument",
"(",
"'--passphrase-entry-binary'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'pinentry'",
",",
"help",
"=",
"'Path to passphrase entry UI helper.'",
")",
"p",
".",
"add_argument",
"(",
"'--cache-expiry-seconds'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"float",
"(",
"'inf'",
")",
",",
"help",
"=",
"'Expire passphrase from cache after this duration.'",
")",
"args",
",",
"_",
"=",
"p",
".",
"parse_known_args",
"(",
")",
"assert",
"args",
".",
"homedir",
"log_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"homedir",
",",
"'gpg-agent.log'",
")",
"util",
".",
"setup_logging",
"(",
"verbosity",
"=",
"args",
".",
"verbose",
",",
"filename",
"=",
"log_file",
")",
"log",
".",
"debug",
"(",
"'sys.argv: %s'",
",",
"sys",
".",
"argv",
")",
"log",
".",
"debug",
"(",
"'os.environ: %s'",
",",
"os",
".",
"environ",
")",
"log",
".",
"debug",
"(",
"'pid: %d, parent pid: %d'",
",",
"os",
".",
"getpid",
"(",
")",
",",
"os",
".",
"getppid",
"(",
")",
")",
"try",
":",
"env",
"=",
"{",
"'GNUPGHOME'",
":",
"args",
".",
"homedir",
",",
"'PATH'",
":",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"}",
"pubkey_bytes",
"=",
"keyring",
".",
"export_public_keys",
"(",
"env",
"=",
"env",
")",
"device_type",
".",
"ui",
"=",
"device",
".",
"ui",
".",
"UI",
"(",
"device_type",
"=",
"device_type",
",",
"config",
"=",
"vars",
"(",
"args",
")",
")",
"device_type",
".",
"ui",
".",
"cached_passphrase_ack",
"=",
"util",
".",
"ExpiringCache",
"(",
"seconds",
"=",
"float",
"(",
"args",
".",
"cache_expiry_seconds",
")",
")",
"handler",
"=",
"agent",
".",
"Handler",
"(",
"device",
"=",
"device_type",
"(",
")",
",",
"pubkey_bytes",
"=",
"pubkey_bytes",
")",
"sock_server",
"=",
"_server_from_assuan_fd",
"(",
"os",
".",
"environ",
")",
"if",
"sock_server",
"is",
"None",
":",
"sock_server",
"=",
"_server_from_sock_path",
"(",
"env",
")",
"with",
"sock_server",
"as",
"sock",
":",
"for",
"conn",
"in",
"agent",
".",
"yield_connections",
"(",
"sock",
")",
":",
"with",
"contextlib",
".",
"closing",
"(",
"conn",
")",
":",
"try",
":",
"handler",
".",
"handle",
"(",
"conn",
")",
"except",
"agent",
".",
"AgentStop",
":",
"log",
".",
"info",
"(",
"'stopping gpg-agent'",
")",
"return",
"except",
"IOError",
"as",
"e",
":",
"log",
".",
"info",
"(",
"'connection closed: %s'",
",",
"e",
")",
"return",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"log",
".",
"exception",
"(",
"'handler failed: %s'",
",",
"e",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"log",
".",
"exception",
"(",
"'gpg-agent failed: %s'",
",",
"e",
")"
] | Run a simple GPG-agent server. | [
"Run",
"a",
"simple",
"GPG",
"-",
"agent",
"server",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/__init__.py#L222-L276 | train |
romanz/trezor-agent | libagent/device/trezor_defs.py | find_device | def find_device():
"""Selects a transport based on `TREZOR_PATH` environment variable.
If unset, picks first connected device.
"""
try:
return get_transport(os.environ.get("TREZOR_PATH"))
except Exception as e: # pylint: disable=broad-except
log.debug("Failed to find a Trezor device: %s", e) | python | def find_device():
"""Selects a transport based on `TREZOR_PATH` environment variable.
If unset, picks first connected device.
"""
try:
return get_transport(os.environ.get("TREZOR_PATH"))
except Exception as e: # pylint: disable=broad-except
log.debug("Failed to find a Trezor device: %s", e) | [
"def",
"find_device",
"(",
")",
":",
"try",
":",
"return",
"get_transport",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"\"TREZOR_PATH\"",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"log",
".",
"debug",
"(",
"\"Failed to find a Trezor device: %s\"",
",",
"e",
")"
] | Selects a transport based on `TREZOR_PATH` environment variable.
If unset, picks first connected device. | [
"Selects",
"a",
"transport",
"based",
"on",
"TREZOR_PATH",
"environment",
"variable",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/trezor_defs.py#L22-L30 | train |
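os.environ.get() returns None when `TREZOR_PATH` is unset, and passing None through lets the transport layer fall back to enumerating connected devices. The same guarded-lookup pattern in isolation:

import os

path = os.environ.get('TREZOR_PATH')  # None when the variable is unset
if path is None:
    print('no explicit path; would enumerate connected devices')
else:
    print('would open the transport at', path)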
romanz/trezor-agent | libagent/device/ledger.py | _convert_public_key | def _convert_public_key(ecdsa_curve_name, result):
"""Convert Ledger reply into PublicKey object."""
if ecdsa_curve_name == 'nist256p1':
if (result[64] & 1) != 0:
result = bytearray([0x03]) + result[1:33]
else:
result = bytearray([0x02]) + result[1:33]
else:
result = result[1:]
keyX = bytearray(result[0:32])
keyY = bytearray(result[32:][::-1])
if (keyX[31] & 1) != 0:
keyY[31] |= 0x80
result = b'\x00' + bytes(keyY)
return bytes(result) | python | def _convert_public_key(ecdsa_curve_name, result):
"""Convert Ledger reply into PublicKey object."""
if ecdsa_curve_name == 'nist256p1':
if (result[64] & 1) != 0:
result = bytearray([0x03]) + result[1:33]
else:
result = bytearray([0x02]) + result[1:33]
else:
result = result[1:]
keyX = bytearray(result[0:32])
keyY = bytearray(result[32:][::-1])
if (keyX[31] & 1) != 0:
keyY[31] |= 0x80
result = b'\x00' + bytes(keyY)
return bytes(result) | [
"def",
"_convert_public_key",
"(",
"ecdsa_curve_name",
",",
"result",
")",
":",
"if",
"ecdsa_curve_name",
"==",
"'nist256p1'",
":",
"if",
"(",
"result",
"[",
"64",
"]",
"&",
"1",
")",
"!=",
"0",
":",
"result",
"=",
"bytearray",
"(",
"[",
"0x03",
"]",
")",
"+",
"result",
"[",
"1",
":",
"33",
"]",
"else",
":",
"result",
"=",
"bytearray",
"(",
"[",
"0x02",
"]",
")",
"+",
"result",
"[",
"1",
":",
"33",
"]",
"else",
":",
"result",
"=",
"result",
"[",
"1",
":",
"]",
"keyX",
"=",
"bytearray",
"(",
"result",
"[",
"0",
":",
"32",
"]",
")",
"keyY",
"=",
"bytearray",
"(",
"result",
"[",
"32",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"if",
"(",
"keyX",
"[",
"31",
"]",
"&",
"1",
")",
"!=",
"0",
":",
"keyY",
"[",
"31",
"]",
"|=",
"0x80",
"result",
"=",
"b'\\x00'",
"+",
"bytes",
"(",
"keyY",
")",
"return",
"bytes",
"(",
"result",
")"
] | Convert Ledger reply into PublicKey object. | [
"Convert",
"Ledger",
"reply",
"into",
"PublicKey",
"object",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ledger.py#L19-L33 | train |
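For nist256p1 the first branch is SEC1 point compression: prefix 0x02 when y is even, 0x03 when y is odd, with the parity read from the last byte of the 65-byte uncompressed point. A standalone sketch:

def compress_sec1(uncompressed):
    # uncompressed: 65 bytes -- 0x04, 32-byte x, 32-byte y
    assert len(uncompressed) == 65 and uncompressed[0] == 4
    prefix = b'\x03' if uncompressed[64] & 1 else b'\x02'
    return prefix + uncompressed[1:33]

pt = bytes([4]) + (7).to_bytes(32, 'big') + (9).to_bytes(32, 'big')
assert compress_sec1(pt) == b'\x03' + (7).to_bytes(32, 'big')  # y = 9 is odd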
romanz/trezor-agent | libagent/device/ledger.py | LedgerNanoS.connect | def connect(self):
"""Enumerate and connect to the first USB HID interface."""
try:
return comm.getDongle()
except comm.CommException as e:
raise interface.NotFoundError(
'{} not connected: "{}"'.format(self, e)) | python | def connect(self):
"""Enumerate and connect to the first USB HID interface."""
try:
return comm.getDongle()
except comm.CommException as e:
raise interface.NotFoundError(
'{} not connected: "{}"'.format(self, e)) | [
"def",
"connect",
"(",
"self",
")",
":",
"try",
":",
"return",
"comm",
".",
"getDongle",
"(",
")",
"except",
"comm",
".",
"CommException",
"as",
"e",
":",
"raise",
"interface",
".",
"NotFoundError",
"(",
"'{} not connected: \"{}\"'",
".",
"format",
"(",
"self",
",",
"e",
")",
")"
] | Enumerate and connect to the first USB HID interface. | [
"Enumerate",
"and",
"connect",
"to",
"the",
"first",
"USB",
"HID",
"interface",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ledger.py#L44-L50 | train |
romanz/trezor-agent | libagent/device/ledger.py | LedgerNanoS.pubkey | def pubkey(self, identity, ecdh=False):
"""Get PublicKey object for specified BIP32 address and elliptic curve."""
curve_name = identity.get_curve_name(ecdh)
path = _expand_path(identity.get_bip32_address(ecdh))
if curve_name == 'nist256p1':
p2 = '01'
else:
p2 = '02'
apdu = '800200' + p2
apdu = binascii.unhexlify(apdu)
apdu += bytearray([len(path) + 1, len(path) // 4])
apdu += path
log.debug('apdu: %r', apdu)
result = bytearray(self.conn.exchange(bytes(apdu)))
log.debug('result: %r', result)
return _convert_public_key(curve_name, result[1:]) | python | def pubkey(self, identity, ecdh=False):
"""Get PublicKey object for specified BIP32 address and elliptic curve."""
curve_name = identity.get_curve_name(ecdh)
path = _expand_path(identity.get_bip32_address(ecdh))
if curve_name == 'nist256p1':
p2 = '01'
else:
p2 = '02'
apdu = '800200' + p2
apdu = binascii.unhexlify(apdu)
apdu += bytearray([len(path) + 1, len(path) // 4])
apdu += path
log.debug('apdu: %r', apdu)
result = bytearray(self.conn.exchange(bytes(apdu)))
log.debug('result: %r', result)
return _convert_public_key(curve_name, result[1:]) | [
"def",
"pubkey",
"(",
"self",
",",
"identity",
",",
"ecdh",
"=",
"False",
")",
":",
"curve_name",
"=",
"identity",
".",
"get_curve_name",
"(",
"ecdh",
")",
"path",
"=",
"_expand_path",
"(",
"identity",
".",
"get_bip32_address",
"(",
"ecdh",
")",
")",
"if",
"curve_name",
"==",
"'nist256p1'",
":",
"p2",
"=",
"'01'",
"else",
":",
"p2",
"=",
"'02'",
"apdu",
"=",
"'800200'",
"+",
"p2",
"apdu",
"=",
"binascii",
".",
"unhexlify",
"(",
"apdu",
")",
"apdu",
"+=",
"bytearray",
"(",
"[",
"len",
"(",
"path",
")",
"+",
"1",
",",
"len",
"(",
"path",
")",
"//",
"4",
"]",
")",
"apdu",
"+=",
"path",
"log",
".",
"debug",
"(",
"'apdu: %r'",
",",
"apdu",
")",
"result",
"=",
"bytearray",
"(",
"self",
".",
"conn",
".",
"exchange",
"(",
"bytes",
"(",
"apdu",
")",
")",
")",
"log",
".",
"debug",
"(",
"'result: %r'",
",",
"result",
")",
"return",
"_convert_public_key",
"(",
"curve_name",
",",
"result",
"[",
"1",
":",
"]",
")"
] | Get PublicKey object for specified BIP32 address and elliptic curve. | [
"Get",
"PublicKey",
"object",
"for",
"specified",
"BIP32",
"address",
"and",
"elliptic",
"curve",
"."
] | 513b1259c4d7aca5f88cd958edc11828d0712f1b | https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ledger.py#L52-L67 | train |
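The request is a raw APDU: class byte 0x80, instruction 0x02, P1 0x00, P2 selecting the curve, then a payload length byte, the count of 32-bit path elements, and the big-endian BIP32 path. A sketch of the same construction with a hypothetical path expander (hardened indices have the top bit set):

import binascii
import struct

HARDENED = 0x80000000

def expand_path(indices):
    return b''.join(struct.pack('>I', i) for i in indices)

path = expand_path([13 | HARDENED, 0 | HARDENED])
apdu = binascii.unhexlify('80020001')           # P2 = 01 selects nist256p1
apdu += bytes([len(path) + 1, len(path) // 4])  # payload length, element count
apdu += path
print(apdu.hex())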
inonit/drf-haystack | ez_setup.py | download_setuptools | def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto) | python | def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto) | [
"def",
"download_setuptools",
"(",
"version",
"=",
"DEFAULT_VERSION",
",",
"download_base",
"=",
"DEFAULT_URL",
",",
"to_dir",
"=",
"os",
".",
"curdir",
",",
"delay",
"=",
"15",
")",
":",
"# making sure we use the absolute path",
"to_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"to_dir",
")",
"try",
":",
"from",
"urllib",
".",
"request",
"import",
"urlopen",
"except",
"ImportError",
":",
"from",
"urllib2",
"import",
"urlopen",
"tgz_name",
"=",
"\"distribute-%s.tar.gz\"",
"%",
"version",
"url",
"=",
"download_base",
"+",
"tgz_name",
"saveto",
"=",
"os",
".",
"path",
".",
"join",
"(",
"to_dir",
",",
"tgz_name",
")",
"src",
"=",
"dst",
"=",
"None",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"saveto",
")",
":",
"# Avoid repeated downloads",
"try",
":",
"log",
".",
"warn",
"(",
"\"Downloading %s\"",
",",
"url",
")",
"src",
"=",
"urlopen",
"(",
"url",
")",
"# Read/write all in one block, so we don't create a corrupt file",
"# if the download is interrupted.",
"data",
"=",
"src",
".",
"read",
"(",
")",
"dst",
"=",
"open",
"(",
"saveto",
",",
"\"wb\"",
")",
"dst",
".",
"write",
"(",
"data",
")",
"finally",
":",
"if",
"src",
":",
"src",
".",
"close",
"(",
")",
"if",
"dst",
":",
"dst",
".",
"close",
"(",
")",
"return",
"os",
".",
"path",
".",
"realpath",
"(",
"saveto",
")"
] | Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt. | [
"Download",
"distribute",
"from",
"a",
"specified",
"location",
"and",
"return",
"its",
"filename"
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/ez_setup.py#L170-L204 | train |
inonit/drf-haystack | drf_haystack/query.py | BaseQueryBuilder.tokenize | def tokenize(stream, separator):
"""
Tokenize and yield query parameter values.
:param stream: Input value
:param separator: Character to use to separate the tokens.
:return:
"""
for value in stream:
for token in value.split(separator):
if token:
yield token.strip() | python | def tokenize(stream, separator):
"""
Tokenize and yield query parameter values.
:param stream: Input value
:param separator: Character to use to separate the tokens.
:return:
"""
for value in stream:
for token in value.split(separator):
if token:
yield token.strip() | [
"def",
"tokenize",
"(",
"stream",
",",
"separator",
")",
":",
"for",
"value",
"in",
"stream",
":",
"for",
"token",
"in",
"value",
".",
"split",
"(",
"separator",
")",
":",
"if",
"token",
":",
"yield",
"token",
".",
"strip",
"(",
")"
] | Tokenize and yield query parameter values.
:param stream: Input value
:param separator: Character to use to separate the tokens.
:return: | [
"Tokenize",
"and",
"yield",
"query",
"parameter",
"values",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L34-L45 | train |
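The nested loops yield one stripped token per separator-delimited value, skipping empties; a quick check:

def tokenize(stream, separator):
    for value in stream:
        for token in value.split(separator):
            if token:
                yield token.strip()

assert list(tokenize(['a,b', ' c ,', ''], ',')) == ['a', 'b', 'c']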
inonit/drf-haystack | drf_haystack/query.py | FilterQueryBuilder.build_query | def build_query(self, **filters):
"""
Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields
that have been "registered" in `view.fields`.
Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any
querystring parameters that are not registered in `view.fields` will be ignored.
:param dict[str, list[str]] filters: is an expanded QueryDict or a mapping of keys to a list of
parameters.
"""
applicable_filters = []
applicable_exclusions = []
for param, value in filters.items():
excluding_term = False
param_parts = param.split("__")
base_param = param_parts[0] # only test against field without lookup
negation_keyword = constants.DRF_HAYSTACK_NEGATION_KEYWORD
if len(param_parts) > 1 and param_parts[1] == negation_keyword:
excluding_term = True
param = param.replace("__%s" % negation_keyword, "") # haystack wouldn't understand our negation
if self.view.serializer_class:
if hasattr(self.view.serializer_class.Meta, 'field_aliases'):
old_base = base_param
base_param = self.view.serializer_class.Meta.field_aliases.get(base_param, base_param)
param = param.replace(old_base, base_param) # need to replace the alias
fields = getattr(self.view.serializer_class.Meta, 'fields', [])
exclude = getattr(self.view.serializer_class.Meta, 'exclude', [])
search_fields = getattr(self.view.serializer_class.Meta, 'search_fields', [])
# Skip if the parameter is not listed in the serializer's `fields`
# or if it's in the `exclude` list.
if ((fields or search_fields) and base_param not in
chain(fields, search_fields)) or base_param in exclude or not value:
continue
field_queries = []
if len(param_parts) > 1 and param_parts[-1] in ('in', 'range'):
# `in` and `range` filters expects a list of values
field_queries.append(self.view.query_object((param, list(self.tokenize(value, self.view.lookup_sep)))))
else:
for token in self.tokenize(value, self.view.lookup_sep):
field_queries.append(self.view.query_object((param, token)))
field_queries = [fq for fq in field_queries if fq]
if len(field_queries) > 0:
term = six.moves.reduce(operator.or_, field_queries)
if excluding_term:
applicable_exclusions.append(term)
else:
applicable_filters.append(term)
applicable_filters = six.moves.reduce(
self.default_operator, filter(lambda x: x, applicable_filters)) if applicable_filters else []
applicable_exclusions = six.moves.reduce(
self.default_operator, filter(lambda x: x, applicable_exclusions)) if applicable_exclusions else []
return applicable_filters, applicable_exclusions | python | def build_query(self, **filters):
"""
Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields
that have been "registered" in `view.fields`.
Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any
querystring parameters that are not registered in `view.fields` will be ignored.
:param dict[str, list[str]] filters: is an expanded QueryDict or a mapping of keys to a list of
parameters.
"""
applicable_filters = []
applicable_exclusions = []
for param, value in filters.items():
excluding_term = False
param_parts = param.split("__")
base_param = param_parts[0] # only test against field without lookup
negation_keyword = constants.DRF_HAYSTACK_NEGATION_KEYWORD
if len(param_parts) > 1 and param_parts[1] == negation_keyword:
excluding_term = True
param = param.replace("__%s" % negation_keyword, "") # haystack wouldn't understand our negation
if self.view.serializer_class:
if hasattr(self.view.serializer_class.Meta, 'field_aliases'):
old_base = base_param
base_param = self.view.serializer_class.Meta.field_aliases.get(base_param, base_param)
param = param.replace(old_base, base_param) # need to replace the alias
fields = getattr(self.view.serializer_class.Meta, 'fields', [])
exclude = getattr(self.view.serializer_class.Meta, 'exclude', [])
search_fields = getattr(self.view.serializer_class.Meta, 'search_fields', [])
# Skip if the parameter is not listed in the serializer's `fields`
# or if it's in the `exclude` list.
if ((fields or search_fields) and base_param not in
chain(fields, search_fields)) or base_param in exclude or not value:
continue
field_queries = []
if len(param_parts) > 1 and param_parts[-1] in ('in', 'range'):
# `in` and `range` filters expects a list of values
field_queries.append(self.view.query_object((param, list(self.tokenize(value, self.view.lookup_sep)))))
else:
for token in self.tokenize(value, self.view.lookup_sep):
field_queries.append(self.view.query_object((param, token)))
field_queries = [fq for fq in field_queries if fq]
if len(field_queries) > 0:
term = six.moves.reduce(operator.or_, field_queries)
if excluding_term:
applicable_exclusions.append(term)
else:
applicable_filters.append(term)
applicable_filters = six.moves.reduce(
self.default_operator, filter(lambda x: x, applicable_filters)) if applicable_filters else []
applicable_exclusions = six.moves.reduce(
self.default_operator, filter(lambda x: x, applicable_exclusions)) if applicable_exclusions else []
return applicable_filters, applicable_exclusions | [
"def",
"build_query",
"(",
"self",
",",
"*",
"*",
"filters",
")",
":",
"applicable_filters",
"=",
"[",
"]",
"applicable_exclusions",
"=",
"[",
"]",
"for",
"param",
",",
"value",
"in",
"filters",
".",
"items",
"(",
")",
":",
"excluding_term",
"=",
"False",
"param_parts",
"=",
"param",
".",
"split",
"(",
"\"__\"",
")",
"base_param",
"=",
"param_parts",
"[",
"0",
"]",
"# only test against field without lookup",
"negation_keyword",
"=",
"constants",
".",
"DRF_HAYSTACK_NEGATION_KEYWORD",
"if",
"len",
"(",
"param_parts",
")",
">",
"1",
"and",
"param_parts",
"[",
"1",
"]",
"==",
"negation_keyword",
":",
"excluding_term",
"=",
"True",
"param",
"=",
"param",
".",
"replace",
"(",
"\"__%s\"",
"%",
"negation_keyword",
",",
"\"\"",
")",
"# haystack wouldn't understand our negation",
"if",
"self",
".",
"view",
".",
"serializer_class",
":",
"if",
"hasattr",
"(",
"self",
".",
"view",
".",
"serializer_class",
".",
"Meta",
",",
"'field_aliases'",
")",
":",
"old_base",
"=",
"base_param",
"base_param",
"=",
"self",
".",
"view",
".",
"serializer_class",
".",
"Meta",
".",
"field_aliases",
".",
"get",
"(",
"base_param",
",",
"base_param",
")",
"param",
"=",
"param",
".",
"replace",
"(",
"old_base",
",",
"base_param",
")",
"# need to replace the alias",
"fields",
"=",
"getattr",
"(",
"self",
".",
"view",
".",
"serializer_class",
".",
"Meta",
",",
"'fields'",
",",
"[",
"]",
")",
"exclude",
"=",
"getattr",
"(",
"self",
".",
"view",
".",
"serializer_class",
".",
"Meta",
",",
"'exclude'",
",",
"[",
"]",
")",
"search_fields",
"=",
"getattr",
"(",
"self",
".",
"view",
".",
"serializer_class",
".",
"Meta",
",",
"'search_fields'",
",",
"[",
"]",
")",
"# Skip if the parameter is not listed in the serializer's `fields`",
"# or if it's in the `exclude` list.",
"if",
"(",
"(",
"fields",
"or",
"search_fields",
")",
"and",
"base_param",
"not",
"in",
"chain",
"(",
"fields",
",",
"search_fields",
")",
")",
"or",
"base_param",
"in",
"exclude",
"or",
"not",
"value",
":",
"continue",
"field_queries",
"=",
"[",
"]",
"if",
"len",
"(",
"param_parts",
")",
">",
"1",
"and",
"param_parts",
"[",
"-",
"1",
"]",
"in",
"(",
"'in'",
",",
"'range'",
")",
":",
"# `in` and `range` filters expects a list of values",
"field_queries",
".",
"append",
"(",
"self",
".",
"view",
".",
"query_object",
"(",
"(",
"param",
",",
"list",
"(",
"self",
".",
"tokenize",
"(",
"value",
",",
"self",
".",
"view",
".",
"lookup_sep",
")",
")",
")",
")",
")",
"else",
":",
"for",
"token",
"in",
"self",
".",
"tokenize",
"(",
"value",
",",
"self",
".",
"view",
".",
"lookup_sep",
")",
":",
"field_queries",
".",
"append",
"(",
"self",
".",
"view",
".",
"query_object",
"(",
"(",
"param",
",",
"token",
")",
")",
")",
"field_queries",
"=",
"[",
"fq",
"for",
"fq",
"in",
"field_queries",
"if",
"fq",
"]",
"if",
"len",
"(",
"field_queries",
")",
">",
"0",
":",
"term",
"=",
"six",
".",
"moves",
".",
"reduce",
"(",
"operator",
".",
"or_",
",",
"field_queries",
")",
"if",
"excluding_term",
":",
"applicable_exclusions",
".",
"append",
"(",
"term",
")",
"else",
":",
"applicable_filters",
".",
"append",
"(",
"term",
")",
"applicable_filters",
"=",
"six",
".",
"moves",
".",
"reduce",
"(",
"self",
".",
"default_operator",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
",",
"applicable_filters",
")",
")",
"if",
"applicable_filters",
"else",
"[",
"]",
"applicable_exclusions",
"=",
"six",
".",
"moves",
".",
"reduce",
"(",
"self",
".",
"default_operator",
",",
"filter",
"(",
"lambda",
"x",
":",
"x",
",",
"applicable_exclusions",
")",
")",
"if",
"applicable_exclusions",
"else",
"[",
"]",
"return",
"applicable_filters",
",",
"applicable_exclusions"
] | Creates a single SQ filter from querystring parameters that correspond to the SearchIndex fields
that have been "registered" in `view.fields`.
Default behavior is to `OR` terms for the same parameters, and `AND` between parameters. Any
querystring parameters that are not registered in `view.fields` will be ignored.
:param dict[str, list[str]] filters: an expanded QueryDict or a mapping of keys to a list of
parameters. | [
"Creates",
"a",
"single",
"SQ",
"filter",
"from",
"querystring",
"parameters",
"that",
"correspond",
"to",
"the",
"SearchIndex",
"fields",
"that",
"have",
"been",
"registered",
"in",
"view",
".",
"fields",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L89-L151 | train |
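The row above describes OR-within-a-parameter and AND-between-parameters semantics. Below is a minimal sketch of a serializer that feeds this builder, assuming a configured Django/Haystack project and the default negation keyword `not`; `PersonIndex`, the field names and the URLs are hypothetical.

from drf_haystack.serializers import HaystackSerializer

class PersonSearchSerializer(HaystackSerializer):
    class Meta:
        index_classes = [PersonIndex]      # hypothetical haystack SearchIndex
        fields = ["firstname", "lastname", "age"]
        field_aliases = {"q": "text"}      # resolved by the alias lookup above

# GET /search/?firstname=john,jane     -> firstname:john OR firstname:jane
# GET /search/?firstname=john&age=25   -> (firstname:john) AND (age:25)
# GET /search/?age__not=25             -> collected as an exclusion term
# GET /search/?age__range=20,30        -> one query built from the value list
# GET /search/?q=banana                -> alias rewritten to the `text` field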
inonit/drf-haystack | drf_haystack/query.py | FacetQueryBuilder.build_query | def build_query(self, **filters):
"""
Creates a dict of dictionaries suitable for passing to the SearchQuerySet `facet`,
`date_facet` or `query_facet` method. All keyword arguments should be wrapped in a list.
:param dict[str, list[str]] filters: an expanded QueryDict or a mapping
of keys to a list of parameters.
"""
field_facets = {}
date_facets = {}
query_facets = {}
facet_serializer_cls = self.view.get_facet_serializer_class()
if self.view.lookup_sep == ":":
raise AttributeError("The %(cls)s.lookup_sep attribute conflicts with the HaystackFacetFilter "
"query parameter parser. Please choose another `lookup_sep` attribute "
"for %(cls)s." % {"cls": self.view.__class__.__name__})
fields = facet_serializer_cls.Meta.fields
exclude = facet_serializer_cls.Meta.exclude
field_options = facet_serializer_cls.Meta.field_options
for field, options in filters.items():
if field not in fields or field in exclude:
continue
field_options = merge_dict(field_options, {field: self.parse_field_options(self.view.lookup_sep, *options)})
valid_gap = ("year", "month", "day", "hour", "minute", "second")
for field, options in field_options.items():
if any([k in options for k in ("start_date", "end_date", "gap_by", "gap_amount")]):
if not all(k in options for k in ("start_date", "end_date", "gap_by")):
raise ValueError("Date faceting requires at least 'start_date', 'end_date' "
"and 'gap_by' to be set.")
if not options["gap_by"] in valid_gap:
raise ValueError("The 'gap_by' parameter must be one of %s." % ", ".join(valid_gap))
options.setdefault("gap_amount", 1)
date_facets[field] = field_options[field]
else:
field_facets[field] = field_options[field]
return {
"date_facets": date_facets,
"field_facets": field_facets,
"query_facets": query_facets
} | python | def build_query(self, **filters):
"""
Creates a dict of dictionaries suitable for passing to the SearchQuerySet `facet`,
`date_facet` or `query_facet` method. All keyword arguments should be wrapped in a list.
:param dict[str, list[str]] filters: an expanded QueryDict or a mapping
of keys to a list of parameters.
"""
field_facets = {}
date_facets = {}
query_facets = {}
facet_serializer_cls = self.view.get_facet_serializer_class()
if self.view.lookup_sep == ":":
raise AttributeError("The %(cls)s.lookup_sep attribute conflicts with the HaystackFacetFilter "
"query parameter parser. Please choose another `lookup_sep` attribute "
"for %(cls)s." % {"cls": self.view.__class__.__name__})
fields = facet_serializer_cls.Meta.fields
exclude = facet_serializer_cls.Meta.exclude
field_options = facet_serializer_cls.Meta.field_options
for field, options in filters.items():
if field not in fields or field in exclude:
continue
field_options = merge_dict(field_options, {field: self.parse_field_options(self.view.lookup_sep, *options)})
valid_gap = ("year", "month", "day", "hour", "minute", "second")
for field, options in field_options.items():
if any([k in options for k in ("start_date", "end_date", "gap_by", "gap_amount")]):
if not all(k in options for k in ("start_date", "end_date", "gap_by")):
raise ValueError("Date faceting requires at least 'start_date', 'end_date' "
"and 'gap_by' to be set.")
if not options["gap_by"] in valid_gap:
raise ValueError("The 'gap_by' parameter must be one of %s." % ", ".join(valid_gap))
options.setdefault("gap_amount", 1)
date_facets[field] = field_options[field]
else:
field_facets[field] = field_options[field]
return {
"date_facets": date_facets,
"field_facets": field_facets,
"query_facets": query_facets
} | [
"def",
"build_query",
"(",
"self",
",",
"*",
"*",
"filters",
")",
":",
"field_facets",
"=",
"{",
"}",
"date_facets",
"=",
"{",
"}",
"query_facets",
"=",
"{",
"}",
"facet_serializer_cls",
"=",
"self",
".",
"view",
".",
"get_facet_serializer_class",
"(",
")",
"if",
"self",
".",
"view",
".",
"lookup_sep",
"==",
"\":\"",
":",
"raise",
"AttributeError",
"(",
"\"The %(cls)s.lookup_sep attribute conflicts with the HaystackFacetFilter \"",
"\"query parameter parser. Please choose another `lookup_sep` attribute \"",
"\"for %(cls)s.\"",
"%",
"{",
"\"cls\"",
":",
"self",
".",
"view",
".",
"__class__",
".",
"__name__",
"}",
")",
"fields",
"=",
"facet_serializer_cls",
".",
"Meta",
".",
"fields",
"exclude",
"=",
"facet_serializer_cls",
".",
"Meta",
".",
"exclude",
"field_options",
"=",
"facet_serializer_cls",
".",
"Meta",
".",
"field_options",
"for",
"field",
",",
"options",
"in",
"filters",
".",
"items",
"(",
")",
":",
"if",
"field",
"not",
"in",
"fields",
"or",
"field",
"in",
"exclude",
":",
"continue",
"field_options",
"=",
"merge_dict",
"(",
"field_options",
",",
"{",
"field",
":",
"self",
".",
"parse_field_options",
"(",
"self",
".",
"view",
".",
"lookup_sep",
",",
"*",
"options",
")",
"}",
")",
"valid_gap",
"=",
"(",
"\"year\"",
",",
"\"month\"",
",",
"\"day\"",
",",
"\"hour\"",
",",
"\"minute\"",
",",
"\"second\"",
")",
"for",
"field",
",",
"options",
"in",
"field_options",
".",
"items",
"(",
")",
":",
"if",
"any",
"(",
"[",
"k",
"in",
"options",
"for",
"k",
"in",
"(",
"\"start_date\"",
",",
"\"end_date\"",
",",
"\"gap_by\"",
",",
"\"gap_amount\"",
")",
"]",
")",
":",
"if",
"not",
"all",
"(",
"(",
"\"start_date\"",
",",
"\"end_date\"",
",",
"\"gap_by\"",
"in",
"options",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Date faceting requires at least 'start_date', 'end_date' \"",
"\"and 'gap_by' to be set.\"",
")",
"if",
"not",
"options",
"[",
"\"gap_by\"",
"]",
"in",
"valid_gap",
":",
"raise",
"ValueError",
"(",
"\"The 'gap_by' parameter must be one of %s.\"",
"%",
"\", \"",
".",
"join",
"(",
"valid_gap",
")",
")",
"options",
".",
"setdefault",
"(",
"\"gap_amount\"",
",",
"1",
")",
"date_facets",
"[",
"field",
"]",
"=",
"field_options",
"[",
"field",
"]",
"else",
":",
"field_facets",
"[",
"field",
"]",
"=",
"field_options",
"[",
"field",
"]",
"return",
"{",
"\"date_facets\"",
":",
"date_facets",
",",
"\"field_facets\"",
":",
"field_facets",
",",
"\"query_facets\"",
":",
"query_facets",
"}"
] | Creates a dict of dictionaries suitable for passing to the SearchQuerySet `facet`,
`date_facet` or `query_facet` method. All keyword arguments should be wrapped in a list.
:param dict[str, list[str]] filters: an expanded QueryDict or a mapping
of keys to a list of parameters. | [
"Creates",
"a",
"dict",
"of",
"dictionaries",
"suitable",
"for",
"passing",
"to",
"the",
"SearchQuerySet",
"facet",
"date_facet",
"or",
"query_facet",
"method",
".",
"All",
"key",
"word",
"arguments",
"should",
"be",
"wrapped",
"in",
"a",
"list",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L159-L210 | train |
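A sketch of the facet configuration this builder consumes, with hypothetical index and field names: plain fields land in `field_facets`, while a field carrying `start_date`/`end_date`/`gap_by` options is routed to `date_facets`.

from datetime import datetime, timedelta
from drf_haystack.serializers import HaystackFacetSerializer

class PersonFacetSerializer(HaystackFacetSerializer):
    class Meta:
        index_classes = [PersonIndex]          # hypothetical
        fields = ["firstname", "created"]
        field_options = {
            "firstname": {},                   # plain field facet
            "created": {                       # date facet
                "start_date": datetime.now() - timedelta(days=365),
                "end_date": datetime.now(),
                "gap_by": "month",
                "gap_amount": 1,               # optional, defaults to 1
            },
        }

# Request-time overrides use the token grammar parsed in the next entry, e.g.
# GET /search/facets/?created=start_date:2018-01-01,gap_by:day,gap_amount:7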
inonit/drf-haystack | drf_haystack/query.py | FacetQueryBuilder.parse_field_options | def parse_field_options(self, *options):
"""
Parse the field options query string and return it as a dictionary.
"""
defaults = {}
for option in options:
if isinstance(option, six.text_type):
tokens = [token.strip() for token in option.split(self.view.lookup_sep)]
for token in tokens:
if not len(token.split(":")) == 2:
warnings.warn("The %s token is not properly formatted. Tokens need to be "
"formatted as 'token:value' pairs." % token)
continue
param, value = token.split(":", 1)
if any([k == param for k in ("start_date", "end_date", "gap_amount")]):
if param in ("start_date", "end_date"):
value = parser.parse(value)
if param == "gap_amount":
value = int(value)
defaults[param] = value
return defaults | python | def parse_field_options(self, *options):
"""
Parse the field options query string and return it as a dictionary.
"""
defaults = {}
for option in options:
if isinstance(option, six.text_type):
tokens = [token.strip() for token in option.split(self.view.lookup_sep)]
for token in tokens:
if not len(token.split(":")) == 2:
warnings.warn("The %s token is not properly formatted. Tokens need to be "
"formatted as 'token:value' pairs." % token)
continue
param, value = token.split(":", 1)
if any([k == param for k in ("start_date", "end_date", "gap_amount")]):
if param in ("start_date", "end_date"):
value = parser.parse(value)
if param == "gap_amount":
value = int(value)
defaults[param] = value
return defaults | [
"def",
"parse_field_options",
"(",
"self",
",",
"*",
"options",
")",
":",
"defaults",
"=",
"{",
"}",
"for",
"option",
"in",
"options",
":",
"if",
"isinstance",
"(",
"option",
",",
"six",
".",
"text_type",
")",
":",
"tokens",
"=",
"[",
"token",
".",
"strip",
"(",
")",
"for",
"token",
"in",
"option",
".",
"split",
"(",
"self",
".",
"view",
".",
"lookup_sep",
")",
"]",
"for",
"token",
"in",
"tokens",
":",
"if",
"not",
"len",
"(",
"token",
".",
"split",
"(",
"\":\"",
")",
")",
"==",
"2",
":",
"warnings",
".",
"warn",
"(",
"\"The %s token is not properly formatted. Tokens need to be \"",
"\"formatted as 'token:value' pairs.\"",
"%",
"token",
")",
"continue",
"param",
",",
"value",
"=",
"token",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"if",
"any",
"(",
"[",
"k",
"==",
"param",
"for",
"k",
"in",
"(",
"\"start_date\"",
",",
"\"end_date\"",
",",
"\"gap_amount\"",
")",
"]",
")",
":",
"if",
"param",
"in",
"(",
"\"start_date\"",
",",
"\"end_date\"",
")",
":",
"value",
"=",
"parser",
".",
"parse",
"(",
"value",
")",
"if",
"param",
"==",
"\"gap_amount\"",
":",
"value",
"=",
"int",
"(",
"value",
")",
"defaults",
"[",
"param",
"]",
"=",
"value",
"return",
"defaults"
] | Parse the field options query string and return it as a dictionary. | [
"Parse",
"the",
"field",
"options",
"query",
"string",
"and",
"return",
"it",
"as",
"a",
"dictionary",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L212-L239 | train |
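A standalone, runnable sketch of the `param:value` token grammar parsed above, using the same `dateutil` parser the method relies on; the method itself skips malformed tokens with a warning instead.

from dateutil import parser

option = "start_date:2018-01-01,end_date:2018-12-31,gap_by:month,gap_amount:3"
parsed = {}
for token in (t.strip() for t in option.split(",")):   # "," as lookup_sep
    name, sep, value = token.partition(":")
    if not sep:
        continue                        # no "param:value" pair in this token
    if name in ("start_date", "end_date"):
        value = parser.parse(value)     # -> datetime.datetime
    elif name == "gap_amount":
        value = int(value)
    parsed[name] = value                # untyped params are kept as strings
print(parsed["gap_by"], parsed["gap_amount"])    # month 3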
inonit/drf-haystack | drf_haystack/query.py | SpatialQueryBuilder.build_query | def build_query(self, **filters):
"""
Build queries for geospatial filtering.
Expected query parameters are:
- a `unit=value` parameter where the unit is a valid UNIT in the
`django.contrib.gis.measure.Distance` class.
- `from` which must be a comma-separated latitude and longitude.
Example query:
/api/v1/search/?km=10&from=59.744076,10.152045
Will perform a `dwithin` query within 10 km from the point
with latitude 59.744076 and longitude 10.152045.
"""
applicable_filters = None
filters = dict((k, filters[k]) for k in chain(self.D.UNITS.keys(),
[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM]) if k in filters)
distance = dict((k, v) for k, v in filters.items() if k in self.D.UNITS.keys())
try:
latitude, longitude = map(float, self.tokenize(filters[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM],
self.view.lookup_sep))
point = self.Point(longitude, latitude, srid=constants.GEO_SRID)
except ValueError:
raise ValueError("Cannot convert `from=latitude,longitude` query parameter to "
"float values. Make sure to provide numerical values only!")
except KeyError:
# If the user has not provided any `from` query string parameter,
# just return.
pass
else:
for unit in distance.keys():
if not len(distance[unit]) == 1:
raise ValueError("Each unit must have exactly one value.")
distance[unit] = float(distance[unit][0])
if point and distance:
applicable_filters = {
"dwithin": {
"field": self.backend.point_field,
"point": point,
"distance": self.D(**distance)
},
"distance": {
"field": self.backend.point_field,
"point": point
}
}
return applicable_filters | python | def build_query(self, **filters):
"""
Build queries for geospatial filtering.
Expected query parameters are:
- a `unit=value` parameter where the unit is a valid UNIT in the
`django.contrib.gis.measure.Distance` class.
- `from` which must be a comma-separated latitude and longitude.
Example query:
/api/v1/search/?km=10&from=59.744076,10.152045
Will perform a `dwithin` query within 10 km from the point
with latitude 59.744076 and longitude 10.152045.
"""
applicable_filters = None
filters = dict((k, filters[k]) for k in chain(self.D.UNITS.keys(),
[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM]) if k in filters)
distance = dict((k, v) for k, v in filters.items() if k in self.D.UNITS.keys())
try:
latitude, longitude = map(float, self.tokenize(filters[constants.DRF_HAYSTACK_SPATIAL_QUERY_PARAM],
self.view.lookup_sep))
point = self.Point(longitude, latitude, srid=constants.GEO_SRID)
except ValueError:
raise ValueError("Cannot convert `from=latitude,longitude` query parameter to "
"float values. Make sure to provide numerical values only!")
except KeyError:
# If the user has not provided any `from` query string parameter,
# just return.
pass
else:
for unit in distance.keys():
if not len(distance[unit]) == 1:
raise ValueError("Each unit must have exactly one value.")
distance[unit] = float(distance[unit][0])
if point and distance:
applicable_filters = {
"dwithin": {
"field": self.backend.point_field,
"point": point,
"distance": self.D(**distance)
},
"distance": {
"field": self.backend.point_field,
"point": point
}
}
return applicable_filters | [
"def",
"build_query",
"(",
"self",
",",
"*",
"*",
"filters",
")",
":",
"applicable_filters",
"=",
"None",
"filters",
"=",
"dict",
"(",
"(",
"k",
",",
"filters",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"chain",
"(",
"self",
".",
"D",
".",
"UNITS",
".",
"keys",
"(",
")",
",",
"[",
"constants",
".",
"DRF_HAYSTACK_SPATIAL_QUERY_PARAM",
"]",
")",
"if",
"k",
"in",
"filters",
")",
"distance",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
"if",
"k",
"in",
"self",
".",
"D",
".",
"UNITS",
".",
"keys",
"(",
")",
")",
"try",
":",
"latitude",
",",
"longitude",
"=",
"map",
"(",
"float",
",",
"self",
".",
"tokenize",
"(",
"filters",
"[",
"constants",
".",
"DRF_HAYSTACK_SPATIAL_QUERY_PARAM",
"]",
",",
"self",
".",
"view",
".",
"lookup_sep",
")",
")",
"point",
"=",
"self",
".",
"Point",
"(",
"longitude",
",",
"latitude",
",",
"srid",
"=",
"constants",
".",
"GEO_SRID",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Cannot convert `from=latitude,longitude` query parameter to \"",
"\"float values. Make sure to provide numerical values only!\"",
")",
"except",
"KeyError",
":",
"# If the user has not provided any `from` query string parameter,",
"# just return.",
"pass",
"else",
":",
"for",
"unit",
"in",
"distance",
".",
"keys",
"(",
")",
":",
"if",
"not",
"len",
"(",
"distance",
"[",
"unit",
"]",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Each unit must have exactly one value.\"",
")",
"distance",
"[",
"unit",
"]",
"=",
"float",
"(",
"distance",
"[",
"unit",
"]",
"[",
"0",
"]",
")",
"if",
"point",
"and",
"distance",
":",
"applicable_filters",
"=",
"{",
"\"dwithin\"",
":",
"{",
"\"field\"",
":",
"self",
".",
"backend",
".",
"point_field",
",",
"\"point\"",
":",
"point",
",",
"\"distance\"",
":",
"self",
".",
"D",
"(",
"*",
"*",
"distance",
")",
"}",
",",
"\"distance\"",
":",
"{",
"\"field\"",
":",
"self",
".",
"backend",
".",
"point_field",
",",
"\"point\"",
":",
"point",
"}",
"}",
"return",
"applicable_filters"
] | Build queries for geospatial filtering.
Expected query parameters are:
- a `unit=value` parameter where the unit is a valid UNIT in the
`django.contrib.gis.measure.Distance` class.
- `from` which must be a comma-separated latitude and longitude.
Example query:
/api/v1/search/?km=10&from=59.744076,10.152045
Will perform a `dwithin` query within 10 km from the point
with latitude 59.744076 and longitude 10.152045. | [
"Build",
"queries",
"for",
"geo",
"spatial",
"filtering",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/query.py#L266-L318 | train |
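A sketch of reaching this builder through the bundled geo-spatial filter backend; model and serializer names are hypothetical. Note the `Point` above is constructed as (longitude, latitude), while the `from` parameter is written latitude,longitude.

from drf_haystack.filters import HaystackGEOSpatialFilter
from drf_haystack.viewsets import HaystackViewSet

class LocationGeoSearchViewSet(HaystackViewSet):
    index_models = [Location]                  # hypothetical model
    serializer_class = LocationSerializer      # hypothetical serializer
    filter_backends = [HaystackGEOSpatialFilter]

# GET /search/?km=10&from=59.744076,10.152045
#   -> dwithin 10 km of lat 59.744076 / lon 10.152045, plus a distance
#      annotation per hit; any unit name on Distance.UNITS works (m, mi, ...)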
inonit/drf-haystack | drf_haystack/utils.py | merge_dict | def merge_dict(a, b):
"""
Recursively merges and returns dict a with dict b.
Any list values will be combined and returned sorted.
:param a: dictionary object
:param b: dictionary object
:return: merged dictionary object
"""
if not isinstance(b, dict):
return b
result = deepcopy(a)
for key, val in six.iteritems(b):
if key in result and isinstance(result[key], dict):
result[key] = merge_dict(result[key], val)
elif key in result and isinstance(result[key], list):
result[key] = sorted(list(set(val) | set(result[key])))
else:
result[key] = deepcopy(val)
return result | python | def merge_dict(a, b):
"""
Recursively merges and returns dict a with dict b.
Any list values will be combined and returned sorted.
:param a: dictionary object
:param b: dictionary object
:return: merged dictionary object
"""
if not isinstance(b, dict):
return b
result = deepcopy(a)
for key, val in six.iteritems(b):
if key in result and isinstance(result[key], dict):
result[key] = merge_dict(result[key], val)
elif key in result and isinstance(result[key], list):
result[key] = sorted(list(set(val) | set(result[key])))
else:
result[key] = deepcopy(val)
return result | [
"def",
"merge_dict",
"(",
"a",
",",
"b",
")",
":",
"if",
"not",
"isinstance",
"(",
"b",
",",
"dict",
")",
":",
"return",
"b",
"result",
"=",
"deepcopy",
"(",
"a",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"b",
")",
":",
"if",
"key",
"in",
"result",
"and",
"isinstance",
"(",
"result",
"[",
"key",
"]",
",",
"dict",
")",
":",
"result",
"[",
"key",
"]",
"=",
"merge_dict",
"(",
"result",
"[",
"key",
"]",
",",
"val",
")",
"elif",
"key",
"in",
"result",
"and",
"isinstance",
"(",
"result",
"[",
"key",
"]",
",",
"list",
")",
":",
"result",
"[",
"key",
"]",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"val",
")",
"|",
"set",
"(",
"result",
"[",
"key",
"]",
")",
")",
")",
"else",
":",
"result",
"[",
"key",
"]",
"=",
"deepcopy",
"(",
"val",
")",
"return",
"result"
] | Recursively merges and returns dict a with dict b.
Any list values will be combined and returned sorted.
:param a: dictionary object
:param b: dictionary object
:return: merged dictionary object | [
"Recursively",
"merges",
"and",
"returns",
"dict",
"a",
"with",
"dict",
"b",
".",
"Any",
"list",
"values",
"will",
"be",
"combined",
"and",
"returned",
"sorted",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/utils.py#L9-L31 | train |
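A runnable demonstration of the merge semantics: nested dicts merge recursively, list values are unioned and sorted, and for plain values the right-hand dict wins.

from drf_haystack.utils import merge_dict

a = {"opts": {"fields": ["name", "age"]}, "gap_by": "month"}
b = {"opts": {"fields": ["age", "city"], "limit": 10}, "gap_by": "day"}
print(merge_dict(a, b))
# {'opts': {'fields': ['age', 'city', 'name'], 'limit': 10}, 'gap_by': 'day'}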
inonit/drf-haystack | drf_haystack/generics.py | HaystackGenericAPIView.get_queryset | def get_queryset(self, index_models=[]):
"""
Get the list of items for this view.
Returns ``self.queryset`` if defined and is a ``self.object_class``
instance.
:param index_models: override `self.index_models`
"""
if self.queryset is not None and isinstance(self.queryset, self.object_class):
queryset = self.queryset.all()
else:
queryset = self.object_class()._clone()
if len(index_models):
queryset = queryset.models(*index_models)
elif len(self.index_models):
queryset = queryset.models(*self.index_models)
return queryset | python | def get_queryset(self, index_models=[]):
"""
Get the list of items for this view.
Returns ``self.queryset`` if defined and is a ``self.object_class``
instance.
:param index_models: override `self.index_models`
"""
if self.queryset is not None and isinstance(self.queryset, self.object_class):
queryset = self.queryset.all()
else:
queryset = self.object_class()._clone()
if len(index_models):
queryset = queryset.models(*index_models)
elif len(self.index_models):
queryset = queryset.models(*self.index_models)
return queryset | [
"def",
"get_queryset",
"(",
"self",
",",
"index_models",
"=",
"[",
"]",
")",
":",
"if",
"self",
".",
"queryset",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"queryset",
",",
"self",
".",
"object_class",
")",
":",
"queryset",
"=",
"self",
".",
"queryset",
".",
"all",
"(",
")",
"else",
":",
"queryset",
"=",
"self",
".",
"object_class",
"(",
")",
".",
"_clone",
"(",
")",
"if",
"len",
"(",
"index_models",
")",
":",
"queryset",
"=",
"queryset",
".",
"models",
"(",
"*",
"index_models",
")",
"elif",
"len",
"(",
"self",
".",
"index_models",
")",
":",
"queryset",
"=",
"queryset",
".",
"models",
"(",
"*",
"self",
".",
"index_models",
")",
"return",
"queryset"
] | Get the list of items for this view.
Returns ``self.queryset`` if defined and is a ``self.object_class``
instance.
:param index_models: override `self.index_models` | [
"Get",
"the",
"list",
"of",
"items",
"for",
"this",
"view",
".",
"Returns",
"self",
".",
"queryset",
"if",
"defined",
"and",
"is",
"a",
"self",
".",
"object_class",
"instance",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/generics.py#L40-L56 | train |
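The mutable default `index_models=[]` is safe here because the list is only inspected, never mutated. A common override, sketched with hypothetical names, narrows the SearchQuerySet before the filter backends run:

from drf_haystack.viewsets import HaystackViewSet

class PublishedSearchViewSet(HaystackViewSet):
    index_models = [Article]                     # hypothetical
    serializer_class = ArticleSearchSerializer   # hypothetical

    def get_queryset(self, index_models=[]):
        qs = super(PublishedSearchViewSet, self).get_queryset(index_models)
        return qs.filter(published=True)   # assumes a `published` index field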
inonit/drf-haystack | drf_haystack/generics.py | HaystackGenericAPIView.get_object | def get_object(self):
"""
Fetch a single document from the data store according to whatever
unique identifier is available for that document in the
SearchIndex.
In cases where the view has multiple ``index_models``, add a ``model`` query
parameter containing a single `app_label.model` name to the request in order
to override which model to include in the SearchQuerySet.
Example:
/api/v1/search/42/?model=myapp.person
"""
queryset = self.get_queryset()
if "model" in self.request.query_params:
try:
app_label, model = map(six.text_type.lower, self.request.query_params["model"].split(".", 1))
ctype = ContentType.objects.get(app_label=app_label, model=model)
queryset = self.get_queryset(index_models=[ctype.model_class()])
except (ValueError, ContentType.DoesNotExist):
raise Http404("Could not find any models matching '%s'. Make sure to use a valid "
"'app_label.model' name for the 'model' query parameter." % self.request.query_params["model"])
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
if lookup_url_kwarg not in self.kwargs:
raise AttributeError(
"Expected view %s to be called with a URL keyword argument "
"named '%s'. Fix your URL conf, or set the `.lookup_field` "
"attribute on the view correctly." % (self.__class__.__name__, lookup_url_kwarg)
)
queryset = queryset.filter(self.query_object((self.document_uid_field, self.kwargs[lookup_url_kwarg])))
count = queryset.count()
if count == 1:
return queryset[0]
elif count > 1:
raise Http404("Multiple results matches the given query. Expected a single result.")
raise Http404("No result matches the given query.") | python | def get_object(self):
"""
Fetch a single document from the data store according to whatever
unique identifier is available for that document in the
SearchIndex.
In cases where the view has multiple ``index_models``, add a ``model`` query
parameter containing a single `app_label.model` name to the request in order
to override which model to include in the SearchQuerySet.
Example:
/api/v1/search/42/?model=myapp.person
"""
queryset = self.get_queryset()
if "model" in self.request.query_params:
try:
app_label, model = map(six.text_type.lower, self.request.query_params["model"].split(".", 1))
ctype = ContentType.objects.get(app_label=app_label, model=model)
queryset = self.get_queryset(index_models=[ctype.model_class()])
except (ValueError, ContentType.DoesNotExist):
raise Http404("Could not find any models matching '%s'. Make sure to use a valid "
"'app_label.model' name for the 'model' query parameter." % self.request.query_params["model"])
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
if lookup_url_kwarg not in self.kwargs:
raise AttributeError(
"Expected view %s to be called with a URL keyword argument "
"named '%s'. Fix your URL conf, or set the `.lookup_field` "
"attribute on the view correctly." % (self.__class__.__name__, lookup_url_kwarg)
)
queryset = queryset.filter(self.query_object((self.document_uid_field, self.kwargs[lookup_url_kwarg])))
count = queryset.count()
if count == 1:
return queryset[0]
elif count > 1:
raise Http404("Multiple results matches the given query. Expected a single result.")
raise Http404("No result matches the given query.") | [
"def",
"get_object",
"(",
"self",
")",
":",
"queryset",
"=",
"self",
".",
"get_queryset",
"(",
")",
"if",
"\"model\"",
"in",
"self",
".",
"request",
".",
"query_params",
":",
"try",
":",
"app_label",
",",
"model",
"=",
"map",
"(",
"six",
".",
"text_type",
".",
"lower",
",",
"self",
".",
"request",
".",
"query_params",
"[",
"\"model\"",
"]",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
")",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get",
"(",
"app_label",
"=",
"app_label",
",",
"model",
"=",
"model",
")",
"queryset",
"=",
"self",
".",
"get_queryset",
"(",
"index_models",
"=",
"[",
"ctype",
".",
"model_class",
"(",
")",
"]",
")",
"except",
"(",
"ValueError",
",",
"ContentType",
".",
"DoesNotExist",
")",
":",
"raise",
"Http404",
"(",
"\"Could not find any models matching '%s'. Make sure to use a valid \"",
"\"'app_label.model' name for the 'model' query parameter.\"",
"%",
"self",
".",
"request",
".",
"query_params",
"[",
"\"model\"",
"]",
")",
"lookup_url_kwarg",
"=",
"self",
".",
"lookup_url_kwarg",
"or",
"self",
".",
"lookup_field",
"if",
"lookup_url_kwarg",
"not",
"in",
"self",
".",
"kwargs",
":",
"raise",
"AttributeError",
"(",
"\"Expected view %s to be called with a URL keyword argument \"",
"\"named '%s'. Fix your URL conf, or set the `.lookup_field` \"",
"\"attribute on the view correctly.\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"lookup_url_kwarg",
")",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"self",
".",
"query_object",
"(",
"(",
"self",
".",
"document_uid_field",
",",
"self",
".",
"kwargs",
"[",
"lookup_url_kwarg",
"]",
")",
")",
")",
"count",
"=",
"queryset",
".",
"count",
"(",
")",
"if",
"count",
"==",
"1",
":",
"return",
"queryset",
"[",
"0",
"]",
"elif",
"count",
">",
"1",
":",
"raise",
"Http404",
"(",
"\"Multiple results matches the given query. Expected a single result.\"",
")",
"raise",
"Http404",
"(",
"\"No result matches the given query.\"",
")"
] | Fetch a single document from the data store according to whatever
unique identifier is available for that document in the
SearchIndex.
In cases where the view has multiple ``index_models``, add a ``model`` query
parameter containing a single `app_label.model` name to the request in order
to override which model to include in the SearchQuerySet.
Example:
/api/v1/search/42/?model=myapp.person | [
"Fetch",
"a",
"single",
"document",
"from",
"the",
"data",
"store",
"according",
"to",
"whatever",
"unique",
"identifier",
"is",
"available",
"for",
"that",
"document",
"in",
"the",
"SearchIndex",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/generics.py#L58-L95 | train |
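A sketch of the detail lookup described above, with hypothetical names; `document_uid_field` is the index field matched against the URL kwarg.

from drf_haystack.viewsets import HaystackViewSet

class PersonSearchViewSet(HaystackViewSet):
    index_models = [Person]                     # hypothetical
    serializer_class = PersonSearchSerializer   # hypothetical
    lookup_field = "slug"                       # URL kwarg to read
    document_uid_field = "slug"                 # index field to match

# GET /search/jane-doe/                      -> exactly one hit, or 404
# GET /search/jane-doe/?model=myapp.person   -> restrict lookup to one index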
inonit/drf-haystack | drf_haystack/mixins.py | MoreLikeThisMixin.more_like_this | def more_like_this(self, request, pk=None):
"""
Sets up a detail route for ``more-like-this`` results.
Note that you'll need backend support in order to take advantage of this.
This will add e.g. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
"""
obj = self.get_object().object
queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data) | python | def more_like_this(self, request, pk=None):
"""
Sets up a detail route for ``more-like-this`` results.
Note that you'll need backend support in order to take advantage of this.
This will add e.g. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
"""
obj = self.get_object().object
queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data) | [
"def",
"more_like_this",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"obj",
"=",
"self",
".",
"get_object",
"(",
")",
".",
"object",
"queryset",
"=",
"self",
".",
"filter_queryset",
"(",
"self",
".",
"get_queryset",
"(",
")",
")",
".",
"more_like_this",
"(",
"obj",
")",
"page",
"=",
"self",
".",
"paginate_queryset",
"(",
"queryset",
")",
"if",
"page",
"is",
"not",
"None",
":",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"page",
",",
"many",
"=",
"True",
")",
"return",
"self",
".",
"get_paginated_response",
"(",
"serializer",
".",
"data",
")",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"queryset",
",",
"many",
"=",
"True",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
")"
] | Sets up a detail route for ``more-like-this`` results.
Note that you'll need backend support in order to take advantage of this.
This will add e.g. ^search/{pk}/more-like-this/$ to your existing ^search pattern. | [
"Sets",
"up",
"a",
"detail",
"route",
"for",
"more",
"-",
"like",
"-",
"this",
"results",
".",
"Note",
"that",
"you",
"ll",
"need",
"backend",
"support",
"in",
"order",
"to",
"take",
"advantage",
"of",
"this",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L17-L33 | train |
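Wiring the mixin into a viewset, with hypothetical names; the extra detail route depends on the search backend's more-like-this support, e.g. Solr or Elasticsearch.

from drf_haystack.mixins import MoreLikeThisMixin
from drf_haystack.viewsets import HaystackViewSet

class ArticleSearchViewSet(MoreLikeThisMixin, HaystackViewSet):
    index_models = [Article]                     # hypothetical
    serializer_class = ArticleSearchSerializer   # hypothetical

# router.register("search", ArticleSearchViewSet)
# GET /search/42/more-like-this/  -> paginated documents similar to pk=42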
inonit/drf-haystack | drf_haystack/mixins.py | FacetMixin.filter_facet_queryset | def filter_facet_queryset(self, queryset):
"""
Given a search queryset, filter it with whichever facet filter backends
are in use.
"""
for backend in list(self.facet_filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
if self.load_all:
queryset = queryset.load_all()
return queryset | python | def filter_facet_queryset(self, queryset):
"""
Given a search queryset, filter it with whichever facet filter backends
are in use.
"""
for backend in list(self.facet_filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
if self.load_all:
queryset = queryset.load_all()
return queryset | [
"def",
"filter_facet_queryset",
"(",
"self",
",",
"queryset",
")",
":",
"for",
"backend",
"in",
"list",
"(",
"self",
".",
"facet_filter_backends",
")",
":",
"queryset",
"=",
"backend",
"(",
")",
".",
"filter_queryset",
"(",
"self",
".",
"request",
",",
"queryset",
",",
"self",
")",
"if",
"self",
".",
"load_all",
":",
"queryset",
"=",
"queryset",
".",
"load_all",
"(",
")",
"return",
"queryset"
] | Given a search queryset, filter it with whichever facet filter backends
are in use. | [
"Given",
"a",
"search",
"queryset",
"filter",
"it",
"with",
"whichever",
"facet",
"filter",
"backends",
"in",
"use",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L66-L77 | train |
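A sketch of a faceted viewset whose requests pass through the loop above; names are hypothetical and `HaystackFacetFilter` is the bundled facet backend.

from drf_haystack.filters import HaystackFacetFilter
from drf_haystack.mixins import FacetMixin
from drf_haystack.viewsets import HaystackViewSet

class PersonFacetViewSet(FacetMixin, HaystackViewSet):
    index_models = [Person]                         # hypothetical
    serializer_class = PersonSearchSerializer       # hypothetical
    facet_serializer_class = PersonFacetSerializer  # hypothetical
    facet_filter_backends = [HaystackFacetFilter]
    load_all = True    # exercises the load_all() branch above

# GET /search/facets/  -> facet counts for the configured fields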
inonit/drf-haystack | drf_haystack/mixins.py | FacetMixin.get_facet_serializer | def get_facet_serializer(self, *args, **kwargs):
"""
Return the facet serializer instance that should be used for
serializing faceted output.
"""
assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
facet_serializer_class = self.get_facet_serializer_class()
kwargs["context"] = self.get_serializer_context()
kwargs["context"].update({
"objects": kwargs.pop("objects"),
"facet_query_params_text": self.facet_query_params_text,
})
return facet_serializer_class(*args, **kwargs) | python | def get_facet_serializer(self, *args, **kwargs):
"""
Return the facet serializer instance that should be used for
serializing faceted output.
"""
assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
facet_serializer_class = self.get_facet_serializer_class()
kwargs["context"] = self.get_serializer_context()
kwargs["context"].update({
"objects": kwargs.pop("objects"),
"facet_query_params_text": self.facet_query_params_text,
})
return facet_serializer_class(*args, **kwargs) | [
"def",
"get_facet_serializer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"\"objects\"",
"in",
"kwargs",
",",
"\"`objects` is a required argument to `get_facet_serializer()`\"",
"facet_serializer_class",
"=",
"self",
".",
"get_facet_serializer_class",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
"=",
"self",
".",
"get_serializer_context",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
".",
"update",
"(",
"{",
"\"objects\"",
":",
"kwargs",
".",
"pop",
"(",
"\"objects\"",
")",
",",
"\"facet_query_params_text\"",
":",
"self",
".",
"facet_query_params_text",
",",
"}",
")",
"return",
"facet_serializer_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return the facet serializer instance that should be used for
serializing faceted output. | [
"Return",
"the",
"facet",
"serializer",
"instance",
"that",
"should",
"be",
"used",
"for",
"serializing",
"faceted",
"output",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L79-L92 | train |
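Because `objects` is asserted, callers always pass the queryset explicitly; it lands in the serializer context together with the query-param text. Roughly how a facets action can invoke the helper; this is a sketch, not the exact bundled implementation, and `facet_counts()` is Haystack's aggregation on the SearchQuerySet.

from rest_framework.response import Response

def facets(self, request):
    queryset = self.filter_facet_queryset(self.get_queryset())
    serializer = self.get_facet_serializer(
        queryset.facet_counts(),   # positional data to serialize
        objects=queryset,          # required kwarg, exposed via context
        many=False,
    )
    return Response(serializer.data)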
inonit/drf-haystack | drf_haystack/mixins.py | FacetMixin.get_facet_serializer_class | def get_facet_serializer_class(self):
"""
Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``.
"""
if self.facet_serializer_class is None:
raise AttributeError(
"%(cls)s should either include a `facet_serializer_class` attribute, "
"or override %(cls)s.get_facet_serializer_class() method." %
{"cls": self.__class__.__name__}
)
return self.facet_serializer_class | python | def get_facet_serializer_class(self):
"""
Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``.
"""
if self.facet_serializer_class is None:
raise AttributeError(
"%(cls)s should either include a `facet_serializer_class` attribute, "
"or override %(cls)s.get_facet_serializer_class() method." %
{"cls": self.__class__.__name__}
)
return self.facet_serializer_class | [
"def",
"get_facet_serializer_class",
"(",
"self",
")",
":",
"if",
"self",
".",
"facet_serializer_class",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"%(cls)s should either include a `facet_serializer_class` attribute, \"",
"\"or override %(cls)s.get_facet_serializer_class() method.\"",
"%",
"{",
"\"cls\"",
":",
"self",
".",
"__class__",
".",
"__name__",
"}",
")",
"return",
"self",
".",
"facet_serializer_class"
] | Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``. | [
"Return",
"the",
"class",
"to",
"use",
"for",
"serializing",
"facets",
".",
"Defaults",
"to",
"using",
"self",
".",
"facet_serializer_class",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L94-L105 | train |
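Either set the attribute or override the hook. A sketch that varies the facet serializer per request; the versioning scheme and serializer names are hypothetical.

from drf_haystack.mixins import FacetMixin
from drf_haystack.viewsets import HaystackViewSet

class VersionedFacetViewSet(FacetMixin, HaystackViewSet):
    facet_serializer_class = PersonFacetSerializer    # avoids AttributeError

    def get_facet_serializer_class(self):
        if self.request.version == "v2":              # hypothetical versioning
            return PersonFacetSerializerV2
        return super(VersionedFacetViewSet, self).get_facet_serializer_class()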
inonit/drf-haystack | drf_haystack/mixins.py | FacetMixin.get_facet_objects_serializer | def get_facet_objects_serializer(self, *args, **kwargs):
"""
Return the serializer instance which should be used for
serializing faceted objects.
"""
facet_objects_serializer_class = self.get_facet_objects_serializer_class()
kwargs["context"] = self.get_serializer_context()
return facet_objects_serializer_class(*args, **kwargs) | python | def get_facet_objects_serializer(self, *args, **kwargs):
"""
Return the serializer instance which should be used for
serializing faceted objects.
"""
facet_objects_serializer_class = self.get_facet_objects_serializer_class()
kwargs["context"] = self.get_serializer_context()
return facet_objects_serializer_class(*args, **kwargs) | [
"def",
"get_facet_objects_serializer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"facet_objects_serializer_class",
"=",
"self",
".",
"get_facet_objects_serializer_class",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
"=",
"self",
".",
"get_serializer_context",
"(",
")",
"return",
"facet_objects_serializer_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return the serializer instance which should be used for
serializing faceted objects. | [
"Return",
"the",
"serializer",
"instance",
"which",
"should",
"be",
"used",
"for",
"serializing",
"faceted",
"objects",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L107-L114 | train |
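The objects serializer only matters when the facet serializer opts into rendering hits. A sketch pairing the hits with a slimmer serializer; that the mixin falls back to the view's regular `serializer_class` when no override exists is an assumption of this sketch.

from drf_haystack.mixins import FacetMixin
from drf_haystack.viewsets import HaystackViewSet

class SlimFacetViewSet(FacetMixin, HaystackViewSet):
    serializer_class = PersonSearchSerializer        # hypothetical
    facet_serializer_class = PersonFacetSerializer   # with serialize_objects = True

    def get_facet_objects_serializer_class(self):
        return SlimPersonSerializer                  # hypothetical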
inonit/drf-haystack | drf_haystack/fields.py | DRFHaystackFieldMixin.bind | def bind(self, field_name, parent):
"""
Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
Taken from DRF and modified to support drf_haystack multiple index
functionality.
"""
# In order to enforce a consistent style, we error if a redundant
# 'source' argument has been used. For example:
# my_field = serializer.CharField(source='my_field')
assert self.source != field_name, (
"It is redundant to specify `source='%s'` on field '%s' in "
"serializer '%s', because it is the same as the field name. "
"Remove the `source` keyword argument." %
(field_name, self.__class__.__name__, parent.__class__.__name__)
)
self.field_name = field_name
self.parent = parent
# `self.label` should default to being based on the field name.
if self.label is None:
self.label = field_name.replace('_', ' ').capitalize()
# self.source should default to being the same as the field name.
if self.source is None:
self.source = self.convert_field_name(field_name)
# self.source_attrs is a list of attributes that need to be looked up
# when serializing the instance, or populating the validated data.
if self.source == '*':
self.source_attrs = []
else:
self.source_attrs = self.source.split('.') | python | def bind(self, field_name, parent):
"""
Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
Taken from DRF and modified to support drf_haystack multiple index
functionality.
"""
# In order to enforce a consistent style, we error if a redundant
# 'source' argument has been used. For example:
# my_field = serializer.CharField(source='my_field')
assert self.source != field_name, (
"It is redundant to specify `source='%s'` on field '%s' in "
"serializer '%s', because it is the same as the field name. "
"Remove the `source` keyword argument." %
(field_name, self.__class__.__name__, parent.__class__.__name__)
)
self.field_name = field_name
self.parent = parent
# `self.label` should default to being based on the field name.
if self.label is None:
self.label = field_name.replace('_', ' ').capitalize()
# self.source should default to being the same as the field name.
if self.source is None:
self.source = self.convert_field_name(field_name)
# self.source_attrs is a list of attributes that need to be looked up
# when serializing the instance, or populating the validated data.
if self.source == '*':
self.source_attrs = []
else:
self.source_attrs = self.source.split('.') | [
"def",
"bind",
"(",
"self",
",",
"field_name",
",",
"parent",
")",
":",
"# In order to enforce a consistent style, we error if a redundant",
"# 'source' argument has been used. For example:",
"# my_field = serializer.CharField(source='my_field')",
"assert",
"self",
".",
"source",
"!=",
"field_name",
",",
"(",
"\"It is redundant to specify `source='%s'` on field '%s' in \"",
"\"serializer '%s', because it is the same as the field name. \"",
"\"Remove the `source` keyword argument.\"",
"%",
"(",
"field_name",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"parent",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"field_name",
"=",
"field_name",
"self",
".",
"parent",
"=",
"parent",
"# `self.label` should default to being based on the field name.",
"if",
"self",
".",
"label",
"is",
"None",
":",
"self",
".",
"label",
"=",
"field_name",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"capitalize",
"(",
")",
"# self.source should default to being the same as the field name.",
"if",
"self",
".",
"source",
"is",
"None",
":",
"self",
".",
"source",
"=",
"self",
".",
"convert_field_name",
"(",
"field_name",
")",
"# self.source_attrs is a list of attributes that need to be looked up",
"# when serializing the instance, or populating the validated data.",
"if",
"self",
".",
"source",
"==",
"'*'",
":",
"self",
".",
"source_attrs",
"=",
"[",
"]",
"else",
":",
"self",
".",
"source_attrs",
"=",
"self",
".",
"source",
".",
"split",
"(",
"'.'",
")"
] | Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
Taken from DRF and modified to support drf_haystack multiple index
functionality. | [
"Initializes",
"the",
"field",
"name",
"and",
"parent",
"for",
"the",
"field",
"instance",
".",
"Called",
"when",
"a",
"field",
"is",
"added",
"to",
"the",
"parent",
"serializer",
"instance",
".",
"Taken",
"from",
"DRF",
"and",
"modified",
"to",
"support",
"drf_haystack",
"multiple",
"index",
"functionality",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/fields.py#L16-L50 | train |
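The assertion mirrors DRF's own `Field.bind`; the drf-haystack mixin only changes the default `source` by routing it through `convert_field_name`. A runnable sketch of the failure mode with plain DRF fields:

from rest_framework import serializers

class GoodSerializer(serializers.Serializer):
    name = serializers.CharField()                # source defaults to "name"

class BadSerializer(serializers.Serializer):
    name = serializers.CharField(source="name")   # redundant source

GoodSerializer().fields   # binds fine
BadSerializer().fields    # AssertionError: "It is redundant to specify ..."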
inonit/drf-haystack | drf_haystack/serializers.py | HaystackSerializer._get_default_field_kwargs | def _get_default_field_kwargs(model, field):
"""
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
"""
kwargs = {}
try:
field_name = field.model_attr or field.index_fieldname
model_field = model._meta.get_field(field_name)
kwargs.update(get_field_kwargs(field_name, model_field))
# Remove stuff we don't care about!
delete_attrs = [
"allow_blank",
"choices",
"model_field",
"allow_unicode",
]
for attr in delete_attrs:
if attr in kwargs:
del kwargs[attr]
except FieldDoesNotExist:
pass
return kwargs | python | def _get_default_field_kwargs(model, field):
"""
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
"""
kwargs = {}
try:
field_name = field.model_attr or field.index_fieldname
model_field = model._meta.get_field(field_name)
kwargs.update(get_field_kwargs(field_name, model_field))
# Remove stuff we don't care about!
delete_attrs = [
"allow_blank",
"choices",
"model_field",
"allow_unicode",
]
for attr in delete_attrs:
if attr in kwargs:
del kwargs[attr]
except FieldDoesNotExist:
pass
return kwargs | [
"def",
"_get_default_field_kwargs",
"(",
"model",
",",
"field",
")",
":",
"kwargs",
"=",
"{",
"}",
"try",
":",
"field_name",
"=",
"field",
".",
"model_attr",
"or",
"field",
".",
"index_fieldname",
"model_field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"kwargs",
".",
"update",
"(",
"get_field_kwargs",
"(",
"field_name",
",",
"model_field",
")",
")",
"# Remove stuff we don't care about!",
"delete_attrs",
"=",
"[",
"\"allow_blank\"",
",",
"\"choices\"",
",",
"\"model_field\"",
",",
"\"allow_unicode\"",
",",
"]",
"for",
"attr",
"in",
"delete_attrs",
":",
"if",
"attr",
"in",
"kwargs",
":",
"del",
"kwargs",
"[",
"attr",
"]",
"except",
"FieldDoesNotExist",
":",
"pass",
"return",
"kwargs"
] | Get the required attributes from the model field in order
to instantiate a REST Framework serializer field. | [
"Get",
"the",
"required",
"attributes",
"from",
"the",
"model",
"field",
"in",
"order",
"to",
"instantiate",
"a",
"REST",
"Framework",
"serializer",
"field",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L124-L148 | train |
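A runnable illustration of the pruning step: of the kwargs DRF derives from a model field, only the ones a Haystack-backed serializer field can use survive. The input dict here is a stand-in for what `get_field_kwargs()` might return for a CharField(max_length=100).

kwargs = {"max_length": 100, "required": False, "allow_blank": True,
          "choices": [("a", "A")], "model_field": object()}
for attr in ("allow_blank", "choices", "model_field", "allow_unicode"):
    kwargs.pop(attr, None)    # same keys the helper deletes
print(kwargs)                 # {'max_length': 100, 'required': False}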
inonit/drf-haystack | drf_haystack/serializers.py | HaystackSerializer._get_index_class_name | def _get_index_class_name(self, index_cls):
"""
Converts an index model class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
"""
cls_name = index_cls.__name__
aliases = self.Meta.index_aliases
return aliases.get(cls_name, cls_name.split('.')[-1]) | python | def _get_index_class_name(self, index_cls):
"""
Converts an index model class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
"""
cls_name = index_cls.__name__
aliases = self.Meta.index_aliases
return aliases.get(cls_name, cls_name.split('.')[-1]) | [
"def",
"_get_index_class_name",
"(",
"self",
",",
"index_cls",
")",
":",
"cls_name",
"=",
"index_cls",
".",
"__name__",
"aliases",
"=",
"self",
".",
"Meta",
".",
"index_aliases",
"return",
"aliases",
".",
"get",
"(",
"cls_name",
",",
"cls_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")"
] | Converts an index model class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class | [
"Converts",
"in",
"index",
"model",
"class",
"to",
"a",
"name",
"suitable",
"for",
"use",
"as",
"a",
"field",
"name",
"prefix",
".",
"A",
"user",
"may",
"optionally",
"specify",
"custom",
"aliases",
"via",
"an",
"index_aliases",
"attribute",
"on",
"the",
"Meta",
"class"
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L156-L163 | train |
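A sketch of the alias mapping (index and serializer names hypothetical); without an alias, the bare class name is used, so the internal prefixes here would be `_person__` and `_CompanyIndex__`.

from drf_haystack.serializers import HaystackSerializer

class AggregateSerializer(HaystackSerializer):
    class Meta:
        index_classes = [PersonIndex, CompanyIndex]   # hypothetical
        index_aliases = {"PersonIndex": "person"}
        fields = ["firstname", "name"]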
inonit/drf-haystack | drf_haystack/serializers.py | HaystackSerializer.get_fields | def get_fields(self):
"""
Get the required fields for serializing the result.
"""
fields = self.Meta.fields
exclude = self.Meta.exclude
ignore_fields = self.Meta.ignore_fields
indices = self.Meta.index_classes
declared_fields = copy.deepcopy(self._declared_fields)
prefix_field_names = len(indices) > 1
field_mapping = OrderedDict()
# overlapping fields on multiple indices are supported by internally prefixing the field
# names with the index class to which they belong or, optionally, a user-provided alias
# for the index.
for index_cls in self.Meta.index_classes:
prefix = ""
if prefix_field_names:
prefix = "_%s__" % self._get_index_class_name(index_cls)
for field_name, field_type in six.iteritems(index_cls.fields):
orig_name = field_name
field_name = "%s%s" % (prefix, field_name)
# Don't use this field if it is in `ignore_fields`
if orig_name in ignore_fields or field_name in ignore_fields:
continue
# When fields to include are decided by `exclude`
if exclude:
if orig_name in exclude or field_name in exclude:
continue
# When fields to include are decided by `fields`
if fields:
if orig_name not in fields and field_name not in fields:
continue
# Look up the field attributes on the current index model,
# in order to correctly instantiate the serializer field.
model = index_cls().get_model()
kwargs = self._get_default_field_kwargs(model, field_type)
kwargs['prefix_field_names'] = prefix_field_names
field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
# Add any explicitly declared fields. They *will* override any index fields
# in case of naming collision!.
if declared_fields:
for field_name in declared_fields:
field_mapping[field_name] = declared_fields[field_name]
return field_mapping | python | def get_fields(self):
"""
Get the required fields for serializing the result.
"""
fields = self.Meta.fields
exclude = self.Meta.exclude
ignore_fields = self.Meta.ignore_fields
indices = self.Meta.index_classes
declared_fields = copy.deepcopy(self._declared_fields)
prefix_field_names = len(indices) > 1
field_mapping = OrderedDict()
# overlapping fields on multiple indices are supported by internally prefixing the field
# names with the index class to which they belong or, optionally, a user-provided alias
# for the index.
for index_cls in self.Meta.index_classes:
prefix = ""
if prefix_field_names:
prefix = "_%s__" % self._get_index_class_name(index_cls)
for field_name, field_type in six.iteritems(index_cls.fields):
orig_name = field_name
field_name = "%s%s" % (prefix, field_name)
# Don't use this field if it is in `ignore_fields`
if orig_name in ignore_fields or field_name in ignore_fields:
continue
# When fields to include are decided by `exclude`
if exclude:
if orig_name in exclude or field_name in exclude:
continue
# When fields to include are decided by `fields`
if fields:
if orig_name not in fields and field_name not in fields:
continue
# Look up the field attributes on the current index model,
# in order to correctly instantiate the serializer field.
model = index_cls().get_model()
kwargs = self._get_default_field_kwargs(model, field_type)
kwargs['prefix_field_names'] = prefix_field_names
field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
# Add any explicitly declared fields. They *will* override any index fields
# in case of naming collision!
if declared_fields:
for field_name in declared_fields:
field_mapping[field_name] = declared_fields[field_name]
return field_mapping | [
"def",
"get_fields",
"(",
"self",
")",
":",
"fields",
"=",
"self",
".",
"Meta",
".",
"fields",
"exclude",
"=",
"self",
".",
"Meta",
".",
"exclude",
"ignore_fields",
"=",
"self",
".",
"Meta",
".",
"ignore_fields",
"indices",
"=",
"self",
".",
"Meta",
".",
"index_classes",
"declared_fields",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_declared_fields",
")",
"prefix_field_names",
"=",
"len",
"(",
"indices",
")",
">",
"1",
"field_mapping",
"=",
"OrderedDict",
"(",
")",
"# overlapping fields on multiple indices is supported by internally prefixing the field",
"# names with the index class to which they belong or, optionally, a user-provided alias",
"# for the index.",
"for",
"index_cls",
"in",
"self",
".",
"Meta",
".",
"index_classes",
":",
"prefix",
"=",
"\"\"",
"if",
"prefix_field_names",
":",
"prefix",
"=",
"\"_%s__\"",
"%",
"self",
".",
"_get_index_class_name",
"(",
"index_cls",
")",
"for",
"field_name",
",",
"field_type",
"in",
"six",
".",
"iteritems",
"(",
"index_cls",
".",
"fields",
")",
":",
"orig_name",
"=",
"field_name",
"field_name",
"=",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"field_name",
")",
"# Don't use this field if it is in `ignore_fields`",
"if",
"orig_name",
"in",
"ignore_fields",
"or",
"field_name",
"in",
"ignore_fields",
":",
"continue",
"# When fields to include are decided by `exclude`",
"if",
"exclude",
":",
"if",
"orig_name",
"in",
"exclude",
"or",
"field_name",
"in",
"exclude",
":",
"continue",
"# When fields to include are decided by `fields`",
"if",
"fields",
":",
"if",
"orig_name",
"not",
"in",
"fields",
"and",
"field_name",
"not",
"in",
"fields",
":",
"continue",
"# Look up the field attributes on the current index model,",
"# in order to correctly instantiate the serializer field.",
"model",
"=",
"index_cls",
"(",
")",
".",
"get_model",
"(",
")",
"kwargs",
"=",
"self",
".",
"_get_default_field_kwargs",
"(",
"model",
",",
"field_type",
")",
"kwargs",
"[",
"'prefix_field_names'",
"]",
"=",
"prefix_field_names",
"field_mapping",
"[",
"field_name",
"]",
"=",
"self",
".",
"_field_mapping",
"[",
"field_type",
"]",
"(",
"*",
"*",
"kwargs",
")",
"# Add any explicitly declared fields. They *will* override any index fields",
"# in case of naming collision!.",
"if",
"declared_fields",
":",
"for",
"field_name",
"in",
"declared_fields",
":",
"field_mapping",
"[",
"field_name",
"]",
"=",
"declared_fields",
"[",
"field_name",
"]",
"return",
"field_mapping"
] | Get the required fields for serializing the result. | [
"Get",
"the",
"required",
"fields",
"for",
"serializing",
"the",
"result",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L165-L214 | train |
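With more than one index class every field is namespaced, and `fields`/`exclude`/`ignore_fields` match either the bare or the prefixed name; `to_representation` in the next entry strips the prefix again per result. A sketch with hypothetical names:

from drf_haystack.serializers import HaystackSerializer

class CombinedSearchSerializer(HaystackSerializer):
    class Meta:
        index_classes = [PersonIndex, CompanyIndex]   # hypothetical
        fields = ["firstname", "name"]                # "name" on both indexes
        ignore_fields = ["autocomplete"]              # never serialized

# CombinedSearchSerializer().get_fields() keys look like _PersonIndex__firstname,
# _PersonIndex__name, _CompanyIndex__name, ...; each rendered hit later keeps
# only the un-prefixed fields of its own index.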
inonit/drf-haystack | drf_haystack/serializers.py | HaystackSerializer.to_representation | def to_representation(self, instance):
"""
If we have a serializer mapping, use that. Otherwise, use standard serializer behavior.
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
"""
if self.Meta.serializers:
ret = self.multi_serializer_representation(instance)
else:
ret = super(HaystackSerializer, self).to_representation(instance)
prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
current_index = self._get_index_class_name(type(instance.searchindex))
for field in self.fields.keys():
# handle declared field value methods on serializer
value_method = getattr(self, "get_{}".format(field), None)
if value_method and callable(value_method):
ret[field] = value_method()
# now convert namespaced field names
orig_field = field
if prefix_field_names:
parts = field.split("__")
if len(parts) > 1:
index = parts[0][1:] # trim the preceding '_'
field = parts[1]
if index == current_index:
ret[field] = ret[orig_field]
del ret[orig_field]
elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
del ret[orig_field]
# include the highlighted field in either case
if getattr(instance, "highlighted", None):
ret["highlighted"] = instance.highlighted[0]
return ret | python | def to_representation(self, instance):
"""
If we have a serializer mapping, use that. Otherwise, use standard serializer behavior.
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
"""
if self.Meta.serializers:
ret = self.multi_serializer_representation(instance)
else:
ret = super(HaystackSerializer, self).to_representation(instance)
prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
current_index = self._get_index_class_name(type(instance.searchindex))
for field in self.fields.keys():
# handle declared field value methods on serializer
value_method = getattr(self, "get_{}".format(field), None)
if value_method and callable(value_method):
ret[field] = value_method()
# now convert namespaced field names
orig_field = field
if prefix_field_names:
parts = field.split("__")
if len(parts) > 1:
index = parts[0][1:] # trim the preceding '_'
field = parts[1]
if index == current_index:
ret[field] = ret[orig_field]
del ret[orig_field]
elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
del ret[orig_field]
# include the highlighted field in either case
if getattr(instance, "highlighted", None):
ret["highlighted"] = instance.highlighted[0]
return ret | [
"def",
"to_representation",
"(",
"self",
",",
"instance",
")",
":",
"if",
"self",
".",
"Meta",
".",
"serializers",
":",
"ret",
"=",
"self",
".",
"multi_serializer_representation",
"(",
"instance",
")",
"else",
":",
"ret",
"=",
"super",
"(",
"HaystackSerializer",
",",
"self",
")",
".",
"to_representation",
"(",
"instance",
")",
"prefix_field_names",
"=",
"len",
"(",
"getattr",
"(",
"self",
".",
"Meta",
",",
"\"index_classes\"",
")",
")",
">",
"1",
"current_index",
"=",
"self",
".",
"_get_index_class_name",
"(",
"type",
"(",
"instance",
".",
"searchindex",
")",
")",
"for",
"field",
"in",
"self",
".",
"fields",
".",
"keys",
"(",
")",
":",
"# handle declared field value methods on serializer",
"value_method",
"=",
"getattr",
"(",
"self",
",",
"\"get_{}\"",
".",
"format",
"(",
"field",
")",
",",
"None",
")",
"if",
"value_method",
"and",
"callable",
"(",
"value_method",
")",
":",
"ret",
"[",
"field",
"]",
"=",
"value_method",
"(",
")",
"# now convert namespaced field names",
"orig_field",
"=",
"field",
"if",
"prefix_field_names",
":",
"parts",
"=",
"field",
".",
"split",
"(",
"\"__\"",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"index",
"=",
"parts",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"# trim the preceding '_'",
"field",
"=",
"parts",
"[",
"1",
"]",
"if",
"index",
"==",
"current_index",
":",
"ret",
"[",
"field",
"]",
"=",
"ret",
"[",
"orig_field",
"]",
"del",
"ret",
"[",
"orig_field",
"]",
"elif",
"field",
"not",
"in",
"chain",
"(",
"instance",
".",
"searchindex",
".",
"fields",
".",
"keys",
"(",
")",
",",
"self",
".",
"_declared_fields",
".",
"keys",
"(",
")",
")",
":",
"del",
"ret",
"[",
"orig_field",
"]",
"# include the highlighted field in either case",
"if",
"getattr",
"(",
"instance",
",",
"\"highlighted\"",
",",
"None",
")",
":",
"ret",
"[",
"\"highlighted\"",
"]",
"=",
"instance",
".",
"highlighted",
"[",
"0",
"]",
"return",
"ret"
] | If we have a serializer mapping, use that. Otherwise, use standard serializer behavior.
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result. | [
"If",
"we",
"have",
"a",
"serializer",
"mapping",
"use",
"that",
".",
"Otherwise",
"use",
"standard",
"serializer",
"behavior",
"Since",
"we",
"might",
"be",
"dealing",
"with",
"multiple",
"indexes",
"some",
"fields",
"might",
"not",
"be",
"valid",
"for",
"all",
"results",
".",
"Do",
"not",
"render",
"the",
"fields",
"which",
"don",
"t",
"belong",
"to",
"the",
"search",
"result",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L216-L251 | train |
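At render time `to_representation` reverses that naming: a prefixed key such as `_BookIndex__title` is split on `__`, the leading underscore is trimmed from the index part, and the value is kept under the bare field name only when the index matches the current result. A simplified, hedged stand-alone version of that split (it drops non-matching keys unconditionally, whereas the real method only drops fields unknown to the result's index):

def strip_prefix(ret, current_index):
    for key in list(ret):
        parts = key.split("__")
        if len(parts) > 1:
            index, field = parts[0][1:], parts[1]  # trim the preceding '_'
            value = ret.pop(key)
            if index == current_index:
                ret[field] = value
    return ret

print(strip_prefix({"_BookIndex__title": "Dune", "_MagazineIndex__title": "Wired"}, "BookIndex"))
# {'title': 'Dune'}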
inonit/drf-haystack | drf_haystack/serializers.py | FacetFieldSerializer.get_narrow_url | def get_narrow_url(self, instance):
"""
Return a link suitable for narrowing on the current item.
"""
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
# Never keep the page query parameter in narrowing urls.
# It will raise a NotFound exception when trying to paginate a narrowed queryset.
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url") | python | def get_narrow_url(self, instance):
"""
Return a link suitable for narrowing on the current item.
"""
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
# Never keep the page query parameter in narrowing urls.
# It will raise a NotFound exception when trying to paginate a narrowed queryset.
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url") | [
"def",
"get_narrow_url",
"(",
"self",
",",
"instance",
")",
":",
"text",
"=",
"instance",
"[",
"0",
"]",
"request",
"=",
"self",
".",
"context",
"[",
"\"request\"",
"]",
"query_params",
"=",
"request",
".",
"GET",
".",
"copy",
"(",
")",
"# Never keep the page query parameter in narrowing urls.",
"# It will raise a NotFound exception when trying to paginate a narrowed queryset.",
"page_query_param",
"=",
"self",
".",
"get_paginate_by_param",
"(",
")",
"if",
"page_query_param",
"and",
"page_query_param",
"in",
"query_params",
":",
"del",
"query_params",
"[",
"page_query_param",
"]",
"selected_facets",
"=",
"set",
"(",
"query_params",
".",
"pop",
"(",
"self",
".",
"root",
".",
"facet_query_params_text",
",",
"[",
"]",
")",
")",
"selected_facets",
".",
"add",
"(",
"\"%(field)s_exact:%(text)s\"",
"%",
"{",
"\"field\"",
":",
"self",
".",
"parent_field",
",",
"\"text\"",
":",
"text",
"}",
")",
"query_params",
".",
"setlist",
"(",
"self",
".",
"root",
".",
"facet_query_params_text",
",",
"sorted",
"(",
"selected_facets",
")",
")",
"path",
"=",
"\"%(path)s?%(query)s\"",
"%",
"{",
"\"path\"",
":",
"request",
".",
"path_info",
",",
"\"query\"",
":",
"query_params",
".",
"urlencode",
"(",
")",
"}",
"url",
"=",
"request",
".",
"build_absolute_uri",
"(",
"path",
")",
"return",
"serializers",
".",
"Hyperlink",
"(",
"url",
",",
"\"narrow-url\"",
")"
] | Return a link suitable for narrowing on the current item. | [
"Return",
"a",
"link",
"suitable",
"for",
"narrowing",
"on",
"the",
"current",
"item",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L340-L360 | train |
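Outside a live Django request, the same narrowing bookkeeping can be mimicked with the standard library; this sketch substitutes `urllib.parse` for Django's `QueryDict` and assumes the conventional `selected_facets` parameter name:

from urllib.parse import parse_qs, urlencode

def narrow_query(query_string, field, text, facet_param="selected_facets", page_param="page"):
    params = parse_qs(query_string)
    params.pop(page_param, None)  # never carry pagination into a narrowed URL
    selected = set(params.pop(facet_param, []))
    selected.add("%s_exact:%s" % (field, text))
    params[facet_param] = sorted(selected)
    return urlencode(params, doseq=True)

print(narrow_query("q=fantasy&page=3", "author", "Tolkien"))
# q=fantasy&selected_facets=author_exact%3ATolkien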
inonit/drf-haystack | drf_haystack/serializers.py | FacetFieldSerializer.to_representation | def to_representation(self, field, instance):
"""
Set the ``parent_field`` property equal to the current field on the serializer class,
so that each field can query it to see what kind of attribute it is processing.
"""
self.parent_field = field
return super(FacetFieldSerializer, self).to_representation(instance) | python | def to_representation(self, field, instance):
"""
Set the ``parent_field`` property equal to the current field on the serializer class,
so that each field can query it to see what kind of attribute it is processing.
"""
self.parent_field = field
return super(FacetFieldSerializer, self).to_representation(instance) | [
"def",
"to_representation",
"(",
"self",
",",
"field",
",",
"instance",
")",
":",
"self",
".",
"parent_field",
"=",
"field",
"return",
"super",
"(",
"FacetFieldSerializer",
",",
"self",
")",
".",
"to_representation",
"(",
"instance",
")"
] | Set the ``parent_field`` property equal to the current field on the serializer class,
so that each field can query it to see what kind of attribute it is processing. | [
"Set",
"the",
"parent_field",
"property",
"equal",
"to",
"the",
"current",
"field",
"on",
"the",
"serializer",
"class",
"so",
"that",
"each",
"field",
"can",
"query",
"it",
"to",
"see",
"what",
"kind",
"of",
"attribute",
"they",
"are",
"processing",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L362-L368 | train |
inonit/drf-haystack | drf_haystack/serializers.py | HaystackFacetSerializer.get_fields | def get_fields(self):
"""
This returns a dictionary containing the topmost fields,
``dates``, ``fields`` and ``queries``.
"""
field_mapping = OrderedDict()
for field, data in self.instance.items():
field_mapping.update(
{field: self.facet_dict_field_class(
child=self.facet_list_field_class(child=self.facet_field_serializer_class(data)), required=False)}
)
if self.serialize_objects is True:
field_mapping["objects"] = serializers.SerializerMethodField()
return field_mapping | python | def get_fields(self):
"""
This returns a dictionary containing the topmost fields,
``dates``, ``fields`` and ``queries``.
"""
field_mapping = OrderedDict()
for field, data in self.instance.items():
field_mapping.update(
{field: self.facet_dict_field_class(
child=self.facet_list_field_class(child=self.facet_field_serializer_class(data)), required=False)}
)
if self.serialize_objects is True:
field_mapping["objects"] = serializers.SerializerMethodField()
return field_mapping | [
"def",
"get_fields",
"(",
"self",
")",
":",
"field_mapping",
"=",
"OrderedDict",
"(",
")",
"for",
"field",
",",
"data",
"in",
"self",
".",
"instance",
".",
"items",
"(",
")",
":",
"field_mapping",
".",
"update",
"(",
"{",
"field",
":",
"self",
".",
"facet_dict_field_class",
"(",
"child",
"=",
"self",
".",
"facet_list_field_class",
"(",
"child",
"=",
"self",
".",
"facet_field_serializer_class",
"(",
"data",
")",
")",
",",
"required",
"=",
"False",
")",
"}",
")",
"if",
"self",
".",
"serialize_objects",
"is",
"True",
":",
"field_mapping",
"[",
"\"objects\"",
"]",
"=",
"serializers",
".",
"SerializerMethodField",
"(",
")",
"return",
"field_mapping"
] | This returns a dictionary containing the topmost fields,
``dates``, ``fields`` and ``queries``. | [
"This",
"returns",
"a",
"dictionary",
"containing",
"the",
"top",
"most",
"fields",
"dates",
"fields",
"and",
"queries",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L384-L399 | train |
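The nested `DictField(child=ListField(child=FacetFieldSerializer(...)))` structure built above exists because Haystack hands back facet counts as lists of (text, count) pairs per field; a plain-Python illustration of the assumed input shape and what each pair becomes:

# Hypothetical facet payload in the shape get_fields() iterates over.
instance = {
    "author": [("J.K. Rowling", 2), ("Stephen King", 5)],
    "publish_year": [("2019", 7), ("2020", 3)],
}
for field, data in instance.items():
    # Each (text, count) pair is what a FacetFieldSerializer renders,
    # together with a computed narrow-url.
    print(field, [{"text": t, "count": c} for t, c in data])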
inonit/drf-haystack | drf_haystack/serializers.py | HaystackFacetSerializer.get_objects | def get_objects(self, instance):
"""
Return a list of objects matching the faceted result.
"""
view = self.context["view"]
queryset = self.context["objects"]
page = view.paginate_queryset(queryset)
if page is not None:
serializer = view.get_facet_objects_serializer(page, many=True)
return OrderedDict([
("count", self.get_count(queryset)),
("next", view.paginator.get_next_link()),
("previous", view.paginator.get_previous_link()),
("results", serializer.data)
])
serializer = view.get_serializer(queryset, many=True)
return serializer.data | python | def get_objects(self, instance):
"""
Return a list of objects matching the faceted result.
"""
view = self.context["view"]
queryset = self.context["objects"]
page = view.paginate_queryset(queryset)
if page is not None:
serializer = view.get_facet_objects_serializer(page, many=True)
return OrderedDict([
("count", self.get_count(queryset)),
("next", view.paginator.get_next_link()),
("previous", view.paginator.get_previous_link()),
("results", serializer.data)
])
serializer = view.get_serializer(queryset, many=True)
return serializer.data | [
"def",
"get_objects",
"(",
"self",
",",
"instance",
")",
":",
"view",
"=",
"self",
".",
"context",
"[",
"\"view\"",
"]",
"queryset",
"=",
"self",
".",
"context",
"[",
"\"objects\"",
"]",
"page",
"=",
"view",
".",
"paginate_queryset",
"(",
"queryset",
")",
"if",
"page",
"is",
"not",
"None",
":",
"serializer",
"=",
"view",
".",
"get_facet_objects_serializer",
"(",
"page",
",",
"many",
"=",
"True",
")",
"return",
"OrderedDict",
"(",
"[",
"(",
"\"count\"",
",",
"self",
".",
"get_count",
"(",
"queryset",
")",
")",
",",
"(",
"\"next\"",
",",
"view",
".",
"paginator",
".",
"get_next_link",
"(",
")",
")",
",",
"(",
"\"previous\"",
",",
"view",
".",
"paginator",
".",
"get_previous_link",
"(",
")",
")",
",",
"(",
"\"results\"",
",",
"serializer",
".",
"data",
")",
"]",
")",
"serializer",
"=",
"view",
".",
"get_serializer",
"(",
"queryset",
",",
"many",
"=",
"True",
")",
"return",
"serializer",
".",
"data"
] | Return a list of objects matching the faceted result. | [
"Return",
"a",
"list",
"of",
"objects",
"matching",
"the",
"faceted",
"result",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L401-L419 | train |
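When a paginator is active, `get_objects` wraps the hits in the familiar DRF envelope; the keys and their order are fixed by the `OrderedDict` above. A plain rendering with placeholder values (the URLs and counts are invented):

from collections import OrderedDict

envelope = OrderedDict([
    ("count", 42),                                        # total hits
    ("next", "http://testserver/search/facets/?page=3"),  # paginator links
    ("previous", "http://testserver/search/facets/?page=1"),
    ("results", [{"title": "Dune"}]),                     # serialized page
])
print(list(envelope))  # ['count', 'next', 'previous', 'results']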
inonit/drf-haystack | drf_haystack/serializers.py | HighlighterMixin.get_document_field | def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name | python | def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name | [
"def",
"get_document_field",
"(",
"instance",
")",
":",
"for",
"name",
",",
"field",
"in",
"instance",
".",
"searchindex",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"field",
".",
"document",
"is",
"True",
":",
"return",
"name"
] | Returns which field the search index has marked as its
`document=True` field. | [
"Returns",
"which",
"field",
"the",
"search",
"index",
"has",
"marked",
"as",
"it",
"s",
"document",
"=",
"True",
"field",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L470-L477 | train |
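The lookup above is a linear scan over the index's field objects for the one flagged `document=True`. A self-contained imitation with a stub field type (`StubField` is invented for the demo; real Haystack fields expose the same `document` attribute):

class StubField(object):
    def __init__(self, document=False):
        self.document = document

def get_document_field(fields):
    for name, field in fields.items():
        if field.document is True:
            return name

fields = {"title": StubField(), "text": StubField(document=True)}
print(get_document_field(fields))  # text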
inonit/drf-haystack | drf_haystack/filters.py | BaseHaystackFilterBackend.apply_filters | def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply constructed filters and excludes and return the queryset
:param queryset: queryset to filter
:param applicable_filters: filters which are passed directly to queryset.filter()
:param applicable_exclusions: filters which are passed directly to queryset.exclude()
:returns: filtered queryset
"""
if applicable_filters:
queryset = queryset.filter(applicable_filters)
if applicable_exclusions:
queryset = queryset.exclude(applicable_exclusions)
return queryset | python | def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply constructed filters and excludes and return the queryset
:param queryset: queryset to filter
:param applicable_filters: filters which are passed directly to queryset.filter()
:param applicable_exclusions: filters which are passed directly to queryset.exclude()
:returns: filtered queryset
"""
if applicable_filters:
queryset = queryset.filter(applicable_filters)
if applicable_exclusions:
queryset = queryset.exclude(applicable_exclusions)
return queryset | [
"def",
"apply_filters",
"(",
"self",
",",
"queryset",
",",
"applicable_filters",
"=",
"None",
",",
"applicable_exclusions",
"=",
"None",
")",
":",
"if",
"applicable_filters",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"applicable_filters",
")",
"if",
"applicable_exclusions",
":",
"queryset",
"=",
"queryset",
".",
"exclude",
"(",
"applicable_exclusions",
")",
"return",
"queryset"
] | Apply constructed filters and excludes and return the queryset
:param queryset: queryset to filter
:param applicable_filters: filters which are passed directly to queryset.filter()
:param applicable_exclusions: filters which are passed directly to queryset.exclude()
:returns: filtered queryset | [
"Apply",
"constructed",
"filters",
"and",
"excludes",
"and",
"return",
"the",
"queryset"
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L27-L40 | train |
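`apply_filters` just threads optional filter and exclude terms through the queryset; a recording stub makes the control flow visible without a search backend (the stub is not part of drf-haystack):

class StubQuerySet(object):
    def __init__(self, ops=()):
        self.ops = list(ops)
    def filter(self, terms):
        return StubQuerySet(self.ops + [("filter", terms)])
    def exclude(self, terms):
        return StubQuerySet(self.ops + [("exclude", terms)])

def apply_filters(queryset, applicable_filters=None, applicable_exclusions=None):
    if applicable_filters:
        queryset = queryset.filter(applicable_filters)
    if applicable_exclusions:
        queryset = queryset.exclude(applicable_exclusions)
    return queryset

print(apply_filters(StubQuerySet(), "title:dune", None).ops)
# [('filter', 'title:dune')]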
inonit/drf-haystack | drf_haystack/filters.py | BaseHaystackFilterBackend.build_filters | def build_filters(self, view, filters=None):
"""
Get the query builder instance and return constructed query filters.
"""
query_builder = self.get_query_builder(backend=self, view=view)
return query_builder.build_query(**(filters if filters else {})) | python | def build_filters(self, view, filters=None):
"""
Get the query builder instance and return constructed query filters.
"""
query_builder = self.get_query_builder(backend=self, view=view)
return query_builder.build_query(**(filters if filters else {})) | [
"def",
"build_filters",
"(",
"self",
",",
"view",
",",
"filters",
"=",
"None",
")",
":",
"query_builder",
"=",
"self",
".",
"get_query_builder",
"(",
"backend",
"=",
"self",
",",
"view",
"=",
"view",
")",
"return",
"query_builder",
".",
"build_query",
"(",
"*",
"*",
"(",
"filters",
"if",
"filters",
"else",
"{",
"}",
")",
")"
] | Get the query builder instance and return constructed query filters. | [
"Get",
"the",
"query",
"builder",
"instance",
"and",
"return",
"constructed",
"query",
"filters",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L42-L47 | train |
inonit/drf-haystack | drf_haystack/filters.py | BaseHaystackFilterBackend.filter_queryset | def filter_queryset(self, request, queryset, view):
"""
Return the filtered queryset.
"""
applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))
return self.apply_filters(
queryset=queryset,
applicable_filters=self.process_filters(applicable_filters, queryset, view),
applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)
) | python | def filter_queryset(self, request, queryset, view):
"""
Return the filtered queryset.
"""
applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))
return self.apply_filters(
queryset=queryset,
applicable_filters=self.process_filters(applicable_filters, queryset, view),
applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)
) | [
"def",
"filter_queryset",
"(",
"self",
",",
"request",
",",
"queryset",
",",
"view",
")",
":",
"applicable_filters",
",",
"applicable_exclusions",
"=",
"self",
".",
"build_filters",
"(",
"view",
",",
"filters",
"=",
"self",
".",
"get_request_filters",
"(",
"request",
")",
")",
"return",
"self",
".",
"apply_filters",
"(",
"queryset",
"=",
"queryset",
",",
"applicable_filters",
"=",
"self",
".",
"process_filters",
"(",
"applicable_filters",
",",
"queryset",
",",
"view",
")",
",",
"applicable_exclusions",
"=",
"self",
".",
"process_filters",
"(",
"applicable_exclusions",
",",
"queryset",
",",
"view",
")",
")"
] | Return the filtered queryset. | [
"Return",
"the",
"filtered",
"queryset",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L56-L65 | train |
inonit/drf-haystack | drf_haystack/filters.py | BaseHaystackFilterBackend.get_query_builder | def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs) | python | def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs) | [
"def",
"get_query_builder",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"query_builder",
"=",
"self",
".",
"get_query_builder_class",
"(",
")",
"return",
"query_builder",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return the query builder class instance that should be used to
build the query which is passed to the search engine backend. | [
"Return",
"the",
"query",
"builder",
"class",
"instance",
"that",
"should",
"be",
"used",
"to",
"build",
"the",
"query",
"which",
"is",
"passed",
"to",
"the",
"search",
"engine",
"backend",
"."
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L67-L73 | train |
inonit/drf-haystack | drf_haystack/filters.py | HaystackFacetFilter.apply_filters | def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply faceting to the queryset
"""
for field, options in applicable_filters["field_facets"].items():
queryset = queryset.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
queryset = queryset.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
queryset = queryset.query_facet(field, **options)
return queryset | python | def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply faceting to the queryset
"""
for field, options in applicable_filters["field_facets"].items():
queryset = queryset.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
queryset = queryset.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
queryset = queryset.query_facet(field, **options)
return queryset | [
"def",
"apply_filters",
"(",
"self",
",",
"queryset",
",",
"applicable_filters",
"=",
"None",
",",
"applicable_exclusions",
"=",
"None",
")",
":",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"field_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"date_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"date_facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"query_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"query_facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"return",
"queryset"
] | Apply faceting to the queryset | [
"Apply",
"faceting",
"to",
"the",
"queryset"
] | ceabd0f6318f129758341ab08292a20205d6f4cd | https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L202-L215 | train |
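Faceting options are applied the same way, one chained call per configured field; this runnable sketch replays the three loops above against a recording stub (the option dicts are examples of what the filter might pass, not values from the source):

class FacetStub(object):
    def __init__(self, calls=()):
        self.calls = list(calls)
    def facet(self, field, **options):
        return FacetStub(self.calls + [("facet", field, options)])
    def date_facet(self, field, **options):
        return FacetStub(self.calls + [("date_facet", field, options)])
    def query_facet(self, field, **options):
        return FacetStub(self.calls + [("query_facet", field, options)])

applicable_filters = {
    "field_facets": {"author": {"limit": 10}},
    "date_facets": {"pub_date": {"start_date": "2019-01-01",
                                 "end_date": "2020-01-01", "gap_by": "month"}},
    "query_facets": {},
}
qs = FacetStub()
for field, options in applicable_filters["field_facets"].items():
    qs = qs.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
    qs = qs.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
    qs = qs.query_facet(field, **options)
print(qs.calls)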
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | __convert_to_df | def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col | python | def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col | [
"def",
"__convert_to_df",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"val_id",
"=",
"None",
",",
"group_id",
"=",
"None",
")",
":",
"if",
"not",
"group_col",
":",
"group_col",
"=",
"'groups'",
"if",
"not",
"val_col",
":",
"val_col",
"=",
"'vals'",
"if",
"isinstance",
"(",
"a",
",",
"DataFrame",
")",
":",
"x",
"=",
"a",
".",
"copy",
"(",
")",
"if",
"not",
"{",
"group_col",
",",
"val_col",
"}",
".",
"issubset",
"(",
"a",
".",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"'Specify correct column names using `group_col` and `val_col` args'",
")",
"return",
"x",
",",
"val_col",
",",
"group_col",
"elif",
"isinstance",
"(",
"a",
",",
"list",
")",
"or",
"(",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
"and",
"not",
"a",
".",
"shape",
".",
"count",
"(",
"2",
")",
")",
":",
"grps_len",
"=",
"map",
"(",
"len",
",",
"a",
")",
"grps",
"=",
"list",
"(",
"it",
".",
"chain",
"(",
"*",
"[",
"[",
"i",
"+",
"1",
"]",
"*",
"l",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"grps_len",
")",
"]",
")",
")",
"vals",
"=",
"list",
"(",
"it",
".",
"chain",
"(",
"*",
"a",
")",
")",
"return",
"DataFrame",
"(",
"{",
"val_col",
":",
"vals",
",",
"group_col",
":",
"grps",
"}",
")",
",",
"val_col",
",",
"group_col",
"elif",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
":",
"# cols ids not defined",
"# trying to infer",
"if",
"not",
"(",
"all",
"(",
"[",
"val_id",
",",
"group_id",
"]",
")",
")",
":",
"if",
"np",
".",
"argmax",
"(",
"a",
".",
"shape",
")",
":",
"a",
"=",
"a",
".",
"T",
"ax",
"=",
"[",
"np",
".",
"unique",
"(",
"a",
"[",
":",
",",
"0",
"]",
")",
".",
"size",
",",
"np",
".",
"unique",
"(",
"a",
"[",
":",
",",
"1",
"]",
")",
".",
"size",
"]",
"if",
"np",
".",
"asscalar",
"(",
"np",
".",
"diff",
"(",
"ax",
")",
")",
":",
"__val_col",
"=",
"np",
".",
"argmax",
"(",
"ax",
")",
"__group_col",
"=",
"np",
".",
"argmin",
"(",
"ax",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot infer input format.\\nPlease specify `val_id` and `group_id` args'",
")",
"cols",
"=",
"{",
"__val_col",
":",
"val_col",
",",
"__group_col",
":",
"group_col",
"}",
"else",
":",
"cols",
"=",
"{",
"val_id",
":",
"val_col",
",",
"group_id",
":",
"group_col",
"}",
"cols_vals",
"=",
"dict",
"(",
"sorted",
"(",
"cols",
".",
"items",
"(",
")",
")",
")",
".",
"values",
"(",
")",
"return",
"DataFrame",
"(",
"a",
",",
"columns",
"=",
"cols_vals",
")",
",",
"val_col",
",",
"group_col"
] | Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors. | [
"Hidden",
"helper",
"method",
"to",
"create",
"a",
"DataFrame",
"with",
"input",
"data",
"for",
"further",
"processing",
"."
] | 5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L11-L106 | train |
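The long-format DataFrame this helper builds from a list of unequal-length groups can be reproduced directly with pandas; this runnable snippet mirrors the list branch above:

import itertools as it
from pandas import DataFrame

a = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
grps = list(it.chain(*[[i + 1] * len(grp) for i, grp in enumerate(a)]))
vals = list(it.chain(*a))
df = DataFrame({"vals": vals, "groups": grps})
print(df.head(3))
#    vals  groups
# 0     1       1
# 1     2       1
# 2     3       1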
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_tukey_hsd | def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups) | python | def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups) | [
"def",
"posthoc_tukey_hsd",
"(",
"x",
",",
"g",
",",
"alpha",
"=",
"0.05",
")",
":",
"result",
"=",
"pairwise_tukeyhsd",
"(",
"x",
",",
"g",
",",
"alpha",
"=",
"0.05",
")",
"groups",
"=",
"np",
".",
"array",
"(",
"result",
".",
"groupsunique",
",",
"dtype",
"=",
"np",
".",
"str",
")",
"groups_len",
"=",
"len",
"(",
"groups",
")",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"groups_len",
",",
"groups_len",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"a",
"in",
"result",
".",
"summary",
"(",
")",
"[",
"1",
":",
"]",
":",
"a0",
"=",
"str",
"(",
"a",
"[",
"0",
"]",
")",
"a1",
"=",
"str",
"(",
"a",
"[",
"1",
"]",
")",
"a0i",
"=",
"np",
".",
"where",
"(",
"groups",
"==",
"a0",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"a1i",
"=",
"np",
".",
"where",
"(",
"groups",
"==",
"a1",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"vs",
"[",
"a0i",
",",
"a1i",
"]",
"=",
"1",
"if",
"str",
"(",
"a",
"[",
"5",
"]",
")",
"==",
"'True'",
"else",
"0",
"vs",
"=",
"np",
".",
"triu",
"(",
"vs",
")",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] | Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g)) | [
"Pairwise",
"comparisons",
"with",
"TukeyHSD",
"confidence",
"intervals",
".",
"This",
"is",
"a",
"convenience",
"function",
"to",
"make",
"statsmodels",
"pairwise_tukeyhsd",
"method",
"more",
"applicable",
"for",
"further",
"use",
"."
] | 5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1845-L1897 | train |
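Expanding the docstring example (assuming numpy, statsmodels and scikit-posthocs are importable), the returned frame is a symmetric group-by-group significance matrix:

import numpy as np
import scikit_posthocs as sp

x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
result = sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
print(result)
# 1 marks a significant pair at the given alpha, 0 a non-significant one,
# and -1 fills the diagonal.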
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_mannwhitney | def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | python | def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | [
"def",
"posthoc_mannwhitney",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"use_continuity",
"=",
"True",
",",
"alternative",
"=",
"'two-sided'",
",",
"p_adjust",
"=",
"None",
",",
"sort",
"=",
"True",
")",
":",
"x",
",",
"_val_col",
",",
"_group_col",
"=",
"__convert_to_df",
"(",
"a",
",",
"val_col",
",",
"group_col",
")",
"if",
"not",
"sort",
":",
"x",
"[",
"_group_col",
"]",
"=",
"Categorical",
"(",
"x",
"[",
"_group_col",
"]",
",",
"categories",
"=",
"x",
"[",
"_group_col",
"]",
".",
"unique",
"(",
")",
",",
"ordered",
"=",
"True",
")",
"x",
".",
"sort_values",
"(",
"by",
"=",
"[",
"_group_col",
",",
"_val_col",
"]",
",",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"groups",
"=",
"np",
".",
"unique",
"(",
"x",
"[",
"_group_col",
"]",
")",
"x_len",
"=",
"groups",
".",
"size",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"x_len",
",",
"x_len",
")",
")",
"tri_upper",
"=",
"np",
".",
"triu_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
":",
",",
":",
"]",
"=",
"0",
"combs",
"=",
"it",
".",
"combinations",
"(",
"range",
"(",
"x_len",
")",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"combs",
":",
"vs",
"[",
"i",
",",
"j",
"]",
"=",
"ss",
".",
"mannwhitneyu",
"(",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"i",
"]",
",",
"_val_col",
"]",
",",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"j",
"]",
",",
"_val_col",
"]",
",",
"use_continuity",
"=",
"use_continuity",
",",
"alternative",
"=",
"alternative",
")",
"[",
"1",
"]",
"if",
"p_adjust",
":",
"vs",
"[",
"tri_upper",
"]",
"=",
"multipletests",
"(",
"vs",
"[",
"tri_upper",
"]",
",",
"method",
"=",
"p_adjust",
")",
"[",
"1",
"]",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] | Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm') | [
"Pairwise",
"comparisons",
"with",
"Mann",
"-",
"Whitney",
"rank",
"test",
"."
] | 5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1900-L1991 | train |
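The same data can be passed either as a list of arrays or as a long-format DataFrame with `val_col`/`group_col`; both calls below should yield the same Holm-adjusted p-value matrix:

import scikit_posthocs as sp
from pandas import DataFrame

x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
print(sp.posthoc_mannwhitney(x, p_adjust='holm'))

df = DataFrame({
    'vals': [v for grp in x for v in grp],
    'groups': [i + 1 for i, grp in enumerate(x) for _ in grp],
})
print(sp.posthoc_mannwhitney(df, val_col='vals', group_col='groups', p_adjust='holm'))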
maximtrp/scikit-posthocs | scikit_posthocs/_posthocs.py | posthoc_wilcoxon | def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
zero_method=zero_method, correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | python | def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort the DataFrame by group_col and val_col.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to the `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
zero_method=zero_method, correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | [
"def",
"posthoc_wilcoxon",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"zero_method",
"=",
"'wilcox'",
",",
"correction",
"=",
"False",
",",
"p_adjust",
"=",
"None",
",",
"sort",
"=",
"False",
")",
":",
"x",
",",
"_val_col",
",",
"_group_col",
"=",
"__convert_to_df",
"(",
"a",
",",
"val_col",
",",
"group_col",
")",
"if",
"not",
"sort",
":",
"x",
"[",
"_group_col",
"]",
"=",
"Categorical",
"(",
"x",
"[",
"_group_col",
"]",
",",
"categories",
"=",
"x",
"[",
"_group_col",
"]",
".",
"unique",
"(",
")",
",",
"ordered",
"=",
"True",
")",
"#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)",
"groups",
"=",
"np",
".",
"unique",
"(",
"x",
"[",
"_group_col",
"]",
")",
"x_len",
"=",
"groups",
".",
"size",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"x_len",
",",
"x_len",
")",
")",
"tri_upper",
"=",
"np",
".",
"triu_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
":",
",",
":",
"]",
"=",
"0",
"combs",
"=",
"it",
".",
"combinations",
"(",
"range",
"(",
"x_len",
")",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"combs",
":",
"vs",
"[",
"i",
",",
"j",
"]",
"=",
"ss",
".",
"wilcoxon",
"(",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"i",
"]",
",",
"_val_col",
"]",
",",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"j",
"]",
",",
"_val_col",
"]",
",",
"zero_method",
"=",
"zero_method",
",",
"correction",
"=",
"correction",
")",
"[",
"1",
"]",
"if",
"p_adjust",
":",
"vs",
"[",
"tri_upper",
"]",
"=",
"multipletests",
"(",
"vs",
"[",
"tri_upper",
"]",
",",
"method",
"=",
"p_adjust",
")",
"[",
"1",
"]",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] | Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort the DataFrame by group_col and val_col.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to the `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x) | [
"Pairwise",
"comparisons",
"with",
"Wilcoxon",
"signed",
"-",
"rank",
"test",
".",
"It",
"is",
"a",
"non",
"-",
"parametric",
"version",
"of",
"the",
"paired",
"T",
"-",
"test",
"for",
"use",
"with",
"non",
"-",
"parametric",
"ANOVA",
"."
] | 5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d | https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1994-L2086 | train |
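A short usage sketch (not from the source repo) reusing the docstring's sample data; the import alias sp and the 'holm' choice of p_adjust are assumptions drawn from the documented options above.
import scikit_posthocs as sp
# Three paired, equal-length groups, as in the docstring example.
x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
# Pairwise Wilcoxon signed-rank tests with Holm-adjusted p values;
# the result is a symmetric DataFrame of p values with -1 on the diagonal.
pvals = sp.posthoc_wilcoxon(x, p_adjust='holm')
print(pvals)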
cjrh/aiorun | aiorun.py | shutdown_waits_for | def shutdown_waits_for(coro, loop=None):
"""Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
"""
loop = loop or get_event_loop()
fut = loop.create_future() # This future will connect coro and the caller.
async def coro_proxy():
"""This function will await coro, but it will also send the result
over to the future. Remember: the outside caller (of
shutdown_waits_for) will be awaiting fut, NOT coro(), due to
the decoupling. However, when coro completes, we need to send its
result over to the fut to make it look *as if* it was just coro
running the whole time. This whole thing is a teeny magic trick.
"""
try:
result = await coro
except (CancelledError, Exception) as e:
set_fut_done = partial(fut.set_exception, e)
else:
set_fut_done = partial(fut.set_result, result)
if not fut.cancelled():
set_fut_done()
new_coro = coro_proxy() # We'll taskify this one instead of coro.
_DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled.
loop.create_task(new_coro) # Make the task
# Ok, so we *could* simply return fut. Callers can await it as normal,
# e.g.
#
# async def blah():
# x = await shutdown_waits_for(bleh())
#
# That will work fine. However, callers may *also* want to detach the
# call from the current execution context, e.g.
#
# async def blah():
# loop.create_task(shutdown_waits_for(bleh()))
#
# This will only work if shutdown_waits_for() returns a coroutine.
# Therefore, we just make a new coroutine to wrap the `await fut` and
# return that. Then both things will work.
#
# (Side note: instead of callers using create_tasks, it would also work
# if they used `asyncio.ensure_future()` instead, since that can work
# with futures. But I don't like ensure_future.)
#
# (Another side note: You don't even need `create_task()` or
# `ensure_future()`...If you don't want a result, you can just call
# `shutdown_waits_for()` as a flat function call, no await or anything,
# and it should still work; unfortunately it causes a RuntimeWarning to
# tell you that ``inner()`` was never awaited :/ )
async def inner():
return await fut
return inner() | python | def shutdown_waits_for(coro, loop=None):
"""Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
"""
loop = loop or get_event_loop()
fut = loop.create_future() # This future will connect coro and the caller.
async def coro_proxy():
"""This function will await coro, but it will also send the result
over to the future. Remember: the outside caller (of
shutdown_waits_for) will be awaiting fut, NOT coro(), due to
the decoupling. However, when coro completes, we need to send its
result over to the fut to make it look *as if* it was just coro
running the whole time. This whole thing is a teeny magic trick.
"""
try:
result = await coro
except (CancelledError, Exception) as e:
set_fut_done = partial(fut.set_exception, e)
else:
set_fut_done = partial(fut.set_result, result)
if not fut.cancelled():
set_fut_done()
new_coro = coro_proxy() # We'll taskify this one instead of coro.
_DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled.
loop.create_task(new_coro) # Make the task
# Ok, so we *could* simply return fut. Callers can await it as normal,
# e.g.
#
# async def blah():
# x = await shutdown_waits_for(bleh())
#
# That will work fine. However, callers may *also* want to detach the
# call from the current execution context, e.g.
#
# async def blah():
# loop.create_task(shutdown_waits_for(bleh()))
#
# This will only work if shutdown_waits_for() returns a coroutine.
# Therefore, we just make a new coroutine to wrap the `await fut` and
# return that. Then both things will work.
#
# (Side note: instead of callers using create_tasks, it would also work
# if they used `asyncio.ensure_future()` instead, since that can work
# with futures. But I don't like ensure_future.)
#
# (Another side note: You don't even need `create_task()` or
# `ensure_future()`...If you don't want a result, you can just call
# `shutdown_waits_for()` as a flat function call, no await or anything,
# and it should still work; unfortunately it causes a RuntimeWarning to
# tell you that ``inner()`` was never awaited :/
async def inner():
return await fut
return inner() | [
"def",
"shutdown_waits_for",
"(",
"coro",
",",
"loop",
"=",
"None",
")",
":",
"loop",
"=",
"loop",
"or",
"get_event_loop",
"(",
")",
"fut",
"=",
"loop",
".",
"create_future",
"(",
")",
"# This future will connect coro and the caller.",
"async",
"def",
"coro_proxy",
"(",
")",
":",
"\"\"\"This function will await coro, but it will also send the result\n over the the future. Remember: the outside caller (of\n shutdown_waits_for) will be awaiting fut, NOT coro(), due to\n the decoupling. However, when coro completes, we need to send its\n result over to the fut to make it look *as if* it was just coro\n running the whole time. This whole thing is a teeny magic trick.\n \"\"\"",
"try",
":",
"result",
"=",
"await",
"coro",
"except",
"(",
"CancelledError",
",",
"Exception",
")",
"as",
"e",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_exception",
",",
"e",
")",
"else",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_result",
",",
"result",
")",
"if",
"not",
"fut",
".",
"cancelled",
"(",
")",
":",
"set_fut_done",
"(",
")",
"new_coro",
"=",
"coro_proxy",
"(",
")",
"# We'll taskify this one instead of coro.",
"_DO_NOT_CANCEL_COROS",
".",
"add",
"(",
"new_coro",
")",
"# The new task must not be cancelled.",
"loop",
".",
"create_task",
"(",
"new_coro",
")",
"# Make the task",
"# Ok, so we *could* simply return fut. Callers can await it as normal,",
"# e.g.",
"#",
"# async def blah():",
"# x = await shutdown_waits_for(bleh())",
"#",
"# That will work fine. However, callers may *also* want to detach the",
"# call from the current execution context, e.g.",
"#",
"# async def blah():",
"# loop.create_task(shutdown_waits_for(bleh()))",
"#",
"# This will only work if shutdown_waits_for() returns a coroutine.",
"# Therefore, we just make a new coroutine to wrap the `await fut` and",
"# return that. Then both things will work.",
"#",
"# (Side note: instead of callers using create_tasks, it would also work",
"# if they used `asyncio.ensure_future()` instead, since that can work",
"# with futures. But I don't like ensure_future.)",
"#",
"# (Another side note: You don't even need `create_task()` or",
"# `ensure_future()`...If you don't want a result, you can just call",
"# `shutdown_waits_for()` as a flat function call, no await or anything,",
"# and it should still work; unfortunately it causes a RuntimeWarning to",
"# tell you that ``inner()`` was never awaited :/",
"async",
"def",
"inner",
"(",
")",
":",
"return",
"await",
"fut",
"return",
"inner",
"(",
")"
] | Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function. | [
"Prevent",
"coro",
"from",
"being",
"cancelled",
"during",
"the",
"shutdown",
"sequence",
"."
] | 23c73318447f578a4a24845c5f43574ac7b414e4 | https://github.com/cjrh/aiorun/blob/23c73318447f578a4a24845c5f43574ac7b414e4/aiorun.py#L43-L117 | train |
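A minimal usage sketch, not part of the library source: flush_buffers is a made-up stand-in for work that must survive shutdown, and run() is the aiorun entry point documented in the next record.
import asyncio
from aiorun import run, shutdown_waits_for
async def flush_buffers():
    # Illustrative cleanup work that must not be cancelled mid-way.
    await asyncio.sleep(1.0)
async def main():
    # The wrapped coroutine lands in the do-not-cancel set, so the
    # shutdown sequence lets it run to completion (its result may be
    # lost during shutdown, as the docstring warns).
    await shutdown_waits_for(flush_buffers())
run(main())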
cjrh/aiorun | aiorun.py | run | def run(coro: 'Optional[Coroutine]' = None, *,
loop: Optional[AbstractEventLoop] = None,
shutdown_handler: Optional[Callable[[AbstractEventLoop], None]] = None,
executor_workers: int = 10,
executor: Optional[Executor] = None,
use_uvloop: bool = False) -> None:
"""
Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised.
"""
logger.debug('Entering run()')
assert not (loop and use_uvloop), (
"'loop' and 'use_uvloop' parameters are mutually "
"exclusive. (Just make your own uvloop and pass it in)."
)
if use_uvloop:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop_was_supplied = bool(loop)
if not loop_was_supplied:
loop = get_event_loop()
if coro:
async def new_coro():
"""During shutdown, run_until_complete() will exit
if a CancelledError bubbles up from anything in the
group. To counteract that, we'll try to handle
any CancelledErrors that bubble up from the given
coro. This isn't fool-proof: if the user doesn't
provide a coro, and instead creates their own with
loop.create_task, that task might bubble
a CancelledError into the run_until_complete()."""
try:
await coro
except asyncio.CancelledError:
pass
loop.create_task(new_coro())
shutdown_handler = shutdown_handler or _shutdown_handler
if WINDOWS: # pragma: no cover
# This is to allow CTRL-C to be detected in a timely fashion,
# see: https://bugs.python.org/issue23057#msg246316
loop.create_task(windows_support_wakeup())
# This is to be able to handle SIGBREAK.
def windows_handler(sig, frame):
# Disable the handler so it won't be called again.
signame = signal.Signals(sig).name
logger.critical('Received signal: %s. Stopping the loop.', signame)
shutdown_handler(loop)
signal.signal(signal.SIGBREAK, windows_handler)
signal.signal(signal.SIGINT, windows_handler)
else:
loop.add_signal_handler(SIGINT, shutdown_handler, loop)
loop.add_signal_handler(SIGTERM, shutdown_handler, loop)
# TODO: We probably don't want to create a different executor if the
# TODO: loop was supplied. (User might have put stuff on that loop's
# TODO: executor).
if not executor:
logger.debug('Creating default executor')
executor = ThreadPoolExecutor(max_workers=executor_workers)
loop.set_default_executor(executor)
try:
loop.run_forever()
except KeyboardInterrupt: # pragma: no cover
logger.info('Got KeyboardInterrupt')
if WINDOWS:
# Windows doesn't do any POSIX signal handling, and no
# abstraction layer for signals is currently implemented in
# asyncio. So we fall back to KeyboardInterrupt (triggered
# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT
shutdown_handler()
logger.info('Entering shutdown phase.')
def sep():
tasks = all_tasks(loop=loop)
do_not_cancel = set()
for t in tasks:
# TODO: we don't need access to the coro. We could simply
# TODO: store the task itself in the weakset.
if t._coro in _DO_NOT_CANCEL_COROS:
do_not_cancel.add(t)
tasks -= do_not_cancel
logger.info('Cancelling pending tasks.')
for t in tasks:
logger.debug('Cancelling task: %s', t)
t.cancel()
return tasks, do_not_cancel
tasks, do_not_cancel = sep()
# Here's a protip: if you group a bunch of tasks, and some of them
# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the
# raised CancelledError will bubble up to, and stop the
# loop.run_until_complete() line: meaning, not all the tasks in
# the gathered group will actually be complete. You need to
# enable this with the ``return_exceptions`` flag.
group = gather(*tasks, *do_not_cancel, return_exceptions=True)
logger.info('Running pending tasks till complete')
# TODO: obtain all the results, and log any results that are exceptions
# other than CancelledError. Will be useful for troubleshooting.
loop.run_until_complete(group)
logger.info('Waiting for executor shutdown.')
executor.shutdown(wait=True)
# If loop was supplied, it's up to the caller to close!
if not loop_was_supplied:
logger.info('Closing the loop.')
loop.close()
logger.critical('Leaving. Bye!') | python | def run(coro: 'Optional[Coroutine]' = None, *,
loop: Optional[AbstractEventLoop] = None,
shutdown_handler: Optional[Callable[[AbstractEventLoop], None]] = None,
executor_workers: int = 10,
executor: Optional[Executor] = None,
use_uvloop: bool = False) -> None:
"""
Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised.
"""
logger.debug('Entering run()')
assert not (loop and use_uvloop), (
"'loop' and 'use_uvloop' parameters are mutually "
"exclusive. (Just make your own uvloop and pass it in)."
)
if use_uvloop:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop_was_supplied = bool(loop)
if not loop_was_supplied:
loop = get_event_loop()
if coro:
async def new_coro():
"""During shutdown, run_until_complete() will exit
if a CancelledError bubbles up from anything in the
group. To counteract that, we'll try to handle
any CancelledErrors that bubble up from the given
coro. This isn't fool-proof: if the user doesn't
provide a coro, and instead creates their own with
loop.create_task, that task might bubble
a CancelledError into the run_until_complete()."""
try:
await coro
except asyncio.CancelledError:
pass
loop.create_task(new_coro())
shutdown_handler = shutdown_handler or _shutdown_handler
if WINDOWS: # pragma: no cover
# This is to allow CTRL-C to be detected in a timely fashion,
# see: https://bugs.python.org/issue23057#msg246316
loop.create_task(windows_support_wakeup())
# This is to be able to handle SIGBREAK.
def windows_handler(sig, frame):
# Disable the handler so it won't be called again.
signame = signal.Signals(sig).name
logger.critical('Received signal: %s. Stopping the loop.', signame)
shutdown_handler(loop)
signal.signal(signal.SIGBREAK, windows_handler)
signal.signal(signal.SIGINT, windows_handler)
else:
loop.add_signal_handler(SIGINT, shutdown_handler, loop)
loop.add_signal_handler(SIGTERM, shutdown_handler, loop)
# TODO: We probably don't want to create a different executor if the
# TODO: loop was supplied. (User might have put stuff on that loop's
# TODO: executor).
if not executor:
logger.debug('Creating default executor')
executor = ThreadPoolExecutor(max_workers=executor_workers)
loop.set_default_executor(executor)
try:
loop.run_forever()
except KeyboardInterrupt: # pragma: no cover
logger.info('Got KeyboardInterrupt')
if WINDOWS:
# Windows doesn't do any POSIX signal handling, and no
# abstraction layer for signals is currently implemented in
# asyncio. So we fall back to KeyboardInterrupt (triggered
# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT
shutdown_handler()
logger.info('Entering shutdown phase.')
def sep():
tasks = all_tasks(loop=loop)
do_not_cancel = set()
for t in tasks:
# TODO: we don't need access to the coro. We could simply
# TODO: store the task itself in the weakset.
if t._coro in _DO_NOT_CANCEL_COROS:
do_not_cancel.add(t)
tasks -= do_not_cancel
logger.info('Cancelling pending tasks.')
for t in tasks:
logger.debug('Cancelling task: %s', t)
t.cancel()
return tasks, do_not_cancel
tasks, do_not_cancel = sep()
# Here's a protip: if you group a bunch of tasks, and some of them
# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the
# raised CancelledError will bubble up to, and stop the
# loop.run_until_complete() line: meaning, not all the tasks in
# the gathered group will actually be complete. You need to
# enable this with the ``return_exceptions`` flag.
group = gather(*tasks, *do_not_cancel, return_exceptions=True)
logger.info('Running pending tasks till complete')
# TODO: obtain all the results, and log any results that are exceptions
# other than CancelledError. Will be useful for troubleshooting.
loop.run_until_complete(group)
logger.info('Waiting for executor shutdown.')
executor.shutdown(wait=True)
# If loop was supplied, it's up to the caller to close!
if not loop_was_supplied:
logger.info('Closing the loop.')
loop.close()
logger.critical('Leaving. Bye!') | [
"def",
"run",
"(",
"coro",
":",
"'Optional[Coroutine]'",
"=",
"None",
",",
"*",
",",
"loop",
":",
"Optional",
"[",
"AbstractEventLoop",
"]",
"=",
"None",
",",
"shutdown_handler",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"AbstractEventLoop",
"]",
",",
"None",
"]",
"]",
"=",
"None",
",",
"executor_workers",
":",
"int",
"=",
"10",
",",
"executor",
":",
"Optional",
"[",
"Executor",
"]",
"=",
"None",
",",
"use_uvloop",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Entering run()'",
")",
"assert",
"not",
"(",
"loop",
"and",
"use_uvloop",
")",
",",
"(",
"\"'loop' and 'use_uvloop' parameters are mutually \"",
"\"exclusive. (Just make your own uvloop and pass it in).\"",
")",
"if",
"use_uvloop",
":",
"import",
"uvloop",
"asyncio",
".",
"set_event_loop_policy",
"(",
"uvloop",
".",
"EventLoopPolicy",
"(",
")",
")",
"loop_was_supplied",
"=",
"bool",
"(",
"loop",
")",
"if",
"not",
"loop_was_supplied",
":",
"loop",
"=",
"get_event_loop",
"(",
")",
"if",
"coro",
":",
"async",
"def",
"new_coro",
"(",
")",
":",
"\"\"\"During shutdown, run_until_complete() will exit\n if a CancelledError bubbles up from anything in the\n group. To counteract that, we'll try to handle\n any CancelledErrors that bubble up from the given\n coro. This isn't fool-proof: if the user doesn't\n provide a coro, and instead creates their own with\n loop.create_task, that task might bubble\n a CancelledError into the run_until_complete().\"\"\"",
"try",
":",
"await",
"coro",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"loop",
".",
"create_task",
"(",
"new_coro",
"(",
")",
")",
"shutdown_handler",
"=",
"shutdown_handler",
"or",
"_shutdown_handler",
"if",
"WINDOWS",
":",
"# pragma: no cover",
"# This is to allow CTRL-C to be detected in a timely fashion,",
"# see: https://bugs.python.org/issue23057#msg246316",
"loop",
".",
"create_task",
"(",
"windows_support_wakeup",
"(",
")",
")",
"# This is to be able to handle SIGBREAK.",
"def",
"windows_handler",
"(",
"sig",
",",
"frame",
")",
":",
"# Disable the handler so it won't be called again.",
"signame",
"=",
"signal",
".",
"Signals",
"(",
"sig",
")",
".",
"name",
"logger",
".",
"critical",
"(",
"'Received signal: %s. Stopping the loop.'",
",",
"signame",
")",
"shutdown_handler",
"(",
"loop",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGBREAK",
",",
"windows_handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"windows_handler",
")",
"else",
":",
"loop",
".",
"add_signal_handler",
"(",
"SIGINT",
",",
"shutdown_handler",
",",
"loop",
")",
"loop",
".",
"add_signal_handler",
"(",
"SIGTERM",
",",
"shutdown_handler",
",",
"loop",
")",
"# TODO: We probably don't want to create a different executor if the",
"# TODO: loop was supplied. (User might have put stuff on that loop's",
"# TODO: executor).",
"if",
"not",
"executor",
":",
"logger",
".",
"debug",
"(",
"'Creating default executor'",
")",
"executor",
"=",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"executor_workers",
")",
"loop",
".",
"set_default_executor",
"(",
"executor",
")",
"try",
":",
"loop",
".",
"run_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"# pragma: no cover",
"logger",
".",
"info",
"(",
"'Got KeyboardInterrupt'",
")",
"if",
"WINDOWS",
":",
"# Windows doesn't do any POSIX signal handling, and no",
"# abstraction layer for signals is currently implemented in",
"# asyncio. So we fall back to KeyboardInterrupt (triggered",
"# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT",
"shutdown_handler",
"(",
")",
"logger",
".",
"info",
"(",
"'Entering shutdown phase.'",
")",
"def",
"sep",
"(",
")",
":",
"tasks",
"=",
"all_tasks",
"(",
"loop",
"=",
"loop",
")",
"do_not_cancel",
"=",
"set",
"(",
")",
"for",
"t",
"in",
"tasks",
":",
"# TODO: we don't need access to the coro. We could simply",
"# TODO: store the task itself in the weakset.",
"if",
"t",
".",
"_coro",
"in",
"_DO_NOT_CANCEL_COROS",
":",
"do_not_cancel",
".",
"add",
"(",
"t",
")",
"tasks",
"-=",
"do_not_cancel",
"logger",
".",
"info",
"(",
"'Cancelling pending tasks.'",
")",
"for",
"t",
"in",
"tasks",
":",
"logger",
".",
"debug",
"(",
"'Cancelling task: %s'",
",",
"t",
")",
"t",
".",
"cancel",
"(",
")",
"return",
"tasks",
",",
"do_not_cancel",
"tasks",
",",
"do_not_cancel",
"=",
"sep",
"(",
")",
"# Here's a protip: if you group a bunch of tasks, and some of them",
"# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the",
"# raised CancelledError will bubble up to, and stop the",
"# loop.run_until_complete() line: meaning, not all the tasks in",
"# the gathered group will actually be complete. You need to",
"# enable this with the ``return_exceptions`` flag.",
"group",
"=",
"gather",
"(",
"*",
"tasks",
",",
"*",
"do_not_cancel",
",",
"return_exceptions",
"=",
"True",
")",
"logger",
".",
"info",
"(",
"'Running pending tasks till complete'",
")",
"# TODO: obtain all the results, and log any results that are exceptions",
"# other than CancelledError. Will be useful for troubleshooting.",
"loop",
".",
"run_until_complete",
"(",
"group",
")",
"logger",
".",
"info",
"(",
"'Waiting for executor shutdown.'",
")",
"executor",
".",
"shutdown",
"(",
"wait",
"=",
"True",
")",
"# If loop was supplied, it's up to the caller to close!",
"if",
"not",
"loop_was_supplied",
":",
"logger",
".",
"info",
"(",
"'Closing the loop.'",
")",
"loop",
".",
"close",
"(",
")",
"logger",
".",
"critical",
"(",
"'Leaving. Bye!'",
")"
] | Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised. | [
"Start",
"up",
"the",
"event",
"loop",
"and",
"wait",
"for",
"a",
"signal",
"to",
"shut",
"down",
"."
] | 23c73318447f578a4a24845c5f43574ac7b414e4 | https://github.com/cjrh/aiorun/blob/23c73318447f578a4a24845c5f43574ac7b414e4/aiorun.py#L120-L255 | train |
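A sketch of run() with a custom shutdown handler; the handler body is an assumption (the docstring only fixes its signature), but stopping the loop is what lets run_forever() return so the shutdown phase can begin.
from aiorun import run
async def main():
    ...  # spawn the rest of the application from here
def my_shutdown(loop):
    # Conforms to Callable[[AbstractEventLoop], None].
    loop.stop()
run(main(), shutdown_handler=my_shutdown, executor_workers=4)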
emre/storm | storm/kommandr.py | prog.command | def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command | python | def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command | [
"def",
"command",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"collections",
".",
"Callable",
")",
":",
"return",
"self",
".",
"_generate_command",
"(",
"args",
"[",
"0",
"]",
")",
"else",
":",
"def",
"_command",
"(",
"func",
")",
":",
"return",
"self",
".",
"_generate_command",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_command"
] | Convenience decorator that simply creates the corresponding command | [
"Convenient",
"decorator",
"simply",
"creates",
"corresponding",
"command"
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L96-L103 | train |
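Both call styles supported by the branch above, sketched against a hypothetical prog instance named app (its construction is not shown in this excerpt).
# assuming `app` is an already-constructed `prog` instance
@app.command                # bare form: args[0] is the function itself
def version():
    """Print the version."""
    print('0.1')
@app.command('ls')          # parameterized form: 'ls' is forwarded to
def list_entries():         # _generate_command as the command name
    """List entries."""
    print('...')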
emre/storm | storm/kommandr.py | prog._generate_command | def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func | python | def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func | [
"def",
"_generate_command",
"(",
"self",
",",
"func",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"func_pointer",
"=",
"name",
"or",
"func",
".",
"__name__",
"storm_config",
"=",
"get_storm_config",
"(",
")",
"aliases",
",",
"additional_kwarg",
"=",
"None",
",",
"None",
"if",
"'aliases'",
"in",
"storm_config",
":",
"for",
"command",
",",
"alias_list",
"in",
"six",
".",
"iteritems",
"(",
"storm_config",
".",
"get",
"(",
"\"aliases\"",
")",
")",
":",
"if",
"func_pointer",
"==",
"command",
":",
"aliases",
"=",
"alias_list",
"break",
"func_help",
"=",
"func",
".",
"__doc__",
"and",
"func",
".",
"__doc__",
".",
"strip",
"(",
")",
"subparser",
"=",
"self",
".",
"subparsers",
".",
"add_parser",
"(",
"name",
"or",
"func",
".",
"__name__",
",",
"aliases",
"=",
"aliases",
",",
"help",
"=",
"func_help",
")",
"spec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"opts",
"=",
"reversed",
"(",
"list",
"(",
"izip_longest",
"(",
"reversed",
"(",
"spec",
".",
"args",
"or",
"[",
"]",
")",
",",
"reversed",
"(",
"spec",
".",
"defaults",
"or",
"[",
"]",
")",
",",
"fillvalue",
"=",
"self",
".",
"_POSITIONAL",
"(",
")",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"opts",
":",
"argopts",
"=",
"getattr",
"(",
"func",
",",
"'argopts'",
",",
"{",
"}",
")",
"args",
",",
"kwargs",
"=",
"argopts",
".",
"get",
"(",
"k",
",",
"(",
"[",
"]",
",",
"{",
"}",
")",
")",
"args",
"=",
"list",
"(",
"args",
")",
"is_positional",
"=",
"isinstance",
"(",
"v",
",",
"self",
".",
"_POSITIONAL",
")",
"options",
"=",
"[",
"arg",
"for",
"arg",
"in",
"args",
"if",
"arg",
".",
"startswith",
"(",
"'-'",
")",
"]",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'action'",
":",
"'append'",
",",
"}",
")",
"if",
"is_positional",
":",
"if",
"options",
":",
"args",
"=",
"options",
"kwargs",
".",
"update",
"(",
"{",
"'required'",
":",
"True",
",",
"'dest'",
":",
"k",
"}",
")",
"else",
":",
"args",
"=",
"[",
"k",
"]",
"else",
":",
"args",
"=",
"options",
"or",
"[",
"'--%s'",
"%",
"k",
"]",
"kwargs",
".",
"update",
"(",
"{",
"'default'",
":",
"v",
",",
"'dest'",
":",
"k",
"}",
")",
"arg",
"=",
"subparser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"subparser",
".",
"set_defaults",
"(",
"*",
"*",
"{",
"self",
".",
"_COMMAND_FLAG",
":",
"func",
"}",
")",
"return",
"func"
] | Generates a command parser for the given func.
:param func: function to generate a command parser for
:type func: function
:param name: command name
:type name: str
:param **kwargs: keyword arguments that are passed through to
:py:meth:``argparse.ArgumentParser.add_parser``
:type kwargs: dict | [
"Generates",
"a",
"command",
"parser",
"for",
"given",
"func",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L121-L177 | train |
emre/storm | storm/kommandr.py | prog.execute | def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map) | python | def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map) | [
"def",
"execute",
"(",
"self",
",",
"arg_list",
")",
":",
"arg_map",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"arg_list",
")",
".",
"__dict__",
"command",
"=",
"arg_map",
".",
"pop",
"(",
"self",
".",
"_COMMAND_FLAG",
")",
"return",
"command",
"(",
"*",
"*",
"arg_map",
")"
] | Main function to parse and dispatch commands by the given ``arg_list``
:param arg_list: all arguments provided by the command line
:type arg_list: list | [
"Main",
"function",
"to",
"parse",
"and",
"dispatch",
"commands",
"by",
"given",
"arg_list"
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L179-L188 | train |
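Dispatch sketch, continuing the hypothetical app instance from the command examples above; everything past parsing is plain argparse.
import sys
# parse_args() fills a namespace; execute() pops the matched function
# back out from under _COMMAND_FLAG and calls it with the rest as kwargs.
app.execute(sys.argv[1:])   # e.g. ['version'] or ['ls']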
emre/storm | storm/__main__.py | add | def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to the ssh config.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to the ssh config.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"add",
"(",
"name",
",",
"connection_uri",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"# validate name",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"user",
",",
"host",
",",
"port",
"=",
"parse",
"(",
"connection_uri",
",",
"user",
"=",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
",",
"port",
"=",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
"storm_",
".",
"add_entry",
"(",
"name",
",",
"host",
",",
"user",
",",
"port",
",",
"id_file",
",",
"o",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} added to your ssh config. you can connect '",
"'it by typing \"ssh {0}\".'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Adds a new entry to the ssh config. | [
"Adds",
"a",
"new",
"entry",
"to",
"sshconfig",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L34-L63 | train |
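Calling the handler above directly, bypassing the CLI layer; the host, user, and 'key=value' form for o are illustrative assumptions, and parse() fills user/port from the storm defaults when the URI omits them.
add('mysrv', 'deploy@203.0.113.10:2200',
    id_file='~/.ssh/id_rsa',
    o=['StrictHostKeyChecking=no'])
# on success, prints the 'mysrv added to your ssh config...' message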
emre/storm | storm/__main__.py | clone | def clone(name, clone_name, config=None):
"""
Clone an entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def clone(name, clone_name, config=None):
"""
Clone an entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"clone",
"(",
"name",
",",
"clone_name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"# validate name",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"storm_",
".",
"clone_entry",
"(",
"name",
",",
"clone_name",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} added to your ssh config. you can connect '",
"'it by typing \"ssh {0}\".'",
".",
"format",
"(",
"clone_name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Clone an entry in the ssh config. | [
"Clone",
"an",
"entry",
"to",
"the",
"sshconfig",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L67-L90 | train |
emre/storm | storm/__main__.py | move | def move(name, entry_name, config=None):
"""
Move an entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def move(name, entry_name, config=None):
"""
Move an entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"move",
"(",
"name",
",",
"entry_name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"storm_",
".",
"clone_entry",
"(",
"name",
",",
"entry_name",
",",
"keep_original",
"=",
"False",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} moved in ssh config. you can '",
"'connect it by typing \"ssh {0}\".'",
".",
"format",
"(",
"entry_name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Move an entry in the ssh config. | [
"Move",
"an",
"entry",
"to",
"the",
"sshconfig",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L93-L117 | train |
emre/storm | storm/__main__.py | edit | def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in the ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"edit",
"(",
"name",
",",
"connection_uri",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"if",
"','",
"in",
"name",
":",
"name",
"=",
"\" \"",
".",
"join",
"(",
"name",
".",
"split",
"(",
"\",\"",
")",
")",
"user",
",",
"host",
",",
"port",
"=",
"parse",
"(",
"connection_uri",
",",
"user",
"=",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
",",
"port",
"=",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
"storm_",
".",
"edit_entry",
"(",
"name",
",",
"host",
",",
"user",
",",
"port",
",",
"id_file",
",",
"o",
")",
"print",
"(",
"get_formatted_message",
"(",
"'\"{0}\" updated successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Edits the related entry in the ssh config. | [
"Edits",
"the",
"related",
"entry",
"in",
"ssh",
"config",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L120-L143 | train |
emre/storm | storm/__main__.py | update | def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"update",
"(",
"name",
",",
"connection_uri",
"=",
"\"\"",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"settings",
"=",
"{",
"}",
"if",
"id_file",
"!=",
"\"\"",
":",
"settings",
"[",
"'identityfile'",
"]",
"=",
"id_file",
"for",
"option",
"in",
"o",
":",
"k",
",",
"v",
"=",
"option",
".",
"split",
"(",
"\"=\"",
")",
"settings",
"[",
"k",
"]",
"=",
"v",
"try",
":",
"storm_",
".",
"update_entry",
"(",
"name",
",",
"*",
"*",
"settings",
")",
"print",
"(",
"get_formatted_message",
"(",
"'\"{0}\" updated successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries | [
"Enhanced",
"version",
"of",
"the",
"edit",
"command",
"featuring",
"multiple",
"edits",
"using",
"regular",
"expressions",
"to",
"match",
"entries"
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L146-L169 | train |
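A sketch of the regex-based bulk edit above; the pattern and values are illustrative.
# Each "k=v" item in o is split on '=' into the settings dict, which
# update_entry() then applies to every host whose name matches the pattern.
update('web-.*', o=['user=deploy', 'port=2200'])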
emre/storm | storm/__main__.py | delete | def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | python | def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"delete",
"(",
"name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"delete_entry",
"(",
"name",
")",
"print",
"(",
"get_formatted_message",
"(",
"'hostname \"{0}\" deleted successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Deletes a single host. | [
"Deletes",
"a",
"single",
"host",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L172-L187 | train |
emre/storm | storm/__main__.py | list | def list(config=None):
"""
Lists all hosts from the ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | python | def list(config=None):
"""
Lists all hosts from the ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
                        if key not in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
                            # collections.Sequence was removed in Python 3.10, so use
                            # collections.abc.Sequence (needs import collections.abc upstream)
                            if isinstance(value, collections.abc.Sequence):
                                # builtins.list is used because the builtin name
                                # "list" is shadowed by this function's own name
                                if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"list",
"(",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"result",
"=",
"colored",
"(",
"'Listing entries:'",
",",
"'white'",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
"+",
"\"\\n\\n\"",
"result_stack",
"=",
"\"\"",
"for",
"host",
"in",
"storm_",
".",
"list_entries",
"(",
"True",
")",
":",
"if",
"host",
".",
"get",
"(",
"\"type\"",
")",
"==",
"'entry'",
":",
"if",
"not",
"host",
".",
"get",
"(",
"\"host\"",
")",
"==",
"\"*\"",
":",
"result",
"+=",
"\" {0} -> {1}@{2}:{3}\"",
".",
"format",
"(",
"colored",
"(",
"host",
"[",
"\"host\"",
"]",
",",
"'green'",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"user\"",
",",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"hostname\"",
",",
"\"[hostname_not_specified]\"",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"port\"",
",",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
")",
"extra",
"=",
"False",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"host",
".",
"get",
"(",
"\"options\"",
")",
")",
":",
"if",
"not",
"key",
"in",
"[",
"\"user\"",
",",
"\"hostname\"",
",",
"\"port\"",
"]",
":",
"if",
"not",
"extra",
":",
"custom_options",
"=",
"colored",
"(",
"'\\n\\t[custom options] '",
",",
"'white'",
")",
"result",
"+=",
"\" {0}\"",
".",
"format",
"(",
"custom_options",
")",
"extra",
"=",
"True",
"if",
"isinstance",
"(",
"value",
",",
"collections",
".",
"Sequence",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"builtins",
".",
"list",
")",
":",
"value",
"=",
"\",\"",
".",
"join",
"(",
"value",
")",
"result",
"+=",
"\"{0}={1} \"",
".",
"format",
"(",
"key",
",",
"value",
")",
"if",
"extra",
":",
"result",
"=",
"result",
"[",
"0",
":",
"-",
"1",
"]",
"result",
"+=",
"\"\\n\\n\"",
"else",
":",
"result_stack",
"=",
"colored",
"(",
"\" (*) General options: \\n\"",
",",
"\"green\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"host",
".",
"get",
"(",
"\"options\"",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"type",
"(",
"[",
"]",
")",
")",
":",
"result_stack",
"+=",
"\"\\t {0}: \"",
".",
"format",
"(",
"colored",
"(",
"key",
",",
"\"magenta\"",
")",
")",
"result_stack",
"+=",
"', '",
".",
"join",
"(",
"value",
")",
"result_stack",
"+=",
"\"\\n\"",
"else",
":",
"result_stack",
"+=",
"\"\\t {0}: {1}\\n\"",
".",
"format",
"(",
"colored",
"(",
"key",
",",
"\"magenta\"",
")",
",",
"value",
",",
")",
"result_stack",
"=",
"result_stack",
"[",
"0",
":",
"-",
"1",
"]",
"+",
"\"\\n\"",
"result",
"+=",
"result_stack",
"print",
"(",
"get_formatted_message",
"(",
"result",
",",
"\"\"",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Lists all hosts from ssh config. | [
"Lists",
"all",
"hosts",
"from",
"ssh",
"config",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L190-L258 | train |
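The formatter above reads a specific shape out of list_entries(True); here is a hedged sketch of that shape, inferred from the host.get(...) calls in the code rather than from storm's documentation (all values are illustrative):

entry = {
    "type": "entry",             # only "entry" items are formatted
    "host": "dev-box",           # "*" routes to the general-options branch
    "options": {
        "user": "root",                     # falls back to storm_.defaults
        "hostname": "10.0.0.5",             # falls back to "[hostname_not_specified]"
        "port": "22",                       # falls back to storm_.defaults
        "identityfile": ["~/.ssh/id_rsa"],  # list-valued options get comma-joined
    },
}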
emre/storm | storm/__main__.py | search | def search(search_text, config=None):
"""
    Searches entries by the given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
            print('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | python | def search(search_text, config=None):
"""
    Searches entries by the given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
            print('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"search",
"(",
"search_text",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"results",
"=",
"storm_",
".",
"search_host",
"(",
"search_text",
")",
"if",
"len",
"(",
"results",
")",
"==",
"0",
":",
"print",
"(",
"'no results found.'",
")",
"if",
"len",
"(",
"results",
")",
">",
"0",
":",
"message",
"=",
"'Listing results for {0}:\\n'",
".",
"format",
"(",
"search_text",
")",
"message",
"+=",
"\"\"",
".",
"join",
"(",
"results",
")",
"print",
"(",
"message",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Searches entries by the given search text. | [
"Searches",
"entries",
"by",
"given",
"search",
"text",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L261-L278 | train |
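A short usage sketch for the search handler (the search term is hypothetical; search_host is assumed to return pre-formatted strings, which is all the handler's "".join(results) requires):

from storm.__main__ import search  # assumption: the module imports cleanly

search("dev")  # prints matches under 'Listing results for dev:', or 'no results found.'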
emre/storm | storm/__main__.py | delete_all | def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | python | def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"delete_all",
"(",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"delete_all_entries",
"(",
")",
"print",
"(",
"get_formatted_message",
"(",
"'all entries deleted.'",
",",
"'success'",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Deletes all hosts from ssh config. | [
"Deletes",
"all",
"hosts",
"from",
"ssh",
"config",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L281-L292 | train |
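Because delete_all is destructive, a sketch of a safer call sequence that pairs it with the backup handler defined next (the backup path is illustrative):

from storm.__main__ import backup, delete_all

backup("/tmp/ssh_config.bak")  # snapshot the config first; hypothetical target path
delete_all()                   # then remove every entry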
emre/storm | storm/__main__.py | backup | def backup(target_file, config=None):
"""
    Backs up the main ssh configuration into the target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | python | def backup(target_file, config=None):
"""
    Backs up the main ssh configuration into the target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"backup",
"(",
"target_file",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"backup",
"(",
"target_file",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Backs up the main ssh configuration into the target file. | [
"Backups",
"the",
"main",
"ssh",
"configuration",
"into",
"target",
"file",
"."
] | c752defc1b718cfffbf0e0e15532fa1d7840bf6d | https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L295-L304 | train |
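The handler above is a thin wrapper; a sketch of the equivalent direct call, assuming get_storm_instance is importable from the same module (the path is illustrative):

from storm.__main__ import get_storm_instance

storm_ = get_storm_instance()          # config=None selects the default ssh config
storm_.backup("/tmp/ssh_config.bak")   # copies the configuration to the given file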