| column | type |
|---|---|
| repo | string (7-55 chars) |
| path | string (4-127 chars) |
| func_name | string (1-88 chars) |
| original_string | string (75-19.8k chars) |
| language | string (1 class) |
| code | string (75-19.8k chars) |
| code_tokens | sequence |
| docstring | string (3-17.3k chars) |
| docstring_tokens | sequence |
| sha | string (40 chars) |
| url | string (87-242 chars) |
| partition | string (1 class) |
btimby/fulltext | fulltext/data/winmake.py | rm | python

```python
def rm(pattern):
    """Recursively remove a file or dir by pattern."""
    paths = glob.glob(pattern)
    for path in paths:
        if path.startswith('.git/'):
            continue
        if os.path.isdir(path):
            def onerror(fun, path, excinfo):
                exc = excinfo[1]
                if exc.errno != errno.ENOENT:
                    raise
            safe_print("rmdir -f %s" % path)
            shutil.rmtree(path, onerror=onerror)
        else:
            safe_print("rm %s" % path)
            os.remove(path)
```
"""Recursively remove a file or dir by pattern."""
paths = glob.glob(pattern)
for path in paths:
if path.startswith('.git/'):
continue
if os.path.isdir(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
safe_print("rmdir -f %s" % path)
shutil.rmtree(path, onerror=onerror)
else:
safe_print("rm %s" % path)
os.remove(path) | [
"def",
"rm",
"(",
"pattern",
")",
":",
"paths",
"=",
"glob",
".",
"glob",
"(",
"pattern",
")",
"for",
"path",
"in",
"paths",
":",
"if",
"path",
".",
"startswith",
"(",
"'.git/'",
")",
":",
"continue",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"def",
"onerror",
"(",
"fun",
",",
"path",
",",
"excinfo",
")",
":",
"exc",
"=",
"excinfo",
"[",
"1",
"]",
"if",
"exc",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise",
"safe_print",
"(",
"\"rmdir -f %s\"",
"%",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"path",
",",
"onerror",
"=",
"onerror",
")",
"else",
":",
"safe_print",
"(",
"\"rm %s\"",
"%",
"path",
")",
"os",
".",
"remove",
"(",
"path",
")"
] | Recursively remove a file or dir by pattern. | [
"Recursively",
"remove",
"a",
"file",
"or",
"dir",
"by",
"pattern",
"."
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L106-L122 | train |
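The clean() target later in this file drives rm() with shell-style glob patterns. A minimal sketch of two such calls (assuming the module's glob/os/shutil/errno imports and the safe_print helper are in place):

```python
rm("*.pyc")   # glob-expands the pattern; matching files are deleted with os.remove
rm("build")   # a matching directory is removed recursively with shutil.rmtree
```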
btimby/fulltext | fulltext/data/winmake.py | help | python

```python
def help():
    """Print this help"""
    safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
    for name in sorted(_cmds):
        safe_print(
            " %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
    sys.exit(1)
```
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1) | [
"def",
"help",
"(",
")",
":",
"safe_print",
"(",
"'Run \"make [-p <PYTHON>] <target>\" where <target> is one of:'",
")",
"for",
"name",
"in",
"sorted",
"(",
"_cmds",
")",
":",
"safe_print",
"(",
"\" %-20s %s\"",
"%",
"(",
"name",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
",",
"_cmds",
"[",
"name",
"]",
"or",
"''",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Print this help | [
"Print",
"this",
"help"
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L149-L155 | train |
btimby/fulltext | fulltext/data/winmake.py | clean | python

```python
def clean():
    """Deletes dev files"""
    rm("$testfn*")
    rm("*.bak")
    rm("*.core")
    rm("*.egg-info")
    rm("*.orig")
    rm("*.pyc")
    rm("*.pyd")
    rm("*.pyo")
    rm("*.rej")
    rm("*.so")
    rm("*.~")
    rm("*__pycache__")
    rm(".coverage")
    rm(".tox")
    rm(".coverage")
    rm("build")
    rm("dist")
    rm("docs/_build")
    rm("htmlcov")
    rm("tmp")
    rm("venv")
```
"""Deletes dev files"""
rm("$testfn*")
rm("*.bak")
rm("*.core")
rm("*.egg-info")
rm("*.orig")
rm("*.pyc")
rm("*.pyd")
rm("*.pyo")
rm("*.rej")
rm("*.so")
rm("*.~")
rm("*__pycache__")
rm(".coverage")
rm(".tox")
rm(".coverage")
rm("build")
rm("dist")
rm("docs/_build")
rm("htmlcov")
rm("tmp")
rm("venv") | [
"def",
"clean",
"(",
")",
":",
"rm",
"(",
"\"$testfn*\"",
")",
"rm",
"(",
"\"*.bak\"",
")",
"rm",
"(",
"\"*.core\"",
")",
"rm",
"(",
"\"*.egg-info\"",
")",
"rm",
"(",
"\"*.orig\"",
")",
"rm",
"(",
"\"*.pyc\"",
")",
"rm",
"(",
"\"*.pyd\"",
")",
"rm",
"(",
"\"*.pyo\"",
")",
"rm",
"(",
"\"*.rej\"",
")",
"rm",
"(",
"\"*.so\"",
")",
"rm",
"(",
"\"*.~\"",
")",
"rm",
"(",
"\"*__pycache__\"",
")",
"rm",
"(",
"\".coverage\"",
")",
"rm",
"(",
"\".tox\"",
")",
"rm",
"(",
"\".coverage\"",
")",
"rm",
"(",
"\"build\"",
")",
"rm",
"(",
"\"dist\"",
")",
"rm",
"(",
"\"docs/_build\"",
")",
"rm",
"(",
"\"htmlcov\"",
")",
"rm",
"(",
"\"tmp\"",
")",
"rm",
"(",
"\"venv\"",
")"
] | Deletes dev files | [
"Deletes",
"dev",
"files"
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L200-L222 | train |
btimby/fulltext | fulltext/data/winmake.py | lint | python

```python
def lint():
    """Run flake8 against all py files"""
    py_files = subprocess.check_output("git ls-files")
    if PY3:
        py_files = py_files.decode()
    py_files = [x for x in py_files.split() if x.endswith('.py')]
    py_files = ' '.join(py_files)
    sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
```
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True) | [
"def",
"lint",
"(",
")",
":",
"py_files",
"=",
"subprocess",
".",
"check_output",
"(",
"\"git ls-files\"",
")",
"if",
"PY3",
":",
"py_files",
"=",
"py_files",
".",
"decode",
"(",
")",
"py_files",
"=",
"[",
"x",
"for",
"x",
"in",
"py_files",
".",
"split",
"(",
")",
"if",
"x",
".",
"endswith",
"(",
"'.py'",
")",
"]",
"py_files",
"=",
"' '",
".",
"join",
"(",
"py_files",
")",
"sh",
"(",
"\"%s -m flake8 %s\"",
"%",
"(",
"PYTHON",
",",
"py_files",
")",
",",
"nolog",
"=",
"True",
")"
] | Run flake8 against all py files | [
"Run",
"flake8",
"against",
"all",
"py",
"files"
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L234-L241 | train |
btimby/fulltext | fulltext/data/winmake.py | coverage | python

```python
def coverage():
    """Run coverage tests."""
    # Note: coverage options are controlled by .coveragerc file
    install()
    test_setup()
    sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
    sh("%s -m coverage report" % PYTHON)
    sh("%s -m coverage html" % PYTHON)
    sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
```
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TEST_SCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON) | [
"def",
"coverage",
"(",
")",
":",
"# Note: coverage options are controlled by .coveragerc file",
"install",
"(",
")",
"test_setup",
"(",
")",
"sh",
"(",
"\"%s -m coverage run %s\"",
"%",
"(",
"PYTHON",
",",
"TEST_SCRIPT",
")",
")",
"sh",
"(",
"\"%s -m coverage report\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"%s -m coverage html\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"%s -m webbrowser -t htmlcov/index.html\"",
"%",
"PYTHON",
")"
] | Run coverage tests. | [
"Run",
"coverage",
"tests",
"."
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L261-L269 | train |
btimby/fulltext | fulltext/data/winmake.py | venv | python

```python
def venv():
    """Install venv + deps."""
    try:
        import virtualenv  # NOQA
    except ImportError:
        sh("%s -m pip install virtualenv" % PYTHON)
    if not os.path.isdir("venv"):
        sh("%s -m virtualenv venv" % PYTHON)
    sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT))
```
"""Install venv + deps."""
try:
import virtualenv # NOQA
except ImportError:
sh("%s -m pip install virtualenv" % PYTHON)
if not os.path.isdir("venv"):
sh("%s -m virtualenv venv" % PYTHON)
sh("venv\\Scripts\\pip install -r %s" % (REQUIREMENTS_TXT)) | [
"def",
"venv",
"(",
")",
":",
"try",
":",
"import",
"virtualenv",
"# NOQA",
"except",
"ImportError",
":",
"sh",
"(",
"\"%s -m pip install virtualenv\"",
"%",
"PYTHON",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"venv\"",
")",
":",
"sh",
"(",
"\"%s -m virtualenv venv\"",
"%",
"PYTHON",
")",
"sh",
"(",
"\"venv\\\\Scripts\\\\pip install -r %s\"",
"%",
"(",
"REQUIREMENTS_TXT",
")",
")"
] | Install venv + deps. | [
"Install",
"venv",
"+",
"deps",
"."
] | 9234cc1e2099209430e20317649549026de283ce | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/data/winmake.py#L311-L319 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/kdbx4.py | compute_header_hmac_hash | python

```python
def compute_header_hmac_hash(context):
    """Compute HMAC-SHA256 hash of header.
    Used to prevent header tampering."""

    return hmac.new(
        hashlib.sha512(
            b'\xff' * 8 +
            hashlib.sha512(
                context._.header.value.dynamic_header.master_seed.data +
                context.transformed_key +
                b'\x01'
            ).digest()
        ).digest(),
        context._.header.data,
        hashlib.sha256
    ).digest()
```
"""Compute HMAC-SHA256 hash of header.
Used to prevent header tampering."""
return hmac.new(
hashlib.sha512(
b'\xff' * 8 +
hashlib.sha512(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key +
b'\x01'
).digest()
).digest(),
context._.header.data,
hashlib.sha256
).digest() | [
"def",
"compute_header_hmac_hash",
"(",
"context",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"hashlib",
".",
"sha512",
"(",
"b'\\xff'",
"*",
"8",
"+",
"hashlib",
".",
"sha512",
"(",
"context",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"context",
".",
"transformed_key",
"+",
"b'\\x01'",
")",
".",
"digest",
"(",
")",
")",
".",
"digest",
"(",
")",
",",
"context",
".",
"_",
".",
"header",
".",
"data",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")"
] | Compute HMAC-SHA256 hash of header.
Used to prevent header tampering. | [
"Compute",
"HMAC",
"-",
"SHA256",
"hash",
"of",
"header",
".",
"Used",
"to",
"prevent",
"header",
"tampering",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L64-L79 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/kdbx4.py | compute_payload_block_hash | python

```python
def compute_payload_block_hash(this):
    """Compute hash of each payload block.
    Used to prevent payload corruption and tampering."""

    return hmac.new(
        hashlib.sha512(
            struct.pack('<Q', this._index) +
            hashlib.sha512(
                this._._.header.value.dynamic_header.master_seed.data +
                this._.transformed_key + b'\x01'
            ).digest()
        ).digest(),
        struct.pack('<Q', this._index) +
        struct.pack('<I', len(this.block_data)) +
        this.block_data, hashlib.sha256
    ).digest()
```
"""Compute hash of each payload block.
Used to prevent payload corruption and tampering."""
return hmac.new(
hashlib.sha512(
struct.pack('<Q', this._index) +
hashlib.sha512(
this._._.header.value.dynamic_header.master_seed.data +
this._.transformed_key + b'\x01'
).digest()
).digest(),
struct.pack('<Q', this._index) +
struct.pack('<I', len(this.block_data)) +
this.block_data, hashlib.sha256
).digest() | [
"def",
"compute_payload_block_hash",
"(",
"this",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"hashlib",
".",
"sha512",
"(",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"this",
".",
"_index",
")",
"+",
"hashlib",
".",
"sha512",
"(",
"this",
".",
"_",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"this",
".",
"_",
".",
"transformed_key",
"+",
"b'\\x01'",
")",
".",
"digest",
"(",
")",
")",
".",
"digest",
"(",
")",
",",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"this",
".",
"_index",
")",
"+",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"len",
"(",
"this",
".",
"block_data",
")",
")",
"+",
"this",
".",
"block_data",
",",
"hashlib",
".",
"sha256",
")",
".",
"digest",
"(",
")"
] | Compute hash of each payload block.
Used to prevent payload corruption and tampering. | [
"Compute",
"hash",
"of",
"each",
"payload",
"block",
".",
"Used",
"to",
"prevent",
"payload",
"corruption",
"and",
"tampering",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/kdbx4.py#L156-L171 | train |
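Restated in notation of mine, derived directly from the two functions above, with master seed $s$, transformed key $T$, block index $i$, block data $d_i$, and LE64/LE32 denoting little-endian 64- and 32-bit encodings:

$$K_i = \mathrm{SHA512}\big(\mathrm{LE64}(i)\,\|\,\mathrm{SHA512}(s\,\|\,T\,\|\,\mathtt{0x01})\big), \qquad \mathit{tag}_i = \mathrm{HMAC\text{-}SHA256}_{K_i}\big(\mathrm{LE64}(i)\,\|\,\mathrm{LE32}(|d_i|)\,\|\,d_i\big)$$

The header tag uses the same construction, with eight 0xFF bytes in place of the block index and the raw header bytes as the message.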
pschmitt/pykeepass | pykeepass/kdbx_parsing/pytwofish.py | Twofish.decrypt | python

```python
def decrypt(self, block):
    """Decrypt blocks."""
    if len(block) % 16:
        raise ValueError("block size must be a multiple of 16")
    plaintext = b''
    while block:
        a, b, c, d = struct.unpack("<4L", block[:16])
        temp = [a, b, c, d]
        decrypt(self.context, temp)
        plaintext += struct.pack("<4L", *temp)
        block = block[16:]
    return plaintext
```
"""Decrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
plaintext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[:16])
temp = [a, b, c, d]
decrypt(self.context, temp)
plaintext += struct.pack("<4L", *temp)
block = block[16:]
return plaintext | [
"def",
"decrypt",
"(",
"self",
",",
"block",
")",
":",
"if",
"len",
"(",
"block",
")",
"%",
"16",
":",
"raise",
"ValueError",
"(",
"\"block size must be a multiple of 16\"",
")",
"plaintext",
"=",
"b''",
"while",
"block",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"struct",
".",
"unpack",
"(",
"\"<4L\"",
",",
"block",
"[",
":",
"16",
"]",
")",
"temp",
"=",
"[",
"a",
",",
"b",
",",
"c",
",",
"d",
"]",
"decrypt",
"(",
"self",
".",
"context",
",",
"temp",
")",
"plaintext",
"+=",
"struct",
".",
"pack",
"(",
"\"<4L\"",
",",
"*",
"temp",
")",
"block",
"=",
"block",
"[",
"16",
":",
"]",
"return",
"plaintext"
] | Decrypt blocks. | [
"Decrypt",
"blocks",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L81-L96 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/pytwofish.py | Twofish.encrypt | python

```python
def encrypt(self, block):
    """Encrypt blocks."""
    if len(block) % 16:
        raise ValueError("block size must be a multiple of 16")
    ciphertext = b''
    while block:
        a, b, c, d = struct.unpack("<4L", block[0:16])
        temp = [a, b, c, d]
        encrypt(self.context, temp)
        ciphertext += struct.pack("<4L", *temp)
        block = block[16:]
    return ciphertext
```
"""Encrypt blocks."""
if len(block) % 16:
raise ValueError("block size must be a multiple of 16")
ciphertext = b''
while block:
a, b, c, d = struct.unpack("<4L", block[0:16])
temp = [a, b, c, d]
encrypt(self.context, temp)
ciphertext += struct.pack("<4L", *temp)
block = block[16:]
return ciphertext | [
"def",
"encrypt",
"(",
"self",
",",
"block",
")",
":",
"if",
"len",
"(",
"block",
")",
"%",
"16",
":",
"raise",
"ValueError",
"(",
"\"block size must be a multiple of 16\"",
")",
"ciphertext",
"=",
"b''",
"while",
"block",
":",
"a",
",",
"b",
",",
"c",
",",
"d",
"=",
"struct",
".",
"unpack",
"(",
"\"<4L\"",
",",
"block",
"[",
"0",
":",
"16",
"]",
")",
"temp",
"=",
"[",
"a",
",",
"b",
",",
"c",
",",
"d",
"]",
"encrypt",
"(",
"self",
".",
"context",
",",
"temp",
")",
"ciphertext",
"+=",
"struct",
".",
"pack",
"(",
"\"<4L\"",
",",
"*",
"temp",
")",
"block",
"=",
"block",
"[",
"16",
":",
"]",
"return",
"ciphertext"
] | Encrypt blocks. | [
"Encrypt",
"blocks",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/pytwofish.py#L99-L114 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/common.py | aes_kdf | python

```python
def aes_kdf(key, rounds, password=None, keyfile=None):
    """Set up a context for AES128-ECB encryption to find transformed_key"""
    cipher = AES.new(key, AES.MODE_ECB)
    key_composite = compute_key_composite(
        password=password,
        keyfile=keyfile
    )
    # get the number of rounds from the header and transform the key_composite
    transformed_key = key_composite
    for _ in range(0, rounds):
        transformed_key = cipher.encrypt(transformed_key)
    return hashlib.sha256(transformed_key).digest()
```
"""Set up a context for AES128-ECB encryption to find transformed_key"""
cipher = AES.new(key, AES.MODE_ECB)
key_composite = compute_key_composite(
password=password,
keyfile=keyfile
)
# get the number of rounds from the header and transform the key_composite
transformed_key = key_composite
for _ in range(0, rounds):
transformed_key = cipher.encrypt(transformed_key)
return hashlib.sha256(transformed_key).digest() | [
"def",
"aes_kdf",
"(",
"key",
",",
"rounds",
",",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"cipher",
"=",
"AES",
".",
"new",
"(",
"key",
",",
"AES",
".",
"MODE_ECB",
")",
"key_composite",
"=",
"compute_key_composite",
"(",
"password",
"=",
"password",
",",
"keyfile",
"=",
"keyfile",
")",
"# get the number of rounds from the header and transform the key_composite",
"transformed_key",
"=",
"key_composite",
"for",
"_",
"in",
"range",
"(",
"0",
",",
"rounds",
")",
":",
"transformed_key",
"=",
"cipher",
".",
"encrypt",
"(",
"transformed_key",
")",
"return",
"hashlib",
".",
"sha256",
"(",
"transformed_key",
")",
".",
"digest",
"(",
")"
] | Set up a context for AES128-ECB encryption to find transformed_key | [
"Set",
"up",
"a",
"context",
"for",
"AES128",
"-",
"ECB",
"encryption",
"to",
"find",
"transformed_key"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L84-L98 | train |
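A hedged usage sketch; in a real database the 32-byte transform seed and the round count come from the KDBX header, the values below are placeholders:

```python
transform_seed = b'\x00' * 32      # placeholder for the header's transform seed
transform_rounds = 60000           # placeholder for the header's round count

transformed_key = aes_kdf(transform_seed, transform_rounds, password='correct horse')
assert len(transformed_key) == 32  # SHA-256 digest of the repeatedly encrypted composite
```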
pschmitt/pykeepass | pykeepass/kdbx_parsing/common.py | compute_key_composite | python

```python
def compute_key_composite(password=None, keyfile=None):
    """Compute composite key.
    Used in header verification and payload decryption."""
    # hash the password
    if password:
        password_composite = hashlib.sha256(password.encode('utf-8')).digest()
    else:
        password_composite = b''
    # hash the keyfile
    if keyfile:
        # try to read XML keyfile
        try:
            with open(keyfile, 'r') as f:
                tree = etree.parse(f).getroot()
                keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
        # otherwise, try to read plain keyfile
        except (etree.XMLSyntaxError, UnicodeDecodeError):
            try:
                with open(keyfile, 'rb') as f:
                    key = f.read()
                    try:
                        int(key, 16)
                        is_hex = True
                    except ValueError:
                        is_hex = False
                    # if the length is 32 bytes we assume it is the key
                    if len(key) == 32:
                        keyfile_composite = key
                    # if the length is 64 bytes we assume the key is hex encoded
                    elif len(key) == 64 and is_hex:
                        keyfile_composite = codecs.decode(key, 'hex')
                    # anything else may be a file to hash for the key
                    else:
                        keyfile_composite = hashlib.sha256(key).digest()
            except:
                raise IOError('Could not read keyfile')
    else:
        keyfile_composite = b''
    # create composite key from password and keyfile composites
    return hashlib.sha256(password_composite + keyfile_composite).digest()
```
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest() | [
"def",
"compute_key_composite",
"(",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"# hash the password",
"if",
"password",
":",
"password_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"else",
":",
"password_composite",
"=",
"b''",
"# hash the keyfile",
"if",
"keyfile",
":",
"# try to read XML keyfile",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'r'",
")",
"as",
"f",
":",
"tree",
"=",
"etree",
".",
"parse",
"(",
"f",
")",
".",
"getroot",
"(",
")",
"keyfile_composite",
"=",
"base64",
".",
"b64decode",
"(",
"tree",
".",
"find",
"(",
"'Key/Data'",
")",
".",
"text",
")",
"# otherwise, try to read plain keyfile",
"except",
"(",
"etree",
".",
"XMLSyntaxError",
",",
"UnicodeDecodeError",
")",
":",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'rb'",
")",
"as",
"f",
":",
"key",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"int",
"(",
"key",
",",
"16",
")",
"is_hex",
"=",
"True",
"except",
"ValueError",
":",
"is_hex",
"=",
"False",
"# if the length is 32 bytes we assume it is the key",
"if",
"len",
"(",
"key",
")",
"==",
"32",
":",
"keyfile_composite",
"=",
"key",
"# if the length is 64 bytes we assume the key is hex encoded",
"elif",
"len",
"(",
"key",
")",
"==",
"64",
"and",
"is_hex",
":",
"keyfile_composite",
"=",
"codecs",
".",
"decode",
"(",
"key",
",",
"'hex'",
")",
"# anything else may be a file to hash for the key",
"else",
":",
"keyfile_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"key",
")",
".",
"digest",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'Could not read keyfile'",
")",
"else",
":",
"keyfile_composite",
"=",
"b''",
"# create composite key from password and keyfile composites",
"return",
"hashlib",
".",
"sha256",
"(",
"password_composite",
"+",
"keyfile_composite",
")",
".",
"digest",
"(",
")"
] | Compute composite key.
Used in header verification and payload decryption. | [
"Compute",
"composite",
"key",
".",
"Used",
"in",
"header",
"verification",
"and",
"payload",
"decryption",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L101-L144 | train |
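For a password-only database (no keyfile) the function above reduces to a double SHA-256. A standalone sketch of that special case:

```python
import hashlib

def password_only_composite(password):
    # equivalent to compute_key_composite(password=password, keyfile=None)
    return hashlib.sha256(hashlib.sha256(password.encode('utf-8')).digest()).digest()
```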
pschmitt/pykeepass | pykeepass/kdbx_parsing/common.py | compute_master | python

```python
def compute_master(context):
    """Computes master key from transformed key and master seed.
    Used in payload decryption."""
    # combine the transformed key with the header master seed to find the master_key
    master_key = hashlib.sha256(
        context._.header.value.dynamic_header.master_seed.data +
        context.transformed_key).digest()
    return master_key
```
"""Computes master key from transformed key and master seed.
Used in payload decryption."""
# combine the transformed key with the header master seed to find the master_key
master_key = hashlib.sha256(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key).digest()
return master_key | [
"def",
"compute_master",
"(",
"context",
")",
":",
"# combine the transformed key with the header master seed to find the master_key",
"master_key",
"=",
"hashlib",
".",
"sha256",
"(",
"context",
".",
"_",
".",
"header",
".",
"value",
".",
"dynamic_header",
".",
"master_seed",
".",
"data",
"+",
"context",
".",
"transformed_key",
")",
".",
"digest",
"(",
")",
"return",
"master_key"
] | Computes master key from transformed key and master seed.
Used in payload decryption. | [
"Computes",
"master",
"key",
"from",
"transformed",
"key",
"and",
"master",
"seed",
".",
"Used",
"in",
"payload",
"decryption",
"."
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L146-L154 | train |
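In formula form, with master seed $s$ and transformed key $T$ from the key-derivation step above: $\text{master\_key} = \mathrm{SHA256}(s \,\|\, T)$, the key subsequently used for payload decryption.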
pschmitt/pykeepass | pykeepass/kdbx_parsing/common.py | Unprotect | python

```python
def Unprotect(protected_stream_id, protected_stream_key, subcon):
    """Select stream cipher based on protected_stream_id"""
    return Switch(
        protected_stream_id,
        {'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
         'salsa20': Salsa20Stream(protected_stream_key, subcon),
         'chacha20': ChaCha20Stream(protected_stream_key, subcon),
        },
        default=subcon
    )
```
"""Select stream cipher based on protected_stream_id"""
return Switch(
protected_stream_id,
{'arcfourvariant': ARCFourVariantStream(protected_stream_key, subcon),
'salsa20': Salsa20Stream(protected_stream_key, subcon),
'chacha20': ChaCha20Stream(protected_stream_key, subcon),
},
default=subcon
) | [
"def",
"Unprotect",
"(",
"protected_stream_id",
",",
"protected_stream_key",
",",
"subcon",
")",
":",
"return",
"Switch",
"(",
"protected_stream_id",
",",
"{",
"'arcfourvariant'",
":",
"ARCFourVariantStream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"'salsa20'",
":",
"Salsa20Stream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"'chacha20'",
":",
"ChaCha20Stream",
"(",
"protected_stream_key",
",",
"subcon",
")",
",",
"}",
",",
"default",
"=",
"subcon",
")"
] | Select stream cipher based on protected_stream_id | [
"Select",
"stream",
"cipher",
"based",
"on",
"protected_stream_id"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L231-L241 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/twofish.py | BlockCipher.encrypt | python

```python
def encrypt(self,plaintext,n=''):
    """Encrypt some plaintext

    plaintext = a string of binary data
    n = the 'tweak' value when the chaining mode is XTS

    The encrypt function will encrypt the supplied plaintext.
    The behavior varies slightly depending on the chaining mode.

    ECB, CBC:
    ---------
    When the supplied plaintext is not a multiple of the blocksize
    of the cipher, then the remaining plaintext will be cached.
    The next time the encrypt function is called with some plaintext,
    the new plaintext will be concatenated to the cache and then
    cache+plaintext will be encrypted.

    CFB, OFB, CTR:
    --------------
    When the chaining mode allows the cipher to act as a stream cipher,
    the encrypt function will always encrypt all of the supplied
    plaintext immediately. No cache will be kept.

    XTS:
    ----
    Because the handling of the last two blocks is linked,
    it needs the whole block of plaintext to be supplied at once.
    Every encrypt function called on a XTS cipher will output
    an encrypted block based on the current supplied plaintext block.

    CMAC:
    -----
    Everytime the function is called, the hash from the input data is calculated.
    No finalizing needed.
    The hashlength is equal to block size of the used block cipher.
    """
    #self.ed = 'e' if chain is encrypting, 'd' if decrypting,
    # None if nothing happened with the chain yet
    #assert self.ed in ('e',None)
    # makes sure you don't encrypt with a cipher that has started decrypting
    self.ed = 'e'
    if self.mode == MODE_XTS:
        # data sequence number (or 'tweak') has to be provided when in XTS mode
        return self.chain.update(plaintext,'e',n)
    else:
        return self.chain.update(plaintext,'e')
```
"""Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('e',None)
# makes sure you don't encrypt with a cipher that has started decrypting
self.ed = 'e'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(plaintext,'e',n)
else:
return self.chain.update(plaintext,'e') | [
"def",
"encrypt",
"(",
"self",
",",
"plaintext",
",",
"n",
"=",
"''",
")",
":",
"#self.ed = 'e' if chain is encrypting, 'd' if decrypting,",
"# None if nothing happened with the chain yet",
"#assert self.ed in ('e',None) ",
"# makes sure you don't encrypt with a cipher that has started decrypting",
"self",
".",
"ed",
"=",
"'e'",
"if",
"self",
".",
"mode",
"==",
"MODE_XTS",
":",
"# data sequence number (or 'tweak') has to be provided when in XTS mode",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"plaintext",
",",
"'e'",
",",
"n",
")",
"else",
":",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"plaintext",
",",
"'e'",
")"
] | Encrypt some plaintext
plaintext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The encrypt function will encrypt the supplied plaintext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied plaintext is not a multiple of the blocksize
of the cipher, then the remaining plaintext will be cached.
The next time the encrypt function is called with some plaintext,
the new plaintext will be concatenated to the cache and then
cache+plaintext will be encrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the encrypt function will always encrypt all of the supplied
plaintext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of plaintext to be supplied at once.
Every encrypt function called on a XTS cipher will output
an encrypted block based on the current supplied plaintext block.
CMAC:
-----
Everytime the function is called, the hash from the input data is calculated.
No finalizing needed.
The hashlength is equal to block size of the used block cipher. | [
"Encrypt",
"some",
"plaintext"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L114-L159 | train |
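A sketch of the caching behavior the docstring describes, assuming `cipher` is an already-constructed BlockCipher in CBC (or ECB) mode; construction of the underlying chain is omitted here:

```python
out1 = cipher.encrypt(b'A' * 20)   # one 16-byte block is emitted; 4 bytes stay in the cache
out2 = cipher.encrypt(b'B' * 12)   # cached 4 bytes + new 12 bytes complete the next block
```

decrypt() below mirrors this caching for ECB and CBC.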
pschmitt/pykeepass | pykeepass/kdbx_parsing/twofish.py | BlockCipher.decrypt | python

```python
def decrypt(self,ciphertext,n=''):
    """Decrypt some ciphertext

    ciphertext = a string of binary data
    n = the 'tweak' value when the chaining mode is XTS

    The decrypt function will decrypt the supplied ciphertext.
    The behavior varies slightly depending on the chaining mode.

    ECB, CBC:
    ---------
    When the supplied ciphertext is not a multiple of the blocksize
    of the cipher, then the remaining ciphertext will be cached.
    The next time the decrypt function is called with some ciphertext,
    the new ciphertext will be concatenated to the cache and then
    cache+ciphertext will be decrypted.

    CFB, OFB, CTR:
    --------------
    When the chaining mode allows the cipher to act as a stream cipher,
    the decrypt function will always decrypt all of the supplied
    ciphertext immediately. No cache will be kept.

    XTS:
    ----
    Because the handling of the last two blocks is linked,
    it needs the whole block of ciphertext to be supplied at once.
    Every decrypt function called on a XTS cipher will output
    a decrypted block based on the current supplied ciphertext block.

    CMAC:
    -----
    Mode not supported for decryption as this does not make sense.
    """
    #self.ed = 'e' if chain is encrypting, 'd' if decrypting,
    # None if nothing happened with the chain yet
    #assert self.ed in ('d',None)
    # makes sure you don't decrypt with a cipher that has started encrypting
    self.ed = 'd'
    if self.mode == MODE_XTS:
        # data sequence number (or 'tweak') has to be provided when in XTS mode
        return self.chain.update(ciphertext,'d',n)
    else:
        return self.chain.update(ciphertext,'d')
```
"""Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
"""
#self.ed = 'e' if chain is encrypting, 'd' if decrypting,
# None if nothing happened with the chain yet
#assert self.ed in ('d',None)
# makes sure you don't decrypt with a cipher that has started encrypting
self.ed = 'd'
if self.mode == MODE_XTS:
# data sequence number (or 'tweak') has to be provided when in XTS mode
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d') | [
"def",
"decrypt",
"(",
"self",
",",
"ciphertext",
",",
"n",
"=",
"''",
")",
":",
"#self.ed = 'e' if chain is encrypting, 'd' if decrypting,",
"# None if nothing happened with the chain yet",
"#assert self.ed in ('d',None)",
"# makes sure you don't decrypt with a cipher that has started encrypting",
"self",
".",
"ed",
"=",
"'d'",
"if",
"self",
".",
"mode",
"==",
"MODE_XTS",
":",
"# data sequence number (or 'tweak') has to be provided when in XTS mode",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"ciphertext",
",",
"'d'",
",",
"n",
")",
"else",
":",
"return",
"self",
".",
"chain",
".",
"update",
"(",
"ciphertext",
",",
"'d'",
")"
] | Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense. | [
"Decrypt",
"some",
"ciphertext"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L161-L204 | train |
pschmitt/pykeepass | pykeepass/kdbx_parsing/twofish.py | BlockCipher.final | python

```python
def final(self,style='pkcs7'):
    # TODO: after calling final, reset the IV? so the cipher is as good as new?
    """Finalizes the encryption by padding the cache

    padfct = padding function
             import from CryptoPlus.Util.padding

    For ECB, CBC: the remaining bytes in the cache will be padded and
                  encrypted.
    For OFB,CFB, CTR: an encrypted padding will be returned, making the
                      total outputed bytes since construction of the cipher
                      a multiple of the blocksize of that cipher.

    If the cipher has been used for decryption, the final function won't do
    anything. You have to manually unpad if necessary.

    After finalization, the chain can still be used but the IV, counter etc
    aren't reset but just continue as they were after the last step (finalization step).
    """
    assert self.mode not in (MODE_XTS, MODE_CMAC)  # finalizing (=padding) doesn't make sense when in XTS or CMAC mode
    if self.ed == b'e':
        # when the chain is in encryption mode, finalizing will pad the cache and encrypt this last block
        if self.mode in (MODE_OFB,MODE_CFB,MODE_CTR):
            dummy = b'0'*(self.chain.totalbytes%self.blocksize)  # a dummy string that will be used to get a valid padding
        else:  # ECB, CBC
            dummy = self.chain.cache
        pdata = pad(dummy,self.blocksize,style=style)[len(dummy):]
        #~ pad = padfct(dummy,padding.PAD,self.blocksize)[len(dummy):] # construct the padding necessary
        return self.chain.update(pdata,b'e')  # supply the padding to the update function => chain cache will be "cache+padding"
    else:
        # final function doesn't make sense when decrypting => padding should be removed manually
        pass
```
85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/twofish.py#L206-L237 | train
pschmitt/pykeepass | pykeepass/baseelement.py | BaseElement._datetime_to_utc | python

```python
def _datetime_to_utc(self, dt):
    """Convert naive datetimes to UTC"""
    if not dt.tzinfo:
        dt = dt.replace(tzinfo=tz.gettz())
    return dt.astimezone(tz.gettz('UTC'))
```
"""Convert naive datetimes to UTC"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=tz.gettz())
return dt.astimezone(tz.gettz('UTC')) | [
"def",
"_datetime_to_utc",
"(",
"self",
",",
"dt",
")",
":",
"if",
"not",
"dt",
".",
"tzinfo",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
")",
")",
"return",
"dt",
".",
"astimezone",
"(",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")"
] | Convert naive datetimes to UTC | [
"Convert",
"naive",
"datetimes",
"to",
"UTC"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L92-L97 | train |
pschmitt/pykeepass | pykeepass/baseelement.py | BaseElement._encode_time | python

```python
def _encode_time(self, value):
    """Convert datetime to base64 or plaintext string"""
    if self._kp.version >= (4, 0):
        diff_seconds = int(
            (
                self._datetime_to_utc(value) -
                datetime(
                    year=1,
                    month=1,
                    day=1,
                    tzinfo=tz.gettz('UTC')
                )
            ).total_seconds()
        )
        return base64.b64encode(
            struct.pack('<Q', diff_seconds)
        ).decode('utf-8')
    else:
        return self._datetime_to_utc(value).isoformat()
```
"""Convert datetime to base64 or plaintext string"""
if self._kp.version >= (4, 0):
diff_seconds = int(
(
self._datetime_to_utc(value) -
datetime(
year=1,
month=1,
day=1,
tzinfo=tz.gettz('UTC')
)
).total_seconds()
)
return base64.b64encode(
struct.pack('<Q', diff_seconds)
).decode('utf-8')
else:
return self._datetime_to_utc(value).isoformat() | [
"def",
"_encode_time",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_kp",
".",
"version",
">=",
"(",
"4",
",",
"0",
")",
":",
"diff_seconds",
"=",
"int",
"(",
"(",
"self",
".",
"_datetime_to_utc",
"(",
"value",
")",
"-",
"datetime",
"(",
"year",
"=",
"1",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")",
")",
".",
"total_seconds",
"(",
")",
")",
"return",
"base64",
".",
"b64encode",
"(",
"struct",
".",
"pack",
"(",
"'<Q'",
",",
"diff_seconds",
")",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"return",
"self",
".",
"_datetime_to_utc",
"(",
"value",
")",
".",
"isoformat",
"(",
")"
] | Convert datetime to base64 or plaintext string | [
"Convert",
"datetime",
"to",
"base64",
"or",
"plaintext",
"string"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L99-L118 | train |
pschmitt/pykeepass | pykeepass/baseelement.py | BaseElement._decode_time | python

```python
def _decode_time(self, text):
    """Convert base64 time or plaintext time to datetime"""
    if self._kp.version >= (4, 0):
        # decode KDBX4 date from b64 format
        try:
            return (
                datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
                timedelta(
                    seconds = struct.unpack('<Q', base64.b64decode(text))[0]
                )
            )
        except BinasciiError:
            return parser.parse(
                text,
                tzinfos={'UTC':tz.gettz('UTC')}
            )
    else:
        return parser.parse(
            text,
            tzinfos={'UTC':tz.gettz('UTC')}
        )
```
"""Convert base64 time or plaintext time to datetime"""
if self._kp.version >= (4, 0):
# decode KDBX4 date from b64 format
try:
return (
datetime(year=1, month=1, day=1, tzinfo=tz.gettz('UTC')) +
timedelta(
seconds = struct.unpack('<Q', base64.b64decode(text))[0]
)
)
except BinasciiError:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
)
else:
return parser.parse(
text,
tzinfos={'UTC':tz.gettz('UTC')}
) | [
"def",
"_decode_time",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"_kp",
".",
"version",
">=",
"(",
"4",
",",
"0",
")",
":",
"# decode KDBX4 date from b64 format",
"try",
":",
"return",
"(",
"datetime",
"(",
"year",
"=",
"1",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
",",
"tzinfo",
"=",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"struct",
".",
"unpack",
"(",
"'<Q'",
",",
"base64",
".",
"b64decode",
"(",
"text",
")",
")",
"[",
"0",
"]",
")",
")",
"except",
"BinasciiError",
":",
"return",
"parser",
".",
"parse",
"(",
"text",
",",
"tzinfos",
"=",
"{",
"'UTC'",
":",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
"}",
")",
"else",
":",
"return",
"parser",
".",
"parse",
"(",
"text",
",",
"tzinfos",
"=",
"{",
"'UTC'",
":",
"tz",
".",
"gettz",
"(",
"'UTC'",
")",
"}",
")"
] | Convert base64 time or plaintext time to datetime | [
"Convert",
"base64",
"time",
"or",
"plaintext",
"time",
"to",
"datetime"
] | 85da3630d6e410b2a10d3e711cd69308b51d401d | https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/baseelement.py#L120-L141 | train |
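A self-contained sketch of the same KDBX4 round trip, using the standard-library timezone in place of dateutil:

```python
import base64
import struct
from datetime import datetime, timedelta, timezone

epoch = datetime(1, 1, 1, tzinfo=timezone.utc)   # KDBX4 stores seconds elapsed since year 1
dt = datetime(2019, 1, 1, tzinfo=timezone.utc)

encoded = base64.b64encode(struct.pack('<Q', int((dt - epoch).total_seconds()))).decode('utf-8')
decoded = epoch + timedelta(seconds=struct.unpack('<Q', base64.b64decode(encoded))[0])
assert decoded == dt
```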
thunder-project/thunder | thunder/images/readers.py | fromrdd | python

```python
def fromrdd(rdd, dims=None, nrecords=None, dtype=None, labels=None, ordered=False):
    """
    Load images from a Spark RDD.

    Input RDD must be a collection of key-value pairs
    where keys are singleton tuples indexing images,
    and values are 2d or 3d ndarrays.

    Parameters
    ----------
    rdd : SparkRDD
        An RDD containing the images.

    dims : tuple or array, optional, default = None
        Image dimensions (if provided will avoid check).

    nrecords : int, optional, default = None
        Number of images (if provided will avoid check).

    dtype : string, default = None
        Data numerical type (if provided will avoid check)

    labels : array, optional, default = None
        Labels for records. If provided, should be one-dimensional.

    ordered : boolean, optional, default = False
        Whether or not the rdd is ordered by key
    """
    from .images import Images
    from bolt.spark.array import BoltArraySpark

    if dims is None or dtype is None:
        item = rdd.values().first()
        dtype = item.dtype
        dims = item.shape

    if nrecords is None:
        nrecords = rdd.count()

    def process_keys(record):
        k, v = record
        if isinstance(k, int):
            k = (k,)
        return k, v

    values = BoltArraySpark(rdd.map(process_keys), shape=(nrecords,) + tuple(dims), dtype=dtype, split=1, ordered=ordered)
    return Images(values, labels=labels)
```
"""
Load images from a Spark RDD.
Input RDD must be a collection of key-value pairs
where keys are singleton tuples indexing images,
and values are 2d or 3d ndarrays.
Parameters
----------
rdd : SparkRDD
An RDD containing the images.
dims : tuple or array, optional, default = None
Image dimensions (if provided will avoid check).
nrecords : int, optional, default = None
Number of images (if provided will avoid check).
dtype : string, default = None
Data numerical type (if provided will avoid check)
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .images import Images
from bolt.spark.array import BoltArraySpark
if dims is None or dtype is None:
item = rdd.values().first()
dtype = item.dtype
dims = item.shape
if nrecords is None:
nrecords = rdd.count()
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=(nrecords,) + tuple(dims), dtype=dtype, split=1, ordered=ordered)
return Images(values, labels=labels) | [
"def",
"fromrdd",
"(",
"rdd",
",",
"dims",
"=",
"None",
",",
"nrecords",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"ordered",
"=",
"False",
")",
":",
"from",
".",
"images",
"import",
"Images",
"from",
"bolt",
".",
"spark",
".",
"array",
"import",
"BoltArraySpark",
"if",
"dims",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"item",
"=",
"rdd",
".",
"values",
"(",
")",
".",
"first",
"(",
")",
"dtype",
"=",
"item",
".",
"dtype",
"dims",
"=",
"item",
".",
"shape",
"if",
"nrecords",
"is",
"None",
":",
"nrecords",
"=",
"rdd",
".",
"count",
"(",
")",
"def",
"process_keys",
"(",
"record",
")",
":",
"k",
",",
"v",
"=",
"record",
"if",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"k",
"=",
"(",
"k",
",",
")",
"return",
"k",
",",
"v",
"values",
"=",
"BoltArraySpark",
"(",
"rdd",
".",
"map",
"(",
"process_keys",
")",
",",
"shape",
"=",
"(",
"nrecords",
",",
")",
"+",
"tuple",
"(",
"dims",
")",
",",
"dtype",
"=",
"dtype",
",",
"split",
"=",
"1",
",",
"ordered",
"=",
"ordered",
")",
"return",
"Images",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] | Load images from a Spark RDD.
Input RDD must be a collection of key-value pairs
where keys are singleton tuples indexing images,
and values are 2d or 3d ndarrays.
Parameters
----------
rdd : SparkRDD
An RDD containing the images.
dims : tuple or array, optional, default = None
Image dimensions (if provided will avoid check).
nrecords : int, optional, default = None
Number of images (if provided will avoid check).
dtype : string, default = None
Data numerical type (if provided will avoid check)
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key | [
"Load",
"images",
"from",
"a",
"Spark",
"RDD",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L10-L56 | train |
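A hedged sketch of building a compatible RDD; it assumes a live SparkContext `sc` and a list `frames` of equally shaped numpy arrays:

```python
rdd = sc.parallelize(enumerate(frames)).map(lambda kv: ((kv[0],), kv[1]))   # singleton-tuple keys
imgs = fromrdd(rdd, dims=frames[0].shape, dtype=str(frames[0].dtype), nrecords=len(frames))
```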
thunder-project/thunder | thunder/images/readers.py | fromarray | python

```python
def fromarray(values, labels=None, npartitions=None, engine=None):
    """
    Load images from an array.

    First dimension will be used to index images,
    so remaining dimensions after the first should
    be the dimensions of the images,
    e.g. (3, 100, 200) for 3 x (100, 200) images

    Parameters
    ----------
    values : array-like
        The array of images. Can be a numpy array,
        a bolt array, or an array-like.

    labels : array, optional, default = None
        Labels for records. If provided, should be one-dimensional.

    npartitions : int, default = None
        Number of partitions for parallelization (spark only)

    engine : object, default = None
        Computational engine (e.g. a SparkContext for spark)
    """
    from .images import Images
    import bolt

    if isinstance(values, bolt.spark.array.BoltArraySpark):
        return Images(values)

    values = asarray(values)

    if values.ndim < 2:
        raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)

    if values.ndim == 2:
        values = expand_dims(values, 0)

    shape = None
    dtype = None
    for im in values:
        if shape is None:
            shape = im.shape
            dtype = im.dtype
        if not im.shape == shape:
            raise ValueError('Arrays must all be of same shape; got both %s and %s' %
                             (str(shape), str(im.shape)))
        if not im.dtype == dtype:
            raise ValueError('Arrays must all be of same data type; got both %s and %s' %
                             (str(dtype), str(im.dtype)))

    if spark and isinstance(engine, spark):
        if not npartitions:
            npartitions = engine.defaultParallelism
        values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
        values._ordered = True
        return Images(values)

    return Images(values, labels=labels)
```
"""
Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark)
"""
from .images import Images
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Images(values)
values = asarray(values)
if values.ndim < 2:
raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)
if values.ndim == 2:
values = expand_dims(values, 0)
shape = None
dtype = None
for im in values:
if shape is None:
shape = im.shape
dtype = im.dtype
if not im.shape == shape:
raise ValueError('Arrays must all be of same shape; got both %s and %s' %
(str(shape), str(im.shape)))
if not im.dtype == dtype:
raise ValueError('Arrays must all be of same data type; got both %s and %s' %
(str(dtype), str(im.dtype)))
if spark and isinstance(engine, spark):
if not npartitions:
npartitions = engine.defaultParallelism
values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
values._ordered = True
return Images(values)
return Images(values, labels=labels) | [
"def",
"fromarray",
"(",
"values",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"from",
".",
"images",
"import",
"Images",
"import",
"bolt",
"if",
"isinstance",
"(",
"values",
",",
"bolt",
".",
"spark",
".",
"array",
".",
"BoltArraySpark",
")",
":",
"return",
"Images",
"(",
"values",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"values",
".",
"ndim",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'Array for images must have at least 2 dimensions, got %g'",
"%",
"values",
".",
"ndim",
")",
"if",
"values",
".",
"ndim",
"==",
"2",
":",
"values",
"=",
"expand_dims",
"(",
"values",
",",
"0",
")",
"shape",
"=",
"None",
"dtype",
"=",
"None",
"for",
"im",
"in",
"values",
":",
"if",
"shape",
"is",
"None",
":",
"shape",
"=",
"im",
".",
"shape",
"dtype",
"=",
"im",
".",
"dtype",
"if",
"not",
"im",
".",
"shape",
"==",
"shape",
":",
"raise",
"ValueError",
"(",
"'Arrays must all be of same shape; got both %s and %s'",
"%",
"(",
"str",
"(",
"shape",
")",
",",
"str",
"(",
"im",
".",
"shape",
")",
")",
")",
"if",
"not",
"im",
".",
"dtype",
"==",
"dtype",
":",
"raise",
"ValueError",
"(",
"'Arrays must all be of same data type; got both %s and %s'",
"%",
"(",
"str",
"(",
"dtype",
")",
",",
"str",
"(",
"im",
".",
"dtype",
")",
")",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"values",
"=",
"bolt",
".",
"array",
"(",
"values",
",",
"context",
"=",
"engine",
",",
"npartitions",
"=",
"npartitions",
",",
"axis",
"=",
"(",
"0",
",",
")",
")",
"values",
".",
"_ordered",
"=",
"True",
"return",
"Images",
"(",
"values",
")",
"return",
"Images",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] | Load images from an array.
First dimension will be used to index images,
so remaining dimensions after the first should
be the dimensions of the images,
e.g. (3, 100, 200) for 3 x (100, 200) images
Parameters
----------
values : array-like
The array of images. Can be a numpy array,
a bolt array, or an array-like.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int, default = None
Number of partitions for parallelization (spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for spark) | [
"Load",
"images",
"from",
"an",
"array",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L58-L116 | train |
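A local-mode example (no engine passed, so the plain numpy branch at the end of the function is taken); the array values here are arbitrary:

```python
import numpy as np

stack = np.random.rand(3, 100, 200)   # 3 images of shape (100, 200), as in the docstring
imgs = fromarray(stack)               # wraps the stack in an Images object
```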
thunder-project/thunder | thunder/images/readers.py | fromlist | def fromlist(items, accessor=None, keys=None, dims=None, dtype=None, labels=None, npartitions=None, engine=None):
"""
Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine.
"""
if spark and isinstance(engine, spark):
nrecords = len(items)
if keys:
items = zip(keys, items)
else:
keys = [(i,) for i in range(nrecords)]
items = zip(keys, items)
if not npartitions:
npartitions = engine.defaultParallelism
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
items = asarray([accessor(i) for i in items])
return fromarray(items, labels=labels) | python | def fromlist(items, accessor=None, keys=None, dims=None, dtype=None, labels=None, npartitions=None, engine=None):
"""
Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine.
"""
if spark and isinstance(engine, spark):
nrecords = len(items)
if keys:
items = zip(keys, items)
else:
keys = [(i,) for i in range(nrecords)]
items = zip(keys, items)
if not npartitions:
npartitions = engine.defaultParallelism
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
items = asarray([accessor(i) for i in items])
return fromarray(items, labels=labels) | [
"def",
"fromlist",
"(",
"items",
",",
"accessor",
"=",
"None",
",",
"keys",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"nrecords",
"=",
"len",
"(",
"items",
")",
"if",
"keys",
":",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"else",
":",
"keys",
"=",
"[",
"(",
"i",
",",
")",
"for",
"i",
"in",
"range",
"(",
"nrecords",
")",
"]",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"rdd",
"=",
"engine",
".",
"parallelize",
"(",
"items",
",",
"npartitions",
")",
"if",
"accessor",
":",
"rdd",
"=",
"rdd",
".",
"mapValues",
"(",
"accessor",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"nrecords",
",",
"dims",
"=",
"dims",
",",
"dtype",
"=",
"dtype",
",",
"labels",
"=",
"labels",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"items",
"=",
"asarray",
"(",
"[",
"accessor",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"]",
")",
"return",
"fromarray",
"(",
"items",
",",
"labels",
"=",
"labels",
")"
] | Load images from a list of items using the given accessor.
Parameters
----------
accessor : function
Apply to each item from the list to yield an image.
keys : list, optional, default=None
An optional list of keys.
dims : tuple, optional, default=None
Specify a known image dimension to avoid computation.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
npartitions : int
Number of partitions for computational engine. | [
"Load",
"images",
"from",
"a",
"list",
"of",
"items",
"using",
"the",
"given",
"accessor",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L119-L157 | train |
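A sketch of the accessor pattern from the fromlist docstring above, in local mode; the items and accessor are invented for illustration. With engine=None the accessor is applied eagerly and the stacked result is handed to fromarray.

# hypothetical accessor turning each item into a 2d image
import numpy as np
from thunder.images.readers import fromlist

items = [0, 1, 2]
make = lambda i: np.full((64, 64), i, dtype='float64')
imgs = fromlist(items, accessor=make)   # local branch: asarray([make(i) for i in items])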
thunder-project/thunder | thunder/images/readers.py | frompath | def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels) | python | def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None):
"""
Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting.
"""
from thunder.readers import get_parallel_reader
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions)
if spark and isinstance(engine, spark):
if accessor:
data = data.flatMap(accessor)
if recount:
nrecords = None
def switch(record):
ary, idx = record
return (idx,), ary
data = data.values().zipWithIndex().map(switch)
else:
nrecords = reader.nfiles
return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True)
else:
if accessor:
data = [accessor(d) for d in data]
flattened = list(itertools.chain(*data))
values = [kv[1] for kv in flattened]
return fromarray(values, labels=labels) | [
"def",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"None",
",",
"ext",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"npartitions",
"=",
"None",
",",
"dims",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"recount",
"=",
"False",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"readers",
"import",
"get_parallel_reader",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"accessor",
":",
"data",
"=",
"data",
".",
"flatMap",
"(",
"accessor",
")",
"if",
"recount",
":",
"nrecords",
"=",
"None",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"data",
"=",
"data",
".",
"values",
"(",
")",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"else",
":",
"nrecords",
"=",
"reader",
".",
"nfiles",
"return",
"fromrdd",
"(",
"data",
",",
"nrecords",
"=",
"nrecords",
",",
"dims",
"=",
"dims",
",",
"dtype",
"=",
"dtype",
",",
"labels",
"=",
"labels",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"data",
"=",
"[",
"accessor",
"(",
"d",
")",
"for",
"d",
"in",
"data",
"]",
"flattened",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"data",
")",
")",
"values",
"=",
"[",
"kv",
"[",
"1",
"]",
"for",
"kv",
"in",
"flattened",
"]",
"return",
"fromarray",
"(",
"values",
",",
"labels",
"=",
"labels",
")"
] | Load images from a path using the given accessor.
Supports both local and remote filesystems.
Parameters
----------
accessor : function
Apply to each item after loading to yield an image.
ext : str, optional, default=None
File extension.
npartitions : int, optional, default=None
Number of partitions for computational engine,
if None will use default for engine.
dims : tuple, optional, default=None
Dimensions of images.
dtype : str, optional, default=None
Numerical type of images.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
start, stop : nonnegative int, optional, default=None
Indices of files to load, interpreted using Python slicing conventions.
recursive : boolean, optional, default=False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
recount : boolean, optional, default=False
Force subsequent record counting. | [
"Load",
"images",
"from",
"a",
"path",
"using",
"the",
"given",
"accessor",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L159-L221 | train |
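frompath hands each file to the accessor as an (index, byte buffer, filename) triple and, in local mode, flattens the (key, image) pairs the accessor yields. Below is a hypothetical accessor for .npy files, modeled on the TIF and PNG readers in the following records; the path and extension are placeholders.

# hypothetical accessor: each file is assumed to hold a saved .npy array
from io import BytesIO
import numpy as np
from thunder.images.readers import frompath

def getarray(idx_buffer_filename):
    idx, buf, _ = idx_buffer_filename
    yield (idx,), np.load(BytesIO(buf))   # one (key, image) pair per file

imgs = frompath('path/to/arrays', accessor=getarray, ext='npy')   # engine=None -> local handling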
thunder-project/thunder | thunder/images/readers.py | fromtif | def fromtif(path, ext='tif', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False):
"""
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes does not evenly divide the number of pages in a multi-page tiff, the remainder will
be discarded and a warning will be shown. If False, an error will be raised.
"""
from tifffile import TiffFile
if nplanes is not None and nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
def getarray(idx_buffer_filename):
idx, buf, fname = idx_buffer_filename
fbuf = BytesIO(buf)
tfh = TiffFile(fbuf)
ary = tfh.asarray()
pageCount = ary.shape[0]
if nplanes is not None:
extra = pageCount % nplanes
if extra:
if discard_extra:
pageCount = pageCount - extra
logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
else:
raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
fname))
values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
else:
values = [ary]
tfh.close()
if ary.ndim == 3:
values = [val.squeeze() for val in values]
nvals = len(values)
keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
return zip(keys, values)
recount = False if nplanes is None else True
data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions, recount=recount,
labels=labels, engine=engine, credentials=credentials)
if engine is not None and npartitions is not None and data.npartitions() < npartitions:
data = data.repartition(npartitions)
return data | python | def fromtif(path, ext='tif', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False):
"""
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes does not evenly divide the number of pages in a multi-page tiff, the remainder will
be discarded and a warning will be shown. If False, an error will be raised.
"""
from tifffile import TiffFile
if nplanes is not None and nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
def getarray(idx_buffer_filename):
idx, buf, fname = idx_buffer_filename
fbuf = BytesIO(buf)
tfh = TiffFile(fbuf)
ary = tfh.asarray()
pageCount = ary.shape[0]
if nplanes is not None:
extra = pageCount % nplanes
if extra:
if discard_extra:
pageCount = pageCount - extra
logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
else:
raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
fname))
values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
else:
values = [ary]
tfh.close()
if ary.ndim == 3:
values = [val.squeeze() for val in values]
nvals = len(values)
keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
return zip(keys, values)
recount = False if nplanes is None else True
data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions, recount=recount,
labels=labels, engine=engine, credentials=credentials)
if engine is not None and npartitions is not None and data.npartitions() < npartitions:
data = data.repartition(npartitions)
return data | [
"def",
"fromtif",
"(",
"path",
",",
"ext",
"=",
"'tif'",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"nplanes",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"discard_extra",
"=",
"False",
")",
":",
"from",
"tifffile",
"import",
"TiffFile",
"if",
"nplanes",
"is",
"not",
"None",
"and",
"nplanes",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'nplanes must be positive if passed, got %d'",
"%",
"nplanes",
")",
"def",
"getarray",
"(",
"idx_buffer_filename",
")",
":",
"idx",
",",
"buf",
",",
"fname",
"=",
"idx_buffer_filename",
"fbuf",
"=",
"BytesIO",
"(",
"buf",
")",
"tfh",
"=",
"TiffFile",
"(",
"fbuf",
")",
"ary",
"=",
"tfh",
".",
"asarray",
"(",
")",
"pageCount",
"=",
"ary",
".",
"shape",
"[",
"0",
"]",
"if",
"nplanes",
"is",
"not",
"None",
":",
"extra",
"=",
"pageCount",
"%",
"nplanes",
"if",
"extra",
":",
"if",
"discard_extra",
":",
"pageCount",
"=",
"pageCount",
"-",
"extra",
"logging",
".",
"getLogger",
"(",
"'thunder'",
")",
".",
"warn",
"(",
"'Ignored %d pages in file %s'",
"%",
"(",
"extra",
",",
"fname",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"nplanes '%d' does not evenly divide '%d in file %s'\"",
"%",
"(",
"nplanes",
",",
"pageCount",
",",
"fname",
")",
")",
"values",
"=",
"[",
"ary",
"[",
"i",
":",
"(",
"i",
"+",
"nplanes",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"pageCount",
",",
"nplanes",
")",
"]",
"else",
":",
"values",
"=",
"[",
"ary",
"]",
"tfh",
".",
"close",
"(",
")",
"if",
"ary",
".",
"ndim",
"==",
"3",
":",
"values",
"=",
"[",
"val",
".",
"squeeze",
"(",
")",
"for",
"val",
"in",
"values",
"]",
"nvals",
"=",
"len",
"(",
"values",
")",
"keys",
"=",
"[",
"(",
"idx",
"*",
"nvals",
"+",
"timepoint",
",",
")",
"for",
"timepoint",
"in",
"range",
"(",
"nvals",
")",
"]",
"return",
"zip",
"(",
"keys",
",",
"values",
")",
"recount",
"=",
"False",
"if",
"nplanes",
"is",
"None",
"else",
"True",
"data",
"=",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"getarray",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
",",
"recount",
"=",
"recount",
",",
"labels",
"=",
"labels",
",",
"engine",
"=",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"if",
"engine",
"is",
"not",
"None",
"and",
"npartitions",
"is",
"not",
"None",
"and",
"data",
".",
"npartitions",
"(",
")",
"<",
"npartitions",
":",
"data",
"=",
"data",
".",
"repartition",
"(",
"npartitions",
")",
"return",
"data"
] | Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
If True and nplanes does not evenly divide the number of pages in a multi-page tiff, the remainder will
be discarded and a warning will be shown. If False, an error will be raised. | [
"Loads",
"images",
"from",
"single",
"or",
"multi",
"-",
"page",
"TIF",
"files",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L323-L397 | train |
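A hypothetical call showing the nplanes/discard_extra options documented above; the path is a placeholder and the tifffile package must be available, since the reader parses buffers with TiffFile.

# split every multi-page TIF into volumes of 10 planes, dropping leftover pages with a warning
from thunder.images.readers import fromtif

data = fromtif('path/to/tifs', ext='tif', nplanes=10, discard_extra=True)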
thunder-project/thunder | thunder/images/readers.py | frompng | def frompng(path, ext='png', start=None, stop=None, recursive=False, npartitions=None, labels=None, engine=None, credentials=None):
"""
Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'png'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
"""
from scipy.misc import imread
def getarray(idx_buffer_filename):
idx, buf, _ = idx_buffer_filename
fbuf = BytesIO(buf)
yield (idx,), imread(fbuf)
return frompath(path, accessor=getarray, ext=ext, start=start,
stop=stop, recursive=recursive, npartitions=npartitions,
labels=labels, engine=engine, credentials=credentials) | python | def frompng(path, ext='png', start=None, stop=None, recursive=False, npartitions=None, labels=None, engine=None, credentials=None):
"""
Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'png'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
"""
from scipy.misc import imread
def getarray(idx_buffer_filename):
idx, buf, _ = idx_buffer_filename
fbuf = BytesIO(buf)
yield (idx,), imread(fbuf)
return frompath(path, accessor=getarray, ext=ext, start=start,
stop=stop, recursive=recursive, npartitions=npartitions,
labels=labels, engine=engine, credentials=credentials) | [
"def",
"frompng",
"(",
"path",
",",
"ext",
"=",
"'png'",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"npartitions",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"misc",
"import",
"imread",
"def",
"getarray",
"(",
"idx_buffer_filename",
")",
":",
"idx",
",",
"buf",
",",
"_",
"=",
"idx_buffer_filename",
"fbuf",
"=",
"BytesIO",
"(",
"buf",
")",
"yield",
"(",
"idx",
",",
")",
",",
"imread",
"(",
"fbuf",
")",
"return",
"frompath",
"(",
"path",
",",
"accessor",
"=",
"getarray",
",",
"ext",
"=",
"ext",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"recursive",
"=",
"recursive",
",",
"npartitions",
"=",
"npartitions",
",",
"labels",
"=",
"labels",
",",
"engine",
"=",
"engine",
",",
"credentials",
"=",
"credentials",
")"
] | Load images from PNG files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'png'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional. | [
"Load",
"images",
"from",
"PNG",
"files",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L399-L436 | train |
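A hypothetical call for the PNG reader; the path is a placeholder, and decoding relies on scipy.misc.imread, so an older SciPy is assumed.

# load the first 100 PNGs found recursively under a directory tree
from thunder.images.readers import frompng

data = frompng('path/to/pngs', ext='png', stop=100, recursive=True)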
thunder-project/thunder | thunder/images/readers.py | fromrandom | def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None):
"""
Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed.
"""
seed = hash(seed)
def generate(v):
random.seed(seed + v)
return random.randn(*shape[1:])
return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine) | python | def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None):
"""
Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed.
"""
seed = hash(seed)
def generate(v):
random.seed(seed + v)
return random.randn(*shape[1:])
return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine) | [
"def",
"fromrandom",
"(",
"shape",
"=",
"(",
"10",
",",
"50",
",",
"50",
")",
",",
"npartitions",
"=",
"1",
",",
"seed",
"=",
"42",
",",
"engine",
"=",
"None",
")",
":",
"seed",
"=",
"hash",
"(",
"seed",
")",
"def",
"generate",
"(",
"v",
")",
":",
"random",
".",
"seed",
"(",
"seed",
"+",
"v",
")",
"return",
"random",
".",
"randn",
"(",
"*",
"shape",
"[",
"1",
":",
"]",
")",
"return",
"fromlist",
"(",
"range",
"(",
"shape",
"[",
"0",
"]",
")",
",",
"accessor",
"=",
"generate",
",",
"npartitions",
"=",
"npartitions",
",",
"engine",
"=",
"engine",
")"
] | Generate random image data.
Parameters
----------
shape : tuple, optional, default=(10, 50, 50)
Dimensions of images.
npartitions : int, optional, default=1
Number of partitions.
seed : int, optional, default=42
Random seed. | [
"Generate",
"random",
"image",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L438-L459 | train |
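A minimal sketch of the random generator above; with engine=None the result wraps a plain numpy array, and the integer seed keeps the draw reproducible.

# ten random 50x50 images
from thunder.images.readers import fromrandom

data = fromrandom(shape=(10, 50, 50), seed=42)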
thunder-project/thunder | thunder/images/readers.py | fromexample | def fromexample(name=None, engine=None):
"""
Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
datasets = ['mouse', 'fish']
if name is None:
print('Available example image datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
path = 's3n://thunder-sample-data/images/' + name
if name == 'mouse':
data = frombinary(path=path, npartitions=1, order='F', engine=engine)
if name == 'fish':
data = fromtif(path=path, npartitions=1, engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
return data | python | def fromexample(name=None, engine=None):
"""
Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
datasets = ['mouse', 'fish']
if name is None:
print('Available example image datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
path = 's3n://thunder-sample-data/images/' + name
if name == 'mouse':
data = frombinary(path=path, npartitions=1, order='F', engine=engine)
if name == 'fish':
data = fromtif(path=path, npartitions=1, engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
return data | [
"def",
"fromexample",
"(",
"name",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"datasets",
"=",
"[",
"'mouse'",
",",
"'fish'",
"]",
"if",
"name",
"is",
"None",
":",
"print",
"(",
"'Availiable example image datasets'",
")",
"for",
"d",
"in",
"datasets",
":",
"print",
"(",
"'- '",
"+",
"d",
")",
"return",
"check_options",
"(",
"name",
",",
"datasets",
")",
"path",
"=",
"'s3n://thunder-sample-data/images/'",
"+",
"name",
"if",
"name",
"==",
"'mouse'",
":",
"data",
"=",
"frombinary",
"(",
"path",
"=",
"path",
",",
"npartitions",
"=",
"1",
",",
"order",
"=",
"'F'",
",",
"engine",
"=",
"engine",
")",
"if",
"name",
"==",
"'fish'",
":",
"data",
"=",
"fromtif",
"(",
"path",
"=",
"path",
",",
"npartitions",
"=",
"1",
",",
"engine",
"=",
"engine",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"data",
".",
"cache",
"(",
")",
"data",
".",
"compute",
"(",
")",
"return",
"data"
] | Load example image data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, if not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark) | [
"Load",
"example",
"image",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/readers.py#L461-L497 | train |
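A usage sketch for the example loader; it downloads from S3, so network access (and whichever remote reader the path resolves to) is assumed.

# list the bundled samples, then load one of them
from thunder.images.readers import fromexample

fromexample()               # with no name, prints the available dataset names
fish = fromexample('fish')  # loads the 'fish' TIF sample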
thunder-project/thunder | thunder/blocks/local.py | LocalChunks.unchunk | def unchunk(self):
"""
Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray
"""
if self.padding != len(self.shape)*(0,):
shape = self.values.shape
arr = empty(shape, dtype=object)
for inds in product(*[arange(s) for s in shape]):
slices = []
for i, p, n in zip(inds, self.padding, shape):
start = None if (i == 0 or p == 0) else p
stop = None if (i == n-1 or p == 0) else -p
slices.append(slice(start, stop, None))
arr[inds] = self.values[inds][tuple(slices)]
else:
arr = self.values
return allstack(arr.tolist()) | python | def unchunk(self):
"""
Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray
"""
if self.padding != len(self.shape)*(0,):
shape = self.values.shape
arr = empty(shape, dtype=object)
for inds in product(*[arange(s) for s in shape]):
slices = []
for i, p, n in zip(inds, self.padding, shape):
start = None if (i == 0 or p == 0) else p
stop = None if (i == n-1 or p == 0) else -p
slices.append(slice(start, stop, None))
arr[inds] = self.values[inds][tuple(slices)]
else:
arr = self.values
return allstack(arr.tolist()) | [
"def",
"unchunk",
"(",
"self",
")",
":",
"if",
"self",
".",
"padding",
"!=",
"len",
"(",
"self",
".",
"shape",
")",
"*",
"(",
"0",
",",
")",
":",
"shape",
"=",
"self",
".",
"values",
".",
"shape",
"arr",
"=",
"empty",
"(",
"shape",
",",
"dtype",
"=",
"object",
")",
"for",
"inds",
"in",
"product",
"(",
"*",
"[",
"arange",
"(",
"s",
")",
"for",
"s",
"in",
"shape",
"]",
")",
":",
"slices",
"=",
"[",
"]",
"for",
"i",
",",
"p",
",",
"n",
"in",
"zip",
"(",
"inds",
",",
"self",
".",
"padding",
",",
"shape",
")",
":",
"start",
"=",
"None",
"if",
"(",
"i",
"==",
"0",
"or",
"p",
"==",
"0",
")",
"else",
"p",
"stop",
"=",
"None",
"if",
"(",
"i",
"==",
"n",
"-",
"1",
"or",
"p",
"==",
"0",
")",
"else",
"-",
"p",
"slices",
".",
"append",
"(",
"slice",
"(",
"start",
",",
"stop",
",",
"None",
")",
")",
"arr",
"[",
"inds",
"]",
"=",
"self",
".",
"values",
"[",
"inds",
"]",
"[",
"tuple",
"(",
"slices",
")",
"]",
"else",
":",
"arr",
"=",
"self",
".",
"values",
"return",
"allstack",
"(",
"arr",
".",
"tolist",
"(",
")",
")"
] | Reconstitute the chunked array back into a full ndarray.
Returns
-------
ndarray | [
"Reconstitute",
"the",
"chunked",
"array",
"back",
"into",
"a",
"full",
"ndarray",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/local.py#L54-L75 | train |
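A round-trip sketch for unchunk, using the chunk constructor documented in the next record; the array and chunk sizes are arbitrary, and chunk is assumed to be callable directly on the class, as its signature (no self) suggests.

# chunk a small local array and reassemble it
import numpy as np
from thunder.blocks.local import LocalChunks

arr = np.arange(4 * 6 * 6).reshape(4, 6, 6)
chunks = LocalChunks.chunk(arr, chunk_size=(3, 3))   # 3x3 chunks over the non-record axes
restored = chunks.unchunk()                          # expected to equal arr exactly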
thunder-project/thunder | thunder/blocks/local.py | LocalChunks.chunk | def chunk(arr, chunk_size="150", padding=None):
"""
Create a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimensions for chunks. If an int, then
the same amount of padding is used for all dimensions
Returns
-------
LocalChunks
"""
plan, _ = LocalChunks.getplan(chunk_size, arr.shape[1:], arr.dtype)
plan = r_[arr.shape[0], plan]
if padding is None:
pad = arr.ndim*(0,)
elif isinstance(padding, int):
pad = (0,) + (arr.ndim-1)*(padding,)
else:
pad = (0,) + padding
shape = arr.shape
if any([x + y > z for x, y, z in zip(plan, pad, shape)]):
raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
% (tuple(plan), tuple(pad), tuple(shape)))
if any([x > y for x, y in zip(pad, plan)]):
raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
% (tuple(pad), tuple(plan)))
def rectify(x):
x[x<0] = 0
return x
breaks = [r_[arange(0, n, s), n] for n, s in zip(shape, plan)]
limits = [zip(rectify(b[:-1]-p), b[1:]+p) for b, p in zip(breaks, pad)]
slices = product(*[[slice(x[0], x[1]) for x in l] for l in limits])
vals = [arr[s] for s in slices]
newarr = empty(len(vals), dtype=object)
for i in range(len(vals)):
newarr[i] = vals[i]
newsize = [b.shape[0]-1 for b in breaks]
newarr = newarr.reshape(*newsize)
return LocalChunks(newarr, shape, plan, dtype=arr.dtype, padding=pad) | python | def chunk(arr, chunk_size="150", padding=None):
"""
Create a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimensions for chunks. If an int, then
the same amount of padding is used for all dimensions
Returns
-------
LocalChunks
"""
plan, _ = LocalChunks.getplan(chunk_size, arr.shape[1:], arr.dtype)
plan = r_[arr.shape[0], plan]
if padding is None:
pad = arr.ndim*(0,)
elif isinstance(padding, int):
pad = (0,) + (arr.ndim-1)*(padding,)
else:
pad = (0,) + padding
shape = arr.shape
if any([x + y > z for x, y, z in zip(plan, pad, shape)]):
raise ValueError("Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis"
% (tuple(plan), tuple(pad), tuple(shape)))
if any([x > y for x, y in zip(pad, plan)]):
raise ValueError("Padding sizes %s cannot exceed chunk sizes %s along any axis"
% (tuple(pad), tuple(plan)))
def rectify(x):
x[x<0] = 0
return x
breaks = [r_[arange(0, n, s), n] for n, s in zip(shape, plan)]
limits = [zip(rectify(b[:-1]-p), b[1:]+p) for b, p in zip(breaks, pad)]
slices = product(*[[slice(x[0], x[1]) for x in l] for l in limits])
vals = [arr[s] for s in slices]
newarr = empty(len(vals), dtype=object)
for i in range(len(vals)):
newarr[i] = vals[i]
newsize = [b.shape[0]-1 for b in breaks]
newarr = newarr.reshape(*newsize)
return LocalChunks(newarr, shape, plan, dtype=arr.dtype, padding=pad) | [
"def",
"chunk",
"(",
"arr",
",",
"chunk_size",
"=",
"\"150\"",
",",
"padding",
"=",
"None",
")",
":",
"plan",
",",
"_",
"=",
"LocalChunks",
".",
"getplan",
"(",
"chunk_size",
",",
"arr",
".",
"shape",
"[",
"1",
":",
"]",
",",
"arr",
".",
"dtype",
")",
"plan",
"=",
"r_",
"[",
"arr",
".",
"shape",
"[",
"0",
"]",
",",
"plan",
"]",
"if",
"padding",
"is",
"None",
":",
"pad",
"=",
"arr",
".",
"ndim",
"*",
"(",
"0",
",",
")",
"elif",
"isinstance",
"(",
"padding",
",",
"int",
")",
":",
"pad",
"=",
"(",
"0",
",",
")",
"+",
"(",
"arr",
".",
"ndim",
"-",
"1",
")",
"*",
"(",
"padding",
",",
")",
"else",
":",
"pad",
"=",
"(",
"0",
",",
")",
"+",
"padding",
"shape",
"=",
"arr",
".",
"shape",
"if",
"any",
"(",
"[",
"x",
"+",
"y",
">",
"z",
"for",
"x",
",",
"y",
",",
"z",
"in",
"zip",
"(",
"plan",
",",
"pad",
",",
"shape",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Chunk sizes %s plus padding sizes %s cannot exceed value dimensions %s along any axis\"",
"%",
"(",
"tuple",
"(",
"plan",
")",
",",
"tuple",
"(",
"pad",
")",
",",
"tuple",
"(",
"shape",
")",
")",
")",
"if",
"any",
"(",
"[",
"x",
">",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"pad",
",",
"plan",
")",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Padding sizes %s cannot exceed chunk sizes %s along any axis\"",
"%",
"(",
"tuple",
"(",
"pad",
")",
",",
"tuple",
"(",
"plan",
")",
")",
")",
"def",
"rectify",
"(",
"x",
")",
":",
"x",
"[",
"x",
"<",
"0",
"]",
"=",
"0",
"return",
"x",
"breaks",
"=",
"[",
"r_",
"[",
"arange",
"(",
"0",
",",
"n",
",",
"s",
")",
",",
"n",
"]",
"for",
"n",
",",
"s",
"in",
"zip",
"(",
"shape",
",",
"plan",
")",
"]",
"limits",
"=",
"[",
"zip",
"(",
"rectify",
"(",
"b",
"[",
":",
"-",
"1",
"]",
"-",
"p",
")",
",",
"b",
"[",
"1",
":",
"]",
"+",
"p",
")",
"for",
"b",
",",
"p",
"in",
"zip",
"(",
"breaks",
",",
"pad",
")",
"]",
"slices",
"=",
"product",
"(",
"*",
"[",
"[",
"slice",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
")",
"for",
"x",
"in",
"l",
"]",
"for",
"l",
"in",
"limits",
"]",
")",
"vals",
"=",
"[",
"arr",
"[",
"s",
"]",
"for",
"s",
"in",
"slices",
"]",
"newarr",
"=",
"empty",
"(",
"len",
"(",
"vals",
")",
",",
"dtype",
"=",
"object",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"vals",
")",
")",
":",
"newarr",
"[",
"i",
"]",
"=",
"vals",
"[",
"i",
"]",
"newsize",
"=",
"[",
"b",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
"for",
"b",
"in",
"breaks",
"]",
"newarr",
"=",
"newarr",
".",
"reshape",
"(",
"*",
"newsize",
")",
"return",
"LocalChunks",
"(",
"newarr",
",",
"shape",
",",
"plan",
",",
"dtype",
"=",
"arr",
".",
"dtype",
",",
"padding",
"=",
"pad",
")"
] | Create a chunked array from a full array and a chunk size.
Parameters
----------
array : ndarray
Array that will be broken into chunks
chunk_size : string or tuple, default = '150'
Size of each image chunk.
If a str, size of memory footprint in KB.
If a tuple, then the dimensions of each chunk.
If an int, then all dimensions will use this number
padding : tuple or int
Amount of padding along each dimension for chunks. If an int, then
the same amount of padding is used for all dimensions
Returns
-------
LocalChunks | [
"Created",
"a",
"chunked",
"array",
"from",
"a",
"full",
"array",
"and",
"a",
"chunk",
"size",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/local.py#L121-L178 | train |
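A padded variant of the same round trip; padding=1 gives each chunk one element of overlap along the non-record axes, and unchunk trims that overlap away before reassembly. The array is arbitrary.

# padded chunking: overlap is added when splitting and removed when reassembling
import numpy as np
from thunder.blocks.local import LocalChunks

arr = np.arange(4 * 6 * 6).reshape(4, 6, 6)
padded = LocalChunks.chunk(arr, chunk_size=(3, 3), padding=1)
assert np.array_equal(arr, padded.unchunk())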
thunder-project/thunder | thunder/base.py | Data.filter | def filter(self, func):
"""
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
"""
if self.mode == 'local':
reshaped = self._align(self.baseaxes)
filtered = asarray(list(filter(func, reshaped)))
if self.labels is not None:
mask = asarray(list(map(func, reshaped)))
if self.mode == 'spark':
sort = False if self.labels is None else True
filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
if self.labels is not None:
keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
perm = sorted(range(len(keys)), key=keys.__getitem__)
mask = asarray(vals)[perm]
if self.labels is not None:
s1 = prod(self.baseshape)
newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
else:
newlabels = None
return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',)) | python | def filter(self, func):
"""
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
"""
if self.mode == 'local':
reshaped = self._align(self.baseaxes)
filtered = asarray(list(filter(func, reshaped)))
if self.labels is not None:
mask = asarray(list(map(func, reshaped)))
if self.mode == 'spark':
sort = False if self.labels is None else True
filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
if self.labels is not None:
keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
perm = sorted(range(len(keys)), key=keys.__getitem__)
mask = asarray(vals)[perm]
if self.labels is not None:
s1 = prod(self.baseshape)
newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
else:
newlabels = None
return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',)) | [
"def",
"filter",
"(",
"self",
",",
"func",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"self",
".",
"baseaxes",
")",
"filtered",
"=",
"asarray",
"(",
"list",
"(",
"filter",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"mask",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"sort",
"=",
"False",
"if",
"self",
".",
"labels",
"is",
"None",
"else",
"True",
"filtered",
"=",
"self",
".",
"values",
".",
"filter",
"(",
"func",
",",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"sort",
"=",
"sort",
")",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"keys",
",",
"vals",
"=",
"zip",
"(",
"*",
"self",
".",
"values",
".",
"map",
"(",
"func",
",",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"value_shape",
"=",
"(",
"1",
",",
")",
")",
".",
"tordd",
"(",
")",
".",
"collect",
"(",
")",
")",
"perm",
"=",
"sorted",
"(",
"range",
"(",
"len",
"(",
"keys",
")",
")",
",",
"key",
"=",
"keys",
".",
"__getitem__",
")",
"mask",
"=",
"asarray",
"(",
"vals",
")",
"[",
"perm",
"]",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"s1",
"=",
"prod",
"(",
"self",
".",
"baseshape",
")",
"newlabels",
"=",
"self",
".",
"labels",
".",
"reshape",
"(",
"s1",
",",
"1",
")",
"[",
"mask",
"]",
".",
"squeeze",
"(",
")",
"else",
":",
"newlabels",
"=",
"None",
"return",
"self",
".",
"_constructor",
"(",
"filtered",
",",
"labels",
"=",
"newlabels",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'labels'",
",",
")",
")"
] | Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean | [
"Filter",
"array",
"along",
"an",
"axis",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L372-L410 | train |
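A local-mode sketch of filter on image data, assuming Images inherits this method from Data as the class hierarchy suggests; the predicate and threshold are arbitrary.

# keep only the images whose mean intensity is positive
from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 50, 50), seed=42)
bright = imgs.filter(lambda im: im.mean() > 0)   # the function maps one image to a boolean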
thunder-project/thunder | thunder/base.py | Data.map | def map(self, func, value_shape=None, dtype=None, with_keys=False):
"""
Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known shape of dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function
"""
axis = self.baseaxes
if self.mode == 'local':
axes = sorted(tupleize(axis))
key_shape = [self.shape[axis] for axis in axes]
reshaped = self._align(axes, key_shape=key_shape)
if with_keys:
keys = zip(*unravel_index(range(prod(key_shape)), key_shape))
mapped = asarray(list(map(func, zip(keys, reshaped))))
else:
mapped = asarray(list(map(func, reshaped)))
try:
elem_shape = mapped[0].shape
except:
elem_shape = (1,)
expand = list(elem_shape)
expand = [1] if len(expand) == 0 else expand
# invert the previous reshape operation, using the shape of the map result
linearized_shape_inv = key_shape + expand
reordered = mapped.reshape(*linearized_shape_inv)
return self._constructor(reordered, mode=self.mode).__finalize__(self, noprop=('index'))
if self.mode == 'spark':
expand = lambda x: array(func(x), ndmin=1)
mapped = self.values.map(expand, axis, value_shape, dtype, with_keys)
return self._constructor(mapped, mode=self.mode).__finalize__(self, noprop=('index',)) | python | def map(self, func, value_shape=None, dtype=None, with_keys=False):
"""
Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known shape of dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function
"""
axis = self.baseaxes
if self.mode == 'local':
axes = sorted(tupleize(axis))
key_shape = [self.shape[axis] for axis in axes]
reshaped = self._align(axes, key_shape=key_shape)
if with_keys:
keys = zip(*unravel_index(range(prod(key_shape)), key_shape))
mapped = asarray(list(map(func, zip(keys, reshaped))))
else:
mapped = asarray(list(map(func, reshaped)))
try:
elem_shape = mapped[0].shape
except:
elem_shape = (1,)
expand = list(elem_shape)
expand = [1] if len(expand) == 0 else expand
# invert the previous reshape operation, using the shape of the map result
linearized_shape_inv = key_shape + expand
reordered = mapped.reshape(*linearized_shape_inv)
return self._constructor(reordered, mode=self.mode).__finalize__(self, noprop=('index'))
if self.mode == 'spark':
expand = lambda x: array(func(x), ndmin=1)
mapped = self.values.map(expand, axis, value_shape, dtype, with_keys)
return self._constructor(mapped, mode=self.mode).__finalize__(self, noprop=('index',)) | [
"def",
"map",
"(",
"self",
",",
"func",
",",
"value_shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"with_keys",
"=",
"False",
")",
":",
"axis",
"=",
"self",
".",
"baseaxes",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"axes",
"=",
"sorted",
"(",
"tupleize",
"(",
"axis",
")",
")",
"key_shape",
"=",
"[",
"self",
".",
"shape",
"[",
"axis",
"]",
"for",
"axis",
"in",
"axes",
"]",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"axes",
",",
"key_shape",
"=",
"key_shape",
")",
"if",
"with_keys",
":",
"keys",
"=",
"zip",
"(",
"*",
"unravel_index",
"(",
"range",
"(",
"prod",
"(",
"key_shape",
")",
")",
",",
"key_shape",
")",
")",
"mapped",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"zip",
"(",
"keys",
",",
"reshaped",
")",
")",
")",
")",
"else",
":",
"mapped",
"=",
"asarray",
"(",
"list",
"(",
"map",
"(",
"func",
",",
"reshaped",
")",
")",
")",
"try",
":",
"elem_shape",
"=",
"mapped",
"[",
"0",
"]",
".",
"shape",
"except",
":",
"elem_shape",
"=",
"(",
"1",
",",
")",
"expand",
"=",
"list",
"(",
"elem_shape",
")",
"expand",
"=",
"[",
"1",
"]",
"if",
"len",
"(",
"expand",
")",
"==",
"0",
"else",
"expand",
"# invert the previous reshape operation, using the shape of the map result",
"linearized_shape_inv",
"=",
"key_shape",
"+",
"expand",
"reordered",
"=",
"mapped",
".",
"reshape",
"(",
"*",
"linearized_shape_inv",
")",
"return",
"self",
".",
"_constructor",
"(",
"reordered",
",",
"mode",
"=",
"self",
".",
"mode",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'index'",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"expand",
"=",
"lambda",
"x",
":",
"array",
"(",
"func",
"(",
"x",
")",
",",
"ndmin",
"=",
"1",
")",
"mapped",
"=",
"self",
".",
"values",
".",
"map",
"(",
"expand",
",",
"axis",
",",
"value_shape",
",",
"dtype",
",",
"with_keys",
")",
"return",
"self",
".",
"_constructor",
"(",
"mapped",
",",
"mode",
"=",
"self",
".",
"mode",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'index'",
",",
")",
")"
] | Apply an array -> array function across an axis.
Array will be aligned so that the desired set of axes
are in the keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function of a single array to apply. If with_keys=True,
function should be of a (tuple, array) pair.
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to apply function along.
value_shape : tuple, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy dtype, optional, default=None
Known shape of dtype resulting from operation. Only
valid in spark mode.
with_keys : bool, optional, default=False
Include keys as an argument to the function | [
"Apply",
"an",
"array",
"-",
">",
"array",
"function",
"across",
"an",
"axis",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L412-L469 | train |
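A local-mode sketch of map under the same assumption that Images inherits it from Data; the transformation is arbitrary.

# apply a per-record transformation (here a simple clip)
import numpy as np
from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 50, 50), seed=42)
clipped = imgs.map(lambda im: np.clip(im, 0, None))   # one array in, one array out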
thunder-project/thunder | thunder/base.py | Data._reduce | def _reduce(self, func, axis=0):
"""
Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along.
"""
if self.mode == 'local':
axes = sorted(tupleize(axis))
# if the function is a ufunc, it can automatically handle reducing over multiple axes
if isinstance(func, ufunc):
inshape(self.shape, axes)
reduced = func.reduce(self, axis=tuple(axes))
else:
reshaped = self._align(axes)
reduced = reduce(func, reshaped)
# ensure that the shape of the reduced array is valid
expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]
if reduced.shape != tuple(expected_shape):
raise ValueError("reduce did not yield an array with valid dimensions")
return self._constructor(reduced[newaxis, :]).__finalize__(self)
if self.mode == 'spark':
reduced = self.values.reduce(func, axis, keepdims=True)
return self._constructor(reduced).__finalize__(self) | python | def _reduce(self, func, axis=0):
"""
Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along.
"""
if self.mode == 'local':
axes = sorted(tupleize(axis))
# if the function is a ufunc, it can automatically handle reducing over multiple axes
if isinstance(func, ufunc):
inshape(self.shape, axes)
reduced = func.reduce(self, axis=tuple(axes))
else:
reshaped = self._align(axes)
reduced = reduce(func, reshaped)
# ensure that the shape of the reduced array is valid
expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]
if reduced.shape != tuple(expected_shape):
raise ValueError("reduce did not yield an array with valid dimensions")
return self._constructor(reduced[newaxis, :]).__finalize__(self)
if self.mode == 'spark':
reduced = self.values.reduce(func, axis, keepdims=True)
return self._constructor(reduced).__finalize__(self) | [
"def",
"_reduce",
"(",
"self",
",",
"func",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"axes",
"=",
"sorted",
"(",
"tupleize",
"(",
"axis",
")",
")",
"# if the function is a ufunc, it can automatically handle reducing over multiple axes",
"if",
"isinstance",
"(",
"func",
",",
"ufunc",
")",
":",
"inshape",
"(",
"self",
".",
"shape",
",",
"axes",
")",
"reduced",
"=",
"func",
".",
"reduce",
"(",
"self",
",",
"axis",
"=",
"tuple",
"(",
"axes",
")",
")",
"else",
":",
"reshaped",
"=",
"self",
".",
"_align",
"(",
"axes",
")",
"reduced",
"=",
"reduce",
"(",
"func",
",",
"reshaped",
")",
"# ensure that the shape of the reduced array is valid",
"expected_shape",
"=",
"[",
"self",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"shape",
")",
")",
"if",
"i",
"not",
"in",
"axes",
"]",
"if",
"reduced",
".",
"shape",
"!=",
"tuple",
"(",
"expected_shape",
")",
":",
"raise",
"ValueError",
"(",
"\"reduce did not yield an array with valid dimensions\"",
")",
"return",
"self",
".",
"_constructor",
"(",
"reduced",
"[",
"newaxis",
",",
":",
"]",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"reduced",
"=",
"self",
".",
"values",
".",
"reduce",
"(",
"func",
",",
"axis",
",",
"keepdims",
"=",
"True",
")",
"return",
"self",
".",
"_constructor",
"(",
"reduced",
")",
".",
"__finalize__",
"(",
"self",
")"
] | Reduce an array along an axis.
Applies an associative/commutative function of two arguments
cumulatively to all arrays along an axis. Array will be aligned
so that the desired set of axes are in the keys, which may
require a transpose/reshape.
Parameters
----------
func : function
Function of two arrays that returns a single array
axis : tuple or int, optional, default=(0,)
Axis or multiple axes to reduce along. | [
"Reduce",
"an",
"array",
"along",
"an",
"axis",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L471-L508 | train |
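A sketch of calling the private _reduce helper directly with a non-ufunc reducer; public reductions would normally wrap this, so direct use is only for illustration.

# maximum projection across images
import numpy as np
from thunder.images.readers import fromrandom

imgs = fromrandom(shape=(10, 50, 50), seed=42)
maxproj = imgs._reduce(lambda a, b: np.maximum(a, b), axis=0)   # result keeps a singleton leading axis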
thunder-project/thunder | thunder/base.py | Data.element_wise | def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self) | python | def element_wise(self, other, op):
"""
Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract
"""
if not isscalar(other) and not self.shape == other.shape:
raise ValueError("shapes %s and %s must be equal" % (self.shape, other.shape))
if not isscalar(other) and isinstance(other, Data) and not self.mode == other.mode:
raise NotImplementedError
if isscalar(other):
return self.map(lambda x: op(x, other))
if self.mode == 'local' and isinstance(other, ndarray):
return self._constructor(op(self.values, other)).__finalize__(self)
if self.mode == 'local' and isinstance(other, Data):
return self._constructor(op(self.values, other.values)).__finalize__(self)
if self.mode == 'spark' and isinstance(other, Data):
def func(record):
(k1, x), (k2, y) = record
return k1, op(x, y)
rdd = self.tordd().zip(other.tordd()).map(func)
barray = BoltArraySpark(rdd, shape=self.shape, dtype=self.dtype, split=self.values.split)
return self._constructor(barray).__finalize__(self) | [
"def",
"element_wise",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"if",
"not",
"isscalar",
"(",
"other",
")",
"and",
"not",
"self",
".",
"shape",
"==",
"other",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"shapes %s and %s must be equal\"",
"%",
"(",
"self",
".",
"shape",
",",
"other",
".",
"shape",
")",
")",
"if",
"not",
"isscalar",
"(",
"other",
")",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
"and",
"not",
"self",
".",
"mode",
"==",
"other",
".",
"mode",
":",
"raise",
"NotImplementedError",
"if",
"isscalar",
"(",
"other",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"op",
"(",
"x",
",",
"other",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"ndarray",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"op",
"(",
"self",
".",
"values",
",",
"other",
")",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"op",
"(",
"self",
".",
"values",
",",
"other",
".",
"values",
")",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
"and",
"isinstance",
"(",
"other",
",",
"Data",
")",
":",
"def",
"func",
"(",
"record",
")",
":",
"(",
"k1",
",",
"x",
")",
",",
"(",
"k2",
",",
"y",
")",
"=",
"record",
"return",
"k1",
",",
"op",
"(",
"x",
",",
"y",
")",
"rdd",
"=",
"self",
".",
"tordd",
"(",
")",
".",
"zip",
"(",
"other",
".",
"tordd",
"(",
")",
")",
".",
"map",
"(",
"func",
")",
"barray",
"=",
"BoltArraySpark",
"(",
"rdd",
",",
"shape",
"=",
"self",
".",
"shape",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"split",
"=",
"self",
".",
"values",
".",
"split",
")",
"return",
"self",
".",
"_constructor",
"(",
"barray",
")",
".",
"__finalize__",
"(",
"self",
")"
] | Apply an elementwise operation to data.
Both self and other data must have the same mode.
If self is in local mode, other can also be a numpy array.
Self and other must have the same shape, or other must be a scalar.
Parameters
----------
other : Data or numpy array
Data to apply elementwise operation to
op : function
Binary operator to use for elementwise operations, e.g. add, subtract | [
"Apply",
"an",
"elementwise",
"operation",
"to",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L510-L549 | train |
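A hedged usage sketch of element_wise in local mode; it assumes that Series objects built by fromarray (defined in thunder/series/readers.py below) inherit the Data methods shown above.

from operator import add
from numpy import ones, full
from thunder.series.readers import fromarray

a = fromarray(ones((2, 4)))
b = fromarray(full((2, 4), 2.0))
summed = a.element_wise(b, add)      # both local: op applied to the underlying arrays
shifted = a.element_wise(10.0, add)  # scalar other: handled through map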
thunder-project/thunder | thunder/base.py | Data.clip | def clip(self, min=None, max=None):
"""
Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted.
"""
return self._constructor(
self.values.clip(min=min, max=max)).__finalize__(self) | python | def clip(self, min=None, max=None):
"""
Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted.
"""
return self._constructor(
self.values.clip(min=min, max=max)).__finalize__(self) | [
"def",
"clip",
"(",
"self",
",",
"min",
"=",
"None",
",",
"max",
"=",
"None",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"clip",
"(",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")",
")",
".",
"__finalize__",
"(",
"self",
")"
] | Clip values above and below.
Parameters
----------
min : scalar or array-like
Minimum value. If array, will be broadcasted
max : scalar or array-like
Maximum value. If array, will be broadcasted. | [
"Clip",
"values",
"above",
"and",
"below",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/base.py#L575-L588 | train |
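A small local-mode sketch of clip; the input values are made up.

from numpy import array
from thunder.series.readers import fromarray

s = fromarray(array([[-2.0, 0.5, 3.0]]))
clipped = s.clip(min=0, max=1)   # values become [[0.0, 0.5, 1.0]]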
thunder-project/thunder | thunder/series/readers.py | fromrdd | def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
"""
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .series import Series
from bolt.spark.array import BoltArraySpark
if index is None or dtype is None:
item = rdd.values().first()
if index is None:
index = range(len(item))
if dtype is None:
dtype = item.dtype
if nrecords is None and shape is not None:
nrecords = prod(shape[:-1])
if nrecords is None:
nrecords = rdd.count()
if shape is None:
shape = (nrecords, asarray(index).shape[0])
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
return Series(values, index=index, labels=labels) | python | def fromrdd(rdd, nrecords=None, shape=None, index=None, labels=None, dtype=None, ordered=False):
"""
Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key
"""
from .series import Series
from bolt.spark.array import BoltArraySpark
if index is None or dtype is None:
item = rdd.values().first()
if index is None:
index = range(len(item))
if dtype is None:
dtype = item.dtype
if nrecords is None and shape is not None:
nrecords = prod(shape[:-1])
if nrecords is None:
nrecords = rdd.count()
if shape is None:
shape = (nrecords, asarray(index).shape[0])
def process_keys(record):
k, v = record
if isinstance(k, int):
k = (k,)
return k, v
values = BoltArraySpark(rdd.map(process_keys), shape=shape, dtype=dtype, split=len(shape)-1, ordered=ordered)
return Series(values, index=index, labels=labels) | [
"def",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"ordered",
"=",
"False",
")",
":",
"from",
".",
"series",
"import",
"Series",
"from",
"bolt",
".",
"spark",
".",
"array",
"import",
"BoltArraySpark",
"if",
"index",
"is",
"None",
"or",
"dtype",
"is",
"None",
":",
"item",
"=",
"rdd",
".",
"values",
"(",
")",
".",
"first",
"(",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"range",
"(",
"len",
"(",
"item",
")",
")",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"item",
".",
"dtype",
"if",
"nrecords",
"is",
"None",
"and",
"shape",
"is",
"not",
"None",
":",
"nrecords",
"=",
"prod",
"(",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"if",
"nrecords",
"is",
"None",
":",
"nrecords",
"=",
"rdd",
".",
"count",
"(",
")",
"if",
"shape",
"is",
"None",
":",
"shape",
"=",
"(",
"nrecords",
",",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
")",
"def",
"process_keys",
"(",
"record",
")",
":",
"k",
",",
"v",
"=",
"record",
"if",
"isinstance",
"(",
"k",
",",
"int",
")",
":",
"k",
"=",
"(",
"k",
",",
")",
"return",
"k",
",",
"v",
"values",
"=",
"BoltArraySpark",
"(",
"rdd",
".",
"map",
"(",
"process_keys",
")",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"split",
"=",
"len",
"(",
"shape",
")",
"-",
"1",
",",
"ordered",
"=",
"ordered",
")",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | Load series data from a Spark RDD.
Assumes keys are tuples with increasing and unique indices,
and values are 1d ndarrays. Will try to infer properties
that are not explicitly provided.
Parameters
----------
rdd : SparkRDD
An RDD containing series data.
shape : tuple or array, optional, default = None
Total shape of data (if provided will avoid check).
nrecords : int, optional, default = None
Number of records (if provided will avoid check).
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
dtype : string, default = None
Data numerical type (if provided will avoid check)
ordered : boolean, optional, default = False
Whether or not the rdd is ordered by key | [
"Load",
"series",
"data",
"from",
"a",
"Spark",
"RDD",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L13-L72 | train |
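A hedged sketch of fromrdd; it assumes a live SparkContext bound to the name sc, with keys that are already single-element tuples and values that are 1d arrays, as the docstring requires.

from numpy import array
from thunder.series.readers import fromrdd

# sc is assumed to be an existing SparkContext
rdd = sc.parallelize([((0,), array([1.0, 2.0, 3.0])),
                      ((1,), array([4.0, 5.0, 6.0]))])
series = fromrdd(rdd, nrecords=2, index=[0, 1, 2], ordered=True)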
thunder-project/thunder | thunder/series/readers.py | fromarray | def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
"""
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
from .series import Series
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Series(values)
values = asarray(values)
if values.ndim < 2:
values = expand_dims(values, 0)
if index is not None and not asarray(index).shape[0] == values.shape[-1]:
raise ValueError('Index length %s not equal to record length %s'
% (asarray(index).shape[0], values.shape[-1]))
if index is None:
index = arange(values.shape[-1])
if spark and isinstance(engine, spark):
axis = tuple(range(values.ndim - 1))
values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
values._ordered = True
return Series(values, index=index)
return Series(values, index=index, labels=labels) | python | def fromarray(values, index=None, labels=None, npartitions=None, engine=None):
"""
Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
from .series import Series
import bolt
if isinstance(values, bolt.spark.array.BoltArraySpark):
return Series(values)
values = asarray(values)
if values.ndim < 2:
values = expand_dims(values, 0)
if index is not None and not asarray(index).shape[0] == values.shape[-1]:
raise ValueError('Index length %s not equal to record length %s'
% (asarray(index).shape[0], values.shape[-1]))
if index is None:
index = arange(values.shape[-1])
if spark and isinstance(engine, spark):
axis = tuple(range(values.ndim - 1))
values = bolt.array(values, context=engine, npartitions=npartitions, axis=axis)
values._ordered = True
return Series(values, index=index)
return Series(values, index=index, labels=labels) | [
"def",
"fromarray",
"(",
"values",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"from",
".",
"series",
"import",
"Series",
"import",
"bolt",
"if",
"isinstance",
"(",
"values",
",",
"bolt",
".",
"spark",
".",
"array",
".",
"BoltArraySpark",
")",
":",
"return",
"Series",
"(",
"values",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"values",
".",
"ndim",
"<",
"2",
":",
"values",
"=",
"expand_dims",
"(",
"values",
",",
"0",
")",
"if",
"index",
"is",
"not",
"None",
"and",
"not",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
"==",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Index length %s not equal to record length %s'",
"%",
"(",
"asarray",
"(",
"index",
")",
".",
"shape",
"[",
"0",
"]",
",",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"arange",
"(",
"values",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"values",
".",
"ndim",
"-",
"1",
")",
")",
"values",
"=",
"bolt",
".",
"array",
"(",
"values",
",",
"context",
"=",
"engine",
",",
"npartitions",
"=",
"npartitions",
",",
"axis",
"=",
"axis",
")",
"values",
".",
"_ordered",
"=",
"True",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
")",
"return",
"Series",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | Load series data from an array.
Assumes that all but final dimension index the records,
and the size of the final dimension is the length of each record,
e.g. a (2, 3, 4) array will be treated as 2 x 3 records of size (4,)
Parameters
----------
values : array-like
An array containing the data. Can be a numpy array,
a bolt array, or an array-like.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same shape as values.shape[:-1].
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark) | [
"Load",
"series",
"data",
"from",
"an",
"array",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L74-L124 | train |
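A minimal local-mode sketch of fromarray; per the docstring, a (2, 3, 4) array is treated as 2 x 3 records of length 4.

from numpy import random
from thunder.series.readers import fromarray

values = random.randn(2, 3, 4)
series = fromarray(values, index=[0, 1, 2, 3])  # index labels the final axis
# series.shape is expected to be (2, 3, 4)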
thunder-project/thunder | thunder/series/readers.py | fromlist | def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
"""
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
if spark and isinstance(engine, spark):
if dtype is None:
dtype = accessor(items[0]).dtype if accessor else items[0].dtype
nrecords = len(items)
keys = map(lambda k: (k, ), range(len(items)))
if not npartitions:
npartitions = engine.defaultParallelism
items = zip(keys, items)
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)
else:
if accessor:
items = [accessor(i) for i in items]
return fromarray(items, index=index, labels=labels) | python | def fromlist(items, accessor=None, index=None, labels=None, dtype=None, npartitions=None, engine=None):
"""
Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
if spark and isinstance(engine, spark):
if dtype is None:
dtype = accessor(items[0]).dtype if accessor else items[0].dtype
nrecords = len(items)
keys = map(lambda k: (k, ), range(len(items)))
if not npartitions:
npartitions = engine.defaultParallelism
items = zip(keys, items)
rdd = engine.parallelize(items, npartitions)
if accessor:
rdd = rdd.mapValues(accessor)
return fromrdd(rdd, nrecords=nrecords, index=index, labels=labels, dtype=dtype, ordered=True)
else:
if accessor:
items = [accessor(i) for i in items]
return fromarray(items, index=index, labels=labels) | [
"def",
"fromlist",
"(",
"items",
",",
"accessor",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"accessor",
"(",
"items",
"[",
"0",
"]",
")",
".",
"dtype",
"if",
"accessor",
"else",
"items",
"[",
"0",
"]",
".",
"dtype",
"nrecords",
"=",
"len",
"(",
"items",
")",
"keys",
"=",
"map",
"(",
"lambda",
"k",
":",
"(",
"k",
",",
")",
",",
"range",
"(",
"len",
"(",
"items",
")",
")",
")",
"if",
"not",
"npartitions",
":",
"npartitions",
"=",
"engine",
".",
"defaultParallelism",
"items",
"=",
"zip",
"(",
"keys",
",",
"items",
")",
"rdd",
"=",
"engine",
".",
"parallelize",
"(",
"items",
",",
"npartitions",
")",
"if",
"accessor",
":",
"rdd",
"=",
"rdd",
".",
"mapValues",
"(",
"accessor",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"nrecords",
"=",
"nrecords",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
",",
"dtype",
"=",
"dtype",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"if",
"accessor",
":",
"items",
"=",
"[",
"accessor",
"(",
"i",
")",
"for",
"i",
"in",
"items",
"]",
"return",
"fromarray",
"(",
"items",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | Load series data from a list with an optional accessor function.
Will call accessor function on each item from the list,
providing a generic interface for data loading.
Parameters
----------
items : list
A list of items to load.
accessor : function, optional, default = None
A function to apply to each item in the list during loading.
index : array, optional, default = None
Index for records, if not provided will use (0,1,...,N)
where N is the length of each record.
labels : array, optional, default = None
Labels for records. If provided, should have same length as items.
dtype : string, default = None
Data numerical type (if provided will avoid check)
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark) | [
"Load",
"series",
"data",
"from",
"a",
"list",
"with",
"an",
"optional",
"accessor",
"function",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L126-L173 | train |
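A hedged local-mode sketch of fromlist with an accessor; the items here are made-up strings that the accessor parses into 1d arrays.

from numpy import array
from thunder.series.readers import fromlist

items = ['1 2 3', '4 5 6']
parse = lambda s: array([float(x) for x in s.split(' ')])
series = fromlist(items, accessor=parse)  # accessor is applied to every item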
thunder-project/thunder | thunder/series/readers.py | fromtext | def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels) | python | def fromtext(path, ext='txt', dtype='float64', skip=0, shape=None, index=None, labels=None, npartitions=None, engine=None, credentials=None):
"""
Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
if spark and isinstance(engine, spark):
def parse(line, skip):
vec = [float(x) for x in line.split(' ')]
return array(vec[skip:], dtype=dtype)
lines = engine.textFile(path, npartitions)
data = lines.map(lambda x: parse(x, skip))
def switch(record):
ary, idx = record
return (idx,), ary
rdd = data.zipWithIndex().map(switch)
return fromrdd(rdd, dtype=str(dtype), shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for kv in data:
for line in str(kv[1].decode('utf-8')).split('\n')[:-1]:
values.append(fromstring(line, sep=' '))
values = asarray(values)
if skip > 0:
values = values[:, skip:]
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels) | [
"def",
"fromtext",
"(",
"path",
",",
"ext",
"=",
"'txt'",
",",
"dtype",
"=",
"'float64'",
",",
"skip",
"=",
"0",
",",
"shape",
"=",
"None",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"npartitions",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"readers",
"import",
"normalize_scheme",
",",
"get_parallel_reader",
"path",
"=",
"normalize_scheme",
"(",
"path",
",",
"ext",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"def",
"parse",
"(",
"line",
",",
"skip",
")",
":",
"vec",
"=",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"line",
".",
"split",
"(",
"' '",
")",
"]",
"return",
"array",
"(",
"vec",
"[",
"skip",
":",
"]",
",",
"dtype",
"=",
"dtype",
")",
"lines",
"=",
"engine",
".",
"textFile",
"(",
"path",
",",
"npartitions",
")",
"data",
"=",
"lines",
".",
"map",
"(",
"lambda",
"x",
":",
"parse",
"(",
"x",
",",
"skip",
")",
")",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"rdd",
"=",
"data",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"dtype",
"=",
"str",
"(",
"dtype",
")",
",",
"shape",
"=",
"shape",
",",
"index",
"=",
"index",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
")",
"values",
"=",
"[",
"]",
"for",
"kv",
"in",
"data",
":",
"for",
"line",
"in",
"str",
"(",
"kv",
"[",
"1",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"[",
":",
"-",
"1",
"]",
":",
"values",
".",
"append",
"(",
"fromstring",
"(",
"line",
",",
"sep",
"=",
"' '",
")",
")",
"values",
"=",
"asarray",
"(",
"values",
")",
"if",
"skip",
">",
"0",
":",
"values",
"=",
"values",
"[",
":",
",",
"skip",
":",
"]",
"if",
"shape",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"shape",
")",
"return",
"fromarray",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | Loads series data from text files.
Assumes data are formatted as rows, where each record is a row
of numbers separated by spaces e.g. 'v v v v v'. You can
optionally specify a fixed number of initial items per row to skip / discard.
Parameters
----------
path : string
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'txt'
File extension.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
skip : int, optional, default = 0
Number of items in each record to skip.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have length equal to number of rows.
npartitions : int, default = None
Number of partitions for parallelization (Spark only)
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***} | [
"Loads",
"series",
"data",
"from",
"text",
"files",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L175-L252 | train |
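A hedged local-mode sketch of fromtext; the directory name is hypothetical and is assumed to hold .txt files whose rows look like 'id v v v v', with the leading id discarded via skip=1.

from thunder.series.readers import fromtext

series = fromtext('data-txt', ext='txt', skip=1)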
thunder-project/thunder | thunder/series/readers.py | frombinary | def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to interpret the binary data as.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels) | python | def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to interpret the binary data as.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels) | [
"def",
"frombinary",
"(",
"path",
",",
"ext",
"=",
"'bin'",
",",
"conf",
"=",
"'conf.json'",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"skip",
"=",
"0",
",",
"index",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"shape",
",",
"dtype",
"=",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
",",
"shape",
",",
"credentials",
")",
"from",
"thunder",
".",
"readers",
"import",
"normalize_scheme",
",",
"get_parallel_reader",
"path",
"=",
"normalize_scheme",
"(",
"path",
",",
"ext",
")",
"from",
"numpy",
"import",
"dtype",
"as",
"dtype_func",
"nelements",
"=",
"shape",
"[",
"-",
"1",
"]",
"+",
"skip",
"recordsize",
"=",
"dtype_func",
"(",
"dtype",
")",
".",
"itemsize",
"*",
"nelements",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"lines",
"=",
"engine",
".",
"binaryRecords",
"(",
"path",
",",
"recordsize",
")",
"raw",
"=",
"lines",
".",
"map",
"(",
"lambda",
"x",
":",
"frombuffer",
"(",
"buffer",
"(",
"x",
")",
",",
"offset",
"=",
"0",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"[",
"skip",
":",
"]",
")",
"def",
"switch",
"(",
"record",
")",
":",
"ary",
",",
"idx",
"=",
"record",
"return",
"(",
"idx",
",",
")",
",",
"ary",
"rdd",
"=",
"raw",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"switch",
")",
"if",
"shape",
"and",
"len",
"(",
"shape",
")",
">",
"2",
":",
"expand",
"=",
"lambda",
"k",
":",
"unravel_index",
"(",
"k",
"[",
"0",
"]",
",",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"expand",
"(",
"kv",
"[",
"0",
"]",
")",
",",
"kv",
"[",
"1",
"]",
")",
")",
"if",
"not",
"index",
":",
"index",
"=",
"arange",
"(",
"shape",
"[",
"-",
"1",
"]",
")",
"return",
"fromrdd",
"(",
"rdd",
",",
"dtype",
"=",
"dtype",
",",
"shape",
"=",
"shape",
",",
"index",
"=",
"index",
",",
"ordered",
"=",
"True",
")",
"else",
":",
"reader",
"=",
"get_parallel_reader",
"(",
"path",
")",
"(",
"engine",
",",
"credentials",
"=",
"credentials",
")",
"data",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"ext",
"=",
"ext",
")",
"values",
"=",
"[",
"]",
"for",
"record",
"in",
"data",
":",
"buf",
"=",
"record",
"[",
"1",
"]",
"offset",
"=",
"0",
"while",
"offset",
"<",
"len",
"(",
"buf",
")",
":",
"v",
"=",
"frombuffer",
"(",
"buffer",
"(",
"buf",
")",
",",
"offset",
"=",
"offset",
",",
"count",
"=",
"nelements",
",",
"dtype",
"=",
"dtype",
")",
"values",
".",
"append",
"(",
"v",
"[",
"skip",
":",
"]",
")",
"offset",
"+=",
"recordsize",
"if",
"not",
"len",
"(",
"values",
")",
"==",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Unexpected shape, got %g records but expected %g'",
"%",
"(",
"len",
"(",
"values",
")",
",",
"prod",
"(",
"shape",
"[",
"0",
":",
"-",
"1",
"]",
")",
")",
")",
"values",
"=",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"dtype",
")",
"if",
"shape",
":",
"values",
"=",
"values",
".",
"reshape",
"(",
"shape",
")",
"return",
"fromarray",
"(",
"values",
",",
"index",
"=",
"index",
",",
"labels",
"=",
"labels",
")"
] | Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to interpret the binary data as.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***} | [
"Load",
"series",
"data",
"from",
"flat",
"binary",
"files",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L254-L342 | train |
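A hedged sketch of frombinary with dtype and shape passed explicitly, so no conf.json is required; the directory name is hypothetical and is assumed to hold flat float64 records written row-major.

from thunder.series.readers import frombinary

series = frombinary('data-bin', dtype='float64', shape=(2, 3, 4))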
thunder-project/thunder | thunder/series/readers.py | _binaryconfig | def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
"""
Collects parameters to use for binary series loading.
"""
import json
from thunder.readers import get_file_reader, FileNotFoundError
reader = get_file_reader(path)(credentials=credentials)
try:
buf = reader.read(path, filename=conf)
params = json.loads(str(buf.decode('utf-8')))
except FileNotFoundError:
params = {}
if dtype:
params['dtype'] = dtype
if shape:
params['shape'] = shape
if 'dtype' not in params.keys():
raise ValueError('dtype not specified either in conf.json or as argument')
if 'shape' not in params.keys():
raise ValueError('shape not specified either in conf.json or as argument')
return params['shape'], params['dtype'] | python | def _binaryconfig(path, conf, dtype=None, shape=None, credentials=None):
"""
Collects parameters to use for binary series loading.
"""
import json
from thunder.readers import get_file_reader, FileNotFoundError
reader = get_file_reader(path)(credentials=credentials)
try:
buf = reader.read(path, filename=conf)
params = json.loads(str(buf.decode('utf-8')))
except FileNotFoundError:
params = {}
if dtype:
params['dtype'] = dtype
if shape:
params['shape'] = shape
if 'dtype' not in params.keys():
raise ValueError('dtype not specified either in conf.json or as argument')
if 'shape' not in params.keys():
raise ValueError('shape not specified either in conf.json or as argument')
return params['shape'], params['dtype'] | [
"def",
"_binaryconfig",
"(",
"path",
",",
"conf",
",",
"dtype",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"credentials",
"=",
"None",
")",
":",
"import",
"json",
"from",
"thunder",
".",
"readers",
"import",
"get_file_reader",
",",
"FileNotFoundError",
"reader",
"=",
"get_file_reader",
"(",
"path",
")",
"(",
"credentials",
"=",
"credentials",
")",
"try",
":",
"buf",
"=",
"reader",
".",
"read",
"(",
"path",
",",
"filename",
"=",
"conf",
")",
"params",
"=",
"json",
".",
"loads",
"(",
"str",
"(",
"buf",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")",
"except",
"FileNotFoundError",
":",
"params",
"=",
"{",
"}",
"if",
"dtype",
":",
"params",
"[",
"'dtype'",
"]",
"=",
"dtype",
"if",
"shape",
":",
"params",
"[",
"'shape'",
"]",
"=",
"shape",
"if",
"'dtype'",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'dtype not specified either in conf.json or as argument'",
")",
"if",
"'shape'",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'shape not specified either in conf.json or as argument'",
")",
"return",
"params",
"[",
"'shape'",
"]",
",",
"params",
"[",
"'dtype'",
"]"
] | Collects parameters to use for binary series loading. | [
"Collects",
"parameters",
"to",
"use",
"for",
"binary",
"series",
"loading",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L344-L370 | train |
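The conf.json consumed by _binaryconfig only records shape and dtype; a sketch of writing one by hand for a hypothetical directory of binary files.

import json

conf = {'shape': [2, 3, 4], 'dtype': 'float64'}
with open('data-bin/conf.json', 'w') as f:
    json.dump(conf, f, indent=2)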
thunder-project/thunder | thunder/series/readers.py | fromexample | def fromexample(name=None, engine=None):
"""
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
import os
import tempfile
import shutil
from boto.s3.connection import S3Connection
datasets = ['iris', 'mouse', 'fish']
if name is None:
print('Available example series datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
d = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(d, 'series'))
os.mkdir(os.path.join(d, 'series', name))
conn = S3Connection(anon=True)
bucket = conn.get_bucket('thunder-sample-data')
for key in bucket.list(os.path.join('series', name) + '/'):
if not key.name.endswith('/'):
key.get_contents_to_filename(os.path.join(d, key.name))
data = frombinary(os.path.join(d, 'series', name), engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
finally:
shutil.rmtree(d)
return data | python | def fromexample(name=None, engine=None):
"""
Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
"""
import os
import tempfile
import shutil
from boto.s3.connection import S3Connection
datasets = ['iris', 'mouse', 'fish']
if name is None:
print('Available example series datasets')
for d in datasets:
print('- ' + d)
return
check_options(name, datasets)
d = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(d, 'series'))
os.mkdir(os.path.join(d, 'series', name))
conn = S3Connection(anon=True)
bucket = conn.get_bucket('thunder-sample-data')
for key in bucket.list(os.path.join('series', name) + '/'):
if not key.name.endswith('/'):
key.get_contents_to_filename(os.path.join(d, key.name))
data = frombinary(os.path.join(d, 'series', name), engine=engine)
if spark and isinstance(engine, spark):
data.cache()
data.compute()
finally:
shutil.rmtree(d)
return data | [
"def",
"fromexample",
"(",
"name",
"=",
"None",
",",
"engine",
"=",
"None",
")",
":",
"import",
"os",
"import",
"tempfile",
"import",
"shutil",
"from",
"boto",
".",
"s3",
".",
"connection",
"import",
"S3Connection",
"datasets",
"=",
"[",
"'iris'",
",",
"'mouse'",
",",
"'fish'",
"]",
"if",
"name",
"is",
"None",
":",
"print",
"(",
"'Availiable example series datasets'",
")",
"for",
"d",
"in",
"datasets",
":",
"print",
"(",
"'- '",
"+",
"d",
")",
"return",
"check_options",
"(",
"name",
",",
"datasets",
")",
"d",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
")",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
",",
"name",
")",
")",
"conn",
"=",
"S3Connection",
"(",
"anon",
"=",
"True",
")",
"bucket",
"=",
"conn",
".",
"get_bucket",
"(",
"'thunder-sample-data'",
")",
"for",
"key",
"in",
"bucket",
".",
"list",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'series'",
",",
"name",
")",
"+",
"'/'",
")",
":",
"if",
"not",
"key",
".",
"name",
".",
"endswith",
"(",
"'/'",
")",
":",
"key",
".",
"get_contents_to_filename",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"key",
".",
"name",
")",
")",
"data",
"=",
"frombinary",
"(",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'series'",
",",
"name",
")",
",",
"engine",
"=",
"engine",
")",
"if",
"spark",
"and",
"isinstance",
"(",
"engine",
",",
"spark",
")",
":",
"data",
".",
"cache",
"(",
")",
"data",
".",
"compute",
"(",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"d",
")",
"return",
"data"
] | Load example series data.
Data are downloaded from S3, so this method requires an internet connection.
Parameters
----------
name : str
Name of dataset, options include 'iris' | 'mouse' | 'fish'.
If not specified will print options.
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark) | [
"Load",
"example",
"series",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/readers.py#L398-L447 | train |
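A hedged sketch of fromexample; as the docstring notes, this downloads from S3 and therefore needs an internet connection.

from thunder.series.readers import fromexample

fromexample()               # with no name, prints the available datasets
data = fromexample('fish')  # downloads and loads the 'fish' series locally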
thunder-project/thunder | thunder/series/writers.py | tobinary | def tobinary(series, path, prefix='series', overwrite=False, credentials=None):
"""
Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call.
"""
from six import BytesIO
from thunder.utils import check_path
from thunder.writers import get_parallel_writer
if not overwrite:
check_path(path, credentials=credentials)
overwrite = True
def tobuffer(kv):
firstkey = None
buf = BytesIO()
for k, v in kv:
if firstkey is None:
firstkey = k
buf.write(v.tostring())
val = buf.getvalue()
buf.close()
if firstkey is None:
return iter([])
else:
label = prefix + '-' + getlabel(firstkey) + ".bin"
return iter([(label, val)])
writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials)
if series.mode == 'spark':
binary = series.values.tordd().sortByKey().mapPartitions(tobuffer)
binary.foreach(writer.write)
else:
basedims = [series.shape[d] for d in series.baseaxes]
def split(k):
ind = unravel_index(k, basedims)
return ind, series.values[ind]
buf = tobuffer([split(i) for i in range(prod(basedims))])
[writer.write(b) for b in buf]
shape = series.shape
dtype = series.dtype
write_config(path, shape=shape, dtype=dtype, overwrite=overwrite, credentials=credentials) | python | def tobinary(series, path, prefix='series', overwrite=False, credentials=None):
"""
Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call.
"""
from six import BytesIO
from thunder.utils import check_path
from thunder.writers import get_parallel_writer
if not overwrite:
check_path(path, credentials=credentials)
overwrite = True
def tobuffer(kv):
firstkey = None
buf = BytesIO()
for k, v in kv:
if firstkey is None:
firstkey = k
buf.write(v.tostring())
val = buf.getvalue()
buf.close()
if firstkey is None:
return iter([])
else:
label = prefix + '-' + getlabel(firstkey) + ".bin"
return iter([(label, val)])
writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials)
if series.mode == 'spark':
binary = series.values.tordd().sortByKey().mapPartitions(tobuffer)
binary.foreach(writer.write)
else:
basedims = [series.shape[d] for d in series.baseaxes]
def split(k):
ind = unravel_index(k, basedims)
return ind, series.values[ind]
buf = tobuffer([split(i) for i in range(prod(basedims))])
[writer.write(b) for b in buf]
shape = series.shape
dtype = series.dtype
write_config(path, shape=shape, dtype=dtype, overwrite=overwrite, credentials=credentials) | [
"def",
"tobinary",
"(",
"series",
",",
"path",
",",
"prefix",
"=",
"'series'",
",",
"overwrite",
"=",
"False",
",",
"credentials",
"=",
"None",
")",
":",
"from",
"six",
"import",
"BytesIO",
"from",
"thunder",
".",
"utils",
"import",
"check_path",
"from",
"thunder",
".",
"writers",
"import",
"get_parallel_writer",
"if",
"not",
"overwrite",
":",
"check_path",
"(",
"path",
",",
"credentials",
"=",
"credentials",
")",
"overwrite",
"=",
"True",
"def",
"tobuffer",
"(",
"kv",
")",
":",
"firstkey",
"=",
"None",
"buf",
"=",
"BytesIO",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kv",
":",
"if",
"firstkey",
"is",
"None",
":",
"firstkey",
"=",
"k",
"buf",
".",
"write",
"(",
"v",
".",
"tostring",
"(",
")",
")",
"val",
"=",
"buf",
".",
"getvalue",
"(",
")",
"buf",
".",
"close",
"(",
")",
"if",
"firstkey",
"is",
"None",
":",
"return",
"iter",
"(",
"[",
"]",
")",
"else",
":",
"label",
"=",
"prefix",
"+",
"'-'",
"+",
"getlabel",
"(",
"firstkey",
")",
"+",
"\".bin\"",
"return",
"iter",
"(",
"[",
"(",
"label",
",",
"val",
")",
"]",
")",
"writer",
"=",
"get_parallel_writer",
"(",
"path",
")",
"(",
"path",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"if",
"series",
".",
"mode",
"==",
"'spark'",
":",
"binary",
"=",
"series",
".",
"values",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"mapPartitions",
"(",
"tobuffer",
")",
"binary",
".",
"foreach",
"(",
"writer",
".",
"write",
")",
"else",
":",
"basedims",
"=",
"[",
"series",
".",
"shape",
"[",
"d",
"]",
"for",
"d",
"in",
"series",
".",
"baseaxes",
"]",
"def",
"split",
"(",
"k",
")",
":",
"ind",
"=",
"unravel_index",
"(",
"k",
",",
"basedims",
")",
"return",
"ind",
",",
"series",
".",
"values",
"[",
"ind",
"]",
"buf",
"=",
"tobuffer",
"(",
"[",
"split",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"prod",
"(",
"basedims",
")",
")",
"]",
")",
"[",
"writer",
".",
"write",
"(",
"b",
")",
"for",
"b",
"in",
"buf",
"]",
"shape",
"=",
"series",
".",
"shape",
"dtype",
"=",
"series",
".",
"dtype",
"write_config",
"(",
"path",
",",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")"
] | Writes out data to binary format.
Parameters
----------
series : Series
The data to write
path : string path or URI to directory to be created
Output files will be written underneath path.
Directory will be created as a result of this call.
prefix : str, optional, default = 'series'
String prefix for files.
overwrite : bool
If true, path and all its contents will be deleted and
recreated as part of this call. | [
"Writes",
"out",
"data",
"to",
"binary",
"format",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/writers.py#L3-L65 | train |
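A hedged local-mode round trip pairing tobinary with the frombinary reader from earlier in this section; the output directory is hypothetical.

from numpy import random
from thunder.series.readers import fromarray, frombinary
from thunder.series.writers import tobinary

original = fromarray(random.randn(2, 3, 4))
tobinary(original, 'series-out', overwrite=True)  # writes .bin files plus conf.json
restored = frombinary('series-out')               # conf.json supplies shape and dtype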
thunder-project/thunder | thunder/series/writers.py | write_config | def write_config(path, shape=None, dtype=None, name="conf.json", overwrite=True, credentials=None):
"""
Write a conf.json file with required information to load Series binary data.
"""
import json
from thunder.writers import get_file_writer
writer = get_file_writer(path)
conf = {'shape': shape, 'dtype': str(dtype)}
confwriter = writer(path, name, overwrite=overwrite, credentials=credentials)
confwriter.write(json.dumps(conf, indent=2))
successwriter = writer(path, "SUCCESS", overwrite=overwrite, credentials=credentials)
successwriter.write('') | python | def write_config(path, shape=None, dtype=None, name="conf.json", overwrite=True, credentials=None):
"""
Write a conf.json file with required information to load Series binary data.
"""
import json
from thunder.writers import get_file_writer
writer = get_file_writer(path)
conf = {'shape': shape, 'dtype': str(dtype)}
confwriter = writer(path, name, overwrite=overwrite, credentials=credentials)
confwriter.write(json.dumps(conf, indent=2))
successwriter = writer(path, "SUCCESS", overwrite=overwrite, credentials=credentials)
successwriter.write('') | [
"def",
"write_config",
"(",
"path",
",",
"shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"\"conf.json\"",
",",
"overwrite",
"=",
"True",
",",
"credentials",
"=",
"None",
")",
":",
"import",
"json",
"from",
"thunder",
".",
"writers",
"import",
"get_file_writer",
"writer",
"=",
"get_file_writer",
"(",
"path",
")",
"conf",
"=",
"{",
"'shape'",
":",
"shape",
",",
"'dtype'",
":",
"str",
"(",
"dtype",
")",
"}",
"confwriter",
"=",
"writer",
"(",
"path",
",",
"name",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"confwriter",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"conf",
",",
"indent",
"=",
"2",
")",
")",
"successwriter",
"=",
"writer",
"(",
"path",
",",
"\"SUCCESS\"",
",",
"overwrite",
"=",
"overwrite",
",",
"credentials",
"=",
"credentials",
")",
"successwriter",
".",
"write",
"(",
"''",
")"
] | Write a conf.json file with required information to load Series binary data. | [
"Write",
"a",
"conf",
".",
"json",
"file",
"with",
"required",
"information",
"to",
"load",
"Series",
"binary",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/writers.py#L67-L81 | train |
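write_config can also be called directly to (re)generate conf.json and the SUCCESS marker for an existing directory of binary files; the path and values here are hypothetical.

from thunder.series.writers import write_config

write_config('series-out', shape=(2, 3, 4), dtype='float64', overwrite=True)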
thunder-project/thunder | thunder/images/images.py | Images.toblocks | def toblocks(self, chunk_size='auto', padding=None):
"""
Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimension for blocks. If an int, then
the same amount of padding is used for all dimensions.
"""
from thunder.blocks.blocks import Blocks
from thunder.blocks.local import LocalChunks
if self.mode == 'spark':
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
chunks = self.values.chunk(chunk_size, padding=padding).keys_to_values((0,))
if self.mode == 'local':
if chunk_size is 'auto':
chunk_size = self.shape[1:]
chunks = LocalChunks.chunk(self.values, chunk_size, padding=padding)
return Blocks(chunks) | python | def toblocks(self, chunk_size='auto', padding=None):
"""
Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimension for blocks. If an int, then
the same amount of padding is used for all dimensions.
"""
from thunder.blocks.blocks import Blocks
from thunder.blocks.local import LocalChunks
if self.mode == 'spark':
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
chunks = self.values.chunk(chunk_size, padding=padding).keys_to_values((0,))
if self.mode == 'local':
if chunk_size is 'auto':
chunk_size = self.shape[1:]
chunks = LocalChunks.chunk(self.values, chunk_size, padding=padding)
return Blocks(chunks) | [
"def",
"toblocks",
"(",
"self",
",",
"chunk_size",
"=",
"'auto'",
",",
"padding",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"blocks",
".",
"blocks",
"import",
"Blocks",
"from",
"thunder",
".",
"blocks",
".",
"local",
"import",
"LocalChunks",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"str",
"(",
"max",
"(",
"[",
"int",
"(",
"1e5",
"/",
"self",
".",
"shape",
"[",
"0",
"]",
")",
",",
"1",
"]",
")",
")",
"chunks",
"=",
"self",
".",
"values",
".",
"chunk",
"(",
"chunk_size",
",",
"padding",
"=",
"padding",
")",
".",
"keys_to_values",
"(",
"(",
"0",
",",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"self",
".",
"shape",
"[",
"1",
":",
"]",
"chunks",
"=",
"LocalChunks",
".",
"chunk",
"(",
"self",
".",
"values",
",",
"chunk_size",
",",
"padding",
"=",
"padding",
")",
"return",
"Blocks",
"(",
"chunks",
")"
] | Convert to blocks which represent subdivisions of the images data.
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
padding : tuple or int
Amount of padding along each dimension for blocks. If an int, then
the same amount of padding is used for all dimensions. | [
"Convert",
"to",
"blocks",
"which",
"represent",
"subdivisions",
"of",
"the",
"images",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L60-L89 | train |
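A minimal local-mode sketch of the toblocks conversion above; the array shape and random contents are invented for illustration, and a working thunder plus numpy installation is assumed.

```python
import numpy as np
from thunder.images.readers import fromarray

# hypothetical 20-frame stack of 32x32 images, held in local mode
data = fromarray(np.random.randn(20, 32, 32))

blocks = data.toblocks(chunk_size='auto')   # 'auto' gives a single block locally
print(blocks.toarray().shape)               # (20, 32, 32), reassembled array
print(blocks.toimages().shape)              # (20, 32, 32), back to images
```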
thunder-project/thunder | thunder/images/images.py | Images.toseries | def toseries(self, chunk_size='auto'):
"""
Converts to series data.
This method is equivalent to images.toblocks(size).toseries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
"""
from thunder.series.series import Series
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
n = len(self.shape) - 1
index = arange(self.shape[0])
if self.mode == 'spark':
return Series(self.values.swap((0,), tuple(range(n)), size=chunk_size), index=index)
if self.mode == 'local':
return Series(self.values.transpose(tuple(range(1, n+1)) + (0,)), index=index) | python | def toseries(self, chunk_size='auto'):
"""
Converts to series data.
This method is equivalent to images.toblocks(size).toseries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
"""
from thunder.series.series import Series
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
n = len(self.shape) - 1
index = arange(self.shape[0])
if self.mode == 'spark':
return Series(self.values.swap((0,), tuple(range(n)), size=chunk_size), index=index)
if self.mode == 'local':
return Series(self.values.transpose(tuple(range(1, n+1)) + (0,)), index=index) | [
"def",
"toseries",
"(",
"self",
",",
"chunk_size",
"=",
"'auto'",
")",
":",
"from",
"thunder",
".",
"series",
".",
"series",
"import",
"Series",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"str",
"(",
"max",
"(",
"[",
"int",
"(",
"1e5",
"/",
"self",
".",
"shape",
"[",
"0",
"]",
")",
",",
"1",
"]",
")",
")",
"n",
"=",
"len",
"(",
"self",
".",
"shape",
")",
"-",
"1",
"index",
"=",
"arange",
"(",
"self",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"Series",
"(",
"self",
".",
"values",
".",
"swap",
"(",
"(",
"0",
",",
")",
",",
"tuple",
"(",
"range",
"(",
"n",
")",
")",
",",
"size",
"=",
"chunk_size",
")",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"Series",
"(",
"self",
".",
"values",
".",
"transpose",
"(",
"tuple",
"(",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
")",
"+",
"(",
"0",
",",
")",
")",
",",
"index",
"=",
"index",
")"
] | Converts to series data.
This method is equivalent to images.toblocks(size).toseries(). | [
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode. | [
"Converts",
"to",
"series",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L91-L117 | train |
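A small local-mode sketch of the toseries conversion above, using an invented array shape.

```python
import numpy as np
from thunder.images.readers import fromarray

data = fromarray(np.random.randn(20, 32, 32))   # 20 time points of 32x32 images

series = data.toseries()
print(series.shape)   # (32, 32, 20), time becomes the trailing axis
print(series.index)   # [0 1 ... 19], one entry per image
```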
thunder-project/thunder | thunder/images/images.py | Images.tospark | def tospark(self, engine=None):
"""
Convert to distributed spark mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('images already in spark mode')
pass
if engine is None:
raise ValueError('Must provide a SparkContext')
return fromarray(self.toarray(), engine=engine) | python | def tospark(self, engine=None):
"""
Convert to distributed spark mode.
"""
from thunder.images.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('images already in spark mode')
pass
if engine is None:
raise ValueError('Must provide a SparkContext')
return fromarray(self.toarray(), engine=engine) | [
"def",
"tospark",
"(",
"self",
",",
"engine",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"images",
".",
"readers",
"import",
"fromarray",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"logging",
".",
"getLogger",
"(",
"'thunder'",
")",
".",
"warn",
"(",
"'images already in spark mode'",
")",
"pass",
"if",
"engine",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Must provide a SparkContext'",
")",
"return",
"fromarray",
"(",
"self",
".",
"toarray",
"(",
")",
",",
"engine",
"=",
"engine",
")"
] | Convert to distributed spark mode. | [
"Convert",
"to",
"distributed",
"spark",
"mode",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L131-L144 | train |
thunder-project/thunder | thunder/images/images.py | Images.foreach | def foreach(self, func):
"""
Execute a function on each image.
Functions can have side effects. There is no return value.
"""
if self.mode == 'spark':
self.values.tordd().map(lambda kv: (kv[0][0], kv[1])).foreach(func)
else:
[func(kv) for kv in enumerate(self.values)] | python | def foreach(self, func):
"""
Execute a function on each image.
Functions can have side effects. There is no return value.
"""
if self.mode == 'spark':
self.values.tordd().map(lambda kv: (kv[0][0], kv[1])).foreach(func)
else:
[func(kv) for kv in enumerate(self.values)] | [
"def",
"foreach",
"(",
"self",
",",
"func",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"kv",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"kv",
"[",
"1",
"]",
")",
")",
".",
"foreach",
"(",
"func",
")",
"else",
":",
"[",
"func",
"(",
"kv",
")",
"for",
"kv",
"in",
"enumerate",
"(",
"self",
".",
"values",
")",
"]"
] | Execute a function on each image.
Functions can have side effects. There is no return value. | [
"Execute",
"a",
"function",
"on",
"each",
"image",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L146-L155 | train |
thunder-project/thunder | thunder/images/images.py | Images.sample | def sample(self, nsamples=100, seed=None):
"""
Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if nsamples < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % nsamples)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, nsamples, seed))
else:
inds = [int(k) for k in random.rand(nsamples) * self.shape[0]]
result = asarray([self.values[i] for i in inds])
return self._constructor(result) | python | def sample(self, nsamples=100, seed=None):
"""
Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if nsamples < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % nsamples)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, nsamples, seed))
else:
inds = [int(k) for k in random.rand(nsamples) * self.shape[0]]
result = asarray([self.values[i] for i in inds])
return self._constructor(result) | [
"def",
"sample",
"(",
"self",
",",
"nsamples",
"=",
"100",
",",
"seed",
"=",
"None",
")",
":",
"if",
"nsamples",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Number of samples must be larger than 0, got '%g'\"",
"%",
"nsamples",
")",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"result",
"=",
"asarray",
"(",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"values",
"(",
")",
".",
"takeSample",
"(",
"False",
",",
"nsamples",
",",
"seed",
")",
")",
"else",
":",
"inds",
"=",
"[",
"int",
"(",
"k",
")",
"for",
"k",
"in",
"random",
".",
"rand",
"(",
"nsamples",
")",
"*",
"self",
".",
"shape",
"[",
"0",
"]",
"]",
"result",
"=",
"asarray",
"(",
"[",
"self",
".",
"values",
"[",
"i",
"]",
"for",
"i",
"in",
"inds",
"]",
")",
"return",
"self",
".",
"_constructor",
"(",
"result",
")"
] | Extract a random sample of images.
Parameters
----------
nsamples : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed. | [
"Extract",
"a",
"random",
"sample",
"of",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L157-L182 | train |
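A short sketch of sample with an invented stack size; passing a seed makes the draw repeatable.

```python
import numpy as np
from thunder.images.readers import fromarray

data = fromarray(np.random.randn(100, 16, 16))

subset = data.sample(nsamples=5, seed=42)   # same 5 frames on every run
print(subset.shape)                         # (5, 16, 16)
```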
thunder-project/thunder | thunder/images/images.py | Images.var | def var(self):
"""
Compute the variance across images.
"""
return self._constructor(self.values.var(axis=0, keepdims=True)) | python | def var(self):
"""
Compute the variance across images.
"""
return self._constructor(self.values.var(axis=0, keepdims=True)) | [
"def",
"var",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"var",
"(",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the variance across images. | [
"Compute",
"the",
"variance",
"across",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L201-L205 | train |
thunder-project/thunder | thunder/images/images.py | Images.std | def std(self):
"""
Compute the standard deviation across images.
"""
return self._constructor(self.values.std(axis=0, keepdims=True)) | python | def std(self):
"""
Compute the standard deviation across images.
"""
return self._constructor(self.values.std(axis=0, keepdims=True)) | [
"def",
"std",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"std",
"(",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the standard deviation across images. | [
"Compute",
"the",
"standard",
"deviation",
"across",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L207-L211 | train |
thunder-project/thunder | thunder/images/images.py | Images.squeeze | def squeeze(self):
"""
Remove single-dimensional axes from images.
"""
axis = tuple(range(1, len(self.shape) - 1)) if prod(self.shape[1:]) == 1 else None
return self.map(lambda x: x.squeeze(axis=axis)) | python | def squeeze(self):
"""
Remove single-dimensional axes from images.
"""
axis = tuple(range(1, len(self.shape) - 1)) if prod(self.shape[1:]) == 1 else None
return self.map(lambda x: x.squeeze(axis=axis)) | [
"def",
"squeeze",
"(",
"self",
")",
":",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"shape",
")",
"-",
"1",
")",
")",
"if",
"prod",
"(",
"self",
".",
"shape",
"[",
"1",
":",
"]",
")",
"==",
"1",
"else",
"None",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"squeeze",
"(",
"axis",
"=",
"axis",
")",
")"
] | Remove single-dimensional axes from images. | [
"Remove",
"single",
"-",
"dimensional",
"axes",
"from",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L231-L236 | train |
thunder-project/thunder | thunder/images/images.py | Images.max_projection | def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape) | python | def max_projection(self, axis=2):
"""
Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis), value_shape=new_value_shape) | [
"def",
"max_projection",
"(",
"self",
",",
"axis",
"=",
"2",
")",
":",
"if",
"axis",
">=",
"size",
"(",
"self",
".",
"value_shape",
")",
":",
"raise",
"Exception",
"(",
"'Axis for projection (%s) exceeds '",
"'image dimensions (%s-%s)'",
"%",
"(",
"axis",
",",
"0",
",",
"size",
"(",
"self",
".",
"value_shape",
")",
"-",
"1",
")",
")",
"new_value_shape",
"=",
"list",
"(",
"self",
".",
"value_shape",
")",
"del",
"new_value_shape",
"[",
"axis",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"amax",
"(",
"x",
",",
"axis",
")",
",",
"value_shape",
"=",
"new_value_shape",
")"
] | Compute maximum projections of images along a dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along. | [
"Compute",
"maximum",
"projections",
"of",
"images",
"along",
"a",
"dimension",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L258-L273 | train |
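A local-mode sketch of max_projection on an invented volumetric stack; axis=2 collapses the z dimension of each volume.

```python
import numpy as np
from thunder.images.readers import fromarray

# hypothetical 10 volumes of 16x16x8 voxels
data = fromarray(np.random.randn(10, 16, 16, 8))

proj = data.max_projection(axis=2)
print(proj.shape)   # (10, 16, 16), one 2d projection per volume
```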
thunder-project/thunder | thunder/images/images.py | Images.max_min_projection | def max_min_projection(self, axis=2):
"""
Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis) + amin(x, axis), value_shape=new_value_shape) | python | def max_min_projection(self, axis=2):
"""
Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along.
"""
if axis >= size(self.value_shape):
raise Exception('Axis for projection (%s) exceeds '
'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))
new_value_shape = list(self.value_shape)
del new_value_shape[axis]
return self.map(lambda x: amax(x, axis) + amin(x, axis), value_shape=new_value_shape) | [
"def",
"max_min_projection",
"(",
"self",
",",
"axis",
"=",
"2",
")",
":",
"if",
"axis",
">=",
"size",
"(",
"self",
".",
"value_shape",
")",
":",
"raise",
"Exception",
"(",
"'Axis for projection (%s) exceeds '",
"'image dimensions (%s-%s)'",
"%",
"(",
"axis",
",",
"0",
",",
"size",
"(",
"self",
".",
"value_shape",
")",
"-",
"1",
")",
")",
"new_value_shape",
"=",
"list",
"(",
"self",
".",
"value_shape",
")",
"del",
"new_value_shape",
"[",
"axis",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"amax",
"(",
"x",
",",
"axis",
")",
"+",
"amin",
"(",
"x",
",",
"axis",
")",
",",
"value_shape",
"=",
"new_value_shape",
")"
] | Compute maximum-minimum projection along a dimension.
This computes the sum of the maximum and minimum values.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along. | [
"Compute",
"maximum",
"-",
"minimum",
"projection",
"along",
"a",
"dimension",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L275-L292 | train |
thunder-project/thunder | thunder/images/images.py | Images.subsample | def subsample(self, factor):
"""
Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor.
"""
value_shape = self.value_shape
ndims = len(value_shape)
if not hasattr(factor, '__len__'):
factor = [factor] * ndims
factor = [int(sf) for sf in factor]
if any((sf <= 0 for sf in factor)):
raise ValueError('All sampling factors must be positive; got ' + str(factor))
def roundup(a, b):
return (a + b - 1) // b
slices = [slice(0, value_shape[i], factor[i]) for i in range(ndims)]
new_value_shape = tuple([roundup(value_shape[i], factor[i]) for i in range(ndims)])
return self.map(lambda v: v[slices], value_shape=new_value_shape) | python | def subsample(self, factor):
"""
Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor.
"""
value_shape = self.value_shape
ndims = len(value_shape)
if not hasattr(factor, '__len__'):
factor = [factor] * ndims
factor = [int(sf) for sf in factor]
if any((sf <= 0 for sf in factor)):
raise ValueError('All sampling factors must be positive; got ' + str(factor))
def roundup(a, b):
return (a + b - 1) // b
slices = [slice(0, value_shape[i], factor[i]) for i in range(ndims)]
new_value_shape = tuple([roundup(value_shape[i], factor[i]) for i in range(ndims)])
return self.map(lambda v: v[slices], value_shape=new_value_shape) | [
"def",
"subsample",
"(",
"self",
",",
"factor",
")",
":",
"value_shape",
"=",
"self",
".",
"value_shape",
"ndims",
"=",
"len",
"(",
"value_shape",
")",
"if",
"not",
"hasattr",
"(",
"factor",
",",
"'__len__'",
")",
":",
"factor",
"=",
"[",
"factor",
"]",
"*",
"ndims",
"factor",
"=",
"[",
"int",
"(",
"sf",
")",
"for",
"sf",
"in",
"factor",
"]",
"if",
"any",
"(",
"(",
"sf",
"<=",
"0",
"for",
"sf",
"in",
"factor",
")",
")",
":",
"raise",
"ValueError",
"(",
"'All sampling factors must be positive; got '",
"+",
"str",
"(",
"factor",
")",
")",
"def",
"roundup",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"a",
"+",
"b",
"-",
"1",
")",
"//",
"b",
"slices",
"=",
"[",
"slice",
"(",
"0",
",",
"value_shape",
"[",
"i",
"]",
",",
"factor",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"ndims",
")",
"]",
"new_value_shape",
"=",
"tuple",
"(",
"[",
"roundup",
"(",
"value_shape",
"[",
"i",
"]",
",",
"factor",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"ndims",
")",
"]",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"v",
"[",
"slices",
"]",
",",
"value_shape",
"=",
"new_value_shape",
")"
] | Downsample images by an integer factor.
Parameters
----------
factor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed,
each dimension of the image will be downsampled by this factor.
If a tuple is passed, each dimension will be downsampled by the given factor. | [
"Downsample",
"images",
"by",
"an",
"integer",
"factor",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L294-L320 | train |
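A sketch of subsample on an invented stack, assuming an environment with the numpy version thunder supports; it shows how the rounded-up output shape follows from the stride.

```python
import numpy as np
from thunder.images.readers import fromarray

data = fromarray(np.zeros((4, 64, 50)))

small = data.subsample(3)   # stride of 3 along both spatial axes
print(small.shape)          # (4, 22, 17): ceil(64/3) = 22, ceil(50/3) = 17
```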
thunder-project/thunder | thunder/images/images.py | Images.gaussian_filter | def gaussian_filter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter as a standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
"""
from scipy.ndimage.filters import gaussian_filter
return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape) | python | def gaussian_filter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter as a standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
"""
from scipy.ndimage.filters import gaussian_filter
return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape) | [
"def",
"gaussian_filter",
"(",
"self",
",",
"sigma",
"=",
"2",
",",
"order",
"=",
"0",
")",
":",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"gaussian_filter",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"gaussian_filter",
"(",
"v",
",",
"sigma",
",",
"order",
")",
",",
"value_shape",
"=",
"self",
".",
"value_shape",
")"
] | Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter as a standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian. | [
"Spatially",
"smooth",
"images",
"with",
"a",
"gaussian",
"filter",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L322-L341 | train |
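A sketch of gaussian_filter with invented data, assuming the scipy version thunder supports; it shows both a scalar sigma and a per-axis sequence.

```python
import numpy as np
from thunder.images.readers import fromarray

data = fromarray(np.random.randn(8, 64, 64))

smoothed = data.gaussian_filter(sigma=2)      # same width along both axes
aniso = data.gaussian_filter(sigma=(4, 1))    # different width per axis
print(smoothed.shape, aniso.shape)            # shapes are unchanged
```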
thunder-project/thunder | thunder/images/images.py | Images._image_filter | def _image_filter(self, filter=None, size=2):
"""
Generic function for mapping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter.
"""
from numpy import isscalar
from scipy.ndimage.filters import median_filter, uniform_filter
FILTERS = {
'median': median_filter,
'uniform': uniform_filter
}
func = FILTERS[filter]
mode = self.mode
value_shape = self.value_shape
ndims = len(value_shape)
if ndims == 3 and isscalar(size) == 1:
size = [size, size, size]
if ndims == 3 and size[2] == 0:
def filter_(im):
if mode == 'spark':
im.setflags(write=True)
else:
im = im.copy()
for z in arange(0, value_shape[2]):
im[:, :, z] = func(im[:, :, z], size[0:2])
return im
else:
filter_ = lambda x: func(x, size)
return self.map(lambda v: filter_(v), value_shape=self.value_shape) | python | def _image_filter(self, filter=None, size=2):
"""
Generic function for mapping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter.
"""
from numpy import isscalar
from scipy.ndimage.filters import median_filter, uniform_filter
FILTERS = {
'median': median_filter,
'uniform': uniform_filter
}
func = FILTERS[filter]
mode = self.mode
value_shape = self.value_shape
ndims = len(value_shape)
if ndims == 3 and isscalar(size) == 1:
size = [size, size, size]
if ndims == 3 and size[2] == 0:
def filter_(im):
if mode == 'spark':
im.setflags(write=True)
else:
im = im.copy()
for z in arange(0, value_shape[2]):
im[:, :, z] = func(im[:, :, z], size[0:2])
return im
else:
filter_ = lambda x: func(x, size)
return self.map(lambda v: filter_(v), value_shape=self.value_shape) | [
"def",
"_image_filter",
"(",
"self",
",",
"filter",
"=",
"None",
",",
"size",
"=",
"2",
")",
":",
"from",
"numpy",
"import",
"isscalar",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"median_filter",
",",
"uniform_filter",
"FILTERS",
"=",
"{",
"'median'",
":",
"median_filter",
",",
"'uniform'",
":",
"uniform_filter",
"}",
"func",
"=",
"FILTERS",
"[",
"filter",
"]",
"mode",
"=",
"self",
".",
"mode",
"value_shape",
"=",
"self",
".",
"value_shape",
"ndims",
"=",
"len",
"(",
"value_shape",
")",
"if",
"ndims",
"==",
"3",
"and",
"isscalar",
"(",
"size",
")",
"==",
"1",
":",
"size",
"=",
"[",
"size",
",",
"size",
",",
"size",
"]",
"if",
"ndims",
"==",
"3",
"and",
"size",
"[",
"2",
"]",
"==",
"0",
":",
"def",
"filter_",
"(",
"im",
")",
":",
"if",
"mode",
"==",
"'spark'",
":",
"im",
".",
"setflags",
"(",
"write",
"=",
"True",
")",
"else",
":",
"im",
"=",
"im",
".",
"copy",
"(",
")",
"for",
"z",
"in",
"arange",
"(",
"0",
",",
"value_shape",
"[",
"2",
"]",
")",
":",
"im",
"[",
":",
",",
":",
",",
"z",
"]",
"=",
"func",
"(",
"im",
"[",
":",
",",
":",
",",
"z",
"]",
",",
"size",
"[",
"0",
":",
"2",
"]",
")",
"return",
"im",
"else",
":",
"filter_",
"=",
"lambda",
"x",
":",
"func",
"(",
"x",
",",
"size",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"filter_",
"(",
"v",
")",
",",
"value_shape",
"=",
"self",
".",
"value_shape",
")"
] | Generic function for mapping a filtering operation over images.
Parameters
----------
filter : string
Which filter to use.
size : int or tuple
Size parameter for filter. | [
"Generic",
"function",
"for",
"maping",
"a",
"filtering",
"operation",
"over",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L373-L414 | train |
thunder-project/thunder | thunder/images/images.py | Images.localcorr | def localcorr(self, size=2):
"""
Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlating the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension.
"""
from thunder.images.readers import fromarray, fromrdd
from numpy import corrcoef, concatenate
nimages = self.shape[0]
# spatially average the original image set over the specified neighborhood
blurred = self.uniform_filter(size)
# union the averaged images with the originals to create an
# Images object containing 2N images (where N is the original number of images),
# ordered such that the first N images are the averaged ones.
if self.mode == 'spark':
combined = self.values.concatenate(blurred.values)
combined_images = fromrdd(combined.tordd())
else:
combined = concatenate((self.values, blurred.values), axis=0)
combined_images = fromarray(combined)
# correlate the first N (averaged) records with the last N (original) records
series = combined_images.toseries()
corr = series.map(lambda x: corrcoef(x[:nimages], x[nimages:])[0, 1])
return corr.toarray() | python | def localcorr(self, size=2):
"""
Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlating the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension.
"""
from thunder.images.readers import fromarray, fromrdd
from numpy import corrcoef, concatenate
nimages = self.shape[0]
# spatially average the original image set over the specified neighborhood
blurred = self.uniform_filter(size)
# union the averaged images with the originals to create an
# Images object containing 2N images (where N is the original number of images),
# ordered such that the first N images are the averaged ones.
if self.mode == 'spark':
combined = self.values.concatenate(blurred.values)
combined_images = fromrdd(combined.tordd())
else:
combined = concatenate((self.values, blurred.values), axis=0)
combined_images = fromarray(combined)
# correlate the first N (averaged) records with the last N (original) records
series = combined_images.toseries()
corr = series.map(lambda x: corrcoef(x[:nimages], x[nimages:])[0, 1])
return corr.toarray() | [
"def",
"localcorr",
"(",
"self",
",",
"size",
"=",
"2",
")",
":",
"from",
"thunder",
".",
"images",
".",
"readers",
"import",
"fromarray",
",",
"fromrdd",
"from",
"numpy",
"import",
"corrcoef",
",",
"concatenate",
"nimages",
"=",
"self",
".",
"shape",
"[",
"0",
"]",
"# spatially average the original image set over the specified neighborhood",
"blurred",
"=",
"self",
".",
"uniform_filter",
"(",
"size",
")",
"# union the averaged images with the originals to create an",
"# Images object containing 2N images (where N is the original number of images),",
"# ordered such that the first N images are the averaged ones.",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"combined",
"=",
"self",
".",
"values",
".",
"concatenate",
"(",
"blurred",
".",
"values",
")",
"combined_images",
"=",
"fromrdd",
"(",
"combined",
".",
"tordd",
"(",
")",
")",
"else",
":",
"combined",
"=",
"concatenate",
"(",
"(",
"self",
".",
"values",
",",
"blurred",
".",
"values",
")",
",",
"axis",
"=",
"0",
")",
"combined_images",
"=",
"fromarray",
"(",
"combined",
")",
"# correlate the first N (averaged) records with the last N (original) records",
"series",
"=",
"combined_images",
".",
"toseries",
"(",
")",
"corr",
"=",
"series",
".",
"map",
"(",
"lambda",
"x",
":",
"corrcoef",
"(",
"x",
"[",
":",
"nimages",
"]",
",",
"x",
"[",
"nimages",
":",
"]",
")",
"[",
"0",
",",
"1",
"]",
")",
"return",
"corr",
".",
"toarray",
"(",
")"
] | Correlate every pixel in an image sequence to the average of its local neighborhood.
This algorithm computes, for every pixel, the correlation coefficient
between the sequence of values for that pixel, and the average of all pixels
in a local neighborhood. It does this by blurring the image(s) with a uniform filter,
and then correlating the original sequence with the blurred sequence.
Parameters
----------
size : int or tuple, optional, default = 2
Size of the filter in pixels. If a scalar, will use the same filter size
along each dimension. | [
"Correlate",
"every",
"pixel",
"in",
"an",
"image",
"sequence",
"to",
"the",
"average",
"of",
"its",
"local",
"neighborhood",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L416-L454 | train |
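A sketch of localcorr on an invented movie; the result is a plain ndarray with one correlation value per pixel.

```python
import numpy as np
from thunder.images.readers import fromarray

# hypothetical 50-frame movie of 20x20 pixels
data = fromarray(np.random.randn(50, 20, 20))

corr = data.localcorr(size=2)
print(corr.shape)   # (20, 20)
```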
thunder-project/thunder | thunder/images/images.py | Images.subtract | def subtract(self, val):
"""
Subtract a constant value or an image from all images.
Parameters
----------
val : int, float, or ndarray
Value to subtract.
"""
if isinstance(val, ndarray):
if val.shape != self.value_shape:
raise Exception('Cannot subtract image with dimensions %s '
'from images with dimension %s' % (str(val.shape), str(self.value_shape)))
return self.map(lambda x: x - val, value_shape=self.value_shape) | python | def subtract(self, val):
"""
Subtract a constant value or an image from all images.
Parameters
----------
val : int, float, or ndarray
Value to subtract.
"""
if isinstance(val, ndarray):
if val.shape != self.value_shape:
raise Exception('Cannot subtract image with dimensions %s '
'from images with dimension %s' % (str(val.shape), str(self.value_shape)))
return self.map(lambda x: x - val, value_shape=self.value_shape) | [
"def",
"subtract",
"(",
"self",
",",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"ndarray",
")",
":",
"if",
"val",
".",
"shape",
"!=",
"self",
".",
"value_shape",
":",
"raise",
"Exception",
"(",
"'Cannot subtract image with dimensions %s '",
"'from images with dimension %s'",
"%",
"(",
"str",
"(",
"val",
".",
"shape",
")",
",",
"str",
"(",
"self",
".",
"value_shape",
")",
")",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"-",
"val",
",",
"value_shape",
"=",
"self",
".",
"value_shape",
")"
] | Subtract a constant value or an image from all images.
Parameters
----------
val : int, float, or ndarray
Value to subtract. | [
"Subtract",
"a",
"constant",
"value",
"or",
"an",
"image",
"from",
"all",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L456-L470 | train |
thunder-project/thunder | thunder/images/images.py | Images.topng | def topng(self, path, prefix='image', overwrite=False):
"""
Write 2d images as PNG files.
Files will be written into a newly-created directory.
Three-dimensional data will be treated as RGB channels.
Parameters
----------
path : string
Path to output directory, must be one level below an existing directory.
prefix : string
String to prepend to filenames.
overwrite : bool
If true, the directory given by path will first be deleted if it exists.
"""
from thunder.images.writers import topng
# TODO add back colormap and vmin/vmax
topng(self, path, prefix=prefix, overwrite=overwrite) | python | def topng(self, path, prefix='image', overwrite=False):
"""
Write 2d images as PNG files.
Files will be written into a newly-created directory.
Three-dimensional data will be treated as RGB channels.
Parameters
----------
path : string
Path to output directory, must be one level below an existing directory.
prefix : string
String to prepend to filenames.
overwrite : bool
If true, the directory given by path will first be deleted if it exists.
"""
from thunder.images.writers import topng
# TODO add back colormap and vmin/vmax
topng(self, path, prefix=prefix, overwrite=overwrite) | [
"def",
"topng",
"(",
"self",
",",
"path",
",",
"prefix",
"=",
"'image'",
",",
"overwrite",
"=",
"False",
")",
":",
"from",
"thunder",
".",
"images",
".",
"writers",
"import",
"topng",
"# TODO add back colormap and vmin/vmax",
"topng",
"(",
"self",
",",
"path",
",",
"prefix",
"=",
"prefix",
",",
"overwrite",
"=",
"overwrite",
")"
] | Write 2d images as PNG files.
Files will be written into a newly-created directory.
Three-dimensional data will be treated as RGB channels.
Parameters
----------
path : string
Path to output directory, must be one level below an existing directory.
prefix : string
String to prepend to filenames.
overwrite : bool
If true, the directory given by path will first be deleted if it exists. | [
"Write",
"2d",
"images",
"as",
"PNG",
"files",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L472-L492 | train |
thunder-project/thunder | thunder/images/images.py | Images.map_as_series | def map_as_series(self, func, value_size=None, dtype=None, chunk_size='auto'):
"""
Efficiently apply a function to images as series data.
For images data that represent image sequences, this method
applies a function to each pixel's series, and then returns to
the images format, using an efficient intermediate block
representation.
Parameters
----------
func : function
Function to apply to each time series. Should take one-dimensional
ndarray and return the transformed one-dimensional ndarray.
value_size : int, optional, default = None
Size of the one-dimensional ndarray resulting from application of
func. If not supplied, will be automatically inferred for an extra
computational cost.
dtype : str, optional, default = None
dtype of one-dimensional ndarray resulting from application of func.
If not supplied it will be automatically inferred for an extra computational cost.
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
"""
blocks = self.toblocks(chunk_size=chunk_size)
if value_size is not None:
dims = list(blocks.blockshape)
dims[0] = value_size
else:
dims = None
def f(block):
return apply_along_axis(func, 0, block)
return blocks.map(f, value_shape=dims, dtype=dtype).toimages() | python | def map_as_series(self, func, value_size=None, dtype=None, chunk_size='auto'):
"""
Efficiently apply a function to images as series data.
For images data that represent image sequences, this method
applies a function to each pixel's series, and then returns to
the images format, using an efficient intermediate block
representation.
Parameters
----------
func : function
Function to apply to each time series. Should take one-dimensional
ndarray and return the transformed one-dimensional ndarray.
value_size : int, optional, default = None
Size of the one-dimensional ndarray resulting from application of
func. If not supplied, will be automatically inferred for an extra
computational cost.
dtype : str, optional, default = None
dtype of one-dimensional ndarray resulting from application of func.
If not supplied it will be automatically inferred for an extra computational cost.
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'.
"""
blocks = self.toblocks(chunk_size=chunk_size)
if value_size is not None:
dims = list(blocks.blockshape)
dims[0] = value_size
else:
dims = None
def f(block):
return apply_along_axis(func, 0, block)
return blocks.map(f, value_shape=dims, dtype=dtype).toimages() | [
"def",
"map_as_series",
"(",
"self",
",",
"func",
",",
"value_size",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"chunk_size",
"=",
"'auto'",
")",
":",
"blocks",
"=",
"self",
".",
"toblocks",
"(",
"chunk_size",
"=",
"chunk_size",
")",
"if",
"value_size",
"is",
"not",
"None",
":",
"dims",
"=",
"list",
"(",
"blocks",
".",
"blockshape",
")",
"dims",
"[",
"0",
"]",
"=",
"value_size",
"else",
":",
"dims",
"=",
"None",
"def",
"f",
"(",
"block",
")",
":",
"return",
"apply_along_axis",
"(",
"func",
",",
"0",
",",
"block",
")",
"return",
"blocks",
".",
"map",
"(",
"f",
",",
"value_shape",
"=",
"dims",
",",
"dtype",
"=",
"dtype",
")",
".",
"toimages",
"(",
")"
] | Efficiently apply a function to images as series data.
For images data that represent image sequences, this method
applies a function to each pixel's series, and then returns to
the images format, using an efficient intermediate block
representation.
Parameters
----------
func : function
Function to apply to each time series. Should take one-dimensional
ndarray and return the transformed one-dimensional ndarray.
value_size : int, optional, default = None
Size of the one-dimensional ndarray resulting from application of
func. If not supplied, will be automatically inferred for an extra
computational cost.
dtype : str, optional, default = None
dtype of one-dimensional ndarray resulting from application of func.
If not supplied it will be automatically inferred for an extra computational cost.
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto'. In spark mode, 'auto' will choose a chunk size to make the
resulting blocks ~100 MB in size. In local mode, 'auto' will create a single block.
Tuple of ints interpreted as 'pixels per dimension'. | [
"Efficiently",
"apply",
"a",
"function",
"to",
"images",
"as",
"series",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/images/images.py#L536-L577 | train |
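A sketch of map_as_series on invented data: each pixel's time series is mean-centered, so the output length (and value_size) equals the number of frames.

```python
import numpy as np
from thunder.images.readers import fromarray

data = fromarray(np.random.randn(30, 16, 16))

centered = data.map_as_series(lambda ts: ts - ts.mean(),
                              value_size=data.shape[0])
print(centered.shape)   # (30, 16, 16), unchanged since the series length is preserved
```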
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.count | def count(self):
"""
Explicit count of the number of items.
For lazy or distributed data, will force a computation.
"""
if self.mode == 'spark':
return self.tordd().count()
if self.mode == 'local':
return prod(self.values.values.shape) | python | def count(self):
"""
Explicit count of the number of items.
For lazy or distributed data, will force a computation.
"""
if self.mode == 'spark':
return self.tordd().count()
if self.mode == 'local':
return prod(self.values.values.shape) | [
"def",
"count",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"self",
".",
"tordd",
"(",
")",
".",
"count",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"prod",
"(",
"self",
".",
"values",
".",
"values",
".",
"shape",
")"
] | Explicit count of the number of items.
For lazy or distributed data, will force a computation. | [
"Explicit",
"count",
"of",
"the",
"number",
"of",
"items",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L30-L40 | train |
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.collect_blocks | def collect_blocks(self):
"""
Collect the blocks in a list
"""
if self.mode == 'spark':
return self.values.tordd().sortByKey().values().collect()
if self.mode == 'local':
return self.values.values.flatten().tolist() | python | def collect_blocks(self):
"""
Collect the blocks in a list
"""
if self.mode == 'spark':
return self.values.tordd().sortByKey().values().collect()
if self.mode == 'local':
return self.values.values.flatten().tolist() | [
"def",
"collect_blocks",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"values",
"(",
")",
".",
"collect",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"self",
".",
"values",
".",
"values",
".",
"flatten",
"(",
")",
".",
"tolist",
"(",
")"
] | Collect the blocks in a list | [
"Collect",
"the",
"blocks",
"in",
"a",
"list"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L42-L50 | train |
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.map | def map(self, func, value_shape=None, dtype=None):
"""
Apply an array -> array function to each block
"""
mapped = self.values.map(func, value_shape=value_shape, dtype=dtype)
return self._constructor(mapped).__finalize__(self, noprop=('dtype',)) | python | def map(self, func, value_shape=None, dtype=None):
"""
Apply an array -> array function to each block
"""
mapped = self.values.map(func, value_shape=value_shape, dtype=dtype)
return self._constructor(mapped).__finalize__(self, noprop=('dtype',)) | [
"def",
"map",
"(",
"self",
",",
"func",
",",
"value_shape",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"mapped",
"=",
"self",
".",
"values",
".",
"map",
"(",
"func",
",",
"value_shape",
"=",
"value_shape",
",",
"dtype",
"=",
"dtype",
")",
"return",
"self",
".",
"_constructor",
"(",
"mapped",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'dtype'",
",",
")",
")"
] | Apply an array -> array function to each block | [
"Apply",
"an",
"array",
"-",
">",
"array",
"function",
"to",
"each",
"block"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L52-L57 | train |
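A sketch of Blocks.map on invented data; because the block shape is unchanged, value_shape is left to be inferred.

```python
import numpy as np
from thunder.images.readers import fromarray

blocks = fromarray(np.random.randn(10, 32, 32)).toblocks(chunk_size='auto')

scaled = blocks.map(lambda b: b * 2.0)   # array -> array, applied per block
print(scaled.toimages().shape)           # (10, 32, 32)
```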
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.toimages | def toimages(self):
"""
Convert blocks to images.
"""
from thunder.images.images import Images
if self.mode == 'spark':
values = self.values.values_to_keys((0,)).unchunk()
if self.mode == 'local':
values = self.values.unchunk()
return Images(values) | python | def toimages(self):
"""
Convert blocks to images.
"""
from thunder.images.images import Images
if self.mode == 'spark':
values = self.values.values_to_keys((0,)).unchunk()
if self.mode == 'local':
values = self.values.unchunk()
return Images(values) | [
"def",
"toimages",
"(",
"self",
")",
":",
"from",
"thunder",
".",
"images",
".",
"images",
"import",
"Images",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"values",
"=",
"self",
".",
"values",
".",
"values_to_keys",
"(",
"(",
"0",
",",
")",
")",
".",
"unchunk",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"values",
"=",
"self",
".",
"values",
".",
"unchunk",
"(",
")",
"return",
"Images",
"(",
"values",
")"
] | Convert blocks to images. | [
"Convert",
"blocks",
"to",
"images",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L75-L87 | train |
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.toseries | def toseries(self):
"""
Converts blocks to series.
"""
from thunder.series.series import Series
if self.mode == 'spark':
values = self.values.values_to_keys(tuple(range(1, len(self.shape)))).unchunk()
if self.mode == 'local':
values = self.values.unchunk()
values = rollaxis(values, 0, values.ndim)
return Series(values) | python | def toseries(self):
"""
Converts blocks to series.
"""
from thunder.series.series import Series
if self.mode == 'spark':
values = self.values.values_to_keys(tuple(range(1, len(self.shape)))).unchunk()
if self.mode == 'local':
values = self.values.unchunk()
values = rollaxis(values, 0, values.ndim)
return Series(values) | [
"def",
"toseries",
"(",
"self",
")",
":",
"from",
"thunder",
".",
"series",
".",
"series",
"import",
"Series",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"values",
"=",
"self",
".",
"values",
".",
"values_to_keys",
"(",
"tuple",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"shape",
")",
")",
")",
")",
".",
"unchunk",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"values",
"=",
"self",
".",
"values",
".",
"unchunk",
"(",
")",
"values",
"=",
"rollaxis",
"(",
"values",
",",
"0",
",",
"values",
".",
"ndim",
")",
"return",
"Series",
"(",
"values",
")"
] | Converts blocks to series. | [
"Converts",
"blocks",
"to",
"series",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L89-L102 | train |
thunder-project/thunder | thunder/blocks/blocks.py | Blocks.toarray | def toarray(self):
"""
Convert blocks to local ndarray
"""
if self.mode == 'spark':
return self.values.unchunk().toarray()
if self.mode == 'local':
return self.values.unchunk() | python | def toarray(self):
"""
Convert blocks to local ndarray
"""
if self.mode == 'spark':
return self.values.unchunk().toarray()
if self.mode == 'local':
return self.values.unchunk() | [
"def",
"toarray",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"self",
".",
"values",
".",
"unchunk",
"(",
")",
".",
"toarray",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"self",
".",
"values",
".",
"unchunk",
"(",
")"
] | Convert blocks to local ndarray | [
"Convert",
"blocks",
"to",
"local",
"ndarray"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L104-L112 | train |
thunder-project/thunder | thunder/series/series.py | Series.flatten | def flatten(self):
"""
Reshape all dimensions but the last into a single dimension
"""
size = prod(self.shape[:-1])
return self.reshape(size, self.shape[-1]) | python | def flatten(self):
"""
Reshape all dimensions but the last into a single dimension
"""
size = prod(self.shape[:-1])
return self.reshape(size, self.shape[-1]) | [
"def",
"flatten",
"(",
"self",
")",
":",
"size",
"=",
"prod",
"(",
"self",
".",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"return",
"self",
".",
"reshape",
"(",
"size",
",",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
")"
] | Reshape all dimensions but the last into a single dimension | [
"Reshape",
"all",
"dimensions",
"but",
"the",
"last",
"into",
"a",
"single",
"dimension"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L81-L86 | train |
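A sketch of flatten, assuming the series reader accepts a multidimensional local array (shape invented): the two leading key dimensions collapse into one.

```python
import numpy as np
from thunder.series.readers import fromarray

series = fromarray(np.random.randn(4, 5, 20))   # 4x5 grid of length-20 series

flat = series.flatten()
print(flat.shape)   # (20, 20): 4 * 5 records, each of length 20
```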
thunder-project/thunder | thunder/series/series.py | Series.tospark | def tospark(self, engine=None):
"""
Convert to spark mode.
"""
from thunder.series.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('series already in spark mode')
pass
if engine is None:
raise ValueError('Must provide SparkContext')
return fromarray(self.toarray(), index=self.index, labels=self.labels, engine=engine) | python | def tospark(self, engine=None):
"""
Convert to spark mode.
"""
from thunder.series.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('series already in spark mode')
pass
if engine is None:
raise ValueError('Must provide SparkContext')
return fromarray(self.toarray(), index=self.index, labels=self.labels, engine=engine) | [
"def",
"tospark",
"(",
"self",
",",
"engine",
"=",
"None",
")",
":",
"from",
"thunder",
".",
"series",
".",
"readers",
"import",
"fromarray",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"logging",
".",
"getLogger",
"(",
"'thunder'",
")",
".",
"warn",
"(",
"'images already in local mode'",
")",
"pass",
"if",
"engine",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Must provide SparkContext'",
")",
"return",
"fromarray",
"(",
"self",
".",
"toarray",
"(",
")",
",",
"index",
"=",
"self",
".",
"index",
",",
"labels",
"=",
"self",
".",
"labels",
",",
"engine",
"=",
"engine",
")"
] | Convert to spark mode. | [
"Convert",
"to",
"spark",
"mode",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L122-L135 | train |
thunder-project/thunder | thunder/series/series.py | Series.sample | def sample(self, n=100, seed=None):
"""
Extract random sample of records.
Parameters
----------
n : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if n < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % n)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, n, seed))
else:
basedims = [self.shape[d] for d in self.baseaxes]
inds = [unravel_index(int(k), basedims) for k in random.rand(n) * prod(basedims)]
result = asarray([self.values[tupleize(i) + (slice(None, None),)] for i in inds])
return self._constructor(result, index=self.index) | python | def sample(self, n=100, seed=None):
"""
Extract random sample of records.
Parameters
----------
n : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed.
"""
if n < 1:
raise ValueError("Number of samples must be larger than 0, got '%g'" % n)
if seed is None:
seed = random.randint(0, 2 ** 32)
if self.mode == 'spark':
result = asarray(self.values.tordd().values().takeSample(False, n, seed))
else:
basedims = [self.shape[d] for d in self.baseaxes]
inds = [unravel_index(int(k), basedims) for k in random.rand(n) * prod(basedims)]
result = asarray([self.values[tupleize(i) + (slice(None, None),)] for i in inds])
return self._constructor(result, index=self.index) | [
"def",
"sample",
"(",
"self",
",",
"n",
"=",
"100",
",",
"seed",
"=",
"None",
")",
":",
"if",
"n",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Number of samples must be larger than 0, got '%g'\"",
"%",
"n",
")",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"result",
"=",
"asarray",
"(",
"self",
".",
"values",
".",
"tordd",
"(",
")",
".",
"values",
"(",
")",
".",
"takeSample",
"(",
"False",
",",
"n",
",",
"seed",
")",
")",
"else",
":",
"basedims",
"=",
"[",
"self",
".",
"shape",
"[",
"d",
"]",
"for",
"d",
"in",
"self",
".",
"baseaxes",
"]",
"inds",
"=",
"[",
"unravel_index",
"(",
"int",
"(",
"k",
")",
",",
"basedims",
")",
"for",
"k",
"in",
"random",
".",
"rand",
"(",
"n",
")",
"*",
"prod",
"(",
"basedims",
")",
"]",
"result",
"=",
"asarray",
"(",
"[",
"self",
".",
"values",
"[",
"tupleize",
"(",
"i",
")",
"+",
"(",
"slice",
"(",
"None",
",",
"None",
")",
",",
")",
"]",
"for",
"i",
"in",
"inds",
"]",
")",
"return",
"self",
".",
"_constructor",
"(",
"result",
",",
"index",
"=",
"self",
".",
"index",
")"
] | Extract random sample of records.
Parameters
----------
n : int, optional, default = 100
The number of data points to sample.
seed : int, optional, default = None
Random seed. | [
"Extract",
"random",
"sample",
"of",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L137-L163 | train |
thunder-project/thunder | thunder/series/series.py | Series.map | def map(self, func, index=None, value_shape=None, dtype=None, with_keys=False):
"""
Map an array -> array function over each record.
Parameters
----------
func : function
A function of a single record.
index : array-like, optional, default = None
If known, the index to be used following function evaluation.
value_shape : int, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy.dtype, optional, default = None
If known, the type of the data following function evaluation.
with_keys : boolean, optional, default = False
If true, function should be of both tuple indices and series values.
"""
# if new index is given, can infer missing value_shape
if value_shape is None and index is not None:
value_shape = len(index)
if isinstance(value_shape, int):
value_shape = (value_shape, )
new = super(Series, self).map(func, value_shape=value_shape, dtype=dtype, with_keys=with_keys)
if index is not None:
new.index = index
# if series shape did not change and no index was supplied, propagate original index
else:
if len(new.index) == len(self.index):
new.index = self.index
return new | python | def map(self, func, index=None, value_shape=None, dtype=None, with_keys=False):
"""
Map an array -> array function over each record.
Parameters
----------
func : function
A function of a single record.
index : array-like, optional, default = None
If known, the index to be used following function evaluation.
value_shape : int, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy.dtype, optional, default = None
If known, the type of the data following function evaluation.
with_keys : boolean, optional, default = False
If true, function should be of both tuple indices and series values.
"""
# if new index is given, can infer missing value_shape
if value_shape is None and index is not None:
value_shape = len(index)
if isinstance(value_shape, int):
value_shape = (value_shape, )
new = super(Series, self).map(func, value_shape=value_shape, dtype=dtype, with_keys=with_keys)
if index is not None:
new.index = index
# if series shape did not change and no index was supplied, propagate original index
else:
if len(new.index) == len(self.index):
new.index = self.index
return new | [
"def",
"map",
"(",
"self",
",",
"func",
",",
"index",
"=",
"None",
",",
"value_shape",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"with_keys",
"=",
"False",
")",
":",
"# if new index is given, can infer missing value_shape",
"if",
"value_shape",
"is",
"None",
"and",
"index",
"is",
"not",
"None",
":",
"value_shape",
"=",
"len",
"(",
"index",
")",
"if",
"isinstance",
"(",
"value_shape",
",",
"int",
")",
":",
"values_shape",
"=",
"(",
"value_shape",
",",
")",
"new",
"=",
"super",
"(",
"Series",
",",
"self",
")",
".",
"map",
"(",
"func",
",",
"value_shape",
"=",
"value_shape",
",",
"dtype",
"=",
"dtype",
",",
"with_keys",
"=",
"with_keys",
")",
"if",
"index",
"is",
"not",
"None",
":",
"new",
".",
"index",
"=",
"index",
"# if series shape did not change and no index was supplied, propagate original index",
"else",
":",
"if",
"len",
"(",
"new",
".",
"index",
")",
"==",
"len",
"(",
"self",
".",
"index",
")",
":",
"new",
".",
"index",
"=",
"self",
".",
"index",
"return",
"new"
] | Map an array -> array function over each record.
Parameters
----------
func : function
A function of a single record.
index : array-like, optional, default = None
If known, the index to be used following function evaluation.
value_shape : int, optional, default=None
Known shape of values resulting from operation. Only
valid in spark mode.
dtype : numpy.dtype, optional, default = None
If known, the type of the data following function evaluation.
with_keys : boolean, optional, default = False
If true, function should be of both tuple indices and series values. | [
"Map",
"an",
"array",
"-",
">",
"array",
"function",
"over",
"each",
"record",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L165-L202 | train |
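A minimal local-mode sketch of Series.map, assuming fromarray (imported in the tospark record above) accepts a plain 2-D numpy array when no engine is given:
from numpy import arange
from thunder.series.readers import fromarray
data = fromarray(arange(12).reshape(3, 4))          # 3 records, each of length 4
halved = data.map(lambda v: v[:2], index=[0, 1])    # keep the first two values of every record
print(halved.shape)   # expected (3, 2)
print(halved.index)   # expected [0, 1]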
thunder-project/thunder | thunder/series/series.py | Series.mean | def mean(self):
"""
Compute the mean across records
"""
return self._constructor(self.values.mean(axis=self.baseaxes, keepdims=True)) | python | def mean(self):
"""
Compute the mean across records
"""
return self._constructor(self.values.mean(axis=self.baseaxes, keepdims=True)) | [
"def",
"mean",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"mean",
"(",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the mean across records | [
"Compute",
"the",
"mean",
"across",
"records"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L215-L219 | train |
thunder-project/thunder | thunder/series/series.py | Series.sum | def sum(self):
"""
Compute the sum across records.
"""
return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True)) | python | def sum(self):
"""
Compute the sum across records.
"""
return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True)) | [
"def",
"sum",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"sum",
"(",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the sum across records. | [
"Compute",
"the",
"sum",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L233-L237 | train |
thunder-project/thunder | thunder/series/series.py | Series.max | def max(self):
"""
Compute the max across records.
"""
return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True)) | python | def max(self):
"""
Compute the max across records.
"""
return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True)) | [
"def",
"max",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"max",
"(",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the max across records. | [
"Compute",
"the",
"max",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L239-L243 | train |
thunder-project/thunder | thunder/series/series.py | Series.min | def min(self):
"""
Compute the min across records.
"""
return self._constructor(self.values.min(axis=self.baseaxes, keepdims=True)) | python | def min(self):
"""
Compute the min across records.
"""
return self._constructor(self.values.min(axis=self.baseaxes, keepdims=True)) | [
"def",
"min",
"(",
"self",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"min",
"(",
"axis",
"=",
"self",
".",
"baseaxes",
",",
"keepdims",
"=",
"True",
")",
")"
] | Compute the min across records. | [
"Compute",
"the",
"min",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L245-L249 | train |
thunder-project/thunder | thunder/series/series.py | Series.reshape | def reshape(self, *shape):
"""
Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape
"""
if prod(self.shape) != prod(shape):
raise ValueError("Reshaping must leave the number of elements unchanged")
if self.shape[-1] != shape[-1]:
raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")
if self.labels is not None:
newlabels = self.labels.reshape(*shape[:-1])
else:
newlabels = None
return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',)) | python | def reshape(self, *shape):
"""
Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape
"""
if prod(self.shape) != prod(shape):
raise ValueError("Reshaping must leave the number of elements unchanged")
if self.shape[-1] != shape[-1]:
raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")
if self.labels is not None:
newlabels = self.labels.reshape(*shape[:-1])
else:
newlabels = None
return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',)) | [
"def",
"reshape",
"(",
"self",
",",
"*",
"shape",
")",
":",
"if",
"prod",
"(",
"self",
".",
"shape",
")",
"!=",
"prod",
"(",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"Reshaping must leave the number of elements unchanged\"",
")",
"if",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Reshaping cannot change the size of the constituent series (last dimension)\"",
")",
"if",
"self",
".",
"labels",
"is",
"not",
"None",
":",
"newlabels",
"=",
"self",
".",
"labels",
".",
"reshape",
"(",
"*",
"shape",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"newlabels",
"=",
"None",
"return",
"self",
".",
"_constructor",
"(",
"self",
".",
"values",
".",
"reshape",
"(",
"shape",
")",
",",
"labels",
"=",
"newlabels",
")",
".",
"__finalize__",
"(",
"self",
",",
"noprop",
"=",
"(",
"'labels'",
",",
")",
")"
] | Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape | [
"Reshape",
"the",
"Series",
"object"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L251-L273 | train |
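A short sketch of the reshape constraint, under the assumption that fromarray also accepts an n-dimensional array whose last axis is the series axis:
from numpy import arange
from thunder.series.readers import fromarray
data = fromarray(arange(24.).reshape(2, 3, 4))   # base shape (2, 3), records of length 4
print(data.reshape(6, 4).shape)                  # expected (6, 4); the last dimension must stay 4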
thunder-project/thunder | thunder/series/series.py | Series.between | def between(self, left, right):
"""
Select subset of values within the given index range.
Inclusive on the left; exclusive on the right.
Parameters
----------
left : int
Left-most index in the desired range.
right: int
Right-most index in the desired range.
"""
crit = lambda x: left <= x < right
return self.select(crit) | python | def between(self, left, right):
"""
Select subset of values within the given index range.
Inclusive on the left; exclusive on the right.
Parameters
----------
left : int
Left-most index in the desired range.
right: int
Right-most index in the desired range.
"""
crit = lambda x: left <= x < right
return self.select(crit) | [
"def",
"between",
"(",
"self",
",",
"left",
",",
"right",
")",
":",
"crit",
"=",
"lambda",
"x",
":",
"left",
"<=",
"x",
"<",
"right",
"return",
"self",
".",
"select",
"(",
"crit",
")"
] | Select subset of values within the given index range.
Inclusive on the left; exclusive on the right.
Parameters
----------
left : int
Left-most index in the desired range.
right: int
Right-most index in the desired range. | [
"Select",
"subset",
"of",
"values",
"within",
"the",
"given",
"index",
"range",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L275-L290 | train |
thunder-project/thunder | thunder/series/series.py | Series.select | def select(self, crit):
"""
Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices.
"""
import types
# handle lists, strings, and ints
if not isinstance(crit, types.FunctionType):
# set("foo") -> {"f", "o"}; wrap in list to prevent:
if isinstance(crit, string_types):
critlist = set([crit])
else:
try:
critlist = set(crit)
except TypeError:
# typically means crit is not an iterable type; for instance, crit is an int
critlist = set([crit])
crit = lambda x: x in critlist
# if only one index, return it directly or throw an error
index = self.index
if size(index) == 1:
if crit(index[0]):
return self
else:
raise Exception('No indices found matching criterion')
# determine new index and check the result
newindex = [i for i in index if crit(i)]
if len(newindex) == 0:
raise Exception('No indices found matching criterion')
if array(newindex == index).all():
return self
# use fast logical indexing to get the new values
subinds = where([crit(i) for i in index])
new = self.map(lambda x: x[subinds], index=newindex)
# if singleton, need to check whether it's an array or a scalar/int
# if array, recompute a new set of indices
if len(newindex) == 1:
new = new.map(lambda x: x[0], index=newindex)
val = new.first()
if size(val) == 1:
newindex = [newindex[0]]
else:
newindex = arange(0, size(val))
new._index = newindex
return new | python | def select(self, crit):
"""
Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices.
"""
import types
# handle lists, strings, and ints
if not isinstance(crit, types.FunctionType):
# set("foo") -> {"f", "o"}; wrap in list to prevent:
if isinstance(crit, string_types):
critlist = set([crit])
else:
try:
critlist = set(crit)
except TypeError:
# typically means crit is not an iterable type; for instance, crit is an int
critlist = set([crit])
crit = lambda x: x in critlist
# if only one index, return it directly or throw an error
index = self.index
if size(index) == 1:
if crit(index[0]):
return self
else:
raise Exception('No indices found matching criterion')
# determine new index and check the result
newindex = [i for i in index if crit(i)]
if len(newindex) == 0:
raise Exception('No indices found matching criterion')
if array(newindex == index).all():
return self
# use fast logical indexing to get the new values
subinds = where([crit(i) for i in index])
new = self.map(lambda x: x[subinds], index=newindex)
# if singleton, need to check whether it's an array or a scalar/int
# if array, recompute a new set of indices
if len(newindex) == 1:
new = new.map(lambda x: x[0], index=newindex)
val = new.first()
if size(val) == 1:
newindex = [newindex[0]]
else:
newindex = arange(0, size(val))
new._index = newindex
return new | [
"def",
"select",
"(",
"self",
",",
"crit",
")",
":",
"import",
"types",
"# handle lists, strings, and ints",
"if",
"not",
"isinstance",
"(",
"crit",
",",
"types",
".",
"FunctionType",
")",
":",
"# set(\"foo\") -> {\"f\", \"o\"}; wrap in list to prevent:",
"if",
"isinstance",
"(",
"crit",
",",
"string_types",
")",
":",
"critlist",
"=",
"set",
"(",
"[",
"crit",
"]",
")",
"else",
":",
"try",
":",
"critlist",
"=",
"set",
"(",
"crit",
")",
"except",
"TypeError",
":",
"# typically means crit is not an iterable type; for instance, crit is an int",
"critlist",
"=",
"set",
"(",
"[",
"crit",
"]",
")",
"crit",
"=",
"lambda",
"x",
":",
"x",
"in",
"critlist",
"# if only one index, return it directly or throw an error",
"index",
"=",
"self",
".",
"index",
"if",
"size",
"(",
"index",
")",
"==",
"1",
":",
"if",
"crit",
"(",
"index",
"[",
"0",
"]",
")",
":",
"return",
"self",
"else",
":",
"raise",
"Exception",
"(",
"'No indices found matching criterion'",
")",
"# determine new index and check the result",
"newindex",
"=",
"[",
"i",
"for",
"i",
"in",
"index",
"if",
"crit",
"(",
"i",
")",
"]",
"if",
"len",
"(",
"newindex",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'No indices found matching criterion'",
")",
"if",
"array",
"(",
"newindex",
"==",
"index",
")",
".",
"all",
"(",
")",
":",
"return",
"self",
"# use fast logical indexing to get the new values",
"subinds",
"=",
"where",
"(",
"[",
"crit",
"(",
"i",
")",
"for",
"i",
"in",
"index",
"]",
")",
"new",
"=",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"subinds",
"]",
",",
"index",
"=",
"newindex",
")",
"# if singleton, need to check whether it's an array or a scalar/int",
"# if array, recompute a new set of indices",
"if",
"len",
"(",
"newindex",
")",
"==",
"1",
":",
"new",
"=",
"new",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
",",
"index",
"=",
"newindex",
")",
"val",
"=",
"new",
".",
"first",
"(",
")",
"if",
"size",
"(",
"val",
")",
"==",
"1",
":",
"newindex",
"=",
"[",
"newindex",
"[",
"0",
"]",
"]",
"else",
":",
"newindex",
"=",
"arange",
"(",
"0",
",",
"size",
"(",
"val",
")",
")",
"new",
".",
"_index",
"=",
"newindex",
"return",
"new"
] | Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices. | [
"Select",
"subset",
"of",
"values",
"that",
"match",
"a",
"given",
"index",
"criterion",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L292-L348 | train |
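A hedged local-mode sketch of select with a list of index values, assuming fromarray accepts the index keyword as in the tospark record:
from numpy import arange
from thunder.series.readers import fromarray
data = fromarray(arange(12).reshape(3, 4), index=[0, 1, 2, 3])
subset = data.select([1, 3])      # keep only the values whose index is 1 or 3
print(subset.index)               # expected [1, 3]
print(subset.shape)               # expected (3, 2)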
thunder-project/thunder | thunder/series/series.py | Series.center | def center(self, axis=1):
"""
Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records.
"""
if axis == 1:
return self.map(lambda x: x - mean(x))
elif axis == 0:
meanval = self.mean().toarray()
return self.map(lambda x: x - meanval)
else:
raise Exception('Axis must be 0 or 1') | python | def center(self, axis=1):
"""
Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records.
"""
if axis == 1:
return self.map(lambda x: x - mean(x))
elif axis == 0:
meanval = self.mean().toarray()
return self.map(lambda x: x - meanval)
else:
raise Exception('Axis must be 0 or 1') | [
"def",
"center",
"(",
"self",
",",
"axis",
"=",
"1",
")",
":",
"if",
"axis",
"==",
"1",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"-",
"mean",
"(",
"x",
")",
")",
"elif",
"axis",
"==",
"0",
":",
"meanval",
"=",
"self",
".",
"mean",
"(",
")",
".",
"toarray",
"(",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"-",
"meanval",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Axis must be 0 or 1'",
")"
] | Subtract the mean either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to center along, within (1) or across (0) records. | [
"Subtract",
"the",
"mean",
"either",
"within",
"or",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L350-L365 | train |
thunder-project/thunder | thunder/series/series.py | Series.standardize | def standardize(self, axis=1):
"""
Divide by standard deviation either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to standardize along, within (1) or across (0) records
"""
if axis == 1:
return self.map(lambda x: x / std(x))
elif axis == 0:
stdval = self.std().toarray()
return self.map(lambda x: x / stdval)
else:
raise Exception('Axis must be 0 or 1') | python | def standardize(self, axis=1):
"""
Divide by standard deviation either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to standardize along, within (1) or across (0) records
"""
if axis == 1:
return self.map(lambda x: x / std(x))
elif axis == 0:
stdval = self.std().toarray()
return self.map(lambda x: x / stdval)
else:
raise Exception('Axis must be 0 or 1') | [
"def",
"standardize",
"(",
"self",
",",
"axis",
"=",
"1",
")",
":",
"if",
"axis",
"==",
"1",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"/",
"std",
"(",
"x",
")",
")",
"elif",
"axis",
"==",
"0",
":",
"stdval",
"=",
"self",
".",
"std",
"(",
")",
".",
"toarray",
"(",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"/",
"stdval",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Axis must be 0 or 1'",
")"
] | Divide by standard deviation either within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to standardize along, within (1) or across (0) records | [
"Divide",
"by",
"standard",
"deviation",
"either",
"within",
"or",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L367-L382 | train |
thunder-project/thunder | thunder/series/series.py | Series.zscore | def zscore(self, axis=1):
"""
Subtract the mean and divide by standard deviation within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to zscore along, within (1) or across (0) records
"""
if axis == 1:
return self.map(lambda x: (x - mean(x)) / std(x))
elif axis == 0:
meanval = self.mean().toarray()
stdval = self.std().toarray()
return self.map(lambda x: (x - meanval) / stdval)
else:
raise Exception('Axis must be 0 or 1') | python | def zscore(self, axis=1):
"""
Subtract the mean and divide by standard deviation within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to zscore along, within (1) or across (0) records
"""
if axis == 1:
return self.map(lambda x: (x - mean(x)) / std(x))
elif axis == 0:
meanval = self.mean().toarray()
stdval = self.std().toarray()
return self.map(lambda x: (x - meanval) / stdval)
else:
raise Exception('Axis must be 0 or 1') | [
"def",
"zscore",
"(",
"self",
",",
"axis",
"=",
"1",
")",
":",
"if",
"axis",
"==",
"1",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"-",
"mean",
"(",
"x",
")",
")",
"/",
"std",
"(",
"x",
")",
")",
"elif",
"axis",
"==",
"0",
":",
"meanval",
"=",
"self",
".",
"mean",
"(",
")",
".",
"toarray",
"(",
")",
"stdval",
"=",
"self",
".",
"std",
"(",
")",
".",
"toarray",
"(",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"-",
"meanval",
")",
"/",
"stdval",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Axis must be 0 or 1'",
")"
] | Subtract the mean and divide by standard deviation within or across records.
Parameters
----------
axis : int, optional, default = 1
Which axis to zscore along, within (1) or across (0) records | [
"Subtract",
"the",
"mean",
"and",
"divide",
"by",
"standard",
"deviation",
"within",
"or",
"across",
"records",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L384-L400 | train |
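A pure-numpy sketch of what the axis argument controls, following the docstring (the Series method applies the same arithmetic to its own values):
from numpy import arange, mean, std
x = arange(12.).reshape(3, 4)                                                    # 3 records of length 4
within = (x - mean(x, axis=1, keepdims=True)) / std(x, axis=1, keepdims=True)   # axis=1: within each record
across = (x - mean(x, axis=0)) / std(x, axis=0)                                  # axis=0: across records
print(within.round(2))
print(across.round(2))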
thunder-project/thunder | thunder/series/series.py | Series.squelch | def squelch(self, threshold):
"""
Set all records that do not exceed the given threshold to 0.
Parameters
----------
threshold : scalar
Level below which to set records to zero
"""
func = lambda x: zeros(x.shape) if max(x) < threshold else x
return self.map(func) | python | def squelch(self, threshold):
"""
Set all records that do not exceed the given threshold to 0.
Parameters
----------
threshold : scalar
Level below which to set records to zero
"""
func = lambda x: zeros(x.shape) if max(x) < threshold else x
return self.map(func) | [
"def",
"squelch",
"(",
"self",
",",
"threshold",
")",
":",
"func",
"=",
"lambda",
"x",
":",
"zeros",
"(",
"x",
".",
"shape",
")",
"if",
"max",
"(",
"x",
")",
"<",
"threshold",
"else",
"x",
"return",
"self",
".",
"map",
"(",
"func",
")"
] | Set all records that do not exceed the given threshold to 0.
Parameters
----------
threshold : scalar
Level below which to set records to zero | [
"Set",
"all",
"records",
"that",
"do",
"not",
"exceed",
"the",
"given",
"threhsold",
"to",
"0",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L402-L412 | train |
thunder-project/thunder | thunder/series/series.py | Series.correlate | def correlate(self, signal):
"""
Correlate records against one or many one-dimensional arrays.
Parameters
----------
signal : array-like
One or more signals to correlate against.
"""
s = asarray(signal)
if s.ndim == 1:
if size(s) != self.shape[-1]:
raise ValueError("Length of signal '%g' does not match record length '%g'"
% (size(s), self.shape[-1]))
return self.map(lambda x: corrcoef(x, s)[0, 1], index=[1])
elif s.ndim == 2:
if s.shape[1] != self.shape[-1]:
raise ValueError("Length of signal '%g' does not match record length '%g'"
% (s.shape[1], self.shape[-1]))
newindex = arange(0, s.shape[0])
return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in s]), index=newindex)
else:
raise Exception('Signal to correlate with must have 1 or 2 dimensions') | python | def correlate(self, signal):
"""
Correlate records against one or many one-dimensional arrays.
Parameters
----------
signal : array-like
One or more signals to correlate against.
"""
s = asarray(signal)
if s.ndim == 1:
if size(s) != self.shape[-1]:
raise ValueError("Length of signal '%g' does not match record length '%g'"
% (size(s), self.shape[-1]))
return self.map(lambda x: corrcoef(x, s)[0, 1], index=[1])
elif s.ndim == 2:
if s.shape[1] != self.shape[-1]:
raise ValueError("Length of signal '%g' does not match record length '%g'"
% (s.shape[1], self.shape[-1]))
newindex = arange(0, s.shape[0])
return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in s]), index=newindex)
else:
raise Exception('Signal to correlate with must have 1 or 2 dimensions') | [
"def",
"correlate",
"(",
"self",
",",
"signal",
")",
":",
"s",
"=",
"asarray",
"(",
"signal",
")",
"if",
"s",
".",
"ndim",
"==",
"1",
":",
"if",
"size",
"(",
"s",
")",
"!=",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Length of signal '%g' does not match record length '%g'\"",
"%",
"(",
"size",
"(",
"s",
")",
",",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"corrcoef",
"(",
"x",
",",
"s",
")",
"[",
"0",
",",
"1",
"]",
",",
"index",
"=",
"[",
"1",
"]",
")",
"elif",
"s",
".",
"ndim",
"==",
"2",
":",
"if",
"s",
".",
"shape",
"[",
"1",
"]",
"!=",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"Length of signal '%g' does not match record length '%g'\"",
"%",
"(",
"s",
".",
"shape",
"[",
"1",
"]",
",",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"newindex",
"=",
"arange",
"(",
"0",
",",
"s",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"array",
"(",
"[",
"corrcoef",
"(",
"x",
",",
"y",
")",
"[",
"0",
",",
"1",
"]",
"for",
"y",
"in",
"s",
"]",
")",
",",
"index",
"=",
"newindex",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Signal to correlate with must have 1 or 2 dimensions'",
")"
] | Correlate records against one or many one-dimensional arrays.
Parameters
----------
signal : array-like
One or more signals to correlate against. | [
"Correlate",
"records",
"against",
"one",
"or",
"many",
"one",
"-",
"dimensional",
"arrays",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L414-L440 | train |
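A hedged local-mode sketch of correlate with a single one-dimensional signal (fromarray usage assumed as in the earlier sketches):
from numpy import arange, linspace
from thunder.series.readers import fromarray
data = fromarray(arange(12.).reshape(3, 4))   # 3 records, each a straight line
signal = linspace(0, 1, 4)                    # one signal of the same length as a record
r = data.correlate(signal)
print(r.toarray())   # roughly one coefficient per record, all 1.0 here since every record is linear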
thunder-project/thunder | thunder/series/series.py | Series._check_panel | def _check_panel(self, length):
"""
Check that given fixed panel length evenly divides index.
Parameters
----------
length : int
Fixed length with which to subdivide index
"""
n = len(self.index)
if divmod(n, length)[1] != 0:
raise ValueError("Panel length '%g' must evenly divide length of series '%g'"
% (length, n))
if n == length:
raise ValueError("Panel length '%g' cannot be length of series '%g'"
% (length, n)) | python | def _check_panel(self, length):
"""
Check that given fixed panel length evenly divides index.
Parameters
----------
length : int
Fixed length with which to subdivide index
"""
n = len(self.index)
if divmod(n, length)[1] != 0:
raise ValueError("Panel length '%g' must evenly divide length of series '%g'"
% (length, n))
if n == length:
raise ValueError("Panel length '%g' cannot be length of series '%g'"
% (length, n)) | [
"def",
"_check_panel",
"(",
"self",
",",
"length",
")",
":",
"n",
"=",
"len",
"(",
"self",
".",
"index",
")",
"if",
"divmod",
"(",
"n",
",",
"length",
")",
"[",
"1",
"]",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Panel length '%g' must evenly divide length of series '%g'\"",
"%",
"(",
"length",
",",
"n",
")",
")",
"if",
"n",
"==",
"length",
":",
"raise",
"ValueError",
"(",
"\"Panel length '%g' cannot be length of series '%g'\"",
"%",
"(",
"length",
",",
"n",
")",
")"
] | Check that given fixed panel length evenly divides index.
Parameters
----------
length : int
Fixed length with which to subdivide index | [
"Check",
"that",
"given",
"fixed",
"panel",
"length",
"evenly",
"divides",
"index",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L442-L457 | train |
thunder-project/thunder | thunder/series/series.py | Series.mean_by_panel | def mean_by_panel(self, length):
"""
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
"""
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex) | python | def mean_by_panel(self, length):
"""
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
"""
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex) | [
"def",
"mean_by_panel",
"(",
"self",
",",
"length",
")",
":",
"self",
".",
"_check_panel",
"(",
"length",
")",
"func",
"=",
"lambda",
"v",
":",
"v",
".",
"reshape",
"(",
"-",
"1",
",",
"length",
")",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"newindex",
"=",
"arange",
"(",
"length",
")",
"return",
"self",
".",
"map",
"(",
"func",
",",
"index",
"=",
"newindex",
")"
] | Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide. | [
"Compute",
"the",
"mean",
"across",
"fixed",
"sized",
"panels",
"of",
"each",
"record",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L459-L475 | train |
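A small sketch of the panel averaging described above, with one record of length 6 split into two panels of length 3 (fromarray usage assumed):
from numpy import arange
from thunder.series.readers import fromarray
data = fromarray(arange(6.).reshape(1, 6))
print(data.mean_by_panel(3).toarray())   # panels [0, 1, 2] and [3, 4, 5] averaged, roughly [1.5, 2.5, 3.5]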
thunder-project/thunder | thunder/series/series.py | Series._makemasks | def _makemasks(self, index=None, level=0):
"""
Internal function for generating masks for selecting values based on multi-index values.
As all other multi-index functions will call this function, basic type-checking is also
performed at this stage.
"""
if index is None:
index = self.index
try:
dims = len(array(index).shape)
if dims == 1:
index = array(index, ndmin=2).T
except:
raise TypeError('A multi-index must be convertible to a numpy ndarray')
try:
index = index[:, level]
except:
raise ValueError("Levels must be indices into individual elements of the index")
lenIdx = index.shape[0]
nlevels = index.shape[1]
combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])
combs = array([l for l in combs])
masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])
return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()]) | python | def _makemasks(self, index=None, level=0):
"""
Internal function for generating masks for selecting values based on multi-index values.
As all other multi-index functions will call this function, basic type-checking is also
performed at this stage.
"""
if index is None:
index = self.index
try:
dims = len(array(index).shape)
if dims == 1:
index = array(index, ndmin=2).T
except:
raise TypeError('A multi-index must be convertible to a numpy ndarray')
try:
index = index[:, level]
except:
raise ValueError("Levels must be indices into individual elements of the index")
lenIdx = index.shape[0]
nlevels = index.shape[1]
combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])
combs = array([l for l in combs])
masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])
return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()]) | [
"def",
"_makemasks",
"(",
"self",
",",
"index",
"=",
"None",
",",
"level",
"=",
"0",
")",
":",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"self",
".",
"index",
"try",
":",
"dims",
"=",
"len",
"(",
"array",
"(",
"index",
")",
".",
"shape",
")",
"if",
"dims",
"==",
"1",
":",
"index",
"=",
"array",
"(",
"index",
",",
"ndmin",
"=",
"2",
")",
".",
"T",
"except",
":",
"raise",
"TypeError",
"(",
"'A multi-index must be convertible to a numpy ndarray'",
")",
"try",
":",
"index",
"=",
"index",
"[",
":",
",",
"level",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"\"Levels must be indices into individual elements of the index\"",
")",
"lenIdx",
"=",
"index",
".",
"shape",
"[",
"0",
"]",
"nlevels",
"=",
"index",
".",
"shape",
"[",
"1",
"]",
"combs",
"=",
"product",
"(",
"*",
"[",
"unique",
"(",
"index",
".",
"T",
"[",
"i",
",",
":",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"nlevels",
")",
"]",
")",
"combs",
"=",
"array",
"(",
"[",
"l",
"for",
"l",
"in",
"combs",
"]",
")",
"masks",
"=",
"array",
"(",
"[",
"[",
"array_equal",
"(",
"index",
"[",
"i",
"]",
",",
"c",
")",
"for",
"i",
"in",
"range",
"(",
"lenIdx",
")",
"]",
"for",
"c",
"in",
"combs",
"]",
")",
"return",
"zip",
"(",
"*",
"[",
"(",
"masks",
"[",
"x",
"]",
",",
"combs",
"[",
"x",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"masks",
")",
")",
"if",
"masks",
"[",
"x",
"]",
".",
"any",
"(",
")",
"]",
")"
] | Internal function for generating masks for selecting values based on multi-index values.
As all other multi-index functions will call this function, basic type-checking is also
performed at this stage. | [
"Internal",
"function",
"for",
"generating",
"masks",
"for",
"selecting",
"values",
"based",
"on",
"multi",
"-",
"index",
"values",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L477-L507 | train |
thunder-project/thunder | thunder/series/series.py | Series._map_by_index | def _map_by_index(self, function, level=0):
"""
An internal function for mapping a function to groups of values based on a multi-index
Elements of each record are grouped according to unique value combinations of the multi-
index across the given levels of the multi-index. Then the given function is applied
to each of these groups separately. If this function is many-to-one, the result
can be recast as a Series indexed by the unique index values used for grouping.
"""
if type(level) is int:
level = [level]
masks, ind = self._makemasks(index=self.index, level=level)
nMasks = len(masks)
newindex = array(ind)
if len(newindex[0]) == 1:
newindex = ravel(newindex)
return self.map(lambda v: asarray([array(function(v[masks[x]])) for x in range(nMasks)]),
index=newindex) | python | def _map_by_index(self, function, level=0):
"""
An internal function for mapping a function to groups of values based on a multi-index
Elements of each record are grouped according to unique value combinations of the multi-
index across the given levels of the multi-index. Then the given function is applied
to each of these groups separately. If this function is many-to-one, the result
can be recast as a Series indexed by the unique index values used for grouping.
"""
if type(level) is int:
level = [level]
masks, ind = self._makemasks(index=self.index, level=level)
nMasks = len(masks)
newindex = array(ind)
if len(newindex[0]) == 1:
newindex = ravel(newindex)
return self.map(lambda v: asarray([array(function(v[masks[x]])) for x in range(nMasks)]),
index=newindex) | [
"def",
"_map_by_index",
"(",
"self",
",",
"function",
",",
"level",
"=",
"0",
")",
":",
"if",
"type",
"(",
"level",
")",
"is",
"int",
":",
"level",
"=",
"[",
"level",
"]",
"masks",
",",
"ind",
"=",
"self",
".",
"_makemasks",
"(",
"index",
"=",
"self",
".",
"index",
",",
"level",
"=",
"level",
")",
"nMasks",
"=",
"len",
"(",
"masks",
")",
"newindex",
"=",
"array",
"(",
"ind",
")",
"if",
"len",
"(",
"newindex",
"[",
"0",
"]",
")",
"==",
"1",
":",
"newindex",
"=",
"ravel",
"(",
"newindex",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"asarray",
"(",
"[",
"array",
"(",
"function",
"(",
"v",
"[",
"masks",
"[",
"x",
"]",
"]",
")",
")",
"for",
"x",
"in",
"range",
"(",
"nMasks",
")",
"]",
")",
",",
"index",
"=",
"newindex",
")"
] | An internal function for mapping a function to groups of values based on a multi-index
Elements of each record are grouped according to unique value combinations of the multi-
index across the given levels of the multi-index. Then the given function is applied
to each of these groups separately. If this function is many-to-one, the result
can be recast as a Series indexed by the unique index values used for grouping. | [
"An",
"internal",
"function",
"for",
"maping",
"a",
"function",
"to",
"groups",
"of",
"values",
"based",
"on",
"a",
"multi",
"-",
"index"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L509-L528 | train |
thunder-project/thunder | thunder/series/series.py | Series.aggregate_by_index | def aggregate_by_index(self, function, level=0):
"""
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
"""
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index) | python | def aggregate_by_index(self, function, level=0):
"""
Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int.
"""
result = self._map_by_index(function, level=level)
return result.map(lambda v: array(v), index=result.index) | [
"def",
"aggregate_by_index",
"(",
"self",
",",
"function",
",",
"level",
"=",
"0",
")",
":",
"result",
"=",
"self",
".",
"_map_by_index",
"(",
"function",
",",
"level",
"=",
"level",
")",
"return",
"result",
".",
"map",
"(",
"lambda",
"v",
":",
"array",
"(",
"v",
")",
",",
"index",
"=",
"result",
".",
"index",
")"
] | Aggregate data in each record, grouping by index values.
For each unique value of the index, applies a function to the group
indexed by that value. Returns a Series indexed by those unique values.
For the result to be a valid Series object, the aggregating function should
return a simple numeric type. Also allows selection of levels within a
multi-index. See select_by_index for more info on indices and multi-indices.
Parameters
----------
function : function
Aggregating function to map to Series values. Should take a list or ndarray
as input and return a simple numeric value.
level : list of ints, optional, default=0
Specifies the levels of the multi-index to use when determining unique index values.
If only a single level is desired, can be an int. | [
"Aggregrate",
"data",
"in",
"each",
"record",
"grouping",
"by",
"index",
"values",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L628-L649 | train |
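A hedged sketch of grouping by repeated index values in local mode (fromarray index keyword assumed; the exact output shape may differ slightly):
from numpy import array
from thunder.series.readers import fromarray
data = fromarray(array([[1., 2., 3., 4.]]), index=[0, 0, 1, 1])
summed = data.aggregate_by_index(sum)     # sum each group of values sharing an index value
print(summed.index)                       # expected [0, 1]
print(summed.toarray())                   # roughly [3., 7.]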
thunder-project/thunder | thunder/series/series.py | Series.gramian | def gramian(self):
"""
Compute gramian of a distributed matrix.
The gramian is defined as the product of the matrix
with its transpose, i.e. A^T * A.
"""
if self.mode == 'spark':
rdd = self.values.tordd()
from pyspark.accumulators import AccumulatorParam
class MatrixAccumulator(AccumulatorParam):
def zero(self, value):
return zeros(shape(value))
def addInPlace(self, val1, val2):
val1 += val2
return val1
global mat
init = zeros((self.shape[1], self.shape[1]))
mat = rdd.context.accumulator(init, MatrixAccumulator())
def outer_sum(x):
global mat
mat += outer(x, x)
rdd.values().foreach(outer_sum)
return self._constructor(mat.value, index=self.index)
if self.mode == 'local':
return self._constructor(dot(self.values.T, self.values), index=self.index) | python | def gramian(self):
"""
Compute gramian of a distributed matrix.
The gramian is defined as the product of the matrix
with its transpose, i.e. A^T * A.
"""
if self.mode == 'spark':
rdd = self.values.tordd()
from pyspark.accumulators import AccumulatorParam
class MatrixAccumulator(AccumulatorParam):
def zero(self, value):
return zeros(shape(value))
def addInPlace(self, val1, val2):
val1 += val2
return val1
global mat
init = zeros((self.shape[1], self.shape[1]))
mat = rdd.context.accumulator(init, MatrixAccumulator())
def outer_sum(x):
global mat
mat += outer(x, x)
rdd.values().foreach(outer_sum)
return self._constructor(mat.value, index=self.index)
if self.mode == 'local':
return self._constructor(dot(self.values.T, self.values), index=self.index) | [
"def",
"gramian",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"rdd",
"=",
"self",
".",
"values",
".",
"tordd",
"(",
")",
"from",
"pyspark",
".",
"accumulators",
"import",
"AccumulatorParam",
"class",
"MatrixAccumulator",
"(",
"AccumulatorParam",
")",
":",
"def",
"zero",
"(",
"self",
",",
"value",
")",
":",
"return",
"zeros",
"(",
"shape",
"(",
"value",
")",
")",
"def",
"addInPlace",
"(",
"self",
",",
"val1",
",",
"val2",
")",
":",
"val1",
"+=",
"val2",
"return",
"val1",
"global",
"mat",
"init",
"=",
"zeros",
"(",
"(",
"self",
".",
"shape",
"[",
"1",
"]",
",",
"self",
".",
"shape",
"[",
"1",
"]",
")",
")",
"mat",
"=",
"rdd",
".",
"context",
".",
"accumulator",
"(",
"init",
",",
"MatrixAccumulator",
"(",
")",
")",
"def",
"outer_sum",
"(",
"x",
")",
":",
"global",
"mat",
"mat",
"+=",
"outer",
"(",
"x",
",",
"x",
")",
"rdd",
".",
"values",
"(",
")",
".",
"foreach",
"(",
"outer_sum",
")",
"return",
"self",
".",
"_constructor",
"(",
"mat",
".",
"value",
",",
"index",
"=",
"self",
".",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"self",
".",
"_constructor",
"(",
"dot",
"(",
"self",
".",
"values",
".",
"T",
",",
"self",
".",
"values",
")",
",",
"index",
"=",
"self",
".",
"index",
")"
] | Compute gramian of a distributed matrix.
The gramian is defined as the product of the matrix
with its transpose, i.e. A^T * A. | [
"Compute",
"gramian",
"of",
"a",
"distributed",
"matrix",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L731-L763 | train |
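A quick numpy check of the A^T * A definition in local mode (fromarray usage assumed as above):
from numpy import arange, dot, allclose
from thunder.series.readers import fromarray
a = arange(12.).reshape(3, 4)
g = fromarray(a).gramian()                  # local mode reduces to dot(a.T, a)
print(allclose(g.toarray(), dot(a.T, a)))   # expected True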
thunder-project/thunder | thunder/series/series.py | Series.times | def times(self, other):
"""
Multiply a matrix by another one.
Other matrix must be a numpy array, a scalar,
or another matrix in local mode.
Parameters
----------
other : Matrix, scalar, or numpy array
A matrix to multiply with
"""
if isinstance(other, ScalarType):
other = asarray(other)
index = self.index
else:
if isinstance(other, list):
other = asarray(other)
if isinstance(other, ndarray) and other.ndim < 2:
other = expand_dims(other, 1)
if not self.shape[1] == other.shape[0]:
raise ValueError('shapes %s and %s are not aligned' % (self.shape, other.shape))
index = arange(other.shape[1])
if self.mode == 'local' and isinstance(other, Series) and other.mode == 'spark':
raise NotImplementedError
if self.mode == 'spark' and isinstance(other, Series) and other.mode == 'spark':
raise NotImplementedError
if self.mode == 'local' and isinstance(other, (ndarray, ScalarType)):
return self._constructor(dot(self.values, other), index=index)
if self.mode == 'local' and isinstance(other, Series):
return self._constructor(dot(self.values, other.values), index=index)
if self.mode == 'spark' and isinstance(other, (ndarray, ScalarType)):
return self.map(lambda x: dot(x, other), index=index)
if self.mode == 'spark' and isinstance(other, Series):
return self.map(lambda x: dot(x, other.values), index=index) | python | def times(self, other):
"""
Multiply a matrix by another one.
Other matrix must be a numpy array, a scalar,
or another matrix in local mode.
Parameters
----------
other : Matrix, scalar, or numpy array
A matrix to multiply with
"""
if isinstance(other, ScalarType):
other = asarray(other)
index = self.index
else:
if isinstance(other, list):
other = asarray(other)
if isinstance(other, ndarray) and other.ndim < 2:
other = expand_dims(other, 1)
if not self.shape[1] == other.shape[0]:
raise ValueError('shapes %s and %s are not aligned' % (self.shape, other.shape))
index = arange(other.shape[1])
if self.mode == 'local' and isinstance(other, Series) and other.mode == 'spark':
raise NotImplementedError
if self.mode == 'spark' and isinstance(other, Series) and other.mode == 'spark':
raise NotImplementedError
if self.mode == 'local' and isinstance(other, (ndarray, ScalarType)):
return self._constructor(dot(self.values, other), index=index)
if self.mode == 'local' and isinstance(other, Series):
return self._constructor(dot(self.values, other.values), index=index)
if self.mode == 'spark' and isinstance(other, (ndarray, ScalarType)):
return self.map(lambda x: dot(x, other), index=index)
if self.mode == 'spark' and isinstance(other, Series):
return self.map(lambda x: dot(x, other.values), index=index) | [
"def",
"times",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"ScalarType",
")",
":",
"other",
"=",
"asarray",
"(",
"other",
")",
"index",
"=",
"self",
".",
"index",
"else",
":",
"if",
"isinstance",
"(",
"other",
",",
"list",
")",
":",
"other",
"=",
"asarray",
"(",
"other",
")",
"if",
"isinstance",
"(",
"other",
",",
"ndarray",
")",
"and",
"other",
".",
"ndim",
"<",
"2",
":",
"other",
"=",
"expand_dims",
"(",
"other",
",",
"1",
")",
"if",
"not",
"self",
".",
"shape",
"[",
"1",
"]",
"==",
"other",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'shapes %s and %s are not aligned'",
"%",
"(",
"self",
".",
"shape",
",",
"other",
".",
"shape",
")",
")",
"index",
"=",
"arange",
"(",
"other",
".",
"shape",
"[",
"1",
"]",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"Series",
")",
"and",
"other",
".",
"mode",
"==",
"'spark'",
":",
"raise",
"NotImplementedError",
"if",
"self",
".",
"mode",
"==",
"'spark'",
"and",
"isinstance",
"(",
"other",
",",
"Series",
")",
"and",
"other",
".",
"mode",
"==",
"'spark'",
":",
"raise",
"NotImplementedError",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"(",
"ndarray",
",",
"ScalarType",
")",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"dot",
"(",
"self",
".",
"values",
",",
"other",
")",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
"and",
"isinstance",
"(",
"other",
",",
"Series",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"dot",
"(",
"self",
".",
"values",
",",
"other",
".",
"values",
")",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
"and",
"isinstance",
"(",
"other",
",",
"(",
"ndarray",
",",
"ScalarType",
")",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"dot",
"(",
"x",
",",
"other",
")",
",",
"index",
"=",
"index",
")",
"if",
"self",
".",
"mode",
"==",
"'spark'",
"and",
"isinstance",
"(",
"other",
",",
"Series",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"dot",
"(",
"x",
",",
"other",
".",
"values",
")",
",",
"index",
"=",
"index",
")"
] | Multiply a matrix by another one.
Other matrix must be a numpy array, a scalar,
or another matrix in local mode.
Parameters
----------
other : Matrix, scalar, or numpy array
A matrix to multiply with | [
"Multiply",
"a",
"matrix",
"by",
"another",
"one",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L765-L805 | train |
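A hedged local-mode sketch of times with a numpy matrix on the right; shapes must align as the code checks:
from numpy import arange, ones, dot, allclose
from thunder.series.readers import fromarray
a = arange(6.).reshape(2, 3)
b = ones((3, 2))
p = fromarray(a).times(b)                   # local mode reduces to dot(a, b)
print(allclose(p.toarray(), dot(a, b)))     # expected True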
thunder-project/thunder | thunder/series/series.py | Series._makewindows | def _makewindows(self, indices, window):
"""
Make masks used by windowing functions
Given a list of indices specifying window centers,
and a window size, construct a list of index arrays,
one per window, that index into the target array
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
div = divmod(window, 2)
before = div[0]
after = div[0] + div[1]
index = asarray(self.index)
indices = asarray(indices)
if where(index == max(indices))[0][0] + after > len(index):
raise ValueError("Maximum requested index %g, with window %g, exceeds length %g"
% (max(indices), window, len(index)))
if where(index == min(indices))[0][0] - before < 0:
raise ValueError("Minimum requested index %g, with window %g, is less than 0"
% (min(indices), window))
masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices]
return masks | python | def _makewindows(self, indices, window):
"""
Make masks used by windowing functions
Given a list of indices specifying window centers,
and a window size, construct a list of index arrays,
one per window, that index into the target array
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
div = divmod(window, 2)
before = div[0]
after = div[0] + div[1]
index = asarray(self.index)
indices = asarray(indices)
if where(index == max(indices))[0][0] + after > len(index):
raise ValueError("Maximum requested index %g, with window %g, exceeds length %g"
% (max(indices), window, len(index)))
if where(index == min(indices))[0][0] - before < 0:
raise ValueError("Minimum requested index %g, with window %g, is less than 0"
% (min(indices), window))
masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices]
return masks | [
"def",
"_makewindows",
"(",
"self",
",",
"indices",
",",
"window",
")",
":",
"div",
"=",
"divmod",
"(",
"window",
",",
"2",
")",
"before",
"=",
"div",
"[",
"0",
"]",
"after",
"=",
"div",
"[",
"0",
"]",
"+",
"div",
"[",
"1",
"]",
"index",
"=",
"asarray",
"(",
"self",
".",
"index",
")",
"indices",
"=",
"asarray",
"(",
"indices",
")",
"if",
"where",
"(",
"index",
"==",
"max",
"(",
"indices",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"+",
"after",
">",
"len",
"(",
"index",
")",
":",
"raise",
"ValueError",
"(",
"\"Maximum requested index %g, with window %g, exceeds length %g\"",
"%",
"(",
"max",
"(",
"indices",
")",
",",
"window",
",",
"len",
"(",
"index",
")",
")",
")",
"if",
"where",
"(",
"index",
"==",
"min",
"(",
"indices",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"-",
"before",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Minimum requested index %g, with window %g, is less than 0\"",
"%",
"(",
"min",
"(",
"indices",
")",
",",
"window",
")",
")",
"masks",
"=",
"[",
"arange",
"(",
"where",
"(",
"index",
"==",
"i",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"-",
"before",
",",
"where",
"(",
"index",
"==",
"i",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"+",
"after",
",",
"dtype",
"=",
"'int'",
")",
"for",
"i",
"in",
"indices",
"]",
"return",
"masks"
] | Make masks used by windowing functions
Given a list of indices specifying window centers,
and a window size, construct a list of index arrays,
one per window, that index into the target array
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size | [
"Make",
"masks",
"used",
"by",
"windowing",
"functions"
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L807-L835 | train |
thunder-project/thunder | thunder/series/series.py | Series.mean_by_window | def mean_by_window(self, indices, window):
"""
Average series across multiple windows specified by their centers.
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
masks = self._makewindows(indices, window)
newindex = arange(0, len(masks[0]))
return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex) | python | def mean_by_window(self, indices, window):
"""
Average series across multiple windows specified by their centers.
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size
"""
masks = self._makewindows(indices, window)
newindex = arange(0, len(masks[0]))
return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex) | [
"def",
"mean_by_window",
"(",
"self",
",",
"indices",
",",
"window",
")",
":",
"masks",
"=",
"self",
".",
"_makewindows",
"(",
"indices",
",",
"window",
")",
"newindex",
"=",
"arange",
"(",
"0",
",",
"len",
"(",
"masks",
"[",
"0",
"]",
")",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"mean",
"(",
"[",
"x",
"[",
"m",
"]",
"for",
"m",
"in",
"masks",
"]",
",",
"axis",
"=",
"0",
")",
",",
"index",
"=",
"newindex",
")"
] | Average series across multiple windows specified by their centers.
Parameters
----------
indices : array-like
List of times specifying window centers
window : int
Window size | [
"Average",
"series",
"across",
"multiple",
"windows",
"specified",
"by",
"their",
"centers",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L837-L851 | train |
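A small sketch of windowed averaging with two window centers (fromarray usage assumed as above):
from numpy import arange
from thunder.series.readers import fromarray
data = fromarray(arange(10.).reshape(1, 10))       # one record, index 0..9
m = data.mean_by_window(indices=[3, 7], window=2)  # windows [2, 3] and [6, 7] are averaged elementwise
print(m.toarray())                                 # roughly [4., 5.]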
thunder-project/thunder | thunder/series/series.py | Series.subsample | def subsample(self, sample_factor=2):
"""
Subsample series by an integer factor.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
"""
if sample_factor < 0:
raise Exception('Factor for subsampling must be positive, got %g' % sample_factor)
s = slice(0, len(self.index), sample_factor)
newindex = self.index[s]
return self.map(lambda v: v[s], index=newindex) | python | def subsample(self, sample_factor=2):
"""
Subsample series by an integer factor.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
"""
if sample_factor < 0:
raise Exception('Factor for subsampling must be positive, got %g' % sample_factor)
s = slice(0, len(self.index), sample_factor)
newindex = self.index[s]
return self.map(lambda v: v[s], index=newindex) | [
"def",
"subsample",
"(",
"self",
",",
"sample_factor",
"=",
"2",
")",
":",
"if",
"sample_factor",
"<",
"0",
":",
"raise",
"Exception",
"(",
"'Factor for subsampling must be postive, got %g'",
"%",
"sample_factor",
")",
"s",
"=",
"slice",
"(",
"0",
",",
"len",
"(",
"self",
".",
"index",
")",
",",
"sample_factor",
")",
"newindex",
"=",
"self",
".",
"index",
"[",
"s",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"v",
":",
"v",
"[",
"s",
"]",
",",
"index",
"=",
"newindex",
")"
] | Subsample series by an integer factor.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling. | [
"Subsample",
"series",
"by",
"an",
"integer",
"factor",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L853-L866 | train |
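A minimal numpy sketch of the slicing performed per record by `subsample` above, on a toy trace.

import numpy as np

trace = np.arange(10, dtype=float)        # toy record
k = 2
s = slice(0, len(trace), k)               # keep every k-th sample, as above
print(trace[s])                           # [0. 2. 4. 6. 8.]
print(np.arange(len(trace))[s])           # the subsampled index the method attaches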
thunder-project/thunder | thunder/series/series.py | Series.downsample | def downsample(self, sample_factor=2):
"""
Downsample series by an integer factor by averaging.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
"""
if sample_factor < 0:
raise Exception('Factor for subsampling must be positive, got %g' % sample_factor)
newlength = floor(len(self.index) / sample_factor)
func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1)
newindex = arange(newlength)
return self.map(func, index=newindex) | python | def downsample(self, sample_factor=2):
"""
Downsample series by an integer factor by averaging.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling.
"""
if sample_factor < 0:
raise Exception('Factor for subsampling must be positive, got %g' % sample_factor)
newlength = floor(len(self.index) / sample_factor)
func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1)
newindex = arange(newlength)
return self.map(func, index=newindex) | [
"def",
"downsample",
"(",
"self",
",",
"sample_factor",
"=",
"2",
")",
":",
"if",
"sample_factor",
"<",
"0",
":",
"raise",
"Exception",
"(",
"'Factor for subsampling must be postive, got %g'",
"%",
"sample_factor",
")",
"newlength",
"=",
"floor",
"(",
"len",
"(",
"self",
".",
"index",
")",
"/",
"sample_factor",
")",
"func",
"=",
"lambda",
"v",
":",
"v",
"[",
"0",
":",
"int",
"(",
"newlength",
"*",
"sample_factor",
")",
"]",
".",
"reshape",
"(",
"-",
"1",
",",
"sample_factor",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"newindex",
"=",
"arange",
"(",
"newlength",
")",
"return",
"self",
".",
"map",
"(",
"func",
",",
"index",
"=",
"newindex",
")"
] | Downsample series by an integer factor by averaging.
Parameters
----------
sample_factor : positive integer, optional, default=2
Factor for downsampling. | [
"Downsample",
"series",
"by",
"an",
"integer",
"factor",
"by",
"averaging",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L868-L882 | train |
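A minimal numpy sketch of the truncate-reshape-average step applied per record by `downsample` above, using a toy trace.

import numpy as np

trace = np.arange(10, dtype=float)                    # toy record
factor = 3
new_length = int(np.floor(len(trace) / factor))
# Truncate to a multiple of the factor, form (new_length, factor) blocks, average each.
down = trace[:new_length * factor].reshape(-1, factor).mean(axis=1)
print(down)                                           # [1. 4. 7.]; the trailing sample is dropped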
thunder-project/thunder | thunder/series/series.py | Series.fourier | def fourier(self, freq=None):
"""
Compute statistics of a Fourier decomposition on series data.
Parameters
----------
freq : int
Digital frequency at which to compute coherence and phase
"""
def get(y, freq):
y = y - mean(y)
nframes = len(y)
ft = fft.fft(y)
ft = ft[0:int(fix(nframes/2))]
ampFt = 2*abs(ft)/nframes
amp = ampFt[freq]
ampSum = sqrt(sum(ampFt**2))
co = amp / ampSum
ph = -(pi/2) - angle(ft[freq])
if ph < 0:
ph += pi * 2
return array([co, ph])
if freq >= int(fix(size(self.index)/2)):
raise Exception('Requested frequency, %g, is too high, '
'must be less than half the series duration' % freq)
index = ['coherence', 'phase']
return self.map(lambda x: get(x, freq), index=index) | python | def fourier(self, freq=None):
"""
Compute statistics of a Fourier decomposition on series data.
Parameters
----------
freq : int
Digital frequency at which to compute coherence and phase
"""
def get(y, freq):
y = y - mean(y)
nframes = len(y)
ft = fft.fft(y)
ft = ft[0:int(fix(nframes/2))]
ampFt = 2*abs(ft)/nframes
amp = ampFt[freq]
ampSum = sqrt(sum(ampFt**2))
co = amp / ampSum
ph = -(pi/2) - angle(ft[freq])
if ph < 0:
ph += pi * 2
return array([co, ph])
if freq >= int(fix(size(self.index)/2)):
raise Exception('Requested frequency, %g, is too high, '
'must be less than half the series duration' % freq)
index = ['coherence', 'phase']
return self.map(lambda x: get(x, freq), index=index) | [
"def",
"fourier",
"(",
"self",
",",
"freq",
"=",
"None",
")",
":",
"def",
"get",
"(",
"y",
",",
"freq",
")",
":",
"y",
"=",
"y",
"-",
"mean",
"(",
"y",
")",
"nframes",
"=",
"len",
"(",
"y",
")",
"ft",
"=",
"fft",
".",
"fft",
"(",
"y",
")",
"ft",
"=",
"ft",
"[",
"0",
":",
"int",
"(",
"fix",
"(",
"nframes",
"/",
"2",
")",
")",
"]",
"ampFt",
"=",
"2",
"*",
"abs",
"(",
"ft",
")",
"/",
"nframes",
"amp",
"=",
"ampFt",
"[",
"freq",
"]",
"ampSum",
"=",
"sqrt",
"(",
"sum",
"(",
"ampFt",
"**",
"2",
")",
")",
"co",
"=",
"amp",
"/",
"ampSum",
"ph",
"=",
"-",
"(",
"pi",
"/",
"2",
")",
"-",
"angle",
"(",
"ft",
"[",
"freq",
"]",
")",
"if",
"ph",
"<",
"0",
":",
"ph",
"+=",
"pi",
"*",
"2",
"return",
"array",
"(",
"[",
"co",
",",
"ph",
"]",
")",
"if",
"freq",
">=",
"int",
"(",
"fix",
"(",
"size",
"(",
"self",
".",
"index",
")",
"/",
"2",
")",
")",
":",
"raise",
"Exception",
"(",
"'Requested frequency, %g, is too high, '",
"'must be less than half the series duration'",
"%",
"freq",
")",
"index",
"=",
"[",
"'coherence'",
",",
"'phase'",
"]",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"get",
"(",
"x",
",",
"freq",
")",
",",
"index",
"=",
"index",
")"
] | Compute statistics of a Fourier decomposition on series data.
Parameters
----------
freq : int
Digital frequency at which to compute coherence and phase | [
"Compute",
"statistics",
"of",
"a",
"Fourier",
"decomposition",
"on",
"series",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L884-L912 | train |
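A minimal numpy sketch of the inner `get` function in `fourier` above, computing coherence and phase at one digital frequency. The test signal is a toy sine chosen so the expected output is easy to check.

import numpy as np

def fourier_stats(y, freq):
    y = y - np.mean(y)
    nframes = len(y)
    ft = np.fft.fft(y)[:int(np.fix(nframes / 2))]
    amp = 2 * np.abs(ft) / nframes
    co = amp[freq] / np.sqrt(np.sum(amp ** 2))        # coherence: share of total amplitude
    ph = -(np.pi / 2) - np.angle(ft[freq])            # phase relative to a sine
    if ph < 0:
        ph += 2 * np.pi
    return co, ph

t = np.arange(100)
y = np.sin(2 * np.pi * 5 * t / 100)                   # exactly 5 cycles in the record
print(fourier_stats(y, 5))                            # roughly (1.0, 0.0)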
thunder-project/thunder | thunder/series/series.py | Series.convolve | def convolve(self, signal, mode='full'):
"""
Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid'
"""
from numpy import convolve
s = asarray(signal)
n = size(self.index)
m = size(s)
# use expected lengths to make a new index
if mode == 'same':
newmax = max(n, m)
elif mode == 'valid':
newmax = max(m, n) - min(m, n) + 1
else:
newmax = n+m-1
newindex = arange(0, newmax)
return self.map(lambda x: convolve(x, signal, mode), index=newindex) | python | def convolve(self, signal, mode='full'):
"""
Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid'
"""
from numpy import convolve
s = asarray(signal)
n = size(self.index)
m = size(s)
# use expected lengths to make a new index
if mode == 'same':
newmax = max(n, m)
elif mode == 'valid':
newmax = max(m, n) - min(m, n) + 1
else:
newmax = n+m-1
newindex = arange(0, newmax)
return self.map(lambda x: convolve(x, signal, mode), index=newindex) | [
"def",
"convolve",
"(",
"self",
",",
"signal",
",",
"mode",
"=",
"'full'",
")",
":",
"from",
"numpy",
"import",
"convolve",
"s",
"=",
"asarray",
"(",
"signal",
")",
"n",
"=",
"size",
"(",
"self",
".",
"index",
")",
"m",
"=",
"size",
"(",
"s",
")",
"# use expected lengths to make a new index",
"if",
"mode",
"==",
"'same'",
":",
"newmax",
"=",
"max",
"(",
"n",
",",
"m",
")",
"elif",
"mode",
"==",
"'valid'",
":",
"newmax",
"=",
"max",
"(",
"m",
",",
"n",
")",
"-",
"min",
"(",
"m",
",",
"n",
")",
"+",
"1",
"else",
":",
"newmax",
"=",
"n",
"+",
"m",
"-",
"1",
"newindex",
"=",
"arange",
"(",
"0",
",",
"newmax",
")",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"convolve",
"(",
"x",
",",
"signal",
",",
"mode",
")",
",",
"index",
"=",
"newindex",
")"
] | Convolve series data against another signal.
Parameters
----------
signal : array
Signal to convolve with (must be 1D)
mode : str, optional, default='full'
Mode of convolution, options are 'full', 'same', and 'valid' | [
"Convolve",
"series",
"data",
"against",
"another",
"signal",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L914-L943 | train |
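A minimal numpy sketch of the output-length bookkeeping that `convolve` above uses to build the new index, checked against numpy.convolve for each mode on toy arrays.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])                    # toy record (n = 4)
kernel = np.array([0.25, 0.5, 0.25])                  # toy signal (m = 3)
n, m = len(x), len(kernel)

for mode, expected in [('full', n + m - 1),
                       ('same', max(n, m)),
                       ('valid', max(n, m) - min(n, m) + 1)]:
    out = np.convolve(x, kernel, mode)
    assert len(out) == expected                       # matches the new index computed above
    print(mode, len(out))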
thunder-project/thunder | thunder/series/series.py | Series.crosscorr | def crosscorr(self, signal, lag=0):
"""
Cross correlate series data against another signal.
Parameters
----------
signal : array
Signal to correlate against (must be 1D).
lag : int
Range of lags to consider, will cover (-lag, +lag).
"""
from scipy.linalg import norm
s = asarray(signal)
s = s - mean(s)
s = s / norm(s)
if size(s) != size(self.index):
raise Exception('Size of signal to cross correlate with, %g, '
'does not match size of series' % size(s))
# created a matrix with lagged signals
if lag is not 0:
shifts = range(-lag, lag+1)
d = len(s)
m = len(shifts)
sshifted = zeros((m, d))
for i in range(0, len(shifts)):
tmp = roll(s, shifts[i])
if shifts[i] < 0:
tmp[(d+shifts[i]):] = 0
if shifts[i] > 0:
tmp[:shifts[i]] = 0
sshifted[i, :] = tmp
s = sshifted
else:
shifts = [0]
def get(y, s):
y = y - mean(y)
n = norm(y)
if n == 0:
b = zeros((s.shape[0],))
else:
y /= n
b = dot(s, y)
return b
return self.map(lambda x: get(x, s), index=shifts) | python | def crosscorr(self, signal, lag=0):
"""
Cross correlate series data against another signal.
Parameters
----------
signal : array
Signal to correlate against (must be 1D).
lag : int
Range of lags to consider, will cover (-lag, +lag).
"""
from scipy.linalg import norm
s = asarray(signal)
s = s - mean(s)
s = s / norm(s)
if size(s) != size(self.index):
raise Exception('Size of signal to cross correlate with, %g, '
'does not match size of series' % size(s))
# created a matrix with lagged signals
if lag is not 0:
shifts = range(-lag, lag+1)
d = len(s)
m = len(shifts)
sshifted = zeros((m, d))
for i in range(0, len(shifts)):
tmp = roll(s, shifts[i])
if shifts[i] < 0:
tmp[(d+shifts[i]):] = 0
if shifts[i] > 0:
tmp[:shifts[i]] = 0
sshifted[i, :] = tmp
s = sshifted
else:
shifts = [0]
def get(y, s):
y = y - mean(y)
n = norm(y)
if n == 0:
b = zeros((s.shape[0],))
else:
y /= n
b = dot(s, y)
return b
return self.map(lambda x: get(x, s), index=shifts) | [
"def",
"crosscorr",
"(",
"self",
",",
"signal",
",",
"lag",
"=",
"0",
")",
":",
"from",
"scipy",
".",
"linalg",
"import",
"norm",
"s",
"=",
"asarray",
"(",
"signal",
")",
"s",
"=",
"s",
"-",
"mean",
"(",
"s",
")",
"s",
"=",
"s",
"/",
"norm",
"(",
"s",
")",
"if",
"size",
"(",
"s",
")",
"!=",
"size",
"(",
"self",
".",
"index",
")",
":",
"raise",
"Exception",
"(",
"'Size of signal to cross correlate with, %g, '",
"'does not match size of series'",
"%",
"size",
"(",
"s",
")",
")",
"# created a matrix with lagged signals",
"if",
"lag",
"is",
"not",
"0",
":",
"shifts",
"=",
"range",
"(",
"-",
"lag",
",",
"lag",
"+",
"1",
")",
"d",
"=",
"len",
"(",
"s",
")",
"m",
"=",
"len",
"(",
"shifts",
")",
"sshifted",
"=",
"zeros",
"(",
"(",
"m",
",",
"d",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"shifts",
")",
")",
":",
"tmp",
"=",
"roll",
"(",
"s",
",",
"shifts",
"[",
"i",
"]",
")",
"if",
"shifts",
"[",
"i",
"]",
"<",
"0",
":",
"tmp",
"[",
"(",
"d",
"+",
"shifts",
"[",
"i",
"]",
")",
":",
"]",
"=",
"0",
"if",
"shifts",
"[",
"i",
"]",
">",
"0",
":",
"tmp",
"[",
":",
"shifts",
"[",
"i",
"]",
"]",
"=",
"0",
"sshifted",
"[",
"i",
",",
":",
"]",
"=",
"tmp",
"s",
"=",
"sshifted",
"else",
":",
"shifts",
"=",
"[",
"0",
"]",
"def",
"get",
"(",
"y",
",",
"s",
")",
":",
"y",
"=",
"y",
"-",
"mean",
"(",
"y",
")",
"n",
"=",
"norm",
"(",
"y",
")",
"if",
"n",
"==",
"0",
":",
"b",
"=",
"zeros",
"(",
"(",
"s",
".",
"shape",
"[",
"0",
"]",
",",
")",
")",
"else",
":",
"y",
"/=",
"n",
"b",
"=",
"dot",
"(",
"s",
",",
"y",
")",
"return",
"b",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"get",
"(",
"x",
",",
"s",
")",
",",
"index",
"=",
"shifts",
")"
] | Cross correlate series data against another signal.
Parameters
----------
signal : array
Signal to correlate against (must be 1D).
lag : int
Range of lags to consider, will cover (-lag, +lag). | [
"Cross",
"correlate",
"series",
"data",
"against",
"another",
"signal",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L945-L994 | train |
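A minimal numpy sketch of the per-record logic in `crosscorr` above: lagged, zero-padded copies of the reference signal are dotted with the demeaned, unit-norm record. `crosscorr_one` is a hypothetical helper name and the sine inputs are toy data.

import numpy as np

def crosscorr_one(y, s, lag):
    # Normalize the reference signal, build its lagged copies with shifted-in
    # samples zeroed, then correlate the normalized record against each row.
    s = s - s.mean()
    s = s / np.linalg.norm(s)
    rows = []
    for k in range(-lag, lag + 1):
        tmp = np.roll(s, k)
        if k < 0:
            tmp[len(s) + k:] = 0
        if k > 0:
            tmp[:k] = 0
        rows.append(tmp)
    y = y - y.mean()
    n = np.linalg.norm(y)
    return np.zeros(len(rows)) if n == 0 else np.dot(np.array(rows), y / n)

t = np.linspace(0, 2 * np.pi, 50)
print(crosscorr_one(np.sin(t), np.sin(t), lag=2))     # peak at the middle entry (lag 0)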
thunder-project/thunder | thunder/series/series.py | Series.detrend | def detrend(self, method='linear', order=5):
"""
Detrend series data with linear or nonlinear detrending.
Preserve intercept so that subsequent operations can adjust the baseline.
Parameters
----------
method : str, optional, default = 'linear'
Detrending method
order : int, optional, default = 5
Order of polynomial, for non-linear detrending only
"""
check_options(method, ['linear', 'nonlinear'])
if method == 'linear':
order = 1
def func(y):
x = arange(len(y))
p = polyfit(x, y, order)
p[-1] = 0
yy = polyval(p, x)
return y - yy
return self.map(func) | python | def detrend(self, method='linear', order=5):
"""
Detrend series data with linear or nonlinear detrending.
Preserve intercept so that subsequent operations can adjust the baseline.
Parameters
----------
method : str, optional, default = 'linear'
Detrending method
order : int, optional, default = 5
Order of polynomial, for non-linear detrending only
"""
check_options(method, ['linear', 'nonlinear'])
if method == 'linear':
order = 1
def func(y):
x = arange(len(y))
p = polyfit(x, y, order)
p[-1] = 0
yy = polyval(p, x)
return y - yy
return self.map(func) | [
"def",
"detrend",
"(",
"self",
",",
"method",
"=",
"'linear'",
",",
"order",
"=",
"5",
")",
":",
"check_options",
"(",
"method",
",",
"[",
"'linear'",
",",
"'nonlinear'",
"]",
")",
"if",
"method",
"==",
"'linear'",
":",
"order",
"=",
"1",
"def",
"func",
"(",
"y",
")",
":",
"x",
"=",
"arange",
"(",
"len",
"(",
"y",
")",
")",
"p",
"=",
"polyfit",
"(",
"x",
",",
"y",
",",
"order",
")",
"p",
"[",
"-",
"1",
"]",
"=",
"0",
"yy",
"=",
"polyval",
"(",
"p",
",",
"x",
")",
"return",
"y",
"-",
"yy",
"return",
"self",
".",
"map",
"(",
"func",
")"
] | Detrend series data with linear or nonlinear detrending.
Preserve intercept so that subsequent operations can adjust the baseline.
Parameters
----------
method : str, optional, default = 'linear'
Detrending method
order : int, optional, default = 5
Order of polynomial, for non-linear detrending only | [
"Detrend",
"series",
"data",
"with",
"linear",
"or",
"nonlinear",
"detrending",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L996-L1022 | train |
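A minimal numpy sketch of the intercept-preserving detrend in `detrend` above, on a toy ramp: the constant term of the fit is zeroed before subtraction, so the baseline survives.

import numpy as np

x = np.arange(20, dtype=float)
y = 3.0 + 0.5 * x + 0.1 * np.sin(x)                   # baseline 3 plus a slow ramp
p = np.polyfit(x, y, 1)                               # order 1 == 'linear'
p[-1] = 0                                             # zero the intercept before subtracting
detrended = y - np.polyval(p, x)
print(detrended.mean())                               # stays near the baseline (~3), not near 0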
thunder-project/thunder | thunder/series/series.py | Series.normalize | def normalize(self, method='percentile', window=None, perc=20, offset=0.1):
"""
Normalize by subtracting and dividing by a baseline.
Baseline can be derived from a global mean or percentile,
or a smoothed percentile estimated within a rolling window.
Windowed baselines may only be well-defined for
temporal series data.
Parameters
----------
baseline : str, optional, default = 'percentile'
Quantity to use as the baseline, options are 'mean', 'percentile',
'window', or 'window-exact'.
window : int, optional, default = 6
Size of window for baseline estimation,
for 'window' and 'window-exact' baseline only.
perc : int, optional, default = 20
Percentile value to use, for 'percentile',
'window', or 'window-exact' baseline only.
offset : float, optional, default = 0.1
Scalar added to baseline during division to avoid division by 0.
"""
check_options(method, ['mean', 'percentile', 'window', 'window-exact'])
from warnings import warn
if not (method == 'window' or method == 'window-exact') and window is not None:
warn('Setting window without using method "window" has no effect')
if method == 'mean':
baseFunc = mean
if method == 'percentile':
baseFunc = lambda x: percentile(x, perc)
if method == 'window':
from scipy.ndimage.filters import percentile_filter
baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest')
if method == 'window-exact':
if window & 0x1:
left, right = (ceil(window/2), ceil(window/2) + 1)
else:
left, right = (window/2, window/2)
n = len(self.index)
baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc)
for ix in arange(0, n)])
def get(y):
b = baseFunc(y)
return (y - b) / (b + offset)
return self.map(get) | python | def normalize(self, method='percentile', window=None, perc=20, offset=0.1):
"""
Normalize by subtracting and dividing by a baseline.
Baseline can be derived from a global mean or percentile,
or a smoothed percentile estimated within a rolling window.
Windowed baselines may only be well-defined for
temporal series data.
Parameters
----------
baseline : str, optional, default = 'percentile'
Quantity to use as the baseline, options are 'mean', 'percentile',
'window', or 'window-exact'.
window : int, optional, default = 6
Size of window for baseline estimation,
for 'window' and 'window-exact' baseline only.
perc : int, optional, default = 20
Percentile value to use, for 'percentile',
'window', or 'window-exact' baseline only.
offset : float, optional, default = 0.1
Scalar added to baseline during division to avoid division by 0.
"""
check_options(method, ['mean', 'percentile', 'window', 'window-exact'])
from warnings import warn
if not (method == 'window' or method == 'window-exact') and window is not None:
warn('Setting window without using method "window" has no effect')
if method == 'mean':
baseFunc = mean
if method == 'percentile':
baseFunc = lambda x: percentile(x, perc)
if method == 'window':
from scipy.ndimage.filters import percentile_filter
baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest')
if method == 'window-exact':
if window & 0x1:
left, right = (ceil(window/2), ceil(window/2) + 1)
else:
left, right = (window/2, window/2)
n = len(self.index)
baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc)
for ix in arange(0, n)])
def get(y):
b = baseFunc(y)
return (y - b) / (b + offset)
return self.map(get) | [
"def",
"normalize",
"(",
"self",
",",
"method",
"=",
"'percentile'",
",",
"window",
"=",
"None",
",",
"perc",
"=",
"20",
",",
"offset",
"=",
"0.1",
")",
":",
"check_options",
"(",
"method",
",",
"[",
"'mean'",
",",
"'percentile'",
",",
"'window'",
",",
"'window-exact'",
"]",
")",
"from",
"warnings",
"import",
"warn",
"if",
"not",
"(",
"method",
"==",
"'window'",
"or",
"method",
"==",
"'window-exact'",
")",
"and",
"window",
"is",
"not",
"None",
":",
"warn",
"(",
"'Setting window without using method \"window\" has no effect'",
")",
"if",
"method",
"==",
"'mean'",
":",
"baseFunc",
"=",
"mean",
"if",
"method",
"==",
"'percentile'",
":",
"baseFunc",
"=",
"lambda",
"x",
":",
"percentile",
"(",
"x",
",",
"perc",
")",
"if",
"method",
"==",
"'window'",
":",
"from",
"scipy",
".",
"ndimage",
".",
"filters",
"import",
"percentile_filter",
"baseFunc",
"=",
"lambda",
"x",
":",
"percentile_filter",
"(",
"x",
".",
"astype",
"(",
"float64",
")",
",",
"perc",
",",
"window",
",",
"mode",
"=",
"'nearest'",
")",
"if",
"method",
"==",
"'window-exact'",
":",
"if",
"window",
"&",
"0x1",
":",
"left",
",",
"right",
"=",
"(",
"ceil",
"(",
"window",
"/",
"2",
")",
",",
"ceil",
"(",
"window",
"/",
"2",
")",
"+",
"1",
")",
"else",
":",
"left",
",",
"right",
"=",
"(",
"window",
"/",
"2",
",",
"window",
"/",
"2",
")",
"n",
"=",
"len",
"(",
"self",
".",
"index",
")",
"baseFunc",
"=",
"lambda",
"x",
":",
"asarray",
"(",
"[",
"percentile",
"(",
"x",
"[",
"max",
"(",
"ix",
"-",
"left",
",",
"0",
")",
":",
"min",
"(",
"ix",
"+",
"right",
"+",
"1",
",",
"n",
")",
"]",
",",
"perc",
")",
"for",
"ix",
"in",
"arange",
"(",
"0",
",",
"n",
")",
"]",
")",
"def",
"get",
"(",
"y",
")",
":",
"b",
"=",
"baseFunc",
"(",
"y",
")",
"return",
"(",
"y",
"-",
"b",
")",
"/",
"(",
"b",
"+",
"offset",
")",
"return",
"self",
".",
"map",
"(",
"get",
")"
] | Normalize by subtracting and dividing by a baseline.
Baseline can be derived from a global mean or percentile,
or a smoothed percentile estimated within a rolling window.
Windowed baselines may only be well-defined for
temporal series data.
Parameters
----------
baseline : str, optional, default = 'percentile'
Quantity to use as the baseline, options are 'mean', 'percentile',
'window', or 'window-exact'.
window : int, optional, default = 6
Size of window for baseline estimation,
for 'window' and 'window-exact' baseline only.
perc : int, optional, default = 20
Percentile value to use, for 'percentile',
'window', or 'window-exact' baseline only.
offset : float, optional, default = 0.1
Scalar added to baseline during division to avoid division by 0. | [
"Normalize",
"by",
"subtracting",
"and",
"dividing",
"by",
"a",
"baseline",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1024-L1081 | train |
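A minimal numpy sketch of the 'percentile' branch of `normalize` above applied to one toy trace: subtract the percentile baseline and divide by it plus a small offset.

import numpy as np

y = 100.0 + 10.0 * np.sin(np.linspace(0, 6 * np.pi, 60))   # toy trace riding on a baseline
perc, offset = 20, 0.1
b = np.percentile(y, perc)                                  # 20th-percentile baseline
normalized = (y - b) / (b + offset)
print(round(b, 2), normalized.min(), normalized.max())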
thunder-project/thunder | thunder/series/series.py | Series.toimages | def toimages(self, chunk_size='auto'):
"""
Converts to images data.
This method is equivalent to series.toblocks(size).toimages().
Parameters
----------
chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Int interpreted as 'number of elements'.
Only valid in spark mode.
"""
from thunder.images.images import Images
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/prod(self.baseshape)), 1]))
n = len(self.shape) - 1
if self.mode == 'spark':
return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size))
if self.mode == 'local':
return Images(self.values.transpose((n,) + tuple(range(0, n)))) | python | def toimages(self, chunk_size='auto'):
"""
Converts to images data.
This method is equivalent to series.toblocks(size).toimages().
Parameters
----------
chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Int interpreted as 'number of elements'.
Only valid in spark mode.
"""
from thunder.images.images import Images
if chunk_size is 'auto':
chunk_size = str(max([int(1e5/prod(self.baseshape)), 1]))
n = len(self.shape) - 1
if self.mode == 'spark':
return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size))
if self.mode == 'local':
return Images(self.values.transpose((n,) + tuple(range(0, n)))) | [
"def",
"toimages",
"(",
"self",
",",
"chunk_size",
"=",
"'auto'",
")",
":",
"from",
"thunder",
".",
"images",
".",
"images",
"import",
"Images",
"if",
"chunk_size",
"is",
"'auto'",
":",
"chunk_size",
"=",
"str",
"(",
"max",
"(",
"[",
"int",
"(",
"1e5",
"/",
"prod",
"(",
"self",
".",
"baseshape",
")",
")",
",",
"1",
"]",
")",
")",
"n",
"=",
"len",
"(",
"self",
".",
"shape",
")",
"-",
"1",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"return",
"Images",
"(",
"self",
".",
"values",
".",
"swap",
"(",
"tuple",
"(",
"range",
"(",
"n",
")",
")",
",",
"(",
"0",
",",
")",
",",
"size",
"=",
"chunk_size",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"return",
"Images",
"(",
"self",
".",
"values",
".",
"transpose",
"(",
"(",
"n",
",",
")",
"+",
"tuple",
"(",
"range",
"(",
"0",
",",
"n",
")",
")",
")",
")"
] | Converts to images data.
This method is equivalent to series.toblocks(size).toimages().
Parameters
----------
chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Int interpreted as 'number of elements'.
Only valid in spark mode. | [
"Converts",
"to",
"images",
"data",
"."
] | 967ff8f3e7c2fabe1705743d95eb2746d4329786 | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L1083-L1108 | train |
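A minimal numpy sketch of the local-mode branch of `toimages` above, which is just a transpose moving the record (time) axis to the front; the array shape is a toy example.

import numpy as np

series_values = np.random.rand(4, 5, 10)                # 4 x 5 spatial grid, 10 time points
n = series_values.ndim - 1
image_values = series_values.transpose((n,) + tuple(range(0, n)))
print(image_values.shape)                                # (10, 4, 5): one image per time point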