| repository_name (string, lengths 7-55) | func_path_in_repository (string, lengths 4-223) | func_name (string, lengths 1-134) | whole_func_string (string, lengths 75-104k) | language (1 class: python) | func_code_string (string, lengths 75-104k) | func_code_tokens (sequences, lengths 19-28.4k) | func_documentation_string (string, lengths 1-46.9k) | func_documentation_tokens (sequences, lengths 1-1.97k) | split_name (1 class: train) | func_code_url (string, lengths 87-315) |
|---|---|---|---|---|---|---|---|---|---|---|
lazygunner/xunleipy | xunleipy/rsa_lib.py | findAPrime | def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError | python | def findAPrime(a, b, k):
"""Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. """
x = random.randint(a, b)
for i in range(0, int(10 * math.log(x) + 3)):
if millerRabin(x, k):
return x
else:
x += 1
raise ValueError | [
"def",
"findAPrime",
"(",
"a",
",",
"b",
",",
"k",
")",
":",
"x",
"=",
"random",
".",
"randint",
"(",
"a",
",",
"b",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"int",
"(",
"10",
"*",
"math",
".",
"log",
"(",
"x",
")",
"+",
"3",
")",
")",
":",
"if",
"millerRabin",
"(",
"x",
",",
"k",
")",
":",
"return",
"x",
"else",
":",
"x",
"+=",
"1",
"raise",
"ValueError"
] | Return a pseudo prime number roughly between a and b,
(could be larger than b). Raise ValueError if cannot find a
pseudo prime after 10 * ln(x) + 3 tries. | [
"Return",
"a",
"pseudo",
"prime",
"number",
"roughly",
"between",
"a",
"and",
"b",
"(",
"could",
"be",
"larger",
"than",
"b",
")",
".",
"Raise",
"ValueError",
"if",
"cannot",
"find",
"a",
"pseudo",
"prime",
"after",
"10",
"*",
"ln",
"(",
"x",
")",
"+",
"3",
"tries",
"."
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L167-L177 |
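The function above relies on a `millerRabin(x, k)` helper that does not appear in this dump. A minimal sketch of what such a probabilistic primality test usually looks like; the name, signature, and small-prime pre-filter are assumptions inferred from the call site, not the repository's code:

```python
import random

def miller_rabin(n, k):
    """Return True if n is probably prime, testing k random bases."""
    if n < 2:
        return False
    for p in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
        if n % p == 0:
            return n == p          # small n handled exactly
    # Write n - 1 as d * 2**r with d odd.
    r, d = 0, n - 1
    while d % 2 == 0:
        r += 1
        d //= 2
    for _ in range(k):
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False           # found a witness: n is composite
    return True
```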
lazygunner/xunleipy | xunleipy/rsa_lib.py | newKey | def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d) | python | def newKey(a, b, k):
""" Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one"""
try:
p = findAPrime(a, b, k)
while True:
q = findAPrime(a, b, k)
if q != p:
break
except:
raise ValueError
n = p * q
m = (p - 1) * (q - 1)
while True:
e = random.randint(1, m)
if coPrime([e, m]):
break
d = modInv(e, m)
return (n, e, d) | [
"def",
"newKey",
"(",
"a",
",",
"b",
",",
"k",
")",
":",
"try",
":",
"p",
"=",
"findAPrime",
"(",
"a",
",",
"b",
",",
"k",
")",
"while",
"True",
":",
"q",
"=",
"findAPrime",
"(",
"a",
",",
"b",
",",
"k",
")",
"if",
"q",
"!=",
"p",
":",
"break",
"except",
":",
"raise",
"ValueError",
"n",
"=",
"p",
"*",
"q",
"m",
"=",
"(",
"p",
"-",
"1",
")",
"*",
"(",
"q",
"-",
"1",
")",
"while",
"True",
":",
"e",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"m",
")",
"if",
"coPrime",
"(",
"[",
"e",
",",
"m",
"]",
")",
":",
"break",
"d",
"=",
"modInv",
"(",
"e",
",",
"m",
")",
"return",
"(",
"n",
",",
"e",
",",
"d",
")"
] | Try to find two large pseudo primes roughly between a and b.
Generate public and private keys for RSA encryption.
Raises ValueError if it fails to find one | [
"Try",
"to",
"find",
"two",
"large",
"pseudo",
"primes",
"roughly",
"between",
"a",
"and",
"b",
".",
"Generate",
"public",
"and",
"private",
"keys",
"for",
"RSA",
"encryption",
".",
"Raises",
"ValueError",
"if",
"it",
"fails",
"to",
"find",
"one"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L180-L202 |
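The loop above is textbook RSA key generation: pick two primes, form the modulus and totient, choose a public exponent coprime to the totient, and invert it. A self-contained sketch of the same math using only the standard library (this mirrors the logic rather than calling the repository's helpers; `pow(e, -1, m)` needs Python 3.8+):

```python
from math import gcd

# Tiny illustrative primes -- real keys use hundreds of digits.
p, q = 61, 53
n = p * q                 # modulus
m = (p - 1) * (q - 1)     # Euler's totient of n
e = 17                    # public exponent, must be coprime to m
assert gcd(e, m) == 1
d = pow(e, -1, m)         # modular inverse, the role of modInv(e, m) above

message = 42
cipher = pow(message, e, n)   # encrypt: c = msg^e mod n
plain = pow(cipher, d, n)     # decrypt: msg = c^d mod n
assert plain == message
```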
lazygunner/xunleipy | xunleipy/rsa_lib.py | numList2blocks | def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList | python | def numList2blocks(l, n):
"""Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it."""
# Note that ASCII printable characters range is 0x20 - 0x7E
returnList = []
toProcess = copy.copy(l)
''' copy message ascii list'''
if len(toProcess) % n != 0:
for i in range(0, n - len(toProcess) % n):
''' append rand str to list'''
toProcess.append(random.randint(32, 126))
toProcess[len(l)] = 0 # 0 after origin message list
''' generate int from ascii number list'''
for i in range(0, len(toProcess), n):
block = 0
for j in range(0, n):
block += toProcess[i + j] << (8 * (n - j - 1))
returnList.append(block)
return returnList | [
"def",
"numList2blocks",
"(",
"l",
",",
"n",
")",
":",
"# Note that ASCII printable characters range is 0x20 - 0x7E",
"returnList",
"=",
"[",
"]",
"toProcess",
"=",
"copy",
".",
"copy",
"(",
"l",
")",
"''' copy message ascii list'''",
"if",
"len",
"(",
"toProcess",
")",
"%",
"n",
"!=",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
"-",
"len",
"(",
"toProcess",
")",
"%",
"n",
")",
":",
"''' append rand str to list'''",
"toProcess",
".",
"append",
"(",
"random",
".",
"randint",
"(",
"32",
",",
"126",
")",
")",
"toProcess",
"[",
"len",
"(",
"l",
")",
"]",
"=",
"0",
"# 0 after origin message list",
"''' generate int from ascii number list'''",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"toProcess",
")",
",",
"n",
")",
":",
"block",
"=",
"0",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"n",
")",
":",
"block",
"+=",
"toProcess",
"[",
"i",
"+",
"j",
"]",
"<<",
"(",
"8",
"*",
"(",
"n",
"-",
"j",
"-",
"1",
")",
")",
"returnList",
".",
"append",
"(",
"block",
")",
"return",
"returnList"
] | Take a list of integers(each between 0 and 127), and combines them
into block size n using base 256. If len(L) % n != 0, use some random
junk to fill L to make it. | [
"Take",
"a",
"list",
"of",
"integers",
"(",
"each",
"between",
"0",
"and",
"127",
")",
"and",
"combines",
"them",
"into",
"block",
"size",
"n",
"using",
"base",
"256",
".",
"If",
"len",
"(",
"L",
")",
"%",
"n",
"!",
"=",
"0",
"use",
"some",
"random",
"junk",
"to",
"fill",
"L",
"to",
"make",
"it",
"."
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L216-L236 |
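The inner loop above is plain base-256 positional packing: each value is shifted into the next most significant byte of the block. A standalone illustration of the same idea (not the repository's code):

```python
def pack_block(byte_values):
    """Combine a list of 0-255 integers into one int, base 256."""
    block = 0
    for b in byte_values:
        block = (block << 8) | b   # shift previous bytes up, append b
    return block

# 'H' (72) and 'i' (105) pack to 72 * 256 + 105.
assert pack_block([72, 105]) == 72 * 256 + 105 == 18537
```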
lazygunner/xunleipy | xunleipy/rsa_lib.py | blocks2numList | def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList | python | def blocks2numList(blocks, n):
"""inverse function of numList2blocks."""
toProcess = copy.copy(blocks)
returnList = []
for numBlock in toProcess:
inner = []
for i in range(0, n):
inner.append(numBlock % 256)
numBlock >>= 8
inner.reverse()
returnList.extend(inner)
return returnList | [
"def",
"blocks2numList",
"(",
"blocks",
",",
"n",
")",
":",
"toProcess",
"=",
"copy",
".",
"copy",
"(",
"blocks",
")",
"returnList",
"=",
"[",
"]",
"for",
"numBlock",
"in",
"toProcess",
":",
"inner",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
")",
":",
"inner",
".",
"append",
"(",
"numBlock",
"%",
"256",
")",
"numBlock",
">>=",
"8",
"inner",
".",
"reverse",
"(",
")",
"returnList",
".",
"extend",
"(",
"inner",
")",
"return",
"returnList"
] | inverse function of numList2blocks. | [
"inverse",
"function",
"of",
"numList2blocks",
"."
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L239-L250 |
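A quick sanity check that the two block functions invert each other is a roundtrip on input whose length is already a multiple of `n`, so no random padding is appended. A hedged sketch, assuming both functions import from `xunleipy.rsa_lib` as the URLs above suggest:

```python
from xunleipy.rsa_lib import numList2blocks, blocks2numList

data = [72, 101, 108, 108]          # len(data) % n == 0: no padding branch
blocks = numList2blocks(data, 2)    # -> [0x4865, 0x6C6C]
assert blocks2numList(blocks, 2) == data
```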
lazygunner/xunleipy | xunleipy/rsa_lib.py | encrypt | def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN) | python | def encrypt(message, modN, e, blockSize):
"""given a string message, public keys and blockSize, encrypt using
RSA algorithms."""
numList = string2numList(message)
numBlocks = numList2blocks(numList, blockSize) # only one block
message = numBlocks[0]
# return [modExp(blocks, e, modN) for blocks in numBlocks]
return modExp(message, e, modN) | [
"def",
"encrypt",
"(",
"message",
",",
"modN",
",",
"e",
",",
"blockSize",
")",
":",
"numList",
"=",
"string2numList",
"(",
"message",
")",
"numBlocks",
"=",
"numList2blocks",
"(",
"numList",
",",
"blockSize",
")",
"# only one block",
"message",
"=",
"numBlocks",
"[",
"0",
"]",
"# return [modExp(blocks, e, modN) for blocks in numBlocks]",
"return",
"modExp",
"(",
"message",
",",
"e",
",",
"modN",
")"
] | given a string message, public keys and blockSize, encrypt using
RSA algorithms. | [
"given",
"a",
"string",
"message",
"public",
"keys",
"and",
"blockSize",
"encrypt",
"using",
"RSA",
"algorithms",
"."
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L253-L260 |
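`modExp` is not shown in this dump, but its call sites imply ordinary modular exponentiation, which Python's three-argument `pow` also provides. A sketch of the standard square-and-multiply version (an assumption about the helper's behavior, not the repository's code):

```python
def mod_exp(base, exponent, modulus):
    """Right-to-left binary (square-and-multiply) modular exponentiation."""
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent & 1:                       # low bit set: fold base in
            result = (result * base) % modulus
        base = (base * base) % modulus         # square for the next bit
        exponent >>= 1
    return result

assert mod_exp(4, 13, 497) == pow(4, 13, 497) == 445
```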
lazygunner/xunleipy | xunleipy/rsa_lib.py | decrypt | def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList) | python | def decrypt(secret, modN, d, blockSize):
"""reverse function of encrypt"""
numBlocks = [modExp(blocks, d, modN) for blocks in secret]
numList = blocks2numList(numBlocks, blockSize)
return numList2string(numList) | [
"def",
"decrypt",
"(",
"secret",
",",
"modN",
",",
"d",
",",
"blockSize",
")",
":",
"numBlocks",
"=",
"[",
"modExp",
"(",
"blocks",
",",
"d",
",",
"modN",
")",
"for",
"blocks",
"in",
"secret",
"]",
"numList",
"=",
"blocks2numList",
"(",
"numBlocks",
",",
"blockSize",
")",
"return",
"numList2string",
"(",
"numList",
")"
] | reverse function of encrypt | [
"reverse",
"function",
"of",
"encrypt"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/rsa_lib.py#L263-L267 |
portfoliome/foil | foil/paths.py | match_files | def match_files(files, pattern: Pattern):
"""Yields file name if matches a regular expression pattern."""
for name in files:
if re.match(pattern, name):
yield name | python | def match_files(files, pattern: Pattern):
"""Yields file name if matches a regular expression pattern."""
for name in files:
if re.match(pattern, name):
yield name | [
"def",
"match_files",
"(",
"files",
",",
"pattern",
":",
"Pattern",
")",
":",
"for",
"name",
"in",
"files",
":",
"if",
"re",
".",
"match",
"(",
"pattern",
",",
"name",
")",
":",
"yield",
"name"
] | Yields file name if matches a regular expression pattern. | [
"Yields",
"file",
"name",
"if",
"matches",
"a",
"regular",
"expression",
"pattern",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/paths.py#L45-L50 |
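A short usage sketch for the generator above; the file names and pattern are illustrative only:

```python
import re
from foil.paths import match_files

names = ['report_2020.csv', 'report_2021.csv', 'notes.txt']
csv_reports = list(match_files(names, re.compile(r'report_\d{4}\.csv$')))
assert csv_reports == ['report_2020.csv', 'report_2021.csv']
```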
portfoliome/foil | foil/paths.py | match_zipfile_members | def match_zipfile_members(zipfile_path: str, pattern: Pattern):
"""Match files to a pattern within a zip file's content."""
with ZipFile(zipfile_path, mode='r') as zfile:
members = zfile.namelist()
yield from match_files(members, pattern) | python | def match_zipfile_members(zipfile_path: str, pattern: Pattern):
"""Match files to a pattern within a zip file's content."""
with ZipFile(zipfile_path, mode='r') as zfile:
members = zfile.namelist()
yield from match_files(members, pattern) | [
"def",
"match_zipfile_members",
"(",
"zipfile_path",
":",
"str",
",",
"pattern",
":",
"Pattern",
")",
":",
"with",
"ZipFile",
"(",
"zipfile_path",
",",
"mode",
"=",
"'r'",
")",
"as",
"zfile",
":",
"members",
"=",
"zfile",
".",
"namelist",
"(",
")",
"yield",
"from",
"match_files",
"(",
"members",
",",
"pattern",
")"
] | Match files to a pattern within a zip file's content. | [
"Match",
"files",
"to",
"a",
"pattern",
"within",
"a",
"zip",
"file",
"s",
"content",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/paths.py#L53-L59 |
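A matching sketch for the zip variant; the archive path and pattern are illustrative, and the archive must already exist:

```python
import re
from foil.paths import match_zipfile_members

pattern = re.compile(r'data/.*\.json$')
for member in match_zipfile_members('/tmp/archive.zip', pattern):
    print(member)   # each JSON member under data/ in the archive
```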
portfoliome/foil | foil/paths.py | directory_files | def directory_files(path):
"""Yield directory file names."""
for entry in os.scandir(path):
if not entry.name.startswith('.') and entry.is_file():
yield entry.name | python | def directory_files(path):
"""Yield directory file names."""
for entry in os.scandir(path):
if not entry.name.startswith('.') and entry.is_file():
yield entry.name | [
"def",
"directory_files",
"(",
"path",
")",
":",
"for",
"entry",
"in",
"os",
".",
"scandir",
"(",
"path",
")",
":",
"if",
"not",
"entry",
".",
"name",
".",
"startswith",
"(",
"'.'",
")",
"and",
"entry",
".",
"is_file",
"(",
")",
":",
"yield",
"entry",
".",
"name"
] | Yield directory file names. | [
"Yield",
"directory",
"file",
"names",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/paths.py#L68-L73 |
portfoliome/foil | foil/paths.py | get_file_listing_sha | def get_file_listing_sha(listing_paths: Iterable) -> str:
"""Return sha256 string for group of FTP listings."""
return sha256(''.join(sorted(listing_paths)).encode('utf-8')).hexdigest() | python | def get_file_listing_sha(listing_paths: Iterable) -> str:
"""Return sha256 string for group of FTP listings."""
return sha256(''.join(sorted(listing_paths)).encode('utf-8')).hexdigest() | [
"def",
"get_file_listing_sha",
"(",
"listing_paths",
":",
"Iterable",
")",
"->",
"str",
":",
"return",
"sha256",
"(",
"''",
".",
"join",
"(",
"sorted",
"(",
"listing_paths",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | Return sha256 string for group of FTP listings. | [
"Return",
"sha256",
"string",
"for",
"group",
"of",
"FTP",
"listings",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/paths.py#L76-L79 |
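Because the paths are sorted before hashing, the digest is independent of listing order, which makes it a stable fingerprint for change detection. A minimal standard-library sketch of the same idea:

```python
from hashlib import sha256

def listing_sha(paths):
    # Sorting first makes the fingerprint independent of listing order.
    return sha256(''.join(sorted(paths)).encode('utf-8')).hexdigest()

a = listing_sha(['b.csv', 'a.csv'])
b = listing_sha(['a.csv', 'b.csv'])
assert a == b   # same files, same digest, regardless of order
```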
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceConfigTap.set_button_map | def set_button_map(self, button_map):
"""Set the finger number to button number mapping for tap-to-click.
The default mapping on most devices is to have a 1, 2 and 3 finger tap
to map to the left, right and middle button, respectively. A device may
permit changing the button mapping but disallow specific maps. In this
case :attr:`~libinput.constant.ConfigStatus.UNSUPPORTED` is returned,
the caller is expected to handle this case correctly.
Changing the button mapping may not take effect immediately, the device
may wait until it is in a neutral state before applying any changes.
The mapping may be changed when tap-to-click is disabled. The new
mapping takes effect when tap-to-click is enabled in the future.
If :attr:`finger_count` is 0, this method raises :exc:`AssertionError`.
Args:
button_map (~libinput.constant.TapButtonMap): The new
finger-to-button number mapping.
Returns:
~libinput.constant.ConfigStatus: A config status code.
Raises:
AssertionError
"""
assert self.finger_count > 0, 'This device does not support tapping'
return self._libinput.libinput_device_config_tap_set_button_map(
self._handle, button_map) | python | def set_button_map(self, button_map):
"""Set the finger number to button number mapping for tap-to-click.
The default mapping on most devices is to have a 1, 2 and 3 finger tap
to map to the left, right and middle button, respectively. A device may
permit changing the button mapping but disallow specific maps. In this
case :attr:`~libinput.constant.ConfigStatus.UNSUPPORTED` is returned,
the caller is expected to handle this case correctly.
Changing the button mapping may not take effect immediately, the device
may wait until it is in a neutral state before applying any changes.
The mapping may be changed when tap-to-click is disabled. The new
mapping takes effect when tap-to-click is enabled in the future.
If :attr:`finger_count` is 0, this method raises :exc:`AssertionError`.
Args:
button_map (~libinput.constant.TapButtonMap): The new
finger-to-button number mapping.
Returns:
~libinput.constant.ConfigStatus: A config status code.
Raises:
AssertionError
"""
assert self.finger_count > 0, 'This device does not support tapping'
return self._libinput.libinput_device_config_tap_set_button_map(
self._handle, button_map) | [
"def",
"set_button_map",
"(",
"self",
",",
"button_map",
")",
":",
"assert",
"self",
".",
"finger_count",
">",
"0",
",",
"'This device does not support tapping'",
"return",
"self",
".",
"_libinput",
".",
"libinput_device_config_tap_set_button_map",
"(",
"self",
".",
"_handle",
",",
"button_map",
")"
] | Set the finger number to button number mapping for tap-to-click.
The default mapping on most devices is to have a 1, 2 and 3 finger tap
to map to the left, right and middle button, respectively. A device may
permit changing the button mapping but disallow specific maps. In this
case :attr:`~libinput.constant.ConfigStatus.UNSUPPORTED` is returned,
the caller is expected to handle this case correctly.
Changing the button mapping may not take effect immediately, the device
may wait until it is in a neutral state before applying any changes.
The mapping may be changed when tap-to-click is disabled. The new
mapping takes effect when tap-to-click is enabled in the future.
If :attr:`finger_count` is 0, this method raises :exc:`AssertionError`.
Args:
button_map (~libinput.constant.TapButtonMap): The new
finger-to-button number mapping.
Returns:
~libinput.constant.ConfigStatus: A config status code.
Raises:
AssertionError | [
"Set",
"the",
"finger",
"number",
"to",
"button",
"number",
"mapping",
"for",
"tap",
"-",
"to",
"-",
"click",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L279-L307 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceConfigCalibration.set_matrix | def set_matrix(self, matrix):
"""Apply the 3x3 transformation matrix to absolute device coordinates.
This matrix has no effect on relative events.
Given a 6-element array [a, b, c, d, e, f], the matrix is applied as
::
[ a b c ] [ x ]
[ d e f ] * [ y ]
[ 0 0 1 ] [ 1 ]
The translation component (c, f) is expected to be normalized to
the device coordinate range. For example, the matrix
::
[ 1 0 1 ]
[ 0 1 -1 ]
[ 0 0 1 ]
moves all coordinates by 1 device-width to the right and
1 device-height up.
The rotation matrix for rotation around the origin is defined as
::
[ cos(a) -sin(a) 0 ]
[ sin(a) cos(a) 0 ]
[ 0 0 1 ]
Note that any rotation requires an additional translation component
to translate the rotated coordinates back into the original device
space. The rotation matrixes for 90, 180 and 270 degrees clockwise are::
90 deg cw: 180 deg cw: 270 deg cw:
[ 0 -1 1] [ -1 0 1] [ 0 1 0 ]
[ 1 0 0] [ 0 -1 1] [ -1 0 1 ]
[ 0 0 1] [ 0 0 1] [ 0 0 1 ]
Args:
matrix (iterable): An array representing the first two rows of
a 3x3 matrix as described above.
Returns:
~libinput.constant.ConfigStatus: A config status code.
"""
matrix = (c_float * 6)(*matrix)
return self._libinput.libinput_device_config_calibration_set_matrix(
self._handle, matrix) | python | def set_matrix(self, matrix):
"""Apply the 3x3 transformation matrix to absolute device coordinates.
This matrix has no effect on relative events.
Given a 6-element array [a, b, c, d, e, f], the matrix is applied as
::
[ a b c ] [ x ]
[ d e f ] * [ y ]
[ 0 0 1 ] [ 1 ]
The translation component (c, f) is expected to be normalized to
the device coordinate range. For example, the matrix
::
[ 1 0 1 ]
[ 0 1 -1 ]
[ 0 0 1 ]
moves all coordinates by 1 device-width to the right and
1 device-height up.
The rotation matrix for rotation around the origin is defined as
::
[ cos(a) -sin(a) 0 ]
[ sin(a) cos(a) 0 ]
[ 0 0 1 ]
Note that any rotation requires an additional translation component
to translate the rotated coordinates back into the original device
space. The rotation matrixes for 90, 180 and 270 degrees clockwise are::
90 deg cw: 180 deg cw: 270 deg cw:
[ 0 -1 1] [ -1 0 1] [ 0 1 0 ]
[ 1 0 0] [ 0 -1 1] [ -1 0 1 ]
[ 0 0 1] [ 0 0 1] [ 0 0 1 ]
Args:
matrix (iterable): An array representing the first two rows of
a 3x3 matrix as described above.
Returns:
~libinput.constant.ConfigStatus: A config status code.
"""
matrix = (c_float * 6)(*matrix)
return self._libinput.libinput_device_config_calibration_set_matrix(
self._handle, matrix) | [
"def",
"set_matrix",
"(",
"self",
",",
"matrix",
")",
":",
"matrix",
"=",
"(",
"c_float",
"*",
"6",
")",
"(",
"*",
"matrix",
")",
"return",
"self",
".",
"_libinput",
".",
"libinput_device_config_calibration_set_matrix",
"(",
"self",
".",
"_handle",
",",
"matrix",
")"
] | Apply the 3x3 transformation matrix to absolute device coordinates.
This matrix has no effect on relative events.
Given a 6-element array [a, b, c, d, e, f], the matrix is applied as
::
[ a b c ] [ x ]
[ d e f ] * [ y ]
[ 0 0 1 ] [ 1 ]
The translation component (c, f) is expected to be normalized to
the device coordinate range. For example, the matrix
::
[ 1 0 1 ]
[ 0 1 -1 ]
[ 0 0 1 ]
moves all coordinates by 1 device-width to the right and
1 device-height up.
The rotation matrix for rotation around the origin is defined as
::
[ cos(a) -sin(a) 0 ]
[ sin(a) cos(a) 0 ]
[ 0 0 1 ]
Note that any rotation requires an additional translation component
to translate the rotated coordinates back into the original device
space. The rotation matrixes for 90, 180 and 270 degrees clockwise are::
90 deg cw: 180 deg cw: 270 deg cw:
[ 0 -1 1] [ -1 0 1] [ 0 1 0 ]
[ 1 0 0] [ 0 -1 1] [ -1 0 1 ]
[ 0 0 1] [ 0 0 1] [ 0 0 1 ]
Args:
matrix (iterable): An array representing the first two rows of
a 3x3 matrix as described above.
Returns:
~libinput.constant.ConfigStatus: A config status code. | [
"Apply",
"the",
"3x3",
"transformation",
"matrix",
"to",
"absolute",
"device",
"coordinates",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L489-L537 |
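The docstring's rotation matrices pair a rotation with the translation that maps the rotated coordinates back into normalized [0, 1] device space. A pure-Python sketch of applying the first two rows of such a matrix to a point (illustrative math only, not the libinput API):

```python
def apply_calibration(matrix, x, y):
    """Apply the first two rows [a, b, c, d, e, f] of a 3x3
    calibration matrix to a normalized point (x, y)."""
    a, b, c, d, e, f = matrix
    return a * x + b * y + c, d * x + e * y + f

rot90_cw = [0, -1, 1,    # 90 degrees clockwise, per the docstring above
            1,  0, 0]
# The origin of the normalized space maps to (1, 0) after rotation.
assert apply_calibration(rot90_cw, 0.0, 0.0) == (1.0, 0.0)
```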
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceConfigCalibration.matrix | def matrix(self):
"""The current calibration matrix for this device.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described in :meth:`set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput.libinput_device_config_calibration_get_matrix(
self._handle, matrix)
return rc, tuple(matrix) | python | def matrix(self):
"""The current calibration matrix for this device.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described in :meth:`set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput.libinput_device_config_calibration_get_matrix(
self._handle, matrix)
return rc, tuple(matrix) | [
"def",
"matrix",
"(",
"self",
")",
":",
"matrix",
"=",
"(",
"c_float",
"*",
"6",
")",
"(",
")",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_config_calibration_get_matrix",
"(",
"self",
".",
"_handle",
",",
"matrix",
")",
"return",
"rc",
",",
"tuple",
"(",
"matrix",
")"
] | The current calibration matrix for this device.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described in :meth:`set_matrix`. | [
"The",
"current",
"calibration",
"matrix",
"for",
"this",
"device",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L540-L554 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceConfigCalibration.default_matrix | def default_matrix(self):
"""The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput \
.libinput_device_config_calibration_get_default_matrix(
self._handle, matrix)
return rc, tuple(matrix) | python | def default_matrix(self):
"""The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput \
.libinput_device_config_calibration_get_default_matrix(
self._handle, matrix)
return rc, tuple(matrix) | [
"def",
"default_matrix",
"(",
"self",
")",
":",
"matrix",
"=",
"(",
"c_float",
"*",
"6",
")",
"(",
")",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_config_calibration_get_default_matrix",
"(",
"self",
".",
"_handle",
",",
"matrix",
")",
"return",
"rc",
",",
"tuple",
"(",
"matrix",
")"
] | The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`. | [
"The",
"default",
"calibration",
"matrix",
"for",
"this",
"device",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L557-L578 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Device.sysname | def sysname(self):
"""The system name of the device.
To get the descriptive device name, use :attr:`name`.
Returns:
str: System name of the device.
"""
pchar = self._libinput.libinput_device_get_sysname(self._handle)
return string_at(pchar).decode() | python | def sysname(self):
"""The system name of the device.
To get the descriptive device name, use :attr:`name`.
Returns:
str: System name of the device.
"""
pchar = self._libinput.libinput_device_get_sysname(self._handle)
return string_at(pchar).decode() | [
"def",
"sysname",
"(",
"self",
")",
":",
"pchar",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_get_sysname",
"(",
"self",
".",
"_handle",
")",
"return",
"string_at",
"(",
"pchar",
")",
".",
"decode",
"(",
")"
] | The system name of the device.
To get the descriptive device name, use :attr:`name`.
Returns:
str: System name of the device. | [
"The",
"system",
"name",
"of",
"the",
"device",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1674-L1684 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Device.name | def name(self):
"""The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
str: The device name.
"""
pchar = self._libinput.libinput_device_get_name(self._handle)
return string_at(pchar).decode() | python | def name(self):
"""The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
str: The device name.
"""
pchar = self._libinput.libinput_device_get_name(self._handle)
return string_at(pchar).decode() | [
"def",
"name",
"(",
"self",
")",
":",
"pchar",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_get_name",
"(",
"self",
".",
"_handle",
")",
"return",
"string_at",
"(",
"pchar",
")",
".",
"decode",
"(",
")"
] | The descriptive device name as advertised by the kernel
and/or the hardware itself.
To get the sysname for this device, use :attr:`sysname`.
Returns:
str: The device name. | [
"The",
"descriptive",
"device",
"name",
"as",
"advertised",
"by",
"the",
"kernel",
"and",
"/",
"or",
"the",
"hardware",
"itself",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1687-L1698 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Device.set_seat_logical_name | def set_seat_logical_name(self, seat):
"""Change the logical seat associated with this device by removing
the device and adding it to the new seat.
This command is identical to physically unplugging the device, then
re-plugging it as a member of the new seat. libinput will generate
a :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this
:class:`Device` is considered removed from the context; it will not
generate further events.
A :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is
generated with a new :class:`Device`. It is the caller's
responsibility to update references to the new device accordingly.
If the logical seat name already exists in the device's physical seat,
the device is added to this seat. Otherwise, a new seat is created.
Note:
This change applies to this device until removal or
:meth:`~libinput.LibInput.suspend`, whichever happens earlier.
Args:
seat (str): The new logical seat name.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_set_seat_logical_name(
self._handle, seat.encode())
assert rc == 0, 'Cannot assign device to {}'.format(seat) | python | def set_seat_logical_name(self, seat):
"""Change the logical seat associated with this device by removing
the device and adding it to the new seat.
This command is identical to physically unplugging the device, then
re-plugging it as a member of the new seat. libinput will generate
a :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this
:class:`Device` is considered removed from the context; it will not
generate further events.
A :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is
generated with a new :class:`Device`. It is the caller's
responsibility to update references to the new device accordingly.
If the logical seat name already exists in the device's physical seat,
the device is added to this seat. Otherwise, a new seat is created.
Note:
This change applies to this device until removal or
:meth:`~libinput.LibInput.suspend`, whichever happens earlier.
Args:
seat (str): The new logical seat name.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_set_seat_logical_name(
self._handle, seat.encode())
assert rc == 0, 'Cannot assign device to {}'.format(seat) | [
"def",
"set_seat_logical_name",
"(",
"self",
",",
"seat",
")",
":",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_set_seat_logical_name",
"(",
"self",
".",
"_handle",
",",
"seat",
".",
"encode",
"(",
")",
")",
"assert",
"rc",
"==",
"0",
",",
"'Cannot assign device to {}'",
".",
"format",
"(",
"seat",
")"
] | Change the logical seat associated with this device by removing
the device and adding it to the new seat.
This command is identical to physically unplugging the device, then
re-plugging it as a member of the new seat. libinput will generate
a :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this
:class:`Device` is considered removed from the context; it will not
generate further events.
A :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is
generated with a new :class:`Device`. It is the caller's
responsibility to update references to the new device accordingly.
If the logical seat name already exists in the device's physical seat,
the device is added to this seat. Otherwise, a new seat is created.
Note:
This change applies to this device until removal or
:meth:`~libinput.LibInput.suspend`, whichever happens earlier.
Args:
seat (str): The new logical seat name.
Raises:
AssertionError | [
"Change",
"the",
"logical",
"seat",
"associated",
"with",
"this",
"device",
"by",
"removing",
"the",
"device",
"and",
"adding",
"it",
"to",
"the",
"new",
"seat",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1735-L1762 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Device.capabilities | def capabilities(self):
"""A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities.
"""
caps = []
for cap in DeviceCapability:
if self._libinput.libinput_device_has_capability(self._handle, cap):
caps.append(cap)
return tuple(caps) | python | def capabilities(self):
"""A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities.
"""
caps = []
for cap in DeviceCapability:
if self._libinput.libinput_device_has_capability(self._handle, cap):
caps.append(cap)
return tuple(caps) | [
"def",
"capabilities",
"(",
"self",
")",
":",
"caps",
"=",
"[",
"]",
"for",
"cap",
"in",
"DeviceCapability",
":",
"if",
"self",
".",
"_libinput",
".",
"libinput_device_has_capability",
"(",
"self",
".",
"_handle",
",",
"cap",
")",
":",
"caps",
".",
"append",
"(",
"cap",
")",
"return",
"tuple",
"(",
"caps",
")"
] | A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities. | [
"A",
"tuple",
"of",
"capabilities",
"this",
"device",
"supports",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1800-L1811 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Device.size | def size(self):
"""The physical size of a device in mm, where meaningful.
This property is only valid on devices with the required data, i.e.
tablets, touchpads and touchscreens. For other devices this property
raises :exc:`AssertionError`.
Returns:
(float, float): (Width, Height) in mm.
Raises:
AssertionError
"""
width = c_double(0)
height = c_double(0)
rc = self._libinput.libinput_device_get_size(
self._handle, byref(width), byref(height))
assert rc == 0, 'This device does not provide size information'
return width.value, height.value | python | def size(self):
"""The physical size of a device in mm, where meaningful.
This property is only valid on devices with the required data, i.e.
tablets, touchpads and touchscreens. For other devices this property
raises :exc:`AssertionError`.
Returns:
(float, float): (Width, Height) in mm.
Raises:
AssertionError
"""
width = c_double(0)
height = c_double(0)
rc = self._libinput.libinput_device_get_size(
self._handle, byref(width), byref(height))
assert rc == 0, 'This device does not provide size information'
return width.value, height.value | [
"def",
"size",
"(",
"self",
")",
":",
"width",
"=",
"c_double",
"(",
"0",
")",
"height",
"=",
"c_double",
"(",
"0",
")",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_get_size",
"(",
"self",
".",
"_handle",
",",
"byref",
"(",
"width",
")",
",",
"byref",
"(",
"height",
")",
")",
"assert",
"rc",
"==",
"0",
",",
"'This device does not provide size information'",
"return",
"width",
".",
"value",
",",
"height",
".",
"value"
] | The physical size of a device in mm, where meaningful.
This property is only valid on devices with the required data, i.e.
tablets, touchpads and touchscreens. For other devices this property
raises :exc:`AssertionError`.
Returns:
(float, float): (Width, Height) in mm.
Raises:
AssertionError | [
"The",
"physical",
"size",
"of",
"a",
"device",
"in",
"mm",
"where",
"meaningful",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1814-L1832 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DevicePointer.has_button | def has_button(self, button):
"""Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_pointer_has_button(
self._handle, button)
assert rc >= 0, 'This device is not a pointer device'
return bool(rc) | python | def has_button(self, button):
"""Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_pointer_has_button(
self._handle, button)
assert rc >= 0, 'This device is not a pointer device'
return bool(rc) | [
"def",
"has_button",
"(",
"self",
",",
"button",
")",
":",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_pointer_has_button",
"(",
"self",
".",
"_handle",
",",
"button",
")",
"assert",
"rc",
">=",
"0",
",",
"'This device is not a pointer device'",
"return",
"bool",
"(",
"rc",
")"
] | Check if this device has a given button.
Args:
button (int): Button to check for, see ``input.h`` for button
definitions.
Returns:
bool: :obj:`True` if the device has this button, :obj:`False` if
it does not.
Raises:
AssertionError | [
"Check",
"if",
"this",
"device",
"has",
"a",
"given",
"button",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1892-L1908 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceKeyboard.has_key | def has_key(self, key):
"""Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`
device has a given key.
Args:
key (int): Key to check for, see ``input.h`` for key definitions.
Returns:
bool: :obj:`True` if the device has this key, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_keyboard_has_key(self._handle, key)
assert rc >= 0, 'This device is not a keyboard device'
return bool(rc) | python | def has_key(self, key):
"""Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`
device has a given key.
Args:
key (int): Key to check for, see ``input.h`` for key definitions.
Returns:
bool: :obj:`True` if the device has this key, :obj:`False` if
it does not.
Raises:
AssertionError
"""
rc = self._libinput.libinput_device_keyboard_has_key(self._handle, key)
assert rc >= 0, 'This device is not a keyboard device'
return bool(rc) | [
"def",
"has_key",
"(",
"self",
",",
"key",
")",
":",
"rc",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_keyboard_has_key",
"(",
"self",
".",
"_handle",
",",
"key",
")",
"assert",
"rc",
">=",
"0",
",",
"'This device is not a keyboard device'",
"return",
"bool",
"(",
"rc",
")"
] | Check if a :attr:`~libinput.constant.DeviceCapability.KEYBOARD`
device has a given key.
Args:
key (int): Key to check for, see ``input.h`` for key definitions.
Returns:
bool: :obj:`True` if the device has this key, :obj:`False` if
it does not.
Raises:
AssertionError | [
"Check",
"if",
"a",
":",
"attr",
":",
"~libinput",
".",
"constant",
".",
"DeviceCapability",
".",
"KEYBOARD",
"device",
"has",
"a",
"given",
"key",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1924-L1939 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceTabletPad.num_buttons | def num_buttons(self):
"""The number of buttons on a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD` capability.
Buttons on a pad device are numbered sequentially, see
`Tablet pad button numbers`_ for details.
Returns:
int: The number of buttons supported by the device.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_buttons(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | python | def num_buttons(self):
"""The number of buttons on a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD` capability.
Buttons on a pad device are numbered sequentially, see
`Tablet pad button numbers`_ for details.
Returns:
int: The number of buttons supported by the device.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_buttons(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | [
"def",
"num_buttons",
"(",
"self",
")",
":",
"num",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_tablet_pad_get_num_buttons",
"(",
"self",
".",
"_handle",
")",
"if",
"num",
"<",
"0",
":",
"raise",
"AttributeError",
"(",
"'This device is not a tablet pad device'",
")",
"return",
"num"
] | The number of buttons on a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD` capability.
Buttons on a pad device are numbered sequentially, see
`Tablet pad button numbers`_ for details.
Returns:
int: The number of buttons supported by the device.
Raises:
AttributeError | [
"The",
"number",
"of",
"buttons",
"on",
"a",
"device",
"with",
"the",
":",
"attr",
":",
"~libinput",
".",
"constant",
".",
"DeviceCapability",
".",
"TABLET_PAD",
"capability",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1973-L1990 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceTabletPad.num_rings | def num_rings(self):
"""The number of rings a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of rings or 0 if the device has no rings.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_rings(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | python | def num_rings(self):
"""The number of rings a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of rings or 0 if the device has no rings.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_rings(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | [
"def",
"num_rings",
"(",
"self",
")",
":",
"num",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_tablet_pad_get_num_rings",
"(",
"self",
".",
"_handle",
")",
"if",
"num",
"<",
"0",
":",
"raise",
"AttributeError",
"(",
"'This device is not a tablet pad device'",
")",
"return",
"num"
] | The number of rings a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of rings or 0 if the device has no rings.
Raises:
AttributeError | [
"The",
"number",
"of",
"rings",
"a",
"device",
"with",
"the",
":",
"attr",
":",
"~libinput",
".",
"constant",
".",
"DeviceCapability",
".",
"TABLET_PAD",
"capability",
"provides",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1993-L2008 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceTabletPad.num_strips | def num_strips(self):
"""The number of strips a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of strips or 0 if the device has no strips.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_strips(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | python | def num_strips(self):
"""The number of strips a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of strips or 0 if the device has no strips.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_strips(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | [
"def",
"num_strips",
"(",
"self",
")",
":",
"num",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_tablet_pad_get_num_strips",
"(",
"self",
".",
"_handle",
")",
"if",
"num",
"<",
"0",
":",
"raise",
"AttributeError",
"(",
"'This device is not a tablet pad device'",
")",
"return",
"num"
] | The number of strips a device with
the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD`
capability provides.
Returns:
int: The number of strips or 0 if the device has no strips.
Raises:
AttributeError | [
"The",
"number",
"of",
"strips",
"a",
"device",
"with",
"the",
":",
"attr",
":",
"~libinput",
".",
"constant",
".",
"DeviceCapability",
".",
"TABLET_PAD",
"capability",
"provides",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L2011-L2026 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceTabletPad.num_mode_groups | def num_mode_groups(self):
"""Most devices only provide a single mode group, however devices
such as the Wacom Cintiq 22HD provide two mode groups.
If multiple mode groups are available, a caller should use
:meth:`~libinput.define.TabletPadModeGroup.has_button`,
:meth:`~libinput.define.TabletPadModeGroup.has_ring`
and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate
each button, ring and strip with the correct mode group.
Returns:
int: The number of mode groups available on this device.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | python | def num_mode_groups(self):
"""Most devices only provide a single mode group, however devices
such as the Wacom Cintiq 22HD provide two mode groups.
If multiple mode groups are available, a caller should use
:meth:`~libinput.define.TabletPadModeGroup.has_button`,
:meth:`~libinput.define.TabletPadModeGroup.has_ring`
and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate
each button, ring and strip with the correct mode group.
Returns:
int: The number of mode groups available on this device.
Raises:
AttributeError
"""
num = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(
self._handle)
if num < 0:
raise AttributeError('This device is not a tablet pad device')
return num | [
"def",
"num_mode_groups",
"(",
"self",
")",
":",
"num",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_tablet_pad_get_num_mode_groups",
"(",
"self",
".",
"_handle",
")",
"if",
"num",
"<",
"0",
":",
"raise",
"AttributeError",
"(",
"'This device is not a tablet pad device'",
")",
"return",
"num"
] | Most devices only provide a single mode group, however devices
such as the Wacom Cintiq 22HD provide two mode groups.
If multiple mode groups are available, a caller should use
:meth:`~libinput.define.TabletPadModeGroup.has_button`,
:meth:`~libinput.define.TabletPadModeGroup.has_ring`
and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate
each button, ring and strip with the correct mode group.
Returns:
int: The number of mode groups available on this device.
Raises:
AttributeError | [
"Most",
"devices",
"only",
"provide",
"a",
"single",
"mode",
"group",
"however",
"devices",
"such",
"as",
"the",
"Wacom",
"Cintiq",
"22HD",
"provide",
"two",
"mode",
"groups",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L2029-L2049 |
OzymandiasTheGreat/python-libinput | libinput/device.py | DeviceTabletPad.get_mode_group | def get_mode_group(self, group):
"""While a reference is kept by the caller, the returned mode group
will compare equal with mode group returned by each subsequent call of
this method with the same index and mode group returned from
:attr:`~libinput.event.TabletPadEvent.mode_group`, provided
the event was generated by this mode group.
Args:
group (int): A mode group index.
Returns:
~libinput.define.TabletPadModeGroup: The mode group with the given
index or :obj:`None` if an invalid index is given.
"""
hmodegroup = self._libinput.libinput_device_tablet_pad_get_mode_group(
self._handle, group)
if hmodegroup:
return TabletPadModeGroup(hmodegroup, self._libinput)
return None | python | def get_mode_group(self, group):
"""While a reference is kept by the caller, the returned mode group
will compare equal with mode group returned by each subsequent call of
this method with the same index and mode group returned from
:attr:`~libinput.event.TabletPadEvent.mode_group`, provided
the event was generated by this mode group.
Args:
group (int): A mode group index.
Returns:
~libinput.define.TabletPadModeGroup: The mode group with the given
index or :obj:`None` if an invalid index is given.
"""
hmodegroup = self._libinput.libinput_device_tablet_pad_get_mode_group(
self._handle, group)
if hmodegroup:
return TabletPadModeGroup(hmodegroup, self._libinput)
return None | [
"def",
"get_mode_group",
"(",
"self",
",",
"group",
")",
":",
"hmodegroup",
"=",
"self",
".",
"_libinput",
".",
"libinput_device_tablet_pad_get_mode_group",
"(",
"self",
".",
"_handle",
",",
"group",
")",
"if",
"hmodegroup",
":",
"return",
"TabletPadModeGroup",
"(",
"hmodegroup",
",",
"self",
".",
"_libinput",
")",
"return",
"None"
] | While a reference is kept by the caller, the returned mode group
will compare equal with mode group returned by each subsequent call of
this method with the same index and mode group returned from
:attr:`~libinput.event.TabletPadEvent.mode_group`, provided
the event was generated by this mode group.
Args:
group (int): A mode group index.
Returns:
~libinput.define.TabletPadModeGroup: The mode group with the given
index or :obj:`None` if an invalid index is given. | [
"While",
"a",
"reference",
"is",
"kept",
"by",
"the",
"caller",
"the",
"returned",
"mode",
"group",
"will",
"compare",
"equal",
"with",
"mode",
"group",
"returned",
"by",
"each",
"subsequent",
"call",
"of",
"this",
"method",
"with",
"the",
"same",
"index",
"and",
"mode",
"group",
"returned",
"from",
":",
"attr",
":",
"~libinput",
".",
"event",
".",
"TabletPadEvent",
".",
"mode_group",
"provided",
"the",
"event",
"was",
"generated",
"by",
"this",
"mode",
"group",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L2051-L2069 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Seat.physical_name | def physical_name(self):
"""The physical name of the seat.
For libinput contexts created from udev, this is always the same value
as passed into :meth:`~libinput.LibInputUdev.assign_seat` and all
seats from that context will have the same physical name.
The physical name of the seat is one that is usually set by the system
or lower levels of the stack. In most cases, this is the base filter
for devices - devices assigned to seats outside the current seat will
not be available to the caller.
Returns:
str: The physical name of this seat.
"""
pchar = self._libinput.libinput_seat_get_physical_name(self._handle)
return string_at(pchar).decode() | python | def physical_name(self):
"""The physical name of the seat.
For libinput contexts created from udev, this is always the same value
as passed into :meth:`~libinput.LibInputUdev.assign_seat` and all
seats from that context will have the same physical name.
The physical name of the seat is one that is usually set by the system
or lower levels of the stack. In most cases, this is the base filter
for devices - devices assigned to seats outside the current seat will
not be available to the caller.
Returns:
str: The physical name of this seat.
"""
pchar = self._libinput.libinput_seat_get_physical_name(self._handle)
return string_at(pchar).decode() | [
"def",
"physical_name",
"(",
"self",
")",
":",
"pchar",
"=",
"self",
".",
"_libinput",
".",
"libinput_seat_get_physical_name",
"(",
"self",
".",
"_handle",
")",
"return",
"string_at",
"(",
"pchar",
")",
".",
"decode",
"(",
")"
] | The physical name of the seat.
For libinput contexts created from udev, this is always the same value
as passed into :meth:`~libinput.LibInputUdev.assign_seat` and all
seats from that context will have the same physical name.
The physical name of the seat is one that is usually set by the system
or lower levels of the stack. In most cases, this is the base filter
for devices - devices assigned to seats outside the current seat will
not be available to the caller.
Returns:
str: The physical name of this seat. | [
"The",
"physical",
"name",
"of",
"the",
"seat",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L2111-L2128 |
OzymandiasTheGreat/python-libinput | libinput/device.py | Seat.logical_name | def logical_name(self):
"""The logical name of the seat.
This is an identifier to group sets of devices within the compositor.
Returns:
str: The logical name of this seat.
"""
pchar = self._libinput.libinput_seat_get_logical_name(self._handle)
return string_at(pchar).decode() | python | def logical_name(self):
"""The logical name of the seat.
This is an identifier to group sets of devices within the compositor.
Returns:
str: The logical name of this seat.
"""
pchar = self._libinput.libinput_seat_get_logical_name(self._handle)
return string_at(pchar).decode() | [
"def",
"logical_name",
"(",
"self",
")",
":",
"pchar",
"=",
"self",
".",
"_libinput",
".",
"libinput_seat_get_logical_name",
"(",
"self",
".",
"_handle",
")",
"return",
"string_at",
"(",
"pchar",
")",
".",
"decode",
"(",
")"
] | The logical name of the seat.
This is an identifier to group sets of devices within the compositor.
Returns:
str: The logical name of this seat. | [
"The",
"logical",
"name",
"of",
"the",
"seat",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L2131-L2141 |
PolyJIT/benchbuild | benchbuild/utils/log.py | configure | def configure():
"""Load logging configuration from our own defaults."""
log_levels = {
5: logging.NOTSET,
4: logging.DEBUG,
3: logging.INFO,
2: logging.WARNING,
1: logging.ERROR,
0: logging.CRITICAL
}
logging.captureWarnings(True)
root_logger = logging.getLogger()
if settings.CFG["debug"]:
details_format = logging.Formatter(
'%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
details_hdl = logging.StreamHandler()
details_hdl.setFormatter(details_format)
root_logger.addHandler(details_hdl)
else:
brief_format = logging.Formatter('%(message)s')
console_hdl = logging.StreamHandler()
console_hdl.setFormatter(brief_format)
root_logger.addHandler(console_hdl)
root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])])
configure_plumbum_log()
configure_migrate_log()
configure_parse_log() | python | def configure():
"""Load logging configuration from our own defaults."""
log_levels = {
5: logging.NOTSET,
4: logging.DEBUG,
3: logging.INFO,
2: logging.WARNING,
1: logging.ERROR,
0: logging.CRITICAL
}
logging.captureWarnings(True)
root_logger = logging.getLogger()
if settings.CFG["debug"]:
details_format = logging.Formatter(
'%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
details_hdl = logging.StreamHandler()
details_hdl.setFormatter(details_format)
root_logger.addHandler(details_hdl)
else:
brief_format = logging.Formatter('%(message)s')
console_hdl = logging.StreamHandler()
console_hdl.setFormatter(brief_format)
root_logger.addHandler(console_hdl)
root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])])
configure_plumbum_log()
configure_migrate_log()
configure_parse_log() | [
"def",
"configure",
"(",
")",
":",
"log_levels",
"=",
"{",
"5",
":",
"logging",
".",
"NOTSET",
",",
"4",
":",
"logging",
".",
"DEBUG",
",",
"3",
":",
"logging",
".",
"INFO",
",",
"2",
":",
"logging",
".",
"WARNING",
",",
"1",
":",
"logging",
".",
"ERROR",
",",
"0",
":",
"logging",
".",
"CRITICAL",
"}",
"logging",
".",
"captureWarnings",
"(",
"True",
")",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"if",
"settings",
".",
"CFG",
"[",
"\"debug\"",
"]",
":",
"details_format",
"=",
"logging",
".",
"Formatter",
"(",
"'%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s'",
")",
"details_hdl",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"details_hdl",
".",
"setFormatter",
"(",
"details_format",
")",
"root_logger",
".",
"addHandler",
"(",
"details_hdl",
")",
"else",
":",
"brief_format",
"=",
"logging",
".",
"Formatter",
"(",
"'%(message)s'",
")",
"console_hdl",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console_hdl",
".",
"setFormatter",
"(",
"brief_format",
")",
"root_logger",
".",
"addHandler",
"(",
"console_hdl",
")",
"root_logger",
".",
"setLevel",
"(",
"log_levels",
"[",
"int",
"(",
"settings",
".",
"CFG",
"[",
"\"verbosity\"",
"]",
")",
"]",
")",
"configure_plumbum_log",
"(",
")",
"configure_migrate_log",
"(",
")",
"configure_parse_log",
"(",
")"
] | Load logging configuration from our own defaults. | [
"Load",
"logging",
"configuration",
"from",
"our",
"own",
"defaults",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/log.py#L28-L56 |
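A minimal sketch of driving `configure()`. The `CFG["debug"]` and `CFG["verbosity"]` keys are read directly from the function body; the exact import paths are assumptions based on the record's module path:

from benchbuild import settings          # assumed import path
from benchbuild.utils import log

settings.CFG["debug"] = False            # pick the brief '%(message)s' handler
settings.CFG["verbosity"] = 3            # maps to logging.INFO via log_levels
log.configure()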
sci-bots/svg-model | svg_model/svgload/path_parser.py | PathDataParser.get_number | def get_number(self):
'''
.. versionchanged:: 0.9.2
Add support for float exponent strings (e.g., ``3.435e-7``).
Fixes `issue #4 <https://github.com/wheeler-microfluidics/svg-model/issues/4>`.
'''
number = None
start = self.get_char('0123456789.-')
if start:
number = start
finish = self.get_chars('-e0123456789.')
if finish:
number += finish
if any(c in number for c in '.e'):
return float(number)
else:
return int(number) | python | def get_number(self):
'''
.. versionchanged:: 0.9.2
Add support for float exponent strings (e.g., ``3.435e-7``).
Fixes `issue #4 <https://github.com/wheeler-microfluidics/svg-model/issues/4>`.
'''
number = None
start = self.get_char('0123456789.-')
if start:
number = start
finish = self.get_chars('-e0123456789.')
if finish:
number += finish
if any(c in number for c in '.e'):
return float(number)
else:
return int(number) | [
"def",
"get_number",
"(",
"self",
")",
":",
"number",
"=",
"None",
"start",
"=",
"self",
".",
"get_char",
"(",
"'0123456789.-'",
")",
"if",
"start",
":",
"number",
"=",
"start",
"finish",
"=",
"self",
".",
"get_chars",
"(",
"'-e0123456789.'",
")",
"if",
"finish",
":",
"number",
"+=",
"finish",
"if",
"any",
"(",
"c",
"in",
"number",
"for",
"c",
"in",
"'.e'",
")",
":",
"return",
"float",
"(",
"number",
")",
"else",
":",
"return",
"int",
"(",
"number",
")"
] | .. versionchanged:: 0.9.2
Add support for float exponent strings (e.g., ``3.435e-7``).
Fixes `issue #4 <https://github.com/wheeler-microfluidics/svg-model/issues/4>`. | [
"..",
"versionchanged",
"::",
"0",
".",
"9",
".",
"2",
"Add",
"support",
"for",
"float",
"exponent",
"strings",
"(",
"e",
".",
"g",
".",
"3",
".",
"435e",
"-",
"7",
")",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L43-L60 |
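A small sketch of `get_number` in isolation, priming the parser state by hand the same way `to_tuples` (next record) does; this assumes `PathDataParser` takes no constructor arguments:

from svg_model.svgload.path_parser import PathDataParser

parser = PathDataParser()
parser.data, parser.pos = '3.435e-7', 0
print(parser.get_number())   # -> 3.435e-07, a float because '.' or 'e' is present
parser.data, parser.pos = '42', 0
print(parser.get_number())   # -> 42, an int since neither '.' nor 'e' appears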
sci-bots/svg-model | svg_model/svgload/path_parser.py | PathDataParser.to_tuples | def to_tuples(self, data):
'''
path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace.
'''
self.data = data
self.pos = 0
parsed = []
command = []
while self.pos < len(self.data):
indicator = self.data[self.pos]
if indicator == ' ':
self.pos += 1
elif indicator == ',':
if len(command) >= 2:
self.pos += 1
else:
msg = 'unexpected comma at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
elif indicator in '0123456789.-':
if command:
command.append(self.get_number())
else:
msg = 'missing command at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
else:
if command:
parsed.append(tuple(command))
command = [indicator]
self.pos += 1
if command:
parsed.append(tuple(command))
if parsed[0][0] == 'M' and parsed[-1][0] == 'L'\
and parsed[0][1:] == parsed[-1][1:]:
parsed[-1] = ('z',)
return parsed | python | def to_tuples(self, data):
'''
path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace.
'''
self.data = data
self.pos = 0
parsed = []
command = []
while self.pos < len(self.data):
indicator = self.data[self.pos]
if indicator == ' ':
self.pos += 1
elif indicator == ',':
if len(command) >= 2:
self.pos += 1
else:
msg = 'unexpected comma at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
elif indicator in '0123456789.-':
if command:
command.append(self.get_number())
else:
msg = 'missing command at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
else:
if command:
parsed.append(tuple(command))
command = [indicator]
self.pos += 1
if command:
parsed.append(tuple(command))
if parsed[0][0] == 'M' and parsed[-1][0] == 'L'\
and parsed[0][1:] == parsed[-1][1:]:
parsed[-1] = ('z',)
return parsed | [
"def",
"to_tuples",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"data",
"=",
"data",
"self",
".",
"pos",
"=",
"0",
"parsed",
"=",
"[",
"]",
"command",
"=",
"[",
"]",
"while",
"self",
".",
"pos",
"<",
"len",
"(",
"self",
".",
"data",
")",
":",
"indicator",
"=",
"self",
".",
"data",
"[",
"self",
".",
"pos",
"]",
"if",
"indicator",
"==",
"' '",
":",
"self",
".",
"pos",
"+=",
"1",
"elif",
"indicator",
"==",
"','",
":",
"if",
"len",
"(",
"command",
")",
">=",
"2",
":",
"self",
".",
"pos",
"+=",
"1",
"else",
":",
"msg",
"=",
"'unexpected comma at %d in %r'",
"%",
"(",
"self",
".",
"pos",
",",
"self",
".",
"data",
")",
"raise",
"ParseError",
"(",
"msg",
")",
"elif",
"indicator",
"in",
"'0123456789.-'",
":",
"if",
"command",
":",
"command",
".",
"append",
"(",
"self",
".",
"get_number",
"(",
")",
")",
"else",
":",
"msg",
"=",
"'missing command at %d in %r'",
"%",
"(",
"self",
".",
"pos",
",",
"self",
".",
"data",
")",
"raise",
"ParseError",
"(",
"msg",
")",
"else",
":",
"if",
"command",
":",
"parsed",
".",
"append",
"(",
"tuple",
"(",
"command",
")",
")",
"command",
"=",
"[",
"indicator",
"]",
"self",
".",
"pos",
"+=",
"1",
"if",
"command",
":",
"parsed",
".",
"append",
"(",
"tuple",
"(",
"command",
")",
")",
"if",
"parsed",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'M'",
"and",
"parsed",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'L'",
"and",
"parsed",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"==",
"parsed",
"[",
"-",
"1",
"]",
"[",
"1",
":",
"]",
":",
"parsed",
"[",
"-",
"1",
"]",
"=",
"(",
"'z'",
",",
")",
"return",
"parsed"
] | path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace. | [
"path_data",
":",
"string",
"from",
"an",
"svg",
"path",
"tag",
"s",
"d",
"attribute",
"eg",
":",
"M",
"46",
"74",
"L",
"35",
"12",
"l",
"53",
"-",
"13",
"z",
"returns",
"the",
"same",
"data",
"collected",
"in",
"a",
"list",
"of",
"tuples",
"eg",
":",
"[",
"(",
"M",
"46",
"74",
")",
"(",
"L",
"35",
"12",
")",
"(",
"l",
"53",
"-",
"13",
")",
"(",
"z",
")",
"]",
"The",
"input",
"data",
"may",
"have",
"floats",
"instead",
"of",
"ints",
"this",
"will",
"be",
"reflected",
"in",
"the",
"output",
".",
"The",
"input",
"may",
"have",
"its",
"whitespace",
"stripped",
"out",
"or",
"its",
"commas",
"replaced",
"by",
"whitespace",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L62-L105 |
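A runnable restatement of the docstring's own example; note that the actual last element is the one-item tuple ('z',):

from svg_model.svgload.path_parser import PathDataParser

tuples = PathDataParser().to_tuples('M 46,74 L 35,12 l 53,-13 z')
print(tuples)   # -> [('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z',)]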
sci-bots/svg-model | svg_model/svgload/path_parser.py | LoopTracer.to_loops | def to_loops(self, commands):
'''
commands : list of tuples, as output from to_tuples() method, eg:
[('M', 1, 2), ('L', 3, 4), ('L', 5, 6), ('z')]
Interprets the command characters at the start of each tuple to return
a list of loops, where each loop is a closed list of verts, and each
vert is a pair of ints or floats, eg:
[[1, 2, 3, 4, 5, 6]]
Note that the final point of each loop is eliminated if it is equal to
the first.
SVG defines commands:
M x,y: move, start a new loop
L x,y: line, draw boundary
H x: move horizontal
V y: move vertical
Z: close current loop - join to start point
Lower-case command letters (eg 'm') indicate a relative offset.
See http://www.w3.org/TR/SVG11/paths.html
'''
lookup = {
'M': self.onMove,
'L': self.onLine,
'H': self.onHorizontalMove,
'h': self.onHorizontalMove,
'V': self.onVerticalMove,
'v': self.onVerticalMove,
'Z': self.onClose,
'z': self.onClose,
}
self.loops = []
self.current_loop = None
for command in commands:
action = command[0]
if action in lookup:
lookup[action](command)
else:
self.onBadCommand(action)
return self.loops | python | def to_loops(self, commands):
'''
commands : list of tuples, as output from to_tuples() method, eg:
[('M', 1, 2), ('L', 3, 4), ('L', 5, 6), ('z')]
Interprets the command characters at the start of each tuple to return
a list of loops, where each loop is a closed list of verts, and each
vert is a pair of ints or floats, eg:
[[1, 2, 3, 4, 5, 6]]
Note that the final point of each loop is eliminated if it is equal to
the first.
SVG defines commands:
M x,y: move, start a new loop
L x,y: line, draw boundary
H x: move horizontal
V y: move vertical
Z: close current loop - join to start point
Lower-case command letters (eg 'm') indicate a relative offset.
See http://www.w3.org/TR/SVG11/paths.html
'''
lookup = {
'M': self.onMove,
'L': self.onLine,
'H': self.onHorizontalMove,
'h': self.onHorizontalMove,
'V': self.onVerticalMove,
'v': self.onVerticalMove,
'Z': self.onClose,
'z': self.onClose,
}
self.loops = []
self.current_loop = None
for command in commands:
action = command[0]
if action in lookup:
lookup[action](command)
else:
self.onBadCommand(action)
return self.loops | [
"def",
"to_loops",
"(",
"self",
",",
"commands",
")",
":",
"lookup",
"=",
"{",
"'M'",
":",
"self",
".",
"onMove",
",",
"'L'",
":",
"self",
".",
"onLine",
",",
"'H'",
":",
"self",
".",
"onHorizontalMove",
",",
"'h'",
":",
"self",
".",
"onHorizontalMove",
",",
"'V'",
":",
"self",
".",
"onVerticalMove",
",",
"'v'",
":",
"self",
".",
"onVerticalMove",
",",
"'Z'",
":",
"self",
".",
"onClose",
",",
"'z'",
":",
"self",
".",
"onClose",
",",
"}",
"self",
".",
"loops",
"=",
"[",
"]",
"self",
".",
"current_loop",
"=",
"None",
"for",
"command",
"in",
"commands",
":",
"action",
"=",
"command",
"[",
"0",
"]",
"if",
"action",
"in",
"lookup",
":",
"lookup",
"[",
"action",
"]",
"(",
"command",
")",
"else",
":",
"self",
".",
"onBadCommand",
"(",
"action",
")",
"return",
"self",
".",
"loops"
] | commands : list of tuples, as output from to_tuples() method, eg:
[('M', 1, 2), ('L', 3, 4), ('L', 5, 6), ('z')]
Interprets the command characters at the start of each tuple to return
a list of loops, where each loop is a closed list of verts, and each
vert is a pair of ints or floats, eg:
[[1, 2, 3, 4, 5, 6]]
Note that the final point of each loop is eliminated if it is equal to
the first.
SVG defines commands:
M x,y: move, start a new loop
L x,y: line, draw boundary
H x: move horizontal
V y: move vertical
Z: close current loop - join to start point
Lower-case command letters (eg 'm') indicate a relative offset.
See http://www.w3.org/TR/SVG11/paths.html | [
"commands",
":",
"list",
"of",
"tuples",
"as",
"output",
"from",
"to_tuples",
"()",
"method",
"eg",
":",
"[",
"(",
"M",
"1",
"2",
")",
"(",
"L",
"3",
"4",
")",
"(",
"L",
"5",
"6",
")",
"(",
"z",
")",
"]",
"Interprets",
"the",
"command",
"characters",
"at",
"the",
"start",
"of",
"each",
"tuple",
"to",
"return",
"a",
"list",
"of",
"loops",
"where",
"each",
"loop",
"is",
"a",
"closed",
"list",
"of",
"verts",
"and",
"each",
"vert",
"is",
"a",
"pair",
"of",
"ints",
"or",
"floats",
"eg",
":",
"[[",
"1",
"2",
"3",
"4",
"5",
"6",
"]]",
"Note",
"that",
"the",
"final",
"point",
"of",
"each",
"loop",
"is",
"eliminated",
"if",
"it",
"is",
"equal",
"to",
"the",
"first",
".",
"SVG",
"defines",
"commands",
":",
"M",
"x",
"y",
":",
"move",
"start",
"a",
"new",
"loop",
"L",
"x",
"y",
":",
"line",
"draw",
"boundary",
"H",
"x",
":",
"move",
"horizontal",
"V",
"y",
":",
"move",
"vertical",
"Z",
":",
"close",
"current",
"loop",
"-",
"join",
"to",
"start",
"point",
"Lower",
"-",
"case",
"command",
"letters",
"(",
"eg",
"m",
")",
"indicate",
"a",
"relative",
"offset",
".",
"See",
"http",
":",
"//",
"www",
".",
"w3",
".",
"org",
"/",
"TR",
"/",
"SVG11",
"/",
"paths",
".",
"html"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L156-L194 |
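A sketch chaining the two parsing stages. The docstring above describes the result as flat loops like [[1, 2, 3, 4, 5, 6]], while the parse() doctest further below shows verts as (x, y) tuples, so the exact element shape should be checked against the installed version:

from svg_model.svgload.path_parser import LoopTracer

commands = [('M', 1, 2), ('L', 3, 4), ('L', 5, 6), ('z',)]
loops = LoopTracer().to_loops(commands)
print(loops)   # one closed loop built from the M/L commands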
sci-bots/svg-model | svg_model/svgload/path_parser.py | PathParser.parse_color | def parse_color(self, color):
'''
color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255)
'''
if color == 'none':
return None
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)) | python | def parse_color(self, color):
'''
color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255)
'''
if color == 'none':
return None
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)) | [
"def",
"parse_color",
"(",
"self",
",",
"color",
")",
":",
"if",
"color",
"==",
"'none'",
":",
"return",
"None",
"return",
"(",
"int",
"(",
"color",
"[",
"1",
":",
"3",
"]",
",",
"16",
")",
",",
"int",
"(",
"color",
"[",
"3",
":",
"5",
"]",
",",
"16",
")",
",",
"int",
"(",
"color",
"[",
"5",
":",
"7",
"]",
",",
"16",
")",
")"
] | color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255) | [
"color",
":",
"string",
"eg",
":",
"#rrggbb",
"or",
"none",
"(",
"where",
"rr",
"gg",
"bb",
"are",
"hex",
"digits",
"from",
"00",
"to",
"ff",
")",
"returns",
"a",
"triple",
"of",
"unsigned",
"bytes",
"eg",
":",
"(",
"0",
"128",
"255",
")"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L213-L224 |
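A short example matching the docstring's contract, assuming `PathParser` needs no constructor arguments:

from svg_model.svgload.path_parser import PathParser

p = PathParser()
print(p.parse_color('#0080ff'))   # -> (0, 128, 255)
print(p.parse_color('none'))      # -> None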
sci-bots/svg-model | svg_model/svgload/path_parser.py | PathParser.parse_style | def parse_style(self, style):
'''
style : string, eg:
fill:#ff2a2a;fill-rule:evenodd;stroke:none;stroke-width:1px;
stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1
returns color as a triple of unsigned bytes: (r, g, b), or None
'''
style_elements = style.split(';')
while style_elements:
element = style_elements.pop()
if element.startswith('fill:'):
return self.parse_color(element[5:])
return None | python | def parse_style(self, style):
'''
style : string, eg:
fill:#ff2a2a;fill-rule:evenodd;stroke:none;stroke-width:1px;
stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1
returns color as a triple of unsigned bytes: (r, g, b), or None
'''
style_elements = style.split(';')
while style_elements:
element = style_elements.pop()
if element.startswith('fill:'):
return self.parse_color(element[5:])
return None | [
"def",
"parse_style",
"(",
"self",
",",
"style",
")",
":",
"style_elements",
"=",
"style",
".",
"split",
"(",
"';'",
")",
"while",
"style_elements",
":",
"element",
"=",
"style_elements",
".",
"pop",
"(",
")",
"if",
"element",
".",
"startswith",
"(",
"'fill:'",
")",
":",
"return",
"self",
".",
"parse_color",
"(",
"element",
"[",
"5",
":",
"]",
")",
"return",
"None"
] | style : string, eg:
fill:#ff2a2a;fill-rule:evenodd;stroke:none;stroke-width:1px;
stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1
returns color as a triple of unsigned bytes: (r, g, b), or None | [
"style",
":",
"string",
"eg",
":",
"fill",
":",
"#ff2a2a",
";",
"fill",
"-",
"rule",
":",
"evenodd",
";",
"stroke",
":",
"none",
";",
"stroke",
"-",
"width",
":",
"1px",
";",
"stroke",
"-",
"linecap",
":",
"butt",
";",
"stroke",
"-",
"linejoin",
":",
"miter",
";",
"stroke",
"-",
"opacity",
":",
"1",
"returns",
"color",
"as",
"a",
"triple",
"of",
"unsigned",
"bytes",
":",
"(",
"r",
"g",
"b",
")",
"or",
"None"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L227-L239 |
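And the same for `parse_style`, which scans the ;-separated declarations for a `fill:` entry and delegates to `parse_color`:

from svg_model.svgload.path_parser import PathParser

style = 'fill:#ff2a2a;fill-rule:evenodd;stroke:none;stroke-width:1px'
print(PathParser().parse_style(style))   # -> (255, 42, 42)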
sci-bots/svg-model | svg_model/svgload/path_parser.py | PathParser.parse | def parse(self, tag):
'''
returns (id, path)
where: 'id' is the path tag's id attribute
'path' is a populated instance of SvgPath
>>> from lxml import etree
>>> from lxml.builder import E
>>> path_tag = etree.XML("""
... <path id="path0"
... style="fill:#0000ff;stroke:#000000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none"
... d="M 525.93385,261.47322 L 525.933 85,269.65826 L 534.07239,269.65826 L 534.07239,261.47322 L 525.93385,261.47322" />
... """)
>>> path_parser = PathParser()
>>> id, svg_path = path_parser.parse(path_tag)
>>> id
'path0'
>>> svg_path.color
(0, 0, 255)
>>> len(svg_path.loops)
1
>>> svg_path.loops[0].verts
[(534.07239, 261.47322), (534.07239, 269.65826), (525.933, 85), (525.93385, 261.47322)]
Note that only absolute commands (i.e., uppercase) are currently supported; paths containing relative (lowercase) commands will throw a ParseError exception. For example:
>>> path_tag = E.path(id="path0", d="M 636.0331,256.9345 l 636.0331,256.9345")
>>> print etree.tostring(path_tag)
<path d="M 636.0331,256.9345 l 636.0331,256.9345" id="path0"/>
>>> path_parser.parse(path_tag)
Traceback (most recent call last):
...
ParseError: unsupported svg path command: l
>>>
'''
id = self.get_id(tag.attrib)
parser = PathDataParser()
path_data = tag.attrib['d']
path_tuple = parser.to_tuples(path_data)
tracer = LoopTracer()
loops = tracer.to_loops(path_tuple)
path = ColoredPath(loops)
if 'style' in list(tag.attrib.keys()):
style_data = tag.attrib['style']
path.color = self.parse_style(style_data)
return id, path | python | def parse(self, tag):
'''
returns (id, path)
where: 'id' is the path tag's id attribute
'path' is a populated instance of SvgPath
>>> from lxml import etree
>>> from lxml.builder import E
>>> path_tag = etree.XML("""
... <path id="path0"
... style="fill:#0000ff;stroke:#000000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none"
... d="M 525.93385,261.47322 L 525.933 85,269.65826 L 534.07239,269.65826 L 534.07239,261.47322 L 525.93385,261.47322" />
... """)
>>> path_parser = PathParser()
>>> id, svg_path = path_parser.parse(path_tag)
>>> id
'path0'
>>> svg_path.color
(0, 0, 255)
>>> len(svg_path.loops)
1
>>> svg_path.loops[0].verts
[(534.07239, 261.47322), (534.07239, 269.65826), (525.933, 85), (525.93385, 261.47322)]
Note that only absolute commands (i.e., uppercase) are currently supported; paths containing relative (lowercase) commands will throw a ParseError exception. For example:
>>> path_tag = E.path(id="path0", d="M 636.0331,256.9345 l 636.0331,256.9345")
>>> print etree.tostring(path_tag)
<path d="M 636.0331,256.9345 l 636.0331,256.9345" id="path0"/>
>>> path_parser.parse(path_tag)
Traceback (most recent call last):
...
ParseError: unsupported svg path command: l
>>>
'''
id = self.get_id(tag.attrib)
parser = PathDataParser()
path_data = tag.attrib['d']
path_tuple = parser.to_tuples(path_data)
tracer = LoopTracer()
loops = tracer.to_loops(path_tuple)
path = ColoredPath(loops)
if 'style' in list(tag.attrib.keys()):
style_data = tag.attrib['style']
path.color = self.parse_style(style_data)
return id, path | [
"def",
"parse",
"(",
"self",
",",
"tag",
")",
":",
"id",
"=",
"self",
".",
"get_id",
"(",
"tag",
".",
"attrib",
")",
"parser",
"=",
"PathDataParser",
"(",
")",
"path_data",
"=",
"tag",
".",
"attrib",
"[",
"'d'",
"]",
"path_tuple",
"=",
"parser",
".",
"to_tuples",
"(",
"path_data",
")",
"tracer",
"=",
"LoopTracer",
"(",
")",
"loops",
"=",
"tracer",
".",
"to_loops",
"(",
"path_tuple",
")",
"path",
"=",
"ColoredPath",
"(",
"loops",
")",
"if",
"'style'",
"in",
"list",
"(",
"tag",
".",
"attrib",
".",
"keys",
"(",
")",
")",
":",
"style_data",
"=",
"tag",
".",
"attrib",
"[",
"'style'",
"]",
"path",
".",
"color",
"=",
"self",
".",
"parse_style",
"(",
"style_data",
")",
"return",
"id",
",",
"path"
] | returns (id, path)
where: 'id' is the path tag's id attribute
'path' is a populated instance of SvgPath
>>> from lxml import etree
>>> from lxml.builder import E
>>> path_tag = etree.XML("""
... <path id="path0"
... style="fill:#0000ff;stroke:#000000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none"
... d="M 525.93385,261.47322 L 525.933 85,269.65826 L 534.07239,269.65826 L 534.07239,261.47322 L 525.93385,261.47322" />
... """)
>>> path_parser = PathParser()
>>> id, svg_path = path_parser.parse(path_tag)
>>> id
'path0'
>>> svg_path.color
(0, 0, 255)
>>> len(svg_path.loops)
1
>>> svg_path.loops[0].verts
[(534.07239, 261.47322), (534.07239, 269.65826), (525.933, 85), (525.93385, 261.47322)]
Note that only absolute commands (i.e., uppercase) are currently supported; paths containing relative (lowercase) commands will throw a ParseError exception. For example:
>>> path_tag = E.path(id="path0", d="M 636.0331,256.9345 l 636.0331,256.9345")
>>> print etree.tostring(path_tag)
<path d="M 636.0331,256.9345 l 636.0331,256.9345" id="path0"/>
>>> path_parser.parse(path_tag)
Traceback (most recent call last):
...
ParseError: unsupported svg path command: l
>>> | [
"returns",
"(",
"id",
"path",
")",
"where",
":",
"id",
"is",
"the",
"path",
"tag",
"s",
"id",
"attribute",
"path",
"is",
"a",
"populated",
"instance",
"of",
"SvgPath"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/path_parser.py#L242-L292 |
davidhuser/dhis2.py | dhis2/api.py | Api.from_auth_file | def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If `location` is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent) | python | def from_auth_file(cls, location=None, api_version=None, user_agent=None):
"""
Alternative constructor to load from JSON file.
If `location` is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance
"""
location = search_auth_file() if not location else location
a = load_json(location)
try:
section = a['dhis']
baseurl = section['baseurl']
username = section['username']
password = section['password']
assert all([baseurl, username, password])
except (KeyError, AssertionError):
raise ClientException("Auth file found but not valid: {}".format(location))
else:
return cls(baseurl, username, password, api_version=api_version, user_agent=user_agent) | [
"def",
"from_auth_file",
"(",
"cls",
",",
"location",
"=",
"None",
",",
"api_version",
"=",
"None",
",",
"user_agent",
"=",
"None",
")",
":",
"location",
"=",
"search_auth_file",
"(",
")",
"if",
"not",
"location",
"else",
"location",
"a",
"=",
"load_json",
"(",
"location",
")",
"try",
":",
"section",
"=",
"a",
"[",
"'dhis'",
"]",
"baseurl",
"=",
"section",
"[",
"'baseurl'",
"]",
"username",
"=",
"section",
"[",
"'username'",
"]",
"password",
"=",
"section",
"[",
"'password'",
"]",
"assert",
"all",
"(",
"[",
"baseurl",
",",
"username",
",",
"password",
"]",
")",
"except",
"(",
"KeyError",
",",
"AssertionError",
")",
":",
"raise",
"ClientException",
"(",
"\"Auth file found but not valid: {}\"",
".",
"format",
"(",
"location",
")",
")",
"else",
":",
"return",
"cls",
"(",
"baseurl",
",",
"username",
",",
"password",
",",
"api_version",
"=",
"api_version",
",",
"user_agent",
"=",
"user_agent",
")"
] | Alternative constructor to load from JSON file.
If auth_file_path is not specified, it tries to find `dish.json` in:
- DHIS_HOME
- Home folder
:param location: authentication file path
:param api_version: see Api
:param user_agent: see Api
:return: Api instance | [
"Alternative",
"constructor",
"to",
"load",
"from",
"JSON",
"file",
".",
"If",
"auth_file_path",
"is",
"not",
"specified",
"it",
"tries",
"to",
"find",
"dish",
".",
"json",
"in",
":",
"-",
"DHIS_HOME",
"-",
"Home",
"folder",
":",
"param",
"location",
":",
"authentication",
"file",
"path",
":",
"param",
"api_version",
":",
"see",
"Api",
":",
"param",
"user_agent",
":",
"see",
"Api",
":",
"return",
":",
"Api",
"instance"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L139-L162 |
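A sketch of the auth-file flow. The JSON layout follows exactly the keys the method reads ('dhis' containing baseurl, username, password); the credentials shown are placeholders, and `Api` is assumed to be exported at package level:

# dish.json (placeholder values):
# {"dhis": {"baseurl": "https://play.dhis2.org/demo",
#           "username": "admin", "password": "district"}}
from dhis2 import Api

api = Api.from_auth_file('dish.json')   # explicit path...
api = Api.from_auth_file()              # ...or search DHIS_HOME, then the home folder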
davidhuser/dhis2.py | dhis2/api.py | Api._validate_response | def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response | python | def _validate_response(response):
"""
Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object
"""
try:
response.raise_for_status()
except requests.RequestException:
raise RequestException(
code=response.status_code,
url=response.url,
description=response.text)
else:
return response | [
"def",
"_validate_response",
"(",
"response",
")",
":",
"try",
":",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"RequestException",
":",
"raise",
"RequestException",
"(",
"code",
"=",
"response",
".",
"status_code",
",",
"url",
"=",
"response",
".",
"url",
",",
"description",
"=",
"response",
".",
"text",
")",
"else",
":",
"return",
"response"
] | Return response if ok, raise RequestException if not ok
:param response: requests.response object
:return: requests.response object | [
"Return",
"response",
"if",
"ok",
"raise",
"RequestException",
"if",
"not",
"ok",
":",
"param",
"response",
":",
"requests",
".",
"response",
"object",
":",
"return",
":",
"requests",
".",
"response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L165-L179 |
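What the helper above means for callers, as a hedged sketch: every non-2xx response surfaces as a `RequestException` whose attributes mirror the constructor call in `_validate_response`. This assumes the exception class is importable from the package root and reuses the `api` instance from the previous sketch:

from dhis2 import RequestException

try:
    api.get('system/info')
except RequestException as e:
    print(e.code, e.url, e.description)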
davidhuser/dhis2.py | dhis2/api.py | Api._validate_request | def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__)) | python | def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__)) | [
"def",
"_validate_request",
"(",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"data",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"endpoint",
",",
"string_types",
")",
"or",
"endpoint",
".",
"strip",
"(",
")",
"==",
"''",
":",
"raise",
"ClientException",
"(",
"\"Must submit `endpoint` for DHIS2 API\"",
")",
"if",
"not",
"isinstance",
"(",
"file_type",
",",
"string_types",
")",
"or",
"file_type",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'json'",
",",
"'csv'",
",",
"'xml'",
",",
"'pdf'",
",",
"'xlsx'",
")",
":",
"raise",
"ClientException",
"(",
"\"Invalid file_type: {}\"",
".",
"format",
"(",
"file_type",
")",
")",
"if",
"params",
":",
"if",
"not",
"isinstance",
"(",
"params",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"raise",
"ClientException",
"(",
"\"`params` must be a dict or list of tuples, not {}\"",
".",
"format",
"(",
"params",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"isinstance",
"(",
"params",
",",
"list",
")",
"and",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"elem",
",",
"tuple",
")",
"for",
"elem",
"in",
"params",
"]",
")",
":",
"raise",
"ClientException",
"(",
"\"`params` list must all be tuples\"",
")",
"if",
"data",
"and",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"ClientException",
"(",
"\"`data` must be a dict, not {}\"",
".",
"format",
"(",
"data",
".",
"__class__",
".",
"__name__",
")",
")"
] | Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters | [
"Validate",
"request",
"before",
"calling",
"API",
":",
"param",
"endpoint",
":",
"API",
"endpoint",
":",
"param",
"file_type",
":",
"file",
"type",
"requested",
":",
"param",
"data",
":",
"payload",
":",
"param",
"params",
":",
"HTTP",
"parameters"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L182-L200 |
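A sketch of the two `params` shapes the validator accepts; the list-of-tuples form is what allows repeating a key, such as several DHIS2 `filter` parameters (the filter strings are illustrative):

api.get('dataElements', params={'pageSize': 10})
api.get('dataElements', params=[('filter', 'name:like:ANC'),
                                ('filter', 'domainType:eq:AGGREGATE')])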
davidhuser/dhis2.py | dhis2/api.py | Api._make_request | def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r) | python | def _make_request(self, method, endpoint, **kwargs):
"""
Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not
"""
if isinstance(kwargs.get('file_type'), string_types):
file_type = kwargs['file_type'].lower()
else:
file_type = 'json'
params = kwargs.get('params')
data = kwargs.get('data', kwargs.get('json', None))
url = '{}/{}'.format(self.api_url, endpoint)
self._validate_request(endpoint, file_type, data, params)
if method == 'get':
stream = kwargs.get('stream', False)
url = '{}.{}'.format(url, file_type)
r = self.session.get(url, params=params, stream=stream)
elif method == 'post':
r = self.session.post(url=url, json=data, params=params)
elif method == 'put':
r = self.session.put(url=url, json=data, params=params)
elif method == 'patch':
r = self.session.patch(url=url, json=data, params=params)
elif method == 'delete':
r = self.session.delete(url=url, params=params)
else:
raise ClientException("Non-supported HTTP method: {}".format(method))
return self._validate_response(r) | [
"def",
"_make_request",
"(",
"self",
",",
"method",
",",
"endpoint",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"kwargs",
".",
"get",
"(",
"'file_type'",
")",
",",
"string_types",
")",
":",
"file_type",
"=",
"kwargs",
"[",
"'file_type'",
"]",
".",
"lower",
"(",
")",
"else",
":",
"file_type",
"=",
"'json'",
"params",
"=",
"kwargs",
".",
"get",
"(",
"'params'",
")",
"data",
"=",
"kwargs",
".",
"get",
"(",
"'data'",
",",
"kwargs",
".",
"get",
"(",
"'json'",
",",
"None",
")",
")",
"url",
"=",
"'{}/{}'",
".",
"format",
"(",
"self",
".",
"api_url",
",",
"endpoint",
")",
"self",
".",
"_validate_request",
"(",
"endpoint",
",",
"file_type",
",",
"data",
",",
"params",
")",
"if",
"method",
"==",
"'get'",
":",
"stream",
"=",
"kwargs",
".",
"get",
"(",
"'stream'",
",",
"False",
")",
"url",
"=",
"'{}.{}'",
".",
"format",
"(",
"url",
",",
"file_type",
")",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"stream",
")",
"elif",
"method",
"==",
"'post'",
":",
"r",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"data",
",",
"params",
"=",
"params",
")",
"elif",
"method",
"==",
"'put'",
":",
"r",
"=",
"self",
".",
"session",
".",
"put",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"data",
",",
"params",
"=",
"params",
")",
"elif",
"method",
"==",
"'patch'",
":",
"r",
"=",
"self",
".",
"session",
".",
"patch",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"data",
",",
"params",
"=",
"params",
")",
"elif",
"method",
"==",
"'delete'",
":",
"r",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"url",
"=",
"url",
",",
"params",
"=",
"params",
")",
"else",
":",
"raise",
"ClientException",
"(",
"\"Non-supported HTTP method: {}\"",
".",
"format",
"(",
"method",
")",
")",
"return",
"self",
".",
"_validate_response",
"(",
"r",
")"
] | Do the actual request with supplied HTTP method
:param method: HTTP method
:param endpoint: DHIS2 API endpoint
:param kwargs: keyword args
:return: response if ok, RequestException if not | [
"Do",
"the",
"actual",
"request",
"with",
"supplied",
"HTTP",
"method",
":",
"param",
"method",
":",
"HTTP",
"method",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"kwargs",
":",
"keyword",
"args",
":",
"return",
":",
"response",
"if",
"ok",
"RequestException",
"if",
"not"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L202-L240 |
davidhuser/dhis2.py | dhis2/api.py | Api.get | def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream) | python | def get(self, endpoint, file_type='json', params=None, stream=False):
"""
GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object
"""
return self._make_request('get', endpoint, params=params, file_type=file_type, stream=stream) | [
"def",
"get",
"(",
"self",
",",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"params",
"=",
"None",
",",
"stream",
"=",
"False",
")",
":",
"return",
"self",
".",
"_make_request",
"(",
"'get'",
",",
"endpoint",
",",
"params",
"=",
"params",
",",
"file_type",
"=",
"file_type",
",",
"stream",
"=",
"stream",
")"
] | GET from DHIS2
:param endpoint: DHIS2 API endpoint
:param file_type: DHIS2 API File Type (json, xml, csv), defaults to JSON
:param params: HTTP parameters
:param stream: use requests' stream parameter
:return: requests.Response object | [
"GET",
"from",
"DHIS2",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"file_type",
":",
"DHIS2",
"API",
"File",
"Type",
"(",
"json",
"xml",
"csv",
")",
"defaults",
"to",
"JSON",
":",
"param",
"params",
":",
"HTTP",
"parameters",
":",
"param",
"stream",
":",
"use",
"requests",
"stream",
"parameter",
":",
"return",
":",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L242-L251 |
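Usage sketch: per `_make_request` above, the requested `file_type` is appended to the URL, so the same endpoint can be fetched as JSON or CSV (the endpoint name is a standard DHIS2 collection, not taken from this record):

r = api.get('organisationUnits', params={'fields': 'id,name'})
print(r.json())
csv_text = api.get('organisationUnits', file_type='csv').text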
davidhuser/dhis2.py | dhis2/api.py | Api.post | def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params) | python | def post(self, endpoint, json=None, params=None, **kwargs):
"""POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('post', endpoint, data=json, params=params) | [
"def",
"post",
"(",
"self",
",",
"endpoint",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"kwargs",
"[",
"'data'",
"]",
"if",
"'data'",
"in",
"kwargs",
"else",
"json",
"return",
"self",
".",
"_make_request",
"(",
"'post'",
",",
"endpoint",
",",
"data",
"=",
"json",
",",
"params",
"=",
"params",
")"
] | POST to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object | [
"POST",
"to",
"DHIS2",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"json",
":",
"HTTP",
"payload",
":",
"param",
"params",
":",
"HTTP",
"parameters",
":",
"return",
":",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L253-L261 |
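A hedged POST sketch; `json=` carries the payload and `data=` is accepted as an alias via **kwargs. The metadata endpoint and payload fields follow common DHIS2 conventions and are assumptions here:

payload = {'dataElements': [{
    'name': 'Example element', 'shortName': 'Example element',
    'domainType': 'AGGREGATE', 'valueType': 'NUMBER',
    'aggregationType': 'SUM',
}]}
r = api.post('metadata', json=payload, params={'importMode': 'VALIDATE'})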
davidhuser/dhis2.py | dhis2/api.py | Api.put | def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params) | python | def put(self, endpoint, json=None, params=None, **kwargs):
"""
PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('put', endpoint, data=json, params=params) | [
"def",
"put",
"(",
"self",
",",
"endpoint",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"kwargs",
"[",
"'data'",
"]",
"if",
"'data'",
"in",
"kwargs",
"else",
"json",
"return",
"self",
".",
"_make_request",
"(",
"'put'",
",",
"endpoint",
",",
"data",
"=",
"json",
",",
"params",
"=",
"params",
")"
] | PUT to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters
:return: requests.Response object | [
"PUT",
"to",
"DHIS2",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"json",
":",
"HTTP",
"payload",
":",
"param",
"params",
":",
"HTTP",
"parameters",
":",
"return",
":",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L263-L272 |
davidhuser/dhis2.py | dhis2/api.py | Api.patch | def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params) | python | def patch(self, endpoint, json=None, params=None, **kwargs):
"""
PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('patch', endpoint, data=json, params=params) | [
"def",
"patch",
"(",
"self",
",",
"endpoint",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"kwargs",
"[",
"'data'",
"]",
"if",
"'data'",
"in",
"kwargs",
"else",
"json",
"return",
"self",
".",
"_make_request",
"(",
"'patch'",
",",
"endpoint",
",",
"data",
"=",
"json",
",",
"params",
"=",
"params",
")"
] | PATCH to DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object | [
"PATCH",
"to",
"DHIS2",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"json",
":",
"HTTP",
"payload",
":",
"param",
"params",
":",
"HTTP",
"parameters",
"(",
"dict",
")",
":",
"return",
":",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L274-L283 |
davidhuser/dhis2.py | dhis2/api.py | Api.delete | def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params) | python | def delete(self, endpoint, json=None, params=None, **kwargs):
"""
DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object
"""
json = kwargs['data'] if 'data' in kwargs else json
return self._make_request('delete', endpoint, data=json, params=params) | [
"def",
"delete",
"(",
"self",
",",
"endpoint",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"kwargs",
"[",
"'data'",
"]",
"if",
"'data'",
"in",
"kwargs",
"else",
"json",
"return",
"self",
".",
"_make_request",
"(",
"'delete'",
",",
"endpoint",
",",
"data",
"=",
"json",
",",
"params",
"=",
"params",
")"
] | DELETE from DHIS2
:param endpoint: DHIS2 API endpoint
:param json: HTTP payload
:param params: HTTP parameters (dict)
:return: requests.Response object | [
"DELETE",
"from",
"DHIS2",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"json",
":",
"HTTP",
"payload",
":",
"param",
"params",
":",
"HTTP",
"parameters",
"(",
"dict",
")",
":",
"return",
":",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L285-L294 |
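The three mutating wrappers above share one calling convention, sketched here against a hypothetical object UID:

uid = 'Uid00000001'                      # placeholder UID
api.put('dataElements/' + uid, json={'name': 'Replaced', 'shortName': 'Replaced'})
api.patch('dataElements/' + uid, json={'name': 'Renamed'})
api.delete('dataElements/' + uid)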
davidhuser/dhis2.py | dhis2/api.py | Api.get_paged | def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))} | python | def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
raise ClientException("page_size must be > 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))} | [
"def",
"get_paged",
"(",
"self",
",",
"endpoint",
",",
"params",
"=",
"None",
",",
"page_size",
"=",
"50",
",",
"merge",
"=",
"False",
")",
":",
"try",
":",
"if",
"not",
"isinstance",
"(",
"page_size",
",",
"(",
"string_types",
",",
"int",
")",
")",
"or",
"int",
"(",
"page_size",
")",
"<",
"1",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"raise",
"ClientException",
"(",
"\"page_size must be > 1\"",
")",
"params",
"=",
"{",
"}",
"if",
"not",
"params",
"else",
"params",
"if",
"'paging'",
"in",
"params",
":",
"raise",
"ClientException",
"(",
"\"Can't set paging manually in `params` when using `get_paged`\"",
")",
"params",
"[",
"'pageSize'",
"]",
"=",
"page_size",
"params",
"[",
"'page'",
"]",
"=",
"1",
"params",
"[",
"'totalPages'",
"]",
"=",
"True",
"collection",
"=",
"endpoint",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"# only use e.g. events when submitting events/query as endpoint",
"def",
"page_generator",
"(",
")",
":",
"\"\"\"Yield pages\"\"\"",
"page",
"=",
"self",
".",
"get",
"(",
"endpoint",
"=",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"params",
"=",
"params",
")",
".",
"json",
"(",
")",
"page_count",
"=",
"page",
"[",
"'pager'",
"]",
"[",
"'pageCount'",
"]",
"yield",
"page",
"while",
"page",
"[",
"'pager'",
"]",
"[",
"'page'",
"]",
"<",
"page_count",
":",
"params",
"[",
"'page'",
"]",
"+=",
"1",
"page",
"=",
"self",
".",
"get",
"(",
"endpoint",
"=",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"params",
"=",
"params",
")",
".",
"json",
"(",
")",
"yield",
"page",
"if",
"not",
"merge",
":",
"return",
"page_generator",
"(",
")",
"else",
":",
"data",
"=",
"[",
"]",
"for",
"p",
"in",
"page_generator",
"(",
")",
":",
"data",
".",
"append",
"(",
"p",
"[",
"collection",
"]",
")",
"return",
"{",
"collection",
":",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"data",
")",
")",
"}"
] | GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]} | [
"GET",
"with",
"paging",
"(",
"for",
"large",
"payloads",
")",
".",
":",
"param",
"page_size",
":",
"how",
"many",
"objects",
"per",
"page",
":",
"param",
"endpoint",
":",
"DHIS2",
"API",
"endpoint",
":",
"param",
"params",
":",
"HTTP",
"parameters",
"(",
"dict",
")",
"defaults",
"to",
"None",
":",
"param",
"merge",
":",
"If",
"true",
"return",
"a",
"list",
"containing",
"all",
"pages",
"instead",
"of",
"one",
"page",
".",
"Defaults",
"to",
"False",
".",
":",
"return",
":",
"generator",
"OR",
"a",
"normal",
"DHIS2",
"response",
"dict",
"e",
".",
"g",
".",
"{",
"organisationUnits",
":",
"[",
"...",
"]",
"}"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L296-L337 |
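Both return modes of `get_paged`, sketched with a placeholder `handle()` consumer:

for page in api.get_paged('organisationUnits', page_size=100):
    handle(page['organisationUnits'])    # handle() is a placeholder

merged = api.get_paged('organisationUnits', page_size=100, merge=True)
print(len(merged['organisationUnits'])) # all pages flattened under one key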
davidhuser/dhis2.py | dhis2/api.py | Api.get_sqlview | def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator()) | python | def get_sqlview(self, uid, execute=False, var=None, criteria=None, merge=False):
"""
GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View
"""
params = {}
sqlview_type = self.get('sqlViews/{}'.format(uid), params={'fields': 'type'}).json().get('type')
if sqlview_type == 'QUERY':
if not isinstance(var, dict):
raise ClientException("Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}")
var = ['{}:{}'.format(k, v) for k, v in var.items()]
params['var'] = var
if execute:
raise ClientException("SQL view of type QUERY, no view to create (no execute=True)")
else: # MATERIALIZED_VIEW / VIEW
if criteria:
if not isinstance(criteria, dict):
raise ClientException("Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }")
criteria = ['{}:{}'.format(k, v) for k, v in criteria.items()]
params['criteria'] = criteria
if execute: # materialize
self.post('sqlViews/{}/execute'.format(uid))
def page_generator():
with closing(self.get('sqlViews/{}/data'.format(uid), file_type='csv', params=params, stream=True)) as r:
# do not need to use unicodecsv.DictReader as data comes in bytes already
reader = DictReader(codecs.iterdecode(r.iter_lines(), 'utf-8'), delimiter=',', quotechar='"')
for row in reader:
yield row
if not merge:
return page_generator()
else:
return list(page_generator()) | [
"def",
"get_sqlview",
"(",
"self",
",",
"uid",
",",
"execute",
"=",
"False",
",",
"var",
"=",
"None",
",",
"criteria",
"=",
"None",
",",
"merge",
"=",
"False",
")",
":",
"params",
"=",
"{",
"}",
"sqlview_type",
"=",
"self",
".",
"get",
"(",
"'sqlViews/{}'",
".",
"format",
"(",
"uid",
")",
",",
"params",
"=",
"{",
"'fields'",
":",
"'type'",
"}",
")",
".",
"json",
"(",
")",
".",
"get",
"(",
"'type'",
")",
"if",
"sqlview_type",
"==",
"'QUERY'",
":",
"if",
"not",
"isinstance",
"(",
"var",
",",
"dict",
")",
":",
"raise",
"ClientException",
"(",
"\"Use a dict to submit variables: e.g. var={'key1': 'value1', 'key2': 'value2'}\"",
")",
"var",
"=",
"[",
"'{}:{}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"var",
".",
"items",
"(",
")",
"]",
"params",
"[",
"'var'",
"]",
"=",
"var",
"if",
"execute",
":",
"raise",
"ClientException",
"(",
"\"SQL view of type QUERY, no view to create (no execute=True)\"",
")",
"else",
":",
"# MATERIALIZED_VIEW / VIEW",
"if",
"criteria",
":",
"if",
"not",
"isinstance",
"(",
"criteria",
",",
"dict",
")",
":",
"raise",
"ClientException",
"(",
"\"Use a dict to submit criteria: { 'col1': 'value1', 'col2': 'value2' }\"",
")",
"criteria",
"=",
"[",
"'{}:{}'",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"criteria",
".",
"items",
"(",
")",
"]",
"params",
"[",
"'criteria'",
"]",
"=",
"criteria",
"if",
"execute",
":",
"# materialize",
"self",
".",
"post",
"(",
"'sqlViews/{}/execute'",
".",
"format",
"(",
"uid",
")",
")",
"def",
"page_generator",
"(",
")",
":",
"with",
"closing",
"(",
"self",
".",
"get",
"(",
"'sqlViews/{}/data'",
".",
"format",
"(",
"uid",
")",
",",
"file_type",
"=",
"'csv'",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
")",
")",
"as",
"r",
":",
"# do not need to use unicodecsv.DictReader as data comes in bytes already",
"reader",
"=",
"DictReader",
"(",
"codecs",
".",
"iterdecode",
"(",
"r",
".",
"iter_lines",
"(",
")",
",",
"'utf-8'",
")",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
")",
"for",
"row",
"in",
"reader",
":",
"yield",
"row",
"if",
"not",
"merge",
":",
"return",
"page_generator",
"(",
")",
"else",
":",
"return",
"list",
"(",
"page_generator",
"(",
")",
")"
] | GET SQL View data
:param uid: sqlView UID
:param execute: materialize sqlView before downloading its data
:param var: for QUERY types, a dict of variables to query the sqlView
:param criteria: for VIEW / MATERIALIZED_VIEW types, a dict of criteria to filter the sqlView
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: a list OR generator where __next__ is a 'row' of the SQL View | [
"GET",
"SQL",
"View",
"data",
":",
"param",
"uid",
":",
"sqlView",
"UID",
":",
"param",
"execute",
":",
"materialize",
"sqlView",
"before",
"downloading",
"its",
"data",
":",
"param",
"var",
":",
"for",
"QUERY",
"types",
"a",
"dict",
"of",
"variables",
"to",
"query",
"the",
"sqlView",
":",
"param",
"criteria",
":",
"for",
"VIEW",
"/",
"MATERIALIZED_VIEW",
"types",
"a",
"dict",
"of",
"criteria",
"to",
"filter",
"the",
"sqlView",
":",
"param",
"merge",
":",
"If",
"true",
"return",
"a",
"list",
"containing",
"all",
"pages",
"instead",
"of",
"one",
"page",
".",
"Defaults",
"to",
"False",
".",
":",
"return",
":",
"a",
"list",
"OR",
"generator",
"where",
"__next__",
"is",
"a",
"row",
"of",
"the",
"SQL",
"View"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L339-L379 |
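A minimal usage sketch for `get_sqlview` above. The server address, credentials, and the sqlView UID are illustrative assumptions, not values from the source; only the call shape follows the record.

```python
from dhis2 import Api

# Hypothetical connection details -- replace with a real server and credentials.
api = Api('play.dhis2.org/demo', 'admin', 'district')

# Stream a VIEW / MATERIALIZED_VIEW row by row; 'aBcDeFgHiJk' is a made-up UID.
for row in api.get_sqlview('aBcDeFgHiJk', execute=True,
                           criteria={'name': 'some-value'}):
    print(row)
```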
davidhuser/dhis2.py | dhis2/api.py | Api.post_partitioned | def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
    :param thresh: the maximum number of elements per chunk
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params) | python | def post_partitioned(self, endpoint, json, params=None, thresh=1000):
"""
Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
    :param thresh: the maximum number of elements per chunk
:return: generator where __next__ is a requests.Response object
"""
if not isinstance(json, dict):
raise ClientException('Parameter `json` must be a dict')
if not isinstance(thresh, int) or thresh < 2:
raise ClientException("`thresh` must be integer of 2 or larger")
try:
key = next(iter(json)) # the (only) key in the payload
except StopIteration:
raise ClientException("`json` is empty")
else:
if len(json.keys()) != 1:
raise ClientException('Must submit exactly one key in payload - e.g. json={"dataElements": [...]"}')
if not json.get(key):
raise ClientException("payload for key '{}' is empty".format(key))
else:
for data in partition_payload(data=json, key=key, thresh=thresh):
yield self.post(endpoint, json=data, params=params) | [
"def",
"post_partitioned",
"(",
"self",
",",
"endpoint",
",",
"json",
",",
"params",
"=",
"None",
",",
"thresh",
"=",
"1000",
")",
":",
"if",
"not",
"isinstance",
"(",
"json",
",",
"dict",
")",
":",
"raise",
"ClientException",
"(",
"'Parameter `json` must be a dict'",
")",
"if",
"not",
"isinstance",
"(",
"thresh",
",",
"int",
")",
"or",
"thresh",
"<",
"2",
":",
"raise",
"ClientException",
"(",
"\"`thresh` must be integer of 2 or larger\"",
")",
"try",
":",
"key",
"=",
"next",
"(",
"iter",
"(",
"json",
")",
")",
"# the (only) key in the payload",
"except",
"StopIteration",
":",
"raise",
"ClientException",
"(",
"\"`json` is empty\"",
")",
"else",
":",
"if",
"len",
"(",
"json",
".",
"keys",
"(",
")",
")",
"!=",
"1",
":",
"raise",
"ClientException",
"(",
"'Must submit exactly one key in payload - e.g. json={\"dataElements\": [...]\"}'",
")",
"if",
"not",
"json",
".",
"get",
"(",
"key",
")",
":",
"raise",
"ClientException",
"(",
"\"payload for key '{}' is empty\"",
".",
"format",
"(",
"key",
")",
")",
"else",
":",
"for",
"data",
"in",
"partition_payload",
"(",
"data",
"=",
"json",
",",
"key",
"=",
"key",
",",
"thresh",
"=",
"thresh",
")",
":",
"yield",
"self",
".",
"post",
"(",
"endpoint",
",",
"json",
"=",
"data",
",",
"params",
"=",
"params",
")"
] | Post a payload in chunks to prevent 'Request Entity Too Large' Timeout errors
:param endpoint: the API endpoint to use
:param json: payload dict
:param params: request parameters
:param thresh: the maximum number of elements per chunk
:return: generator where __next__ is a requests.Response object | [
"Post",
"a",
"payload",
"in",
"chunks",
"to",
"prevent",
"Request",
"Entity",
"Too",
"Large",
"Timeout",
"errors",
":",
"param",
"endpoint",
":",
"the",
"API",
"endpoint",
"to",
"use",
":",
"param",
"json",
":",
"payload",
"dict",
":",
"param",
"params",
":",
"request",
"parameters",
":",
"param",
"thresh",
":",
"the",
"maximum",
"amount",
"to",
"partition",
"into",
":",
"return",
":",
"generator",
"where",
"__next__",
"is",
"a",
"requests",
".",
"Response",
"object"
] | train | https://github.com/davidhuser/dhis2.py/blob/78cbf1985506db21acdfa0f2e624bc397e455c82/dhis2/api.py#L381-L407 |
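A hedged sketch of how the `post_partitioned` generator might be consumed; `api` is assumed to be a connected `Api` instance as in the sketch above, and the payload is invented.

```python
# 5000 hypothetical metadata objects, posted in chunks of at most 1000.
payload = {'dataElements': [{'name': 'DE_{}'.format(i)} for i in range(5000)]}

for response in api.post_partitioned('metadata', json=payload, thresh=1000):
    print(response.status_code)  # one requests.Response per chunk
```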
sci-bots/svg-model | svg_model/shapes_canvas.py | get_transform | def get_transform(offset, scale):
'''
Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
    scale. **Note that the third row label is ``w`` and not ``z``.**
'''
return pd.DataFrame([[scale, 0, offset.x], [0, scale, offset.y],
[0, 0, 1]], index=['x', 'y', 'w']) | python | def get_transform(offset, scale):
'''
Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
    scale. **Note that the third row label is ``w`` and not ``z``.**
'''
return pd.DataFrame([[scale, 0, offset.x], [0, scale, offset.y],
[0, 0, 1]], index=['x', 'y', 'w']) | [
"def",
"get_transform",
"(",
"offset",
",",
"scale",
")",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"[",
"[",
"scale",
",",
"0",
",",
"offset",
".",
"x",
"]",
",",
"[",
"0",
",",
"scale",
",",
"offset",
".",
"y",
"]",
",",
"[",
"0",
",",
"0",
",",
"1",
"]",
"]",
",",
"index",
"=",
"[",
"'x'",
",",
"'y'",
",",
"'w'",
"]",
")"
] | Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
scale. **Note that the third row label is ``w`` and not ``z``.**
"Parameters",
"----------",
"offset",
":",
"pandas",
".",
"Series",
"Cartesian",
"(",
"x",
"y",
")",
"coordinate",
"of",
"offset",
"origin",
".",
"scale",
":",
"pandas",
".",
"Series",
"Scaling",
"factor",
"for",
"x",
"and",
"y",
"dimensions",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/shapes_canvas.py#L16-L32 |
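A quick check of the matrix above, assuming `get_transform` is imported from the module; the numbers are chosen only to make the arithmetic easy to follow.

```python
import pandas as pd

offset = pd.Series([10.0, 20.0], index=['x', 'y'])
transform = get_transform(offset, scale=2.0)

# Apply to the point (3, 4) written in homogeneous coordinates [x, y, w]:
point = pd.Series([3.0, 4.0, 1.0], index=['x', 'y', 'w'])
print(transform.values.dot(point.values))  # [16. 28.  1.]
```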
sci-bots/svg-model | svg_model/shapes_canvas.py | ShapesCanvas.find_shape | def find_shape(self, canvas_x, canvas_y):
'''
Look up shape based on canvas coordinates.
'''
shape_x, shape_y, w = self.canvas_to_shapes_transform.dot([canvas_x,
canvas_y,
1])
if hasattr(self.space, 'point_query_first'):
# Assume `pymunk<5.0`.
shape = self.space.point_query_first((shape_x, shape_y))
else:
# Assume `pymunk>=5.0`, where `point_query_first` method has been
# deprecated.
info = self.space.point_query_nearest((shape_x, shape_y), 0,
[pymunk.ShapeFilter
.ALL_CATEGORIES])
shape = info.shape if info else None
if shape:
return self.bodies[shape.body]
return None | python | def find_shape(self, canvas_x, canvas_y):
'''
Look up shape based on canvas coordinates.
'''
shape_x, shape_y, w = self.canvas_to_shapes_transform.dot([canvas_x,
canvas_y,
1])
if hasattr(self.space, 'point_query_first'):
# Assume `pymunk<5.0`.
shape = self.space.point_query_first((shape_x, shape_y))
else:
# Assume `pymunk>=5.0`, where `point_query_first` method has been
# deprecated.
info = self.space.point_query_nearest((shape_x, shape_y), 0,
[pymunk.ShapeFilter
.ALL_CATEGORIES])
shape = info.shape if info else None
if shape:
return self.bodies[shape.body]
return None | [
"def",
"find_shape",
"(",
"self",
",",
"canvas_x",
",",
"canvas_y",
")",
":",
"shape_x",
",",
"shape_y",
",",
"w",
"=",
"self",
".",
"canvas_to_shapes_transform",
".",
"dot",
"(",
"[",
"canvas_x",
",",
"canvas_y",
",",
"1",
"]",
")",
"if",
"hasattr",
"(",
"self",
".",
"space",
",",
"'point_query_first'",
")",
":",
"# Assume `pymunk<5.0`.",
"shape",
"=",
"self",
".",
"space",
".",
"point_query_first",
"(",
"(",
"shape_x",
",",
"shape_y",
")",
")",
"else",
":",
"# Assume `pymunk>=5.0`, where `point_query_first` method has been",
"# deprecated.",
"info",
"=",
"self",
".",
"space",
".",
"point_query_nearest",
"(",
"(",
"shape_x",
",",
"shape_y",
")",
",",
"0",
",",
"[",
"pymunk",
".",
"ShapeFilter",
".",
"ALL_CATEGORIES",
"]",
")",
"shape",
"=",
"info",
".",
"shape",
"if",
"info",
"else",
"None",
"if",
"shape",
":",
"return",
"self",
".",
"bodies",
"[",
"shape",
".",
"body",
"]",
"return",
"None"
] | Look up shape based on canvas coordinates. | [
"Look",
"up",
"shape",
"based",
"on",
"canvas",
"coordinates",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/shapes_canvas.py#L128-L148 |
sci-bots/svg-model | svg_model/data_frame.py | get_shape_areas | def get_shape_areas(df_shapes, shape_i_columns, signed=False):
'''
Return a `pandas.Series` indexed by `shape_i_columns` (i.e., each entry
corresponds to a single shape/polygon), containing the following columns
the area of each shape.
If `signed=True`, a positive area value corresponds to a clockwise loop,
whereas a negative area value corresponds to a counter-clockwise loop.
'''
# Make a copy of the SVG data frame since we need to add columns to it.
df_i = df_shapes.copy()
df_i['vertex_count'] = (df_i.groupby(shape_i_columns)['x']
.transform('count'))
df_i['area_a'] = df_i.x
df_i['area_b'] = df_i.y
# Vector form of [Shoelace formula][1].
#
# [1]: http://en.wikipedia.org/wiki/Shoelace_formula
df_i.loc[df_i.vertex_i == df_i.vertex_count - 1, 'area_a'] *= df_i.loc[df_i.vertex_i == 0, 'y'].values
df_i.loc[df_i.vertex_i < df_i.vertex_count - 1, 'area_a'] *= df_i.loc[df_i.vertex_i > 0, 'y'].values
df_i.loc[df_i.vertex_i == df_i.vertex_count - 1, 'area_b'] *= df_i.loc[df_i.vertex_i == 0, 'x'].values
df_i.loc[df_i.vertex_i < df_i.vertex_count - 1, 'area_b'] *= df_i.loc[df_i.vertex_i > 0, 'x'].values
area_components = df_i.groupby(shape_i_columns)[['area_a', 'area_b']].sum()
shape_areas = .5 * (area_components['area_b'] - area_components['area_a'])
if not signed:
shape_areas.name = 'area'
return shape_areas.abs()
else:
shape_areas.name = 'signed_area'
return shape_areas | python | def get_shape_areas(df_shapes, shape_i_columns, signed=False):
'''
Return a `pandas.Series` indexed by `shape_i_columns` (i.e., each entry
    corresponds to a single shape/polygon), containing the area of each
    shape.
If `signed=True`, a positive area value corresponds to a clockwise loop,
whereas a negative area value corresponds to a counter-clockwise loop.
'''
# Make a copy of the SVG data frame since we need to add columns to it.
df_i = df_shapes.copy()
df_i['vertex_count'] = (df_i.groupby(shape_i_columns)['x']
.transform('count'))
df_i['area_a'] = df_i.x
df_i['area_b'] = df_i.y
# Vector form of [Shoelace formula][1].
#
# [1]: http://en.wikipedia.org/wiki/Shoelace_formula
df_i.loc[df_i.vertex_i == df_i.vertex_count - 1, 'area_a'] *= df_i.loc[df_i.vertex_i == 0, 'y'].values
df_i.loc[df_i.vertex_i < df_i.vertex_count - 1, 'area_a'] *= df_i.loc[df_i.vertex_i > 0, 'y'].values
df_i.loc[df_i.vertex_i == df_i.vertex_count - 1, 'area_b'] *= df_i.loc[df_i.vertex_i == 0, 'x'].values
df_i.loc[df_i.vertex_i < df_i.vertex_count - 1, 'area_b'] *= df_i.loc[df_i.vertex_i > 0, 'x'].values
area_components = df_i.groupby(shape_i_columns)[['area_a', 'area_b']].sum()
shape_areas = .5 * (area_components['area_b'] - area_components['area_a'])
if not signed:
shape_areas.name = 'area'
return shape_areas.abs()
else:
shape_areas.name = 'signed_area'
return shape_areas | [
"def",
"get_shape_areas",
"(",
"df_shapes",
",",
"shape_i_columns",
",",
"signed",
"=",
"False",
")",
":",
"# Make a copy of the SVG data frame since we need to add columns to it.",
"df_i",
"=",
"df_shapes",
".",
"copy",
"(",
")",
"df_i",
"[",
"'vertex_count'",
"]",
"=",
"(",
"df_i",
".",
"groupby",
"(",
"shape_i_columns",
")",
"[",
"'x'",
"]",
".",
"transform",
"(",
"'count'",
")",
")",
"df_i",
"[",
"'area_a'",
"]",
"=",
"df_i",
".",
"x",
"df_i",
"[",
"'area_b'",
"]",
"=",
"df_i",
".",
"y",
"# Vector form of [Shoelace formula][1].",
"#",
"# [1]: http://en.wikipedia.org/wiki/Shoelace_formula",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"==",
"df_i",
".",
"vertex_count",
"-",
"1",
",",
"'area_a'",
"]",
"*=",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"==",
"0",
",",
"'y'",
"]",
".",
"values",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"<",
"df_i",
".",
"vertex_count",
"-",
"1",
",",
"'area_a'",
"]",
"*=",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
">",
"0",
",",
"'y'",
"]",
".",
"values",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"==",
"df_i",
".",
"vertex_count",
"-",
"1",
",",
"'area_b'",
"]",
"*=",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"==",
"0",
",",
"'x'",
"]",
".",
"values",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
"<",
"df_i",
".",
"vertex_count",
"-",
"1",
",",
"'area_b'",
"]",
"*=",
"df_i",
".",
"loc",
"[",
"df_i",
".",
"vertex_i",
">",
"0",
",",
"'x'",
"]",
".",
"values",
"area_components",
"=",
"df_i",
".",
"groupby",
"(",
"shape_i_columns",
")",
"[",
"[",
"'area_a'",
",",
"'area_b'",
"]",
"]",
".",
"sum",
"(",
")",
"shape_areas",
"=",
".5",
"*",
"(",
"area_components",
"[",
"'area_b'",
"]",
"-",
"area_components",
"[",
"'area_a'",
"]",
")",
"if",
"not",
"signed",
":",
"shape_areas",
".",
"name",
"=",
"'area'",
"return",
"shape_areas",
".",
"abs",
"(",
")",
"else",
":",
"shape_areas",
".",
"name",
"=",
"'signed_area'",
"return",
"shape_areas"
] | Return a `pandas.Series` indexed by `shape_i_columns` (i.e., each entry
corresponds to a single shape/polygon), containing the area of each
shape.
If `signed=True`, a positive area value corresponds to a clockwise loop,
whereas a negative area value corresponds to a counter-clockwise loop. | [
"Return",
"a",
"pandas",
".",
"Series",
"indexed",
"by",
"shape_i_columns",
"(",
"i",
".",
"e",
".",
"each",
"entry",
"corresponds",
"to",
"a",
"single",
"shape",
"/",
"polygon",
")",
"containing",
"the",
"following",
"columns",
"the",
"area",
"of",
"each",
"shape",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/data_frame.py#L11-L44 |
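A small sanity check of the shoelace computation, assuming the function is imported; the square below is traced clockwise, so the signed area comes out positive as the docstring states.

```python
import pandas as pd

# One unit square; `vertex_i` gives the traversal order within the shape.
df_shapes = pd.DataFrame({
    'shape_i': [0, 0, 0, 0],
    'vertex_i': [0, 1, 2, 3],
    'x': [0.0, 0.0, 1.0, 1.0],
    'y': [0.0, 1.0, 1.0, 0.0],
})
print(get_shape_areas(df_shapes, 'shape_i'))               # area == 1.0
print(get_shape_areas(df_shapes, 'shape_i', signed=True))  # +1.0 (clockwise)
```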
sci-bots/svg-model | svg_model/data_frame.py | get_bounding_boxes | def get_bounding_boxes(df_shapes, shape_i_columns):
'''
Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape.
'''
xy_groups = df_shapes.groupby(shape_i_columns)[['x', 'y']]
xy_min = xy_groups.agg('min')
xy_max = xy_groups.agg('max')
shapes = (xy_max - xy_min).rename(columns={'x': 'width', 'y': 'height'})
return xy_min.join(shapes) | python | def get_bounding_boxes(df_shapes, shape_i_columns):
'''
Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape.
'''
xy_groups = df_shapes.groupby(shape_i_columns)[['x', 'y']]
xy_min = xy_groups.agg('min')
xy_max = xy_groups.agg('max')
shapes = (xy_max - xy_min).rename(columns={'x': 'width', 'y': 'height'})
return xy_min.join(shapes) | [
"def",
"get_bounding_boxes",
"(",
"df_shapes",
",",
"shape_i_columns",
")",
":",
"xy_groups",
"=",
"df_shapes",
".",
"groupby",
"(",
"shape_i_columns",
")",
"[",
"[",
"'x'",
",",
"'y'",
"]",
"]",
"xy_min",
"=",
"xy_groups",
".",
"agg",
"(",
"'min'",
")",
"xy_max",
"=",
"xy_groups",
".",
"agg",
"(",
"'max'",
")",
"shapes",
"=",
"(",
"xy_max",
"-",
"xy_min",
")",
".",
"rename",
"(",
"columns",
"=",
"{",
"'x'",
":",
"'width'",
",",
"'y'",
":",
"'height'",
"}",
")",
"return",
"xy_min",
".",
"join",
"(",
"shapes",
")"
] | Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape. | [
"Return",
"a",
"pandas",
".",
"DataFrame",
"indexed",
"by",
"shape_i_columns",
"(",
"i",
".",
"e",
".",
"each",
"row",
"corresponds",
"to",
"a",
"single",
"shape",
"/",
"polygon",
")",
"containing",
"the",
"following",
"columns",
":"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/data_frame.py#L47-L60 |
sci-bots/svg-model | svg_model/data_frame.py | get_shape_infos | def get_shape_infos(df_shapes, shape_i_columns):
'''
Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `area`: The area of the shape.
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape.
'''
shape_areas = get_shape_areas(df_shapes, shape_i_columns)
bboxes = get_bounding_boxes(df_shapes, shape_i_columns)
return bboxes.join(pd.DataFrame(shape_areas)) | python | def get_shape_infos(df_shapes, shape_i_columns):
'''
Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `area`: The area of the shape.
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape.
'''
shape_areas = get_shape_areas(df_shapes, shape_i_columns)
bboxes = get_bounding_boxes(df_shapes, shape_i_columns)
return bboxes.join(pd.DataFrame(shape_areas)) | [
"def",
"get_shape_infos",
"(",
"df_shapes",
",",
"shape_i_columns",
")",
":",
"shape_areas",
"=",
"get_shape_areas",
"(",
"df_shapes",
",",
"shape_i_columns",
")",
"bboxes",
"=",
"get_bounding_boxes",
"(",
"df_shapes",
",",
"shape_i_columns",
")",
"return",
"bboxes",
".",
"join",
"(",
"pd",
".",
"DataFrame",
"(",
"shape_areas",
")",
")"
] | Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row
corresponds to a single shape/polygon), containing the following columns:
- `area`: The area of the shape.
- `width`: The width of the widest part of the shape.
- `height`: The height of the tallest part of the shape. | [
"Return",
"a",
"pandas",
".",
"DataFrame",
"indexed",
"by",
"shape_i_columns",
"(",
"i",
".",
"e",
".",
"each",
"row",
"corresponds",
"to",
"a",
"single",
"shape",
"/",
"polygon",
")",
"containing",
"the",
"following",
"columns",
":"
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/data_frame.py#L63-L74 |
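An illustrative call, assuming imports; the frame holds two triangles, so the result has one row per `shape_i` with `x`, `y`, `width`, `height`, and `area` columns.

```python
import pandas as pd

df_shapes = pd.DataFrame({
    'shape_i': [0, 0, 0, 1, 1, 1],
    'vertex_i': [0, 1, 2, 0, 1, 2],
    'x': [0.0, 1.0, 0.0, 2.0, 3.0, 2.0],
    'y': [0.0, 0.0, 1.0, 0.0, 0.0, 2.0],
})
print(get_shape_infos(df_shapes, 'shape_i'))  # areas 0.5 and 1.0
```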
sci-bots/svg-model | svg_model/data_frame.py | get_bounding_box | def get_bounding_box(df_points):
'''
Calculate the bounding box of all points in a data frame.
'''
xy_min = df_points[['x', 'y']].min()
xy_max = df_points[['x', 'y']].max()
wh = xy_max - xy_min
wh.index = 'width', 'height'
bbox = pd.concat([xy_min, wh])
bbox.name = 'bounding_box'
return bbox | python | def get_bounding_box(df_points):
'''
Calculate the bounding box of all points in a data frame.
'''
xy_min = df_points[['x', 'y']].min()
xy_max = df_points[['x', 'y']].max()
wh = xy_max - xy_min
wh.index = 'width', 'height'
bbox = pd.concat([xy_min, wh])
bbox.name = 'bounding_box'
return bbox | [
"def",
"get_bounding_box",
"(",
"df_points",
")",
":",
"xy_min",
"=",
"df_points",
"[",
"[",
"'x'",
",",
"'y'",
"]",
"]",
".",
"min",
"(",
")",
"xy_max",
"=",
"df_points",
"[",
"[",
"'x'",
",",
"'y'",
"]",
"]",
".",
"max",
"(",
")",
"wh",
"=",
"xy_max",
"-",
"xy_min",
"wh",
".",
"index",
"=",
"'width'",
",",
"'height'",
"bbox",
"=",
"pd",
".",
"concat",
"(",
"[",
"xy_min",
",",
"wh",
"]",
")",
"bbox",
".",
"name",
"=",
"'bounding_box'",
"return",
"bbox"
] | Calculate the bounding box of all points in a data frame. | [
"Calculate",
"the",
"bounding",
"box",
"of",
"all",
"points",
"in",
"a",
"data",
"frame",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/data_frame.py#L77-L88 |
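A one-line check of the resulting bounding-box series, assuming the function is imported.

```python
import pandas as pd

df_points = pd.DataFrame({'x': [1.0, 4.0, 2.0], 'y': [5.0, 3.0, 9.0]})
print(get_bounding_box(df_points))
# x 1.0, y 3.0, width 3.0, height 6.0
```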
portfoliome/foil | foil/deserializers.py | json_decoder_hook | def json_decoder_hook(dct, str_decoders=STRING_DECODERS,
converters=MappingProxyType(dict())) -> dict:
"""Decoder for parsing typical objects like uuid's and dates."""
for k, v in dct.items():
if k in converters:
parse_func = converters[k]
dct[k] = parse_func(v)
elif isinstance(v, str):
for decode_func in str_decoders:
v = decode_func(v)
if not isinstance(v, str):
break
dct[k] = v
        elif isinstance(v, collections.abc.Mapping):
dct[k] = json_decoder_hook(v, str_decoders, converters)
return dct | python | def json_decoder_hook(dct, str_decoders=STRING_DECODERS,
converters=MappingProxyType(dict())) -> dict:
"""Decoder for parsing typical objects like uuid's and dates."""
for k, v in dct.items():
if k in converters:
parse_func = converters[k]
dct[k] = parse_func(v)
elif isinstance(v, str):
for decode_func in str_decoders:
v = decode_func(v)
if not isinstance(v, str):
break
dct[k] = v
        elif isinstance(v, collections.abc.Mapping):
dct[k] = json_decoder_hook(v, str_decoders, converters)
return dct | [
"def",
"json_decoder_hook",
"(",
"dct",
",",
"str_decoders",
"=",
"STRING_DECODERS",
",",
"converters",
"=",
"MappingProxyType",
"(",
"dict",
"(",
")",
")",
")",
"->",
"dict",
":",
"for",
"k",
",",
"v",
"in",
"dct",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"converters",
":",
"parse_func",
"=",
"converters",
"[",
"k",
"]",
"dct",
"[",
"k",
"]",
"=",
"parse_func",
"(",
"v",
")",
"elif",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"for",
"decode_func",
"in",
"str_decoders",
":",
"v",
"=",
"decode_func",
"(",
"v",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"break",
"dct",
"[",
"k",
"]",
"=",
"v",
"elif",
"isinstance",
"(",
"v",
",",
"collections",
".",
"Mapping",
")",
":",
"dct",
"[",
"k",
"]",
"=",
"json_decoder_hook",
"(",
"v",
",",
"str_decoders",
",",
"converters",
")",
"return",
"dct"
] | Decoder for parsing typical objects like uuid's and dates. | [
"Decoder",
"for",
"parsing",
"typical",
"objects",
"like",
"uuid",
"s",
"and",
"dates",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/deserializers.py#L45-L65 |
portfoliome/foil | foil/deserializers.py | make_json_decoder_hook | def make_json_decoder_hook(str_decoders=STRING_DECODERS,
extra_str_decoders=tuple(),
converters=MappingProxyType(dict())) -> Callable:
"""Customize JSON string decoder hooks.
Object hook for typical deserialization scenarios.
Notes
-----
Specifying a field in converters will ensure custom decoding/passthrough.
Parameters
----------
str_decoders: functions for decoding strings to objects.
extra_str_decoders: appends additional string decoders to str_decoders.
converters: field / parser function mapping.
"""
str_decoders = tuple(chain(str_decoders, extra_str_decoders))
object_hook = partial(json_decoder_hook, str_decoders=str_decoders,
converters=converters)
return object_hook | python | def make_json_decoder_hook(str_decoders=STRING_DECODERS,
extra_str_decoders=tuple(),
converters=MappingProxyType(dict())) -> Callable:
"""Customize JSON string decoder hooks.
Object hook for typical deserialization scenarios.
Notes
-----
Specifying a field in converters will ensure custom decoding/passthrough.
Parameters
----------
str_decoders: functions for decoding strings to objects.
extra_str_decoders: appends additional string decoders to str_decoders.
converters: field / parser function mapping.
"""
str_decoders = tuple(chain(str_decoders, extra_str_decoders))
object_hook = partial(json_decoder_hook, str_decoders=str_decoders,
converters=converters)
return object_hook | [
"def",
"make_json_decoder_hook",
"(",
"str_decoders",
"=",
"STRING_DECODERS",
",",
"extra_str_decoders",
"=",
"tuple",
"(",
")",
",",
"converters",
"=",
"MappingProxyType",
"(",
"dict",
"(",
")",
")",
")",
"->",
"Callable",
":",
"str_decoders",
"=",
"tuple",
"(",
"chain",
"(",
"str_decoders",
",",
"extra_str_decoders",
")",
")",
"object_hook",
"=",
"partial",
"(",
"json_decoder_hook",
",",
"str_decoders",
"=",
"str_decoders",
",",
"converters",
"=",
"converters",
")",
"return",
"object_hook"
] | Customize JSON string decoder hooks.
Object hook for typical deserialization scenarios.
Notes
-----
Specifying a field in converters will ensure custom decoding/passthrough.
Parameters
----------
str_decoders: functions for decoding strings to objects.
extra_str_decoders: appends additional string decoders to str_decoders.
converters: field / parser function mapping. | [
"Customize",
"JSON",
"string",
"decoder",
"hooks",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/deserializers.py#L68-L90 |
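A hedged usage sketch; `count` and `retries` are hypothetical field names, chosen so that only the `converters` branch of the hook is exercised.

```python
import json

hook = make_json_decoder_hook(converters={'count': int, 'retries': int})
doc = json.loads('{"count": "7", "retries": "3"}', object_hook=hook)
assert doc == {'count': 7, 'retries': 3}
```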
BlueBrain/hpcbench | hpcbench/cli/benwait.py | wait_for_completion | def wait_for_completion(report, interval=10):
"""Wait for asynchronous jobs stil running in the given campaign.
:param report: memory representation of a campaign report
:type campaign: ReportNode
:param interval: wait interval
:type interval: int or float
    :return: generator of dicts, one per finished asynchronous job
"""
for jobid in report.collect('jobid'):
try:
if not Job.finished(jobid):
logging.info('waiting for SLURM job %s', jobid)
time.sleep(interval)
while not Job.finished(jobid):
time.sleep(interval)
yield Job.fromid(jobid)._asdict()
except OSError as e:
if e.errno == errno.ENOENT:
yield dict(id=str(jobid))
else:
raise e | python | def wait_for_completion(report, interval=10):
"""Wait for asynchronous jobs stil running in the given campaign.
:param report: memory representation of a campaign report
:type campaign: ReportNode
:param interval: wait interval
:type interval: int or float
    :return: generator of dicts, one per finished asynchronous job
"""
for jobid in report.collect('jobid'):
try:
if not Job.finished(jobid):
logging.info('waiting for SLURM job %s', jobid)
time.sleep(interval)
while not Job.finished(jobid):
time.sleep(interval)
yield Job.fromid(jobid)._asdict()
except OSError as e:
if e.errno == errno.ENOENT:
yield dict(id=str(jobid))
else:
raise e | [
"def",
"wait_for_completion",
"(",
"report",
",",
"interval",
"=",
"10",
")",
":",
"for",
"jobid",
"in",
"report",
".",
"collect",
"(",
"'jobid'",
")",
":",
"try",
":",
"if",
"not",
"Job",
".",
"finished",
"(",
"jobid",
")",
":",
"logging",
".",
"info",
"(",
"'waiting for SLURM job %s'",
",",
"jobid",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"while",
"not",
"Job",
".",
"finished",
"(",
"jobid",
")",
":",
"time",
".",
"sleep",
"(",
"interval",
")",
"yield",
"Job",
".",
"fromid",
"(",
"jobid",
")",
".",
"_asdict",
"(",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"yield",
"dict",
"(",
"id",
"=",
"str",
"(",
"jobid",
")",
")",
"else",
":",
"raise",
"e"
] | Wait for asynchronous jobs still running in the given campaign.
:param report: memory representation of a campaign report
:type report: ReportNode
:param interval: wait interval
:type interval: int or float
:return: generator of dicts, one per finished asynchronous job | [
"Wait",
"for",
"asynchronous",
"jobs",
"stil",
"running",
"in",
"the",
"given",
"campaign",
"."
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/benwait.py#L110-L131 |
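A hypothetical sketch of draining the generator to block until a campaign's SLURM jobs finish; the path is illustrative and `ReportNode` is the class used by ben-wait above.

```python
report = ReportNode('path/to/campaign-dir')  # illustrative path
for job in wait_for_completion(report, interval=30):
    print(job)  # one dict per finished job
```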
BlueBrain/hpcbench | hpcbench/cli/benwait.py | main | def main(argv=None):
"""ben-wait entry point"""
arguments = cli_common(__doc__, argv=argv)
report = ReportNode(arguments['CAMPAIGN-DIR'])
jobs = wait_for_completion(report, float(arguments['--interval']))
status = ReportStatus(report, jobs)
if not arguments['--silent']:
fmt = arguments['--format'] or 'log'
status.log(fmt)
if argv is None:
sys.exit(0 if status.succeeded else 1)
return status.status | python | def main(argv=None):
"""ben-wait entry point"""
arguments = cli_common(__doc__, argv=argv)
report = ReportNode(arguments['CAMPAIGN-DIR'])
jobs = wait_for_completion(report, float(arguments['--interval']))
status = ReportStatus(report, jobs)
if not arguments['--silent']:
fmt = arguments['--format'] or 'log'
status.log(fmt)
if argv is None:
sys.exit(0 if status.succeeded else 1)
return status.status | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"report",
"=",
"ReportNode",
"(",
"arguments",
"[",
"'CAMPAIGN-DIR'",
"]",
")",
"jobs",
"=",
"wait_for_completion",
"(",
"report",
",",
"float",
"(",
"arguments",
"[",
"'--interval'",
"]",
")",
")",
"status",
"=",
"ReportStatus",
"(",
"report",
",",
"jobs",
")",
"if",
"not",
"arguments",
"[",
"'--silent'",
"]",
":",
"fmt",
"=",
"arguments",
"[",
"'--format'",
"]",
"or",
"'log'",
"status",
".",
"log",
"(",
"fmt",
")",
"if",
"argv",
"is",
"None",
":",
"sys",
".",
"exit",
"(",
"0",
"if",
"status",
".",
"succeeded",
"else",
"1",
")",
"return",
"status",
".",
"status"
] | ben-wait entry point | [
"ben",
"-",
"wait",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/benwait.py#L134-L145 |
elkiwy/paynter | paynter/color.py | getColors_Triad | def getColors_Triad(hue=None, sat = 1, val = 1, spread = 60):
"""
Create a palette with one main color and two opposite color evenly spread apart from the main one.
:param hue: A 0-1 float with the starting hue value.
:param sat: A 0-1 float with the palette saturation.
:param val: A 0-1 float with the palette value.
:param val: An int with the spread in degrees from the opposite color.
:rtype: A list of :py:class:`Color` objects.
"""
palette = list()
if hue==None:
leadHue = randFloat(0, 1)
else:
leadHue = hue
palette.append(Color(0,0,0,1).set_HSV(leadHue, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 + spread/360) % 1, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 - spread/360) % 1, sat, val))
return palette | python | def getColors_Triad(hue=None, sat = 1, val = 1, spread = 60):
"""
    Create a palette with one main color and two opposite colors evenly spread apart from the main one.
:param hue: A 0-1 float with the starting hue value.
:param sat: A 0-1 float with the palette saturation.
:param val: A 0-1 float with the palette value.
    :param spread: An int with the spread in degrees from the opposite color.
:rtype: A list of :py:class:`Color` objects.
"""
palette = list()
if hue==None:
leadHue = randFloat(0, 1)
else:
leadHue = hue
palette.append(Color(0,0,0,1).set_HSV(leadHue, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 + spread/360) % 1, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 - spread/360) % 1, sat, val))
return palette | [
"def",
"getColors_Triad",
"(",
"hue",
"=",
"None",
",",
"sat",
"=",
"1",
",",
"val",
"=",
"1",
",",
"spread",
"=",
"60",
")",
":",
"palette",
"=",
"list",
"(",
")",
"if",
"hue",
"==",
"None",
":",
"leadHue",
"=",
"randFloat",
"(",
"0",
",",
"1",
")",
"else",
":",
"leadHue",
"=",
"hue",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"leadHue",
",",
"sat",
",",
"val",
")",
")",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"(",
"leadHue",
"+",
"0.5",
"+",
"spread",
"/",
"360",
")",
"%",
"1",
",",
"sat",
",",
"val",
")",
")",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"(",
"leadHue",
"+",
"0.5",
"-",
"spread",
"/",
"360",
")",
"%",
"1",
",",
"sat",
",",
"val",
")",
")",
"return",
"palette"
] | Create a palette with one main color and two opposite colors evenly spread apart from the main one.
:param hue: A 0-1 float with the starting hue value.
:param sat: A 0-1 float with the palette saturation.
:param val: A 0-1 float with the palette value.
:param spread: An int with the spread in degrees from the opposite color.
:rtype: A list of :py:class:`Color` objects. | [
"Create",
"a",
"palette",
"with",
"one",
"main",
"color",
"and",
"two",
"opposite",
"color",
"evenly",
"spread",
"apart",
"from",
"the",
"main",
"one",
"."
] | train | https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/color.py#L71-L89 |
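A short deterministic call, assuming the function is imported; the companion-hue arithmetic in the comments restates the two `set_HSV` expressions above.

```python
palette = getColors_Triad(hue=0.0, sat=1, val=1, spread=60)
# Lead hue 0.0; companions land at (0.5 + 60/360) % 1 and (0.5 - 60/360) % 1,
# i.e. 2/3 and 1/3 -- an even split around the complement of the lead color.
```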
BlueBrain/hpcbench | hpcbench/driver/slurm.py | SbatchDriver.default_job_name | def default_job_name(self):
"""Slurm job name if not already specified
in the `sbatch` section"""
name = ''
if not self.root.existing_campaign:
campaign_file = osp.basename(self.root.campaign_file)
campaign = osp.splitext(campaign_file)[0]
name += campaign + '/'
name += self.tag
return name | python | def default_job_name(self):
"""Slurm job name if not already specified
in the `sbatch` section"""
name = ''
if not self.root.existing_campaign:
campaign_file = osp.basename(self.root.campaign_file)
campaign = osp.splitext(campaign_file)[0]
name += campaign + '/'
name += self.tag
return name | [
"def",
"default_job_name",
"(",
"self",
")",
":",
"name",
"=",
"''",
"if",
"not",
"self",
".",
"root",
".",
"existing_campaign",
":",
"campaign_file",
"=",
"osp",
".",
"basename",
"(",
"self",
".",
"root",
".",
"campaign_file",
")",
"campaign",
"=",
"osp",
".",
"splitext",
"(",
"campaign_file",
")",
"[",
"0",
"]",
"name",
"+=",
"campaign",
"+",
"'/'",
"name",
"+=",
"self",
".",
"tag",
"return",
"name"
] | Slurm job name if not already specified
in the `sbatch` section | [
"Slurm",
"job",
"name",
"if",
"not",
"already",
"specified",
"in",
"the",
"sbatch",
"section"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/slurm.py#L102-L111 |
BlueBrain/hpcbench | hpcbench/driver/slurm.py | SbatchDriver.sbatch_template | def sbatch_template(self):
""":return Jinja sbatch template for the current tag"""
template = self.sbatch_template_str
if template.startswith('#!'):
# script is embedded in YAML
return jinja_environment.from_string(template)
return jinja_environment.get_template(template) | python | def sbatch_template(self):
""":return Jinja sbatch template for the current tag"""
template = self.sbatch_template_str
if template.startswith('#!'):
# script is embedded in YAML
return jinja_environment.from_string(template)
return jinja_environment.get_template(template) | [
"def",
"sbatch_template",
"(",
"self",
")",
":",
"template",
"=",
"self",
".",
"sbatch_template_str",
"if",
"template",
".",
"startswith",
"(",
"'#!'",
")",
":",
"# script is embedded in YAML",
"return",
"jinja_environment",
".",
"from_string",
"(",
"template",
")",
"return",
"jinja_environment",
".",
"get_template",
"(",
"template",
")"
] | :return Jinja sbatch template for the current tag | [
":",
"return",
"Jinja",
"sbatch",
"template",
"for",
"the",
"current",
"tag"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/slurm.py#L157-L163 |
BlueBrain/hpcbench | hpcbench/driver/slurm.py | SbatchDriver.sbatch_template_str | def sbatch_template_str(self):
""":return Jinja sbatch template for the current tag as string"""
templates = self.campaign.process.sbatch_template
if isinstance(templates, Mapping):
# find proper template according to the tag
template = templates.get(self.tag)
if template is None:
template = templates.get('*')
if template is None:
template = SBATCH_JINJA_TEMPLATE
else:
template = templates
return template | python | def sbatch_template_str(self):
""":return Jinja sbatch template for the current tag as string"""
templates = self.campaign.process.sbatch_template
if isinstance(templates, Mapping):
# find proper template according to the tag
template = templates.get(self.tag)
if template is None:
template = templates.get('*')
if template is None:
template = SBATCH_JINJA_TEMPLATE
else:
template = templates
return template | [
"def",
"sbatch_template_str",
"(",
"self",
")",
":",
"templates",
"=",
"self",
".",
"campaign",
".",
"process",
".",
"sbatch_template",
"if",
"isinstance",
"(",
"templates",
",",
"Mapping",
")",
":",
"# find proper template according to the tag",
"template",
"=",
"templates",
".",
"get",
"(",
"self",
".",
"tag",
")",
"if",
"template",
"is",
"None",
":",
"template",
"=",
"templates",
".",
"get",
"(",
"'*'",
")",
"if",
"template",
"is",
"None",
":",
"template",
"=",
"SBATCH_JINJA_TEMPLATE",
"else",
":",
"template",
"=",
"templates",
"return",
"template"
] | :return Jinja sbatch template for the current tag as string | [
":",
"return",
"Jinja",
"sbatch",
"template",
"for",
"the",
"current",
"tag",
"as",
"string"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/slurm.py#L166-L178 |
BlueBrain/hpcbench | hpcbench/driver/slurm.py | SbatchDriver._create_sbatch | def _create_sbatch(self, ostr):
"""Write sbatch template to output stream
:param ostr: opened file to write to
"""
properties = dict(
sbatch_arguments=self.sbatch_args, hpcbench_command=self.hpcbench_cmd
)
try:
self.sbatch_template.stream(**properties).dump(ostr)
except jinja2.exceptions.UndefinedError:
self.logger.error('Error while generating SBATCH template:')
self.logger.error('%%<--------' * 5)
for line in self.sbatch_template_str.splitlines():
self.logger.error(line)
self.logger.error('%%<--------' * 5)
self.logger.error('Template properties: %s', properties)
raise | python | def _create_sbatch(self, ostr):
"""Write sbatch template to output stream
:param ostr: opened file to write to
"""
properties = dict(
sbatch_arguments=self.sbatch_args, hpcbench_command=self.hpcbench_cmd
)
try:
self.sbatch_template.stream(**properties).dump(ostr)
except jinja2.exceptions.UndefinedError:
self.logger.error('Error while generating SBATCH template:')
self.logger.error('%%<--------' * 5)
for line in self.sbatch_template_str.splitlines():
self.logger.error(line)
self.logger.error('%%<--------' * 5)
self.logger.error('Template properties: %s', properties)
raise | [
"def",
"_create_sbatch",
"(",
"self",
",",
"ostr",
")",
":",
"properties",
"=",
"dict",
"(",
"sbatch_arguments",
"=",
"self",
".",
"sbatch_args",
",",
"hpcbench_command",
"=",
"self",
".",
"hpcbench_cmd",
")",
"try",
":",
"self",
".",
"sbatch_template",
".",
"stream",
"(",
"*",
"*",
"properties",
")",
".",
"dump",
"(",
"ostr",
")",
"except",
"jinja2",
".",
"exceptions",
".",
"UndefinedError",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Error while generating SBATCH template:'",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'%%<--------'",
"*",
"5",
")",
"for",
"line",
"in",
"self",
".",
"sbatch_template_str",
".",
"splitlines",
"(",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"line",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'%%<--------'",
"*",
"5",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'Template properties: %s'",
",",
"properties",
")",
"raise"
] | Write sbatch template to output stream
:param ostr: opened file to write to | [
"Write",
"sbatch",
"template",
"to",
"output",
"stream",
":",
"param",
"ostr",
":",
"opened",
"file",
"to",
"write",
"to"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/slurm.py#L193-L209 |
BlueBrain/hpcbench | hpcbench/driver/slurm.py | SbatchDriver._execute_sbatch | def _execute_sbatch(self):
"""Schedule the sbatch file using the sbatch command
:returns the slurm job id
"""
commands = self.campaign.process.get('commands', {})
sbatch = find_executable(commands.get('sbatch', 'sbatch'))
sbatch_command = [sbatch, '--parsable', self.sbatch_filename]
try:
self.logger.debug(
'Executing command: %s',
' '.join(map(six.moves.shlex_quote, sbatch_command)),
)
sbatch_out = subprocess.check_output(
sbatch_command, universal_newlines=True
)
except subprocess.CalledProcessError as cpe:
self.logger.error(
"SBATCH return non-zero exit" "status %d for tag %s",
cpe.returncode,
self.tag,
)
sbatch_out = cpe.output
jobidre = re.compile(r'^([\d]+)(?:;\S*)?$')
jobid = None
for line in sbatch_out.splitlines():
res = jobidre.match(line)
if res is not None:
jobid = res.group(1)
self.logger.info("Submitted SBATCH job %s for tag %s", jobid, self.tag)
elif line:
self.logger.warning("SBATCH: %s", line)
if jobid is None:
self.logger.error("SBATCH submission failed for tag %s", self.tag)
return -1
else:
return int(jobid) | python | def _execute_sbatch(self):
"""Schedule the sbatch file using the sbatch command
:returns the slurm job id
"""
commands = self.campaign.process.get('commands', {})
sbatch = find_executable(commands.get('sbatch', 'sbatch'))
sbatch_command = [sbatch, '--parsable', self.sbatch_filename]
try:
self.logger.debug(
'Executing command: %s',
' '.join(map(six.moves.shlex_quote, sbatch_command)),
)
sbatch_out = subprocess.check_output(
sbatch_command, universal_newlines=True
)
except subprocess.CalledProcessError as cpe:
self.logger.error(
"SBATCH return non-zero exit" "status %d for tag %s",
cpe.returncode,
self.tag,
)
sbatch_out = cpe.output
jobidre = re.compile(r'^([\d]+)(?:;\S*)?$')
jobid = None
for line in sbatch_out.splitlines():
res = jobidre.match(line)
if res is not None:
jobid = res.group(1)
self.logger.info("Submitted SBATCH job %s for tag %s", jobid, self.tag)
elif line:
self.logger.warning("SBATCH: %s", line)
if jobid is None:
self.logger.error("SBATCH submission failed for tag %s", self.tag)
return -1
else:
return int(jobid) | [
"def",
"_execute_sbatch",
"(",
"self",
")",
":",
"commands",
"=",
"self",
".",
"campaign",
".",
"process",
".",
"get",
"(",
"'commands'",
",",
"{",
"}",
")",
"sbatch",
"=",
"find_executable",
"(",
"commands",
".",
"get",
"(",
"'sbatch'",
",",
"'sbatch'",
")",
")",
"sbatch_command",
"=",
"[",
"sbatch",
",",
"'--parsable'",
",",
"self",
".",
"sbatch_filename",
"]",
"try",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Executing command: %s'",
",",
"' '",
".",
"join",
"(",
"map",
"(",
"six",
".",
"moves",
".",
"shlex_quote",
",",
"sbatch_command",
")",
")",
",",
")",
"sbatch_out",
"=",
"subprocess",
".",
"check_output",
"(",
"sbatch_command",
",",
"universal_newlines",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"cpe",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"SBATCH return non-zero exit\"",
"\"status %d for tag %s\"",
",",
"cpe",
".",
"returncode",
",",
"self",
".",
"tag",
",",
")",
"sbatch_out",
"=",
"cpe",
".",
"output",
"jobidre",
"=",
"re",
".",
"compile",
"(",
"r'^([\\d]+)(?:;\\S*)?$'",
")",
"jobid",
"=",
"None",
"for",
"line",
"in",
"sbatch_out",
".",
"splitlines",
"(",
")",
":",
"res",
"=",
"jobidre",
".",
"match",
"(",
"line",
")",
"if",
"res",
"is",
"not",
"None",
":",
"jobid",
"=",
"res",
".",
"group",
"(",
"1",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Submitted SBATCH job %s for tag %s\"",
",",
"jobid",
",",
"self",
".",
"tag",
")",
"elif",
"line",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"SBATCH: %s\"",
",",
"line",
")",
"if",
"jobid",
"is",
"None",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"SBATCH submission failed for tag %s\"",
",",
"self",
".",
"tag",
")",
"return",
"-",
"1",
"else",
":",
"return",
"int",
"(",
"jobid",
")"
] | Schedule the sbatch file using the sbatch command
:returns the slurm job id | [
"Schedule",
"the",
"sbatch",
"file",
"using",
"the",
"sbatch",
"command",
":",
"returns",
"the",
"slurm",
"job",
"id"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/slurm.py#L225-L260 |
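The job-id regex above can be checked in isolation; `sbatch --parsable` prints either `<jobid>` or `<jobid>;<cluster>`, and both forms should yield the id in group 1.

```python
import re

jobidre = re.compile(r'^([\d]+)(?:;\S*)?$')
assert jobidre.match('123456').group(1) == '123456'
assert jobidre.match('123456;cluster0').group(1) == '123456'
assert jobidre.match('Submitted batch job 123456') is None  # non-parsable output
```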
BlueBrain/hpcbench | hpcbench/cli/bentpl.py | main | def main(argv=None):
"""ben-tpl entry point"""
arguments = cli_common(__doc__, argv=argv)
plugin = 'benchmark' if arguments['benchmark'] else None
if arguments['-g']:
template.generate_config(plugin, arguments['<FILE>'])
else:
with open(arguments['<FILE>']) as istr:
context = json.load(istr)
kwargs = dict(no_input=True, extra_context=context)
if arguments['--output-dir']:
kwargs.update(output_dir=arguments['--output-dir'])
if arguments['--interactive']:
kwargs.update(no_input=False)
logging.info(
'generating template in directory ' + kwargs.get('output_dir', os.getcwd())
)
template.generate_template(plugin, **kwargs) | python | def main(argv=None):
"""ben-tpl entry point"""
arguments = cli_common(__doc__, argv=argv)
plugin = 'benchmark' if arguments['benchmark'] else None
if arguments['-g']:
template.generate_config(plugin, arguments['<FILE>'])
else:
with open(arguments['<FILE>']) as istr:
context = json.load(istr)
kwargs = dict(no_input=True, extra_context=context)
if arguments['--output-dir']:
kwargs.update(output_dir=arguments['--output-dir'])
if arguments['--interactive']:
kwargs.update(no_input=False)
logging.info(
'generating template in directory ' + kwargs.get('output_dir', os.getcwd())
)
template.generate_template(plugin, **kwargs) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"plugin",
"=",
"'benchmark'",
"if",
"arguments",
"[",
"'benchmark'",
"]",
"else",
"None",
"if",
"arguments",
"[",
"'-g'",
"]",
":",
"template",
".",
"generate_config",
"(",
"plugin",
",",
"arguments",
"[",
"'<FILE>'",
"]",
")",
"else",
":",
"with",
"open",
"(",
"arguments",
"[",
"'<FILE>'",
"]",
")",
"as",
"istr",
":",
"context",
"=",
"json",
".",
"load",
"(",
"istr",
")",
"kwargs",
"=",
"dict",
"(",
"no_input",
"=",
"True",
",",
"extra_context",
"=",
"context",
")",
"if",
"arguments",
"[",
"'--output-dir'",
"]",
":",
"kwargs",
".",
"update",
"(",
"output_dir",
"=",
"arguments",
"[",
"'--output-dir'",
"]",
")",
"if",
"arguments",
"[",
"'--interactive'",
"]",
":",
"kwargs",
".",
"update",
"(",
"no_input",
"=",
"False",
")",
"logging",
".",
"info",
"(",
"'generating template in directory '",
"+",
"kwargs",
".",
"get",
"(",
"'output_dir'",
",",
"os",
".",
"getcwd",
"(",
")",
")",
")",
"template",
".",
"generate_template",
"(",
"plugin",
",",
"*",
"*",
"kwargs",
")"
] | ben-tpl entry point | [
"ben",
"-",
"tpl",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bentpl.py#L27-L45 |
portfoliome/foil | foil/order.py | partition_ordered | def partition_ordered(sequence, key=None):
"""Partition ordered sequence by key.
Sequence is expected to already be ordered.
Parameters
----------
sequence: iterable data.
key: partition key function
Yields
-------
iterable tuple(s) of partition key, data list pairs.
Examples
--------
1. By object attributes.
Partition sequence of objects by a height and weight attributes
into an ordered dict.
>> attributes = ('height', 'weight')
>> OrderedDict(partition_ordered(sequence, attrgetter(*attributes)))
2. By index items.
Partition sequence by the first character index of each element.
>> index = 0
>> sequence = ['112', '124', '289', '220', 'Z23']
>> list(partition_ordered(sequence, itemgetter(index)))
"""
yield from ((k, list(g)) for k, g in groupby(sequence, key=key)) | python | def partition_ordered(sequence, key=None):
"""Partition ordered sequence by key.
Sequence is expected to already be ordered.
Parameters
----------
sequence: iterable data.
key: partition key function
Yields
-------
iterable tuple(s) of partition key, data list pairs.
Examples
--------
1. By object attributes.
Partition sequence of objects by a height and weight attributes
into an ordered dict.
>> attributes = ('height', 'weight')
>> OrderedDict(partition_ordered(sequence, attrgetter(*attributes)))
2. By index items.
Partition sequence by the first character index of each element.
>> index = 0
>> sequence = ['112', '124', '289', '220', 'Z23']
>> list(partition_ordered(sequence, itemgetter(index)))
"""
yield from ((k, list(g)) for k, g in groupby(sequence, key=key)) | [
"def",
"partition_ordered",
"(",
"sequence",
",",
"key",
"=",
"None",
")",
":",
"yield",
"from",
"(",
"(",
"k",
",",
"list",
"(",
"g",
")",
")",
"for",
"k",
",",
"g",
"in",
"groupby",
"(",
"sequence",
",",
"key",
"=",
"key",
")",
")"
] | Partition ordered sequence by key.
Sequence is expected to already be ordered.
Parameters
----------
sequence: iterable data.
key: partition key function
Yields
-------
iterable tuple(s) of partition key, data list pairs.
Examples
--------
1. By object attributes.
Partition sequence of objects by a height and weight attributes
into an ordered dict.
>> attributes = ('height', 'weight')
>> OrderedDict(partition_ordered(sequence, attrgetter(*attributes)))
2. By index items.
Partition sequence by the first character index of each element.
>> index = 0
>> sequence = ['112', '124', '289', '220', 'Z23']
>> list(partition_ordered(sequence, itemgetter(index))) | [
"Partition",
"ordered",
"sequence",
"by",
"key",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/order.py#L4-L37 |
portfoliome/foil | foil/order.py | partition | def partition(predicate, iterable):
"""Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation.
"""
t1, t2 = tee(iterable)
return filterfalse(predicate, t1), filter(predicate, t2) | python | def partition(predicate, iterable):
"""Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation.
"""
t1, t2 = tee(iterable)
return filterfalse(predicate, t1), filter(predicate, t2) | [
"def",
"partition",
"(",
"predicate",
",",
"iterable",
")",
":",
"t1",
",",
"t2",
"=",
"tee",
"(",
"iterable",
")",
"return",
"filterfalse",
"(",
"predicate",
",",
"t1",
")",
",",
"filter",
"(",
"predicate",
",",
"t2",
")"
] | Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation. | [
"Use",
"a",
"predicate",
"to",
"partition",
"true",
"and",
"false",
"entries",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/order.py#L40-L50 |
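A quick demonstration, assuming the function is imported; note that the False-entries iterator comes first in the returned pair.

```python
odds, evens = partition(lambda n: n % 2 == 0, range(10))
print(list(odds))   # predicate False: [1, 3, 5, 7, 9]
print(list(evens))  # predicate True:  [0, 2, 4, 6, 8]
```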
PolyJIT/benchbuild | benchbuild/projects/gentoo/portage_gen.py | PortageFactory | def PortageFactory(name, NAME, DOMAIN, BaseClass=autoportage.AutoPortage):
"""
Create a new dynamic portage project.
    Auto-Generated projects can only be used for compile-time experiments,
because there simply is no run-time test defined for it. Therefore,
we implement the run symbol as a noop (with minor logging).
This way we avoid the default implementation for run() that all projects
inherit.
Args:
name: Name of the dynamic class.
NAME: NAME property of the dynamic class.
DOMAIN: DOMAIN property of the dynamic class.
BaseClass: Base class to use for the dynamic class.
Returns:
A new class with NAME,DOMAIN properties set, unable to perform
run-time tests.
Examples:
>>> from benchbuild.projects.gentoo.portage_gen import PortageFactory
>>> from benchbuild.experiments.empty import Empty
>>> c = PortageFactory("test", "NAME", "DOMAIN")
>>> c
<class '__main__.test'>
>>> i = c(Empty())
>>> i.NAME
'NAME'
>>> i.DOMAIN
'DOMAIN'
"""
def run_not_supported(self, *args, **kwargs):
"""Dynamic projects don't support a run() test."""
del args, kwargs # Unused
LOG.warning(
"Runtime testing not supported on auto-generated projects.")
return
newclass = type(
name, (BaseClass, ), {
"NAME": NAME,
"DOMAIN": DOMAIN,
"SRC_FILE": "none",
"VERSION": BaseClass.VERSION,
"GROUP": "auto-gentoo",
"run": run_not_supported,
"__module__": "__main__"
})
return newclass | python | def PortageFactory(name, NAME, DOMAIN, BaseClass=autoportage.AutoPortage):
"""
Create a new dynamic portage project.
    Auto-Generated projects can only be used for compile-time experiments,
because there simply is no run-time test defined for it. Therefore,
we implement the run symbol as a noop (with minor logging).
This way we avoid the default implementation for run() that all projects
inherit.
Args:
name: Name of the dynamic class.
NAME: NAME property of the dynamic class.
DOMAIN: DOMAIN property of the dynamic class.
BaseClass: Base class to use for the dynamic class.
Returns:
A new class with NAME,DOMAIN properties set, unable to perform
run-time tests.
Examples:
>>> from benchbuild.projects.gentoo.portage_gen import PortageFactory
>>> from benchbuild.experiments.empty import Empty
>>> c = PortageFactory("test", "NAME", "DOMAIN")
>>> c
<class '__main__.test'>
>>> i = c(Empty())
>>> i.NAME
'NAME'
>>> i.DOMAIN
'DOMAIN'
"""
def run_not_supported(self, *args, **kwargs):
"""Dynamic projects don't support a run() test."""
del args, kwargs # Unused
LOG.warning(
"Runtime testing not supported on auto-generated projects.")
return
newclass = type(
name, (BaseClass, ), {
"NAME": NAME,
"DOMAIN": DOMAIN,
"SRC_FILE": "none",
"VERSION": BaseClass.VERSION,
"GROUP": "auto-gentoo",
"run": run_not_supported,
"__module__": "__main__"
})
return newclass | [
"def",
"PortageFactory",
"(",
"name",
",",
"NAME",
",",
"DOMAIN",
",",
"BaseClass",
"=",
"autoportage",
".",
"AutoPortage",
")",
":",
"def",
"run_not_supported",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Dynamic projects don't support a run() test.\"\"\"",
"del",
"args",
",",
"kwargs",
"# Unused",
"LOG",
".",
"warning",
"(",
"\"Runtime testing not supported on auto-generated projects.\"",
")",
"return",
"newclass",
"=",
"type",
"(",
"name",
",",
"(",
"BaseClass",
",",
")",
",",
"{",
"\"NAME\"",
":",
"NAME",
",",
"\"DOMAIN\"",
":",
"DOMAIN",
",",
"\"SRC_FILE\"",
":",
"\"none\"",
",",
"\"VERSION\"",
":",
"BaseClass",
".",
"VERSION",
",",
"\"GROUP\"",
":",
"\"auto-gentoo\"",
",",
"\"run\"",
":",
"run_not_supported",
",",
"\"__module__\"",
":",
"\"__main__\"",
"}",
")",
"return",
"newclass"
] | Create a new dynamic portage project.
Auto-Generated projects can only be used for compile-time experiments,
because there simply is no run-time test defined for it. Therefore,
we implement the run symbol as a noop (with minor logging).
This way we avoid the default implementation for run() that all projects
inherit.
Args:
name: Name of the dynamic class.
NAME: NAME property of the dynamic class.
DOMAIN: DOMAIN property of the dynamic class.
BaseClass: Base class to use for the dynamic class.
Returns:
A new class with NAME,DOMAIN properties set, unable to perform
run-time tests.
Examples:
>>> from benchbuild.projects.gentoo.portage_gen import PortageFactory
>>> from benchbuild.experiments.empty import Empty
>>> c = PortageFactory("test", "NAME", "DOMAIN")
>>> c
<class '__main__.test'>
>>> i = c(Empty())
>>> i.NAME
'NAME'
>>> i.DOMAIN
'DOMAIN' | [
"Create",
"a",
"new",
"dynamic",
"portage",
"project",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/portage_gen.py#L66-L118 |
chrisjsewell/jsonextended | jsonextended/units/core.py | apply_unitschema | def apply_unitschema(data, uschema, as_quantity=True,
raise_outerr=False, convert_base=False,
use_wildcards=False, list_of_dicts=False):
""" apply the unit schema to the data
Parameters
----------
data : dict
uschema : dict
units schema to apply
as_quantity : bool
if true, return values as pint.Quantity objects
raise_outerr : bool
raise error if a unit cannot be found in the outschema
    convert_base : bool
rescale units to base units
use_wildcards : bool
if true, can use * (matches everything) and ? (matches any single character)
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> data = {'energy':1,'x':[1,2],'other':{'y':[4,5]},'y':[4,5],'meta':None}
>>> uschema = {'energy':'eV','x':'nm','other':{'y':'m'},'y':'cm'}
>>> data_units = apply_unitschema(data,uschema)
>>> pprint(data_units)
{'energy': <Quantity(1, 'electron_volt')>,
'meta': None,
'other': {'y': <Quantity([4 5], 'meter')>},
'x': <Quantity([1 2], 'nanometer')>,
'y': <Quantity([4 5], 'centimeter')>}
>>> newschema = {'energy':'kJ','other':{'y':'nm'},'y':'m'}
>>> new_data = apply_unitschema(data_units,newschema)
>>> str(new_data["energy"])
'1.60217653e-22 kilojoule'
>>> new_data["other"]["y"].magnitude.round(3).tolist(), str(new_data["other"]["y"].units)
([4000000000.0, 5000000000.0], 'nanometer')
>>> old_data = apply_unitschema(new_data,uschema,as_quantity=False)
>>> old_data["energy"]
1.0
>>> old_data["other"]["y"].round(3).tolist()
[4.0, 5.0]
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
# flatten edict
uschema_flat = flatten(uschema, key_as_tuple=True)
# sorted by longest key size, to get best match first
uschema_keys = sorted(uschema_flat, key=len, reverse=True)
data_flat = flatten(data, key_as_tuple=True, list_of_dicts=list_of_dicts)
for dkey, dvalue in data_flat.items():
converted = False
for ukey in uschema_keys:
if not len(ukey) == len(dkey[-len(ukey):]):
continue
if use_wildcards:
match = all(
[fnmatch(d, u) for u, d in zip(ukey, dkey[-len(ukey):])])
else:
match = ukey == dkey[-len(ukey):]
if match:
# handle that a list/tuple of floats comes back as a numpy object array
if isinstance(dvalue, (list, tuple)):
dvalue = np.array(dvalue)
if dvalue.dtype == np.object:
dvalue = dvalue.astype(float)
if isinstance(dvalue, _Quantity):
quantity = dvalue.to(uschema_flat[ukey])
else:
quantity = ureg.Quantity(dvalue, uschema_flat[ukey])
if convert_base:
quantity = quantity.to_base_units()
if as_quantity:
data_flat[dkey] = quantity
else:
data_flat[dkey] = quantity.magnitude
converted = True
break
if not converted and raise_outerr:
raise KeyError('could not find units for {}'.format(dkey))
return unflatten(data_flat, list_of_dicts=list_of_dicts) | python | def apply_unitschema(data, uschema, as_quantity=True,
raise_outerr=False, convert_base=False,
use_wildcards=False, list_of_dicts=False):
""" apply the unit schema to the data
Parameters
----------
data : dict
uschema : dict
units schema to apply
as_quantity : bool
if true, return values as pint.Quantity objects
raise_outerr : bool
raise error if a unit cannot be found in the unit schema
convert_base : bool
rescale units to base units
use_wildcards : bool
if true, can use * (matches everything) and ? (matches any single character)
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> data = {'energy':1,'x':[1,2],'other':{'y':[4,5]},'y':[4,5],'meta':None}
>>> uschema = {'energy':'eV','x':'nm','other':{'y':'m'},'y':'cm'}
>>> data_units = apply_unitschema(data,uschema)
>>> pprint(data_units)
{'energy': <Quantity(1, 'electron_volt')>,
'meta': None,
'other': {'y': <Quantity([4 5], 'meter')>},
'x': <Quantity([1 2], 'nanometer')>,
'y': <Quantity([4 5], 'centimeter')>}
>>> newschema = {'energy':'kJ','other':{'y':'nm'},'y':'m'}
>>> new_data = apply_unitschema(data_units,newschema)
>>> str(new_data["energy"])
'1.60217653e-22 kilojoule'
>>> new_data["other"]["y"].magnitude.round(3).tolist(), str(new_data["other"]["y"].units)
([4000000000.0, 5000000000.0], 'nanometer')
>>> old_data = apply_unitschema(new_data,uschema,as_quantity=False)
>>> old_data["energy"]
1.0
>>> old_data["other"]["y"].round(3).tolist()
[4.0, 5.0]
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
# flatten edict
uschema_flat = flatten(uschema, key_as_tuple=True)
# sorted by longest key size, to get best match first
uschema_keys = sorted(uschema_flat, key=len, reverse=True)
data_flat = flatten(data, key_as_tuple=True, list_of_dicts=list_of_dicts)
for dkey, dvalue in data_flat.items():
converted = False
for ukey in uschema_keys:
if not len(ukey) == len(dkey[-len(ukey):]):
continue
if use_wildcards:
match = all(
[fnmatch(d, u) for u, d in zip(ukey, dkey[-len(ukey):])])
else:
match = ukey == dkey[-len(ukey):]
if match:
# handle that a list/tuple of floats comes back as a numpy object array
if isinstance(dvalue, (list, tuple)):
dvalue = np.array(dvalue)
if dvalue.dtype == np.object:
dvalue = dvalue.astype(float)
if isinstance(dvalue, _Quantity):
quantity = dvalue.to(uschema_flat[ukey])
else:
quantity = ureg.Quantity(dvalue, uschema_flat[ukey])
if convert_base:
quantity = quantity.to_base_units()
if as_quantity:
data_flat[dkey] = quantity
else:
data_flat[dkey] = quantity.magnitude
converted = True
break
if not converted and raise_outerr:
raise KeyError('could not find units for {}'.format(dkey))
return unflatten(data_flat, list_of_dicts=list_of_dicts) | [
"def",
"apply_unitschema",
"(",
"data",
",",
"uschema",
",",
"as_quantity",
"=",
"True",
",",
"raise_outerr",
"=",
"False",
",",
"convert_base",
"=",
"False",
",",
"use_wildcards",
"=",
"False",
",",
"list_of_dicts",
"=",
"False",
")",
":",
"# noqa: E501",
"try",
":",
"from",
"pint",
"import",
"UnitRegistry",
"ureg",
"=",
"UnitRegistry",
"(",
")",
"from",
"pint",
".",
"quantity",
"import",
"_Quantity",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'please install pint to use this module'",
")",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"# flatten edict",
"uschema_flat",
"=",
"flatten",
"(",
"uschema",
",",
"key_as_tuple",
"=",
"True",
")",
"# sorted by longest key size, to get best match first",
"uschema_keys",
"=",
"sorted",
"(",
"uschema_flat",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"data_flat",
"=",
"flatten",
"(",
"data",
",",
"key_as_tuple",
"=",
"True",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"for",
"dkey",
",",
"dvalue",
"in",
"data_flat",
".",
"items",
"(",
")",
":",
"converted",
"=",
"False",
"for",
"ukey",
"in",
"uschema_keys",
":",
"if",
"not",
"len",
"(",
"ukey",
")",
"==",
"len",
"(",
"dkey",
"[",
"-",
"len",
"(",
"ukey",
")",
":",
"]",
")",
":",
"continue",
"if",
"use_wildcards",
":",
"match",
"=",
"all",
"(",
"[",
"fnmatch",
"(",
"d",
",",
"u",
")",
"for",
"u",
",",
"d",
"in",
"zip",
"(",
"ukey",
",",
"dkey",
"[",
"-",
"len",
"(",
"ukey",
")",
":",
"]",
")",
"]",
")",
"else",
":",
"match",
"=",
"ukey",
"==",
"dkey",
"[",
"-",
"len",
"(",
"ukey",
")",
":",
"]",
"if",
"match",
":",
"# handle that it return an numpy object type if list of floats",
"if",
"isinstance",
"(",
"dvalue",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"dvalue",
"=",
"np",
".",
"array",
"(",
"dvalue",
")",
"if",
"dvalue",
".",
"dtype",
"==",
"np",
".",
"object",
":",
"dvalue",
"=",
"dvalue",
".",
"astype",
"(",
"float",
")",
"if",
"isinstance",
"(",
"dvalue",
",",
"_Quantity",
")",
":",
"quantity",
"=",
"dvalue",
".",
"to",
"(",
"uschema_flat",
"[",
"ukey",
"]",
")",
"else",
":",
"quantity",
"=",
"ureg",
".",
"Quantity",
"(",
"dvalue",
",",
"uschema_flat",
"[",
"ukey",
"]",
")",
"if",
"convert_base",
":",
"quantity",
"=",
"quantity",
".",
"to_base_units",
"(",
")",
"if",
"as_quantity",
":",
"data_flat",
"[",
"dkey",
"]",
"=",
"quantity",
"else",
":",
"data_flat",
"[",
"dkey",
"]",
"=",
"quantity",
".",
"magnitude",
"break",
"if",
"not",
"converted",
"and",
"raise_outerr",
":",
"raise",
"KeyError",
"(",
"'could not find units for {}'",
".",
"format",
"(",
"dkey",
")",
")",
"return",
"unflatten",
"(",
"data_flat",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")"
] | apply the unit schema to the data
Parameters
----------
data : dict
uschema : dict
units schema to apply
as_quantity : bool
if true, return values as pint.Quantity objects
raise_outerr : bool
raise error if a unit cannot be found in the unit schema
convert_base : bool
rescale units to base units
use_wildcards : bool
if true, can use * (matches everything) and ? (matches any single character)
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> data = {'energy':1,'x':[1,2],'other':{'y':[4,5]},'y':[4,5],'meta':None}
>>> uschema = {'energy':'eV','x':'nm','other':{'y':'m'},'y':'cm'}
>>> data_units = apply_unitschema(data,uschema)
>>> pprint(data_units)
{'energy': <Quantity(1, 'electron_volt')>,
'meta': None,
'other': {'y': <Quantity([4 5], 'meter')>},
'x': <Quantity([1 2], 'nanometer')>,
'y': <Quantity([4 5], 'centimeter')>}
>>> newschema = {'energy':'kJ','other':{'y':'nm'},'y':'m'}
>>> new_data = apply_unitschema(data_units,newschema)
>>> str(new_data["energy"])
'1.60217653e-22 kilojoule'
>>> new_data["other"]["y"].magnitude.round(3).tolist(), str(new_data["other"]["y"].units)
([4000000000.0, 5000000000.0], 'nanometer')
>>> old_data = apply_unitschema(new_data,uschema,as_quantity=False)
>>> old_data["energy"]
1.0
>>> old_data["other"]["y"].round(3).tolist()
[4.0, 5.0] | [
"apply",
"the",
"unit",
"schema",
"to",
"the",
"data"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/units/core.py#L23-L123 |
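As a supplement to the doctests above, a hedged sketch of the use_wildcards option; it assumes pint is installed and apply_unitschema is in scope, and the field names are illustrative.

# A '*' pattern key assigns the same unit to several data fields at once.
data = {"x_start": 1.0, "x_end": 2.0, "note": None}
uschema = {"x_*": "nm"}
out = apply_unitschema(data, uschema, use_wildcards=True)
print(out["x_start"])  # 1.0 nanometer
print(out["note"])     # None -- unmatched keys pass through untouched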
chrisjsewell/jsonextended | jsonextended/units/core.py | split_quantities | def split_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" split pint.Quantity objects into <unit,magnitude> pairs
Parameters
----------
data : dict
units : str
name for units key
magnitude : str
name for magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> from pint import UnitRegistry
>>> ureg = UnitRegistry()
>>> Q = ureg.Quantity
>>> qdata = {'energy': Q(1.602e-22, 'kilojoule'),
... 'meta': None,
... 'other': {'y': Q([4,5,6], 'nanometer')},
... 'x': Q([1,2,3], 'nanometer'),
... 'y': Q([8,9,10], 'meter')}
...
>>> split_data = split_quantities(qdata)
>>> pprint(split_data)
{'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
'meta': None,
'other': {'y': {'magnitude': array([4, 5, 6]), 'units': 'nanometer'}},
'x': {'magnitude': array([1, 2, 3]), 'units': 'nanometer'},
'y': {'magnitude': array([ 8, 9, 10]), 'units': 'meter'}}
"""
try:
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten = flatten(data, list_of_dicts=list_of_dicts)
for key, val in data_flatten.items():
if isinstance(val, _Quantity):
data_flatten[key] = {units: str(val.units),
magnitude: val.magnitude}
return unflatten(data_flatten, list_of_dicts=list_of_dicts) | python | def split_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" split pint.Quantity objects into <unit,magnitude> pairs
Parameters
----------
data : dict
units : str
name for units key
magnitude : str
name for magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> from pint import UnitRegistry
>>> ureg = UnitRegistry()
>>> Q = ureg.Quantity
>>> qdata = {'energy': Q(1.602e-22, 'kilojoule'),
... 'meta': None,
... 'other': {'y': Q([4,5,6], 'nanometer')},
... 'x': Q([1,2,3], 'nanometer'),
... 'y': Q([8,9,10], 'meter')}
...
>>> split_data = split_quantities(qdata)
>>> pprint(split_data)
{'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
'meta': None,
'other': {'y': {'magnitude': array([4, 5, 6]), 'units': 'nanometer'}},
'x': {'magnitude': array([1, 2, 3]), 'units': 'nanometer'},
'y': {'magnitude': array([ 8, 9, 10]), 'units': 'meter'}}
"""
try:
from pint.quantity import _Quantity
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten = flatten(data, list_of_dicts=list_of_dicts)
for key, val in data_flatten.items():
if isinstance(val, _Quantity):
data_flatten[key] = {units: str(val.units),
magnitude: val.magnitude}
return unflatten(data_flatten, list_of_dicts=list_of_dicts) | [
"def",
"split_quantities",
"(",
"data",
",",
"units",
"=",
"'units'",
",",
"magnitude",
"=",
"'magnitude'",
",",
"list_of_dicts",
"=",
"False",
")",
":",
"try",
":",
"from",
"pint",
".",
"quantity",
"import",
"_Quantity",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'please install pint to use this module'",
")",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"data_flatten",
"=",
"flatten",
"(",
"data",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"for",
"key",
",",
"val",
"in",
"data_flatten",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"_Quantity",
")",
":",
"data_flatten",
"[",
"key",
"]",
"=",
"{",
"units",
":",
"str",
"(",
"val",
".",
"units",
")",
",",
"magnitude",
":",
"val",
".",
"magnitude",
"}",
"return",
"unflatten",
"(",
"data_flatten",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")"
] | split pint.Quantity objects into <unit,magnitude> pairs
Parameters
----------
data : dict
units : str
name for units key
magnitude : str
name for magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> from pint import UnitRegistry
>>> ureg = UnitRegistry()
>>> Q = ureg.Quantity
>>> qdata = {'energy': Q(1.602e-22, 'kilojoule'),
... 'meta': None,
... 'other': {'y': Q([4,5,6], 'nanometer')},
... 'x': Q([1,2,3], 'nanometer'),
... 'y': Q([8,9,10], 'meter')}
...
>>> split_data = split_quantities(qdata)
>>> pprint(split_data)
{'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
'meta': None,
'other': {'y': {'magnitude': array([4, 5, 6]), 'units': 'nanometer'}},
'x': {'magnitude': array([1, 2, 3]), 'units': 'nanometer'},
'y': {'magnitude': array([ 8, 9, 10]), 'units': 'meter'}} | [
"split",
"pint",
".",
"Quantity",
"objects",
"into",
"<unit",
"magnitude",
">",
"pairs"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/units/core.py#L126-L173 |
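The units and magnitude key names are configurable; a short sketch under the same pint setup as the doctest above, reusing its Q = ureg.Quantity.

# Custom key names for the split <unit, magnitude> pairs.
qdata = {"length": Q(3.0, "meter")}
print(split_quantities(qdata, units="u", magnitude="m"))
# {'length': {'m': 3.0, 'u': 'meter'}}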
chrisjsewell/jsonextended | jsonextended/units/core.py | combine_quantities | def combine_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" combine <unit,magnitude> pairs into pint.Quantity objects
Parameters
----------
data : dict
units : str
name of units key
magnitude : str
name of magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> sdata = {'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
... 'meta': None,
... 'other': {'y': {'magnitude': [4, 5, 6], 'units': 'nanometer'}},
... 'x': {'magnitude': [1, 2, 3], 'units': 'nanometer'},
... 'y': {'magnitude': [8,9,10], 'units': 'meter'}}
...
>>> combined_data = combine_quantities(sdata)
>>> pprint(combined_data)
{'energy': <Quantity(1.602e-22, 'kilojoule')>,
'meta': None,
'other': {'y': <Quantity([4 5 6], 'nanometer')>},
'x': <Quantity([1 2 3], 'nanometer')>,
'y': <Quantity([ 8 9 10], 'meter')>}
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten2d = flatten2d(data, list_of_dicts=list_of_dicts)
new_dict = {}
for key, val in list(data_flatten2d.items()):
if units in val and magnitude in val:
quantity = ureg.Quantity(val.pop(magnitude), val.pop(units))
if not val:
data_flatten2d.pop(key)
new_dict[key] = quantity
final_dict = merge([data_flatten2d, new_dict])
# olddict = unflatten(data_flatten2d,list_of_dicts=list_of_dicts)
# new_dict = unflatten(new_dict,list_of_dicts=list_of_dicts)
return unflatten(
final_dict, list_of_dicts=list_of_dicts) | python | def combine_quantities(data, units='units', magnitude='magnitude',
list_of_dicts=False):
""" combine <unit,magnitude> pairs into pint.Quantity objects
Parameters
----------
data : dict
units : str
name of units key
magnitude : str
name of magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> sdata = {'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
... 'meta': None,
... 'other': {'y': {'magnitude': [4, 5, 6], 'units': 'nanometer'}},
... 'x': {'magnitude': [1, 2, 3], 'units': 'nanometer'},
... 'y': {'magnitude': [8,9,10], 'units': 'meter'}}
...
>>> combined_data = combine_quantities(sdata)
>>> pprint(combined_data)
{'energy': <Quantity(1.602e-22, 'kilojoule')>,
'meta': None,
'other': {'y': <Quantity([4 5 6], 'nanometer')>},
'x': <Quantity([1 2 3], 'nanometer')>,
'y': <Quantity([ 8 9 10], 'meter')>}
""" # noqa: E501
try:
from pint import UnitRegistry
ureg = UnitRegistry()
except ImportError:
raise ImportError('please install pint to use this module')
list_of_dicts = '__list__' if list_of_dicts else None
data_flatten2d = flatten2d(data, list_of_dicts=list_of_dicts)
new_dict = {}
for key, val in list(data_flatten2d.items()):
if units in val and magnitude in val:
quantity = ureg.Quantity(val.pop(magnitude), val.pop(units))
if not val:
data_flatten2d.pop(key)
new_dict[key] = quantity
final_dict = merge([data_flatten2d, new_dict])
# olddict = unflatten(data_flatten2d,list_of_dicts=list_of_dicts)
# new_dict = unflatten(new_dict,list_of_dicts=list_of_dicts)
return unflatten(
final_dict, list_of_dicts=list_of_dicts) | [
"def",
"combine_quantities",
"(",
"data",
",",
"units",
"=",
"'units'",
",",
"magnitude",
"=",
"'magnitude'",
",",
"list_of_dicts",
"=",
"False",
")",
":",
"# noqa: E501",
"try",
":",
"from",
"pint",
"import",
"UnitRegistry",
"ureg",
"=",
"UnitRegistry",
"(",
")",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'please install pint to use this module'",
")",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"data_flatten2d",
"=",
"flatten2d",
"(",
"data",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"new_dict",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"data_flatten2d",
".",
"items",
"(",
")",
")",
":",
"if",
"units",
"in",
"val",
"and",
"magnitude",
"in",
"val",
":",
"quantity",
"=",
"ureg",
".",
"Quantity",
"(",
"val",
".",
"pop",
"(",
"magnitude",
")",
",",
"val",
".",
"pop",
"(",
"units",
")",
")",
"if",
"not",
"val",
":",
"data_flatten2d",
".",
"pop",
"(",
"key",
")",
"new_dict",
"[",
"key",
"]",
"=",
"quantity",
"final_dict",
"=",
"merge",
"(",
"[",
"data_flatten2d",
",",
"new_dict",
"]",
")",
"# olddict = unflatten(data_flatten2d,list_of_dicts=list_of_dicts)",
"# new_dict = unflatten(new_dict,list_of_dicts=list_of_dicts)",
"return",
"unflatten",
"(",
"final_dict",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")"
] | combine <unit,magnitude> pairs into pint.Quantity objects
Parameters
----------
data : dict
units : str
name of units key
magnitude : str
name of magnitude key
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> sdata = {'energy': {'magnitude': 1.602e-22, 'units': 'kilojoule'},
... 'meta': None,
... 'other': {'y': {'magnitude': [4, 5, 6], 'units': 'nanometer'}},
... 'x': {'magnitude': [1, 2, 3], 'units': 'nanometer'},
... 'y': {'magnitude': [8,9,10], 'units': 'meter'}}
...
>>> combined_data = combine_quantities(sdata)
>>> pprint(combined_data)
{'energy': <Quantity(1.602e-22, 'kilojoule')>,
'meta': None,
'other': {'y': <Quantity([4 5 6], 'nanometer')>},
'x': <Quantity([1 2 3], 'nanometer')>,
'y': <Quantity([ 8 9 10], 'meter')>} | [
"combine",
"<unit",
"magnitude",
">",
"pairs",
"into",
"pint",
".",
"Quantity",
"objects"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/units/core.py#L176-L228 |
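combine_quantities is the inverse of split_quantities above; a hedged round-trip sketch under the same setup (Q = ureg.Quantity from the doctest).

# Splitting and recombining should reproduce the original quantity.
qdata = {"length": Q(3.0, "meter")}
roundtrip = combine_quantities(split_quantities(qdata))
assert roundtrip["length"] == qdata["length"]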
PolyJIT/benchbuild | benchbuild/utils/versions.py | get_version_from_cache_dir | def get_version_from_cache_dir(src_file):
"""
Creates a version for a project out of the hash.
The hash is taken from the directory of the source file.
Args:
src_file: The source file of the project using this function.
Returns:
Either returns the first 7 digits of the hash as a string,
the entire hash as a string if the hash consists of 7 or
fewer digits, or None if the path is incorrect.
"""
if src_file is None:
return None
tmp_dir = local.path(str(CFG["tmp_dir"]))
if tmp_dir.exists():
cache_file = tmp_dir / src_file
dir_hash = get_hash_of_dirs(cache_file)
if dir_hash is None:
return None
if len(str(dir_hash)) <= 7:
return str(dir_hash)
return str(dir_hash)[:7]
return None | python | def get_version_from_cache_dir(src_file):
"""
Creates a version for a project out of the hash.
The hash is taken from the directory of the source file.
Args:
src_file: The source file of the project using this function.
Returns:
Either returns the first 7 digits of the hash as a string,
the entire hash as a string if the hash consists of 7 or
fewer digits, or None if the path is incorrect.
"""
if src_file is None:
return None
tmp_dir = local.path(str(CFG["tmp_dir"]))
if tmp_dir.exists():
cache_file = tmp_dir / src_file
dir_hash = get_hash_of_dirs(cache_file)
if dir_hash is None:
return None
if len(str(dir_hash)) <= 7:
return str(dir_hash)
return str(dir_hash)[:7]
return None | [
"def",
"get_version_from_cache_dir",
"(",
"src_file",
")",
":",
"if",
"src_file",
"is",
"None",
":",
"return",
"None",
"tmp_dir",
"=",
"local",
".",
"path",
"(",
"str",
"(",
"CFG",
"[",
"\"tmp_dir\"",
"]",
")",
")",
"if",
"tmp_dir",
".",
"exists",
"(",
")",
":",
"cache_file",
"=",
"tmp_dir",
"/",
"src_file",
"dir_hash",
"=",
"get_hash_of_dirs",
"(",
"cache_file",
")",
"if",
"dir_hash",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"str",
"(",
"dir_hash",
")",
")",
"<=",
"7",
":",
"return",
"str",
"(",
"dir_hash",
")",
"return",
"str",
"(",
"dir_hash",
")",
"[",
":",
"7",
"]",
"return",
"None"
] | Creates a version for a project out of the hash.
The hash is taken from the directory of the source file.
Args:
src_file: The source file of the project using this function.
Returns:
Either returns the first 7 digits of the hash as a string,
the entire hash as a string if the hash consists of 7 or
fewer digits, or None if the path is incorrect. | [
"Creates",
"a",
"version",
"for",
"a",
"project",
"out",
"of",
"the",
"hash",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/versions.py#L11-L37 |
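The shortening rule in the docstring can be isolated into a few standalone lines; dir_hash below stands in for the get_hash_of_dirs result.

# Standalone sketch of the hash-shortening rule used above.
def short_version(dir_hash):
    s = str(dir_hash)
    return s if len(s) <= 7 else s[:7]

assert short_version(1234567) == "1234567"    # short hashes kept whole
assert short_version(987654321) == "9876543"  # longer ones cut to 7 digits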
PolyJIT/benchbuild | benchbuild/utils/versions.py | get_git_hash | def get_git_hash(from_url):
"""
Get the git commit hash of HEAD from :from_url.
Args:
from_url: The file system url of our git repository.
Returns:
git commit hash of HEAD, or empty string.
"""
from benchbuild.utils.cmd import git
if from_url is None:
return ""
if not path.exists(from_url):
return ""
with local.cwd(from_url):
return git("rev-parse", "HEAD", retcode=None) | python | def get_git_hash(from_url):
"""
Get the git commit hash of HEAD from :from_url.
Args:
from_url: The file system url of our git repository.
Returns:
git commit hash of HEAD, or empty string.
"""
from benchbuild.utils.cmd import git
if from_url is None:
return ""
if not path.exists(from_url):
return ""
with local.cwd(from_url):
return git("rev-parse", "HEAD", retcode=None) | [
"def",
"get_git_hash",
"(",
"from_url",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"cmd",
"import",
"git",
"if",
"from_url",
"is",
"None",
":",
"return",
"\"\"",
"if",
"not",
"path",
".",
"exists",
"(",
"from_url",
")",
":",
"return",
"\"\"",
"with",
"local",
".",
"cwd",
"(",
"from_url",
")",
":",
"return",
"git",
"(",
"\"rev-parse\"",
",",
"\"HEAD\"",
",",
"retcode",
"=",
"None",
")"
] | Get the git commit hash of HEAD from :from_url.
Args:
from_url: The file system url of our git repository.
Returns:
git commit hash of HEAD, or empty string. | [
"Get",
"the",
"git",
"commit",
"hash",
"of",
"HEAD",
"from",
":",
"from_url",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/versions.py#L40-L58 |
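A hedged usage sketch; the checkout path is an illustrative assumption.

# Reads HEAD's commit hash via `git rev-parse`; missing or None paths
# yield "" exactly as coded above.
head = get_git_hash("/path/to/checkout")
print(head or "not a repository")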
PolyJIT/benchbuild | benchbuild/extensions/time.py | fetch_time_output | def fetch_time_output(marker, format_s, ins):
"""
Fetch the output of /usr/bin/time from a list of lines.
Args:
marker: The marker that limits the time output
format_s: The format string used to parse the timings
ins: A list of lines in which we look for the output.
Returns:
A list of timing tuples
"""
from parse import parse
timings = [x for x in ins if marker in x]
res = [parse(format_s, t) for t in timings]
return [_f for _f in res if _f] | python | def fetch_time_output(marker, format_s, ins):
"""
Fetch the output of /usr/bin/time from a list of lines.
Args:
marker: The marker that limits the time output
format_s: The format string used to parse the timings
ins: A list of lines in which we look for the output.
Returns:
A list of timing tuples
"""
from parse import parse
timings = [x for x in ins if marker in x]
res = [parse(format_s, t) for t in timings]
return [_f for _f in res if _f] | [
"def",
"fetch_time_output",
"(",
"marker",
",",
"format_s",
",",
"ins",
")",
":",
"from",
"parse",
"import",
"parse",
"timings",
"=",
"[",
"x",
"for",
"x",
"in",
"ins",
"if",
"marker",
"in",
"x",
"]",
"res",
"=",
"[",
"parse",
"(",
"format_s",
",",
"t",
")",
"for",
"t",
"in",
"timings",
"]",
"return",
"[",
"_f",
"for",
"_f",
"in",
"res",
"if",
"_f",
"]"
] | Fetch the output of /usr/bin/time from a list of lines.
Args:
marker: The marker that limits the time output
format_s: The format string used to parse the timings
ins: A list of lines in which we look for the output.
Returns:
A list of timing tuples | [
"Fetch",
"the",
"output",
"/",
"usr",
"/",
"bin",
"/",
"time",
"from",
"a",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/extensions/time.py#L42-L58 |
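A hedged sketch of the marker/format interplay; the marker and format string below are assumptions, not benchbuild's actual /usr/bin/time configuration.

# Lines containing the marker are parsed with the `parse` library.
lines = ["TIME: 1.23 2.34", "unrelated output line"]
res = fetch_time_output("TIME:", "TIME: {:f} {:f}", lines)
print(res[0][0], res[0][1])  # 1.23 2.34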
PolyJIT/benchbuild | benchbuild/utils/path.py | path_to_list | def path_to_list(pathstr):
"""Conver a path string to a list of path elements."""
return [elem for elem in pathstr.split(os.path.pathsep) if elem] | python | def path_to_list(pathstr):
"""Conver a path string to a list of path elements."""
return [elem for elem in pathstr.split(os.path.pathsep) if elem] | [
"def",
"path_to_list",
"(",
"pathstr",
")",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"pathstr",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
"if",
"elem",
"]"
] | Convert a path string to a list of path elements. | [
"Conver",
"a",
"path",
"string",
"to",
"a",
"list",
"of",
"path",
"elements",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L12-L14 |
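A small sketch showing that empty elements are dropped.

import os
joined = os.path.pathsep.join(["/usr/bin", "", "/usr/local/bin"])
print(path_to_list(joined))  # ['/usr/bin', '/usr/local/bin']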
PolyJIT/benchbuild | benchbuild/utils/path.py | determine_path | def determine_path():
"""Borrowed from wxglade.py"""
root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
return os.path.dirname(os.path.abspath(root)) | python | def determine_path():
"""Borrowed from wxglade.py"""
root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
return os.path.dirname(os.path.abspath(root)) | [
"def",
"determine_path",
"(",
")",
":",
"root",
"=",
"__file__",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"root",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"root",
")",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"root",
")",
")"
] | Borrowed from wxglade.py | [
"Borrowed",
"from",
"wxglade",
".",
"py"
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L17-L22 |
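A usage sketch; the "res" subdirectory is an illustrative assumption for locating bundled resources.

import os
res_dir = os.path.join(determine_path(), "res")  # resources next to the module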
PolyJIT/benchbuild | benchbuild/utils/path.py | template_files | def template_files(path, exts=None):
"""
Return a list of filenames found at @path.
The list of filenames can be filtered by extensions.
Arguments:
path: Existing filepath we want to list.
exts: List of extensions to filter by.
Returns:
A list of filenames found in the path.
"""
if not os.path.isabs(path):
_path = os.path.join(determine_path(), path)
else:
_path = path
if not (os.path.exists(_path) and os.path.isdir(_path)):
return []
if not exts:
exts = []
files = os.listdir(_path)
files = [f for f in files if os.path.splitext(f)[-1] in exts]
files = [os.path.join(path, f) for f in files]
return files | python | def template_files(path, exts=None):
"""
Return a list of filenames found at @path.
The list of filenames can be filtered by extensions.
Arguments:
path: Existing filepath we want to list.
exts: List of extensions to filter by.
Returns:
A list of filenames found in the path.
"""
if not os.path.isabs(path):
_path = os.path.join(determine_path(), path)
else:
_path = path
if not (os.path.exists(_path) and os.path.isdir(_path)):
return []
if not exts:
exts = []
files = os.listdir(_path)
files = [f for f in files if os.path.splitext(f)[-1] in exts]
files = [os.path.join(path, f) for f in files]
return files | [
"def",
"template_files",
"(",
"path",
",",
"exts",
"=",
"None",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"determine_path",
"(",
")",
",",
"path",
")",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"_path",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"_path",
")",
")",
":",
"return",
"[",
"]",
"if",
"not",
"exts",
":",
"exts",
"=",
"[",
"]",
"files",
"=",
"os",
".",
"listdir",
"(",
"_path",
")",
"files",
"=",
"[",
"f",
"for",
"f",
"in",
"files",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"[",
"-",
"1",
"]",
"in",
"exts",
"]",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"f",
")",
"for",
"f",
"in",
"files",
"]",
"return",
"files"
] | Return a list of filenames found at @path.
The list of filenames can be filtered by extensions.
Arguments:
path: Existing filepath we want to list.
exts: List of extensions to filter by.
Returns:
A list of filenames found in the path. | [
"Return",
"a",
"list",
"of",
"filenames",
"found",
"at",
"@path",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L25-L47 |
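A hedged usage sketch; the directory name and extension are assumptions.

# Lists bundled templates, filtered by extension, joined onto `path`.
for tmpl in template_files("res", exts=[".inc"]):
    print(tmpl)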
PolyJIT/benchbuild | benchbuild/utils/path.py | template_str | def template_str(template):
"""Read a template file from the resources and return it as str."""
tmpl_file = os.path.join(determine_path(), template)
with open(tmpl_file, mode='r') as tmpl_strm:
return "".join(tmpl_strm.readlines()) | python | def template_str(template):
"""Read a template file from the resources and return it as str."""
tmpl_file = os.path.join(determine_path(), template)
with open(tmpl_file, mode='r') as tmpl_strm:
return "".join(tmpl_strm.readlines()) | [
"def",
"template_str",
"(",
"template",
")",
":",
"tmpl_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"determine_path",
"(",
")",
",",
"template",
")",
"with",
"open",
"(",
"tmpl_file",
",",
"mode",
"=",
"'r'",
")",
"as",
"tmpl_strm",
":",
"return",
"\"\"",
".",
"join",
"(",
"tmpl_strm",
".",
"readlines",
"(",
")",
")"
] | Read a template file from the resources and return it as str. | [
"Read",
"a",
"template",
"file",
"from",
"the",
"resources",
"and",
"return",
"it",
"as",
"str",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L55-L59 |
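A usage sketch combining template_str with string.Template; the template name and placeholder are illustrative assumptions.

from string import Template

text = template_str("res/wrapping/run_static.py.inc")  # assumed name
script = Template(text).safe_substitute(prog="/bin/true")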
PolyJIT/benchbuild | benchbuild/utils/path.py | mkfile_uchroot | def mkfile_uchroot(filepath, root="."):
"""
Create a file inside a uchroot env.
You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
filepath:
The filepath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
"""
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/touch", filepath]) | python | def mkfile_uchroot(filepath, root="."):
"""
Create a file inside a uchroot env.
You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
filepath:
The filepath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
"""
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/touch", filepath]) | [
"def",
"mkfile_uchroot",
"(",
"filepath",
",",
"root",
"=",
"\".\"",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"uchroot",
"import",
"no_args",
",",
"uretry",
"uchroot",
"=",
"no_args",
"(",
")",
"uchroot",
"=",
"uchroot",
"[",
"\"-E\"",
",",
"\"-A\"",
",",
"\"-C\"",
",",
"\"-w\"",
",",
"\"/\"",
",",
"\"-r\"",
"]",
"uchroot",
"=",
"uchroot",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"root",
")",
"]",
"uretry",
"(",
"uchroot",
"[",
"\"--\"",
",",
"\"/bin/touch\"",
",",
"filepath",
"]",
")"
] | Create a file inside a uchroot env.
You will want to use this when you need to create a file with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
filepath:
The filepath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container. | [
"Create",
"a",
"file",
"inside",
"a",
"uchroot",
"env",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L62-L82 |
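A usage sketch; it assumes a prepared container filesystem under ./container, and the file path is illustrative.

# Touches a file inside the container with correct subuid/subgid rights.
mkfile_uchroot("/etc/benchbuild.marker", root="./container")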
PolyJIT/benchbuild | benchbuild/utils/path.py | mkdir_uchroot | def mkdir_uchroot(dirpath, root="."):
"""
Create a directory inside a uchroot env.
You will want to use this when you need to create a directory with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
dirpath:
The dirpath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
"""
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/mkdir", "-p", dirpath]) | python | def mkdir_uchroot(dirpath, root="."):
"""
Create a directory inside a uchroot env.
You will want to use this when you need to create a directory with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
dirpath:
The dirpath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container.
"""
from benchbuild.utils.uchroot import no_args, uretry
uchroot = no_args()
uchroot = uchroot["-E", "-A", "-C", "-w", "/", "-r"]
uchroot = uchroot[os.path.abspath(root)]
uretry(uchroot["--", "/bin/mkdir", "-p", dirpath]) | [
"def",
"mkdir_uchroot",
"(",
"dirpath",
",",
"root",
"=",
"\".\"",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"uchroot",
"import",
"no_args",
",",
"uretry",
"uchroot",
"=",
"no_args",
"(",
")",
"uchroot",
"=",
"uchroot",
"[",
"\"-E\"",
",",
"\"-A\"",
",",
"\"-C\"",
",",
"\"-w\"",
",",
"\"/\"",
",",
"\"-r\"",
"]",
"uchroot",
"=",
"uchroot",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"root",
")",
"]",
"uretry",
"(",
"uchroot",
"[",
"\"--\"",
",",
"\"/bin/mkdir\"",
",",
"\"-p\"",
",",
"dirpath",
"]",
")"
] | Create a directory inside a uchroot env.
You will want to use this when you need to create a directory with appropriate
rights inside a uchroot container with subuid/subgid handling enabled.
Args:
dirpath:
The dirpath that should be created. Absolute inside the
uchroot container.
root:
The root PATH of the container filesystem as seen outside of
the container. | [
"Create",
"a",
"file",
"inside",
"a",
"uchroot",
"env",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L85-L105 |
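The directory variant mirrors mkfile_uchroot above; the same assumptions about the container root apply.

mkdir_uchroot("/var/tmp/benchbuild", root="./container")  # mkdir -p inside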
PolyJIT/benchbuild | benchbuild/utils/path.py | mkdir_interactive | def mkdir_interactive(dirpath):
"""
Create a directory if required.
This will query the user for a confirmation.
Args:
dirpath: The path to create.
"""
from benchbuild.utils.cmd import mkdir
if os.path.exists(dirpath):
return
response = ui.ask(
"The directory {dirname} does not exist yet. "
"Should I create it?".format(dirname=dirpath),
default_answer=True,
default_answer_str="yes")
if response:
mkdir("-p", dirpath)
print("Created directory {0}.".format(dirpath)) | python | def mkdir_interactive(dirpath):
"""
Create a directory if required.
This will query the user for a confirmation.
Args:
dirpath: The path to create.
"""
from benchbuild.utils.cmd import mkdir
if os.path.exists(dirpath):
return
response = ui.ask(
"The directory {dirname} does not exist yet. "
"Should I create it?".format(dirname=dirpath),
default_answer=True,
default_answer_str="yes")
if response:
mkdir("-p", dirpath)
print("Created directory {0}.".format(dirpath)) | [
"def",
"mkdir_interactive",
"(",
"dirpath",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"cmd",
"import",
"mkdir",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dirpath",
")",
":",
"return",
"response",
"=",
"ui",
".",
"ask",
"(",
"\"The directory {dirname} does not exist yet. \"",
"\"Should I create it?\"",
".",
"format",
"(",
"dirname",
"=",
"dirpath",
")",
",",
"default_answer",
"=",
"True",
",",
"default_answer_str",
"=",
"\"yes\"",
")",
"if",
"response",
":",
"mkdir",
"(",
"\"-p\"",
",",
"dirpath",
")",
"print",
"(",
"\"Created directory {0}.\"",
".",
"format",
"(",
"dirpath",
")",
")"
] | Create a directory if required.
This will query the user for a confirmation.
Args:
dirpath: The path to create. | [
"Create",
"a",
"directory",
"if",
"required",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/path.py#L108-L129 |
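A usage sketch; the scratch path is an illustrative assumption.

# Asks once on the console, then runs `mkdir -p` if confirmed.
mkdir_interactive("/tmp/benchbuild-scratch")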
mckib2/rawdatarinator | rawdatarinator/readMeasDataVB15.py | get_val_by_text | def get_val_by_text(root,search):
"""From MeasYaps XML root find next sibling of node matching 'search'.
MeasYaps looks like:
<value>Key</value>
<value>Value</value>
Thus 'search' is the Key and we want to find the node that has the Value.
We return the node containing the desired Value.
Arguments:
root (Element) root XML node (xml.etree.ElementTree Element)
search (String) String to match Element.text
"""
found_flag = False
for el in root.iter():
if found_flag:
return(el)
if el.text == search:
# We want the next el
found_flag = True | python | def get_val_by_text(root,search):
"""From MeasYaps XML root find next sibling of node matching 'search'.
MeasYaps looks like:
<value>Key</value>
<value>Value</value>
Thus 'search' is the Key and we want to find the node that has the Value.
We return the node containing the desired Value.
Arguments:
root (Element) root XML node (xml.etree.ElementTree Element)
search (String) String to match Element.text
"""
found_flag = False
for el in root.iter():
if found_flag:
return(el)
if el.text == search:
# We want the next el
found_flag = True | [
"def",
"get_val_by_text",
"(",
"root",
",",
"search",
")",
":",
"found_flag",
"=",
"False",
"for",
"el",
"in",
"root",
".",
"iter",
"(",
")",
":",
"if",
"found_flag",
":",
"return",
"(",
"el",
")",
"if",
"el",
".",
"text",
"==",
"search",
":",
"# We want the next el",
"found_flag",
"=",
"True"
] | From MeasYaps XML root find next sibling of node matching 'search'.
MeasYaps looks like:
<value>Key</value>
<value>Value</value>
Thus 'search' is the Key and we want to find the node that has the Value.
We return the node containing the desired Value.
Arguments:
root (Element) root XML node (xml.etree.ElementTree Element)
search (String) String to match Element.text | [
"From",
"MeasYaps",
"XML",
"root",
"find",
"next",
"sibling",
"of",
"node",
"matching",
"search",
"."
] | train | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/readMeasDataVB15.py#L9-L30 |
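A hedged, self-contained sketch of the key/value sibling lookup on a MeasYaps-style tree.

import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<MeasYaps>"
    "<value>sKSpace.lBaseResolution</value>"
    "<value>256</value>"
    "</MeasYaps>")
print(get_val_by_text(root, "sKSpace.lBaseResolution").text)  # 256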
mckib2/rawdatarinator | rawdatarinator/readMeasDataVB15.py | get_yaps_by_name | def get_yaps_by_name(root,name,afun=lambda x:x,default=None):
"""From XML root, return value of node matching attribute 'name'.
Arguments:
root (Element) Root XML node (xml.etree.ElementTree Element).
This is the root of the entire XML document, not the YAPS
subtree.
name (String) name='name' attribute of ParamLong tag to be
matched.
afun Anonymous function in the form of a lambda expression to
process the string value. Defaults to the identity function.
default Default value if node is not found. Defaults to 'None'.
"""
node = root.find("ParamMap[@name='YAPS']/ParamLong[@name='%s']/value" % name)
if node is not None:
return(afun(node.text))
else:
return(default) | python | def get_yaps_by_name(root,name,afun=lambda x:x,default=None):
"""From XML root, return value of node matching attribute 'name'.
Arguments:
root (Element) Root XML node (xml.etree.ElementTree Element).
This is the root of the entire XML document, not the YAPS
subtree.
name (String) name='name' attribute of ParamLong tag to be
matched.
afun Anonymous function in the form of a lambda expression to
process the string value. Defaults to the identity function.
default Default value if node is not found. Defaults to 'None'.
"""
node = root.find("ParamMap[@name='YAPS']/ParamLong[@name='%s']/value" % name)
if node is not None:
return(afun(node.text))
else:
return(default) | [
"def",
"get_yaps_by_name",
"(",
"root",
",",
"name",
",",
"afun",
"=",
"lambda",
"x",
":",
"x",
",",
"default",
"=",
"None",
")",
":",
"node",
"=",
"root",
".",
"find",
"(",
"\"ParamMap[@name='YAPS']/ParamLong[@name='%s']/value\"",
"%",
"name",
")",
"if",
"node",
"is",
"not",
"None",
":",
"return",
"(",
"afun",
"(",
"node",
".",
"text",
")",
")",
"else",
":",
"return",
"(",
"default",
")"
] | From XML root, return value of node matching attribute 'name'.
Arguments:
root (Element) Root XML node (xml.etree.ElementTree Element).
This is the root of the entire XML document, not the YAPS
subtree.
name (String) name='name' attribute of ParamLong tag to be
matched.
afun Anonymous function in the form of a lambda expression to
process the string value. Defaults to the identity function.
default Default value if node is not found. Defaults to 'None'. | [
"From",
"XML",
"root",
"return",
"value",
"of",
"node",
"matching",
"attribute",
"name",
"."
] | train | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/readMeasDataVB15.py#L32-L53 |
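A hedged, self-contained sketch; the parameter name mirrors one used later in readMeasDataVB15, and the tree is a minimal stand-in for the real protocol.

import xml.etree.ElementTree as ET

root = ET.fromstring(
    '<root><ParamMap name="YAPS">'
    '<ParamLong name="iNoOfFourierColumns"><value>512</value></ParamLong>'
    '</ParamMap></root>')
print(get_yaps_by_name(root, "iNoOfFourierColumns", lambda x: int(x)))  # 512
print(get_yaps_by_name(root, "lMissing", default=-1))                   # -1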
mckib2/rawdatarinator | rawdatarinator/readMeasDataVB15.py | readMeasDataVB15 | def readMeasDataVB15(filename,
resetFFTscale=False,
readOneCoil=False,
readPhaseCorInfo=False,
readNavigator=False,
readTimeStamp=True,
nNavEK=False,
removeOS=False,
removeOSafter=False,
transformToImageSpace=False,
writeToFile=False,
npz=False):
"""Read raw data from Siemens MRI scanners with IDEA VB15.
Will return an array of measured k-space data from raw data from
Siemens MRI scanners using IDEA VB15 (single value). If the option
'-I' is used, then image space data will be returned instead.
Usage:
readMeasDataVB15 filename [ -t ] [ -rfft ] [ -r1 ] [ -rp ] [ -rn ]
[ -skipts ] [ -nnavek ] [ -ros ]
[ -rosa ] [ -I ] [ -w ] [-npz]
Examples:
python3 -m rawdatarinator.readMeasDataVB15 raw.dat -w
or using the shortned alias...
python3 -m rawdatarinator.raw raw.dat -w
Command-line Options:
filename Filename of file containing raw measurements.
-rfft (resetFFTscale)
Resets FFTscale and DataCorrection for each coil
to 1.
-r1 (readOneCoil)
Read measurement data from from individual coil.
-rp (readPhaseCorInfo)
_
-rn (readNavigator)
_
-skipts (skip readTimeStamp)
_
-nnavek (nNavEK)
_
-ros (removeOS)
Flag to remove oversampling (OS) in the x
direction. removeOS=True is more efficient as it
processes each readout line independently,
reducing the required memory space to keep all
measured data.
-rosa (removeOSafter)
Flag to remove oversampling (OS) in the x
direction. This works in image space, cutting FOV.
Not likely a good idea for radial.
-I (transformToImageSpace)
Produce image space representation. Note that
there is no correction for partial Fourier or
parallel imaging k-space undersampling. The given
version of code only uses numpy's FFT operation.
-w (writeToFile)
Save k-space or image space volume. Currently the
output filename is auto generated.
-npz (npz)
Save k-space or image space volume using the .npz
file extension. Default is to use hdf5 file
standard.
-h (help)
Displays this documentation.
"""
filename_temp = os.path.splitext(filename)[0]
if transformToImageSpace is False:
filenameOut = '%s_Kspace' % filename_temp
else:
filenameOut = '%s_imageSpace' % filename_temp
# Useful Parameters
globalHeader = 32
localHeader = 128
infoParser = InfoParser()
xmlstr = infoParser.raw2xml(filename)
# Start in MeasYaps. MeasYaps starts with <value mod="MeasYaps"> and ends at
# the next XProtocol mod='Phoenix'
startIdx = xmlstr.find('<value mod="MeasYaps">')
endIdx = xmlstr.find('<XProtocol mod="Phoenix">')
my_xmlstr = '<MeasYaps>' + xmlstr[startIdx:endIdx] + '</MeasYaps>' # add root
# Parse into XML
root = ET.fromstring(my_xmlstr)
# Used to decode Partial Fourier fractions (poor man's switch)
fractionDict = { 10: 1.0,
8 : 7./8.,
4 : 0.75,
2 : 5./8.,
1 : 0.5 }
# vals are tuples: (key,search,default,lambda)
# key => key to the dictionary entry
# search => string to search the xml document with
# default => if not found in xml doc, use this value
# lambda => whatever value you end up with, operate on it with this
# anonymous function.
vals = [
('ScanDimension','sKSpace.ucDimension',None,lambda x:int(x,16)),
('flag3D',None,None,lambda _:True if data['ScanDimension'] is 4 else False),
('NxAll','sKSpace.lBaseResolution',None,lambda x:int(x)),
('NyAll','sKSpace.lPhaseEncodingLines',None,lambda x:int(x)),
('OSfactorPE','sKSpace.dPhaseOversamplingForDialog',None,lambda x:1.0 if my_xmlstr.find('sKSpace.dPhaseOversamplingForDialog') < 0 else 1.0+float(x)),
('NzAll','sKSpace.lPartitions',None,lambda x:int(x)),
('OSfactor3D','sKSpace.dSliceOversamplingForDialog',None,lambda x:1.0 if my_xmlstr.find('sKSpace.dSliceOversamplingForDialog') < 0 else 1.0+float(x)),
('phaseResolution','sKSpace.dPhaseResolution',None,lambda x:float(x)),
('sliceResolution','sKSpace.dSliceResolution',None,lambda x:float(x)),
('Nsl','sSliceArray.lSize',None,lambda x:int(x)),
('Nconc','sSliceArray.lConc',None,lambda x:int(x)),
('Nc',None,None,lambda _:len(re.findall(r'\.lRxChannelConnected',my_xmlstr))),
('Nc',None,None,lambda _:data['Nc']-1 if my_xmlstr.find('AdjustSeq%/AdjCoilSensSeq') > 0 else data['Nc']),
('nContrast','lContrasts',1,lambda x:int(x)),
('nSet','lSets',1,lambda x:int(x)),
('nAverage','lAverages',1,lambda x:int(x)),
('nRepetition','lRepetitions',None,lambda x:1 if my_xmlstr.find('lRepetitions') < 0 else 1+int(x)),
('nPhase','sPhysioImaging.lPhases',1,lambda x:int(x)),
('fractionY','sKSpace.PhasePartialFourier',10,lambda x: fractionDict[int(x)]),
('fractionZ','sKSpace.SlicePartialFourier',10,lambda x: fractionDict[int(x)]),
('phasePartialFourierForSNR','sKSpace.dSeqPhasePartialFourierForSNR',1.0,lambda x:float(x)),
('EPIFactor','sFastImaging.lEPIFactor',None,lambda x:int(x)),
('turboFactor','sFastImaging.lTurboFactor',1,lambda x:int(x)),
('PATMode','sPat.ucPATMode',None,lambda x:int(x,16)),
('PATRefScanMode','sPat.ucRefScanMode',None,lambda x:int(x,16)),
('AccelFactorPE','sPat.lAccelFactPE',None,lambda x:int(x)),
('AccelFactor3D','sPat.lAccelFact3D',None,lambda x:int(x)),
('nRefLinesPE','sPat.lRefLinesPE',None,lambda x:0 if data['AccelFactorPE'] is 1 else int(x)),
('nRefLines3D','sPat.lRefLines3D',None,lambda x:0 if data['AccelFactor3D'] is 1 else int(x)) ]
# Evaluate all tuples
data = dict() # dictionary to store all values we want to compre with MATLAB
for tup in vals:
if tup[1] is not None:
idx = my_xmlstr.find(tup[1])
else:
idx = -1
if idx < 0:
val = tup[2] # Take the default if we can't find it
else:
val = get_val_by_text(root,tup[1]).text
afun = tup[3]
if afun is not None:
val = afun(val) # Evaluate anonymous function if provided
data[tup[0]] = val # Store the value in the dictionary
## Now use the whole xml document
root = ET.fromstring(xmlstr)
# Enforce a max value for Nc
Nct = get_yaps_by_name(root,'iMaxNoOfRxChannels',lambda x:int(x))
if Nct is not None:
data['Nct'] = Nct
if Nct < data['Nc']:
data['Nc'] = Nct
# If this exists, then we'll need it
nPhCorrScan = get_yaps_by_name(root,'lNoOfPhaseCorrScans',lambda x:int(x))
if nPhCorrScan is not None:
data['nPhCorrScan'] = nPhCorrScan
# Define some more variables
if data['turboFactor'] > 1:
data['nPhCorEcho'] = 1
data['nPhCorScan'] = 1
if data['EPIFactor'] > 1:
data['nPhCorScan'] = 1
data['nPhCorEcho'] = 3
if data['AccelFactorPE'] is 1:
data['FirstFourierLine'] = 1
data['FirstRefLine'] = 1
else:
data['FirstFourierLine'] = get_yaps_by_name(root,'lFirstFourierLine',lambda x:1+int(x),1)
data['FirstRefLine'] = get_yaps_by_name(root,'lFirstRefLine',lambda x:1+int(x),1)
if data['AccelFactor3D'] is 1:
data['FirstFourierPar'] = 1
data['FirstRefPartition'] = 1
else:
data['FirstFourierPartition'] = get_yaps_by_name(root,'lFirstFourierPartition',lambda x:1+int(x),1)
data['FirstRefPartition'] = get_yaps_by_name(root,'lFirstRefPartition',lambda x:1+int(x),1)
data['NxOS'] = get_yaps_by_name(root,'iNoOfFourierColumns',lambda x:int(x))
data['OSfactorRO'] = 2 # we can actually find this in YAPS, but MATLAB impl gives as magic num
data['Nx'] = int(np.around(data['NxOS']/data['OSfactorRO']))
data['Ny'] = int(root.find("ParamFunctor[@name='FillMiniHeaderData']/ParamLong[@name='NoOfFourierLines']/value").text)
data['Nz'] = get_yaps_by_name(root,'iNoOfFourierPartitions',lambda x:int(x))
data['NxRecon'] = get_yaps_by_name(root,'iRoFTLength',lambda x:int(x))
data['NyRecon'] = get_yaps_by_name(root,'iPEFTLength',lambda x:int(x))
data['NzRecon'] = 1 if data['Nz'] is 1 else get_yaps_by_name(root,'i3DFTLength',lambda x:int(x))
data['NslicePerConcat'] = get_yaps_by_name(root,'ushSlicePerConcat',lambda x:int(x))
## Partial Fourier Mode and Parameters
data['PCAlgorithm'] = get_yaps_by_name(root,'lPCAlgorithm',lambda x:int(x))
data['NoOfPhaseCorrColumns'] = get_yaps_by_name(root,'lNoOfPhaseCorrColumns',lambda x:int(x),data['Nx']/2)
data['NoOfPhaseCorrLines'] = get_yaps_by_name(root,'lNoOfPhaseCorrLines',lambda x:int(x),data['NyAll']/2)
data['NoOfPhaseCorrPartitions'] = get_yaps_by_name(root,'lNoOfPhaseCorrPartitions',lambda x:int(x),data['NzAll']/2)
data['ColSlopeLength'] = get_yaps_by_name(root,'lColSlopeLength',lambda x:int(x),data['Nx']/4)
# I think there's a mistake here in the MATLAB code: ColSlopeLength defined again instead of LinSlopeLength.
# Commented is it how I think it should be and we'll go from there. The value is not actually used.
# data['LinSlopeLength'] = get_yaps_by_name(root,'lLinSlopeLength',lambda x:int(x),data['NyAll']/4)
data['ColSlopeLength'] = get_yaps_by_name(root,'lLinSlopeLength',lambda x:int(x),data['NyAll']/4)
data['ParSlopeLength'] = get_yaps_by_name(root,'lParSlopeLength',lambda x:int(x),data['NzAll']/4)
## Raw data correction factors, use the MeasYaps portion of the xml document
root = ET.fromstring(my_xmlstr)
data['CorrFactor'] = np.ones(data['Nc'])
for c in range(data['Nc']):
text = 'axRawDataCorrectionFactor[0][%d].dRe' % c
data['CorrFactor'][c] = 1 if my_xmlstr.find(text) < 0 else float(get_val_by_text(root,text).text)
text = 'axRawDataCorrectionFactor[0][%d].dIm' % c
if my_xmlstr.find(text) >= 0:
data['CorrFactor'][c] = data['CorrFactor'][c] + 1j*float(get_val_by_text(root,text).text)
## FFT Correction Factors
data['FFTCorrFactor'] = np.ones(data['Nc'])
if resetFFTscale is False:
data['FFTCorrFactor'] = np.ones(data['Nc'])
for c in range(data['Nc']):
text = 'asCoilSelectMeas[0].aFFT_SCALE[%d].flFactor' % c
data['FFTCorrFactor'][c] = float(get_val_by_text(root,text).text)
## For PC Angio
data['Nset'] = 1
text = 'sAngio.ucPCFlowMode'
data['PCMRAFlag'] = int(get_val_by_text(root,text).text,16) if my_xmlstr.find(text) > 0 else 0
if data['PCMRAFlag'] is 1:
text = 'sAngio.sFlowArray.lSize'
if my_xmlstr.find(text) < 0:
data['Nset'] = int(get_val_by_text(root,text).text)
## Recalculation of partial Fourier factors and EPI/turbo factor for EPI and TSE
data['fractionPE'] = float(data['Ny'])/float(data['NyAll'])
data['fraction3D'] = float(data['Nz'])/float(data['NzAll'])
data['EPIFactor'] = np.around(data['fractionPE']*data['EPIFactor'])
data['nEPITrain'] = data['Ny']/data['EPIFactor']
data['turboFactor'] = np.around(data['fractionPE']*data['turboFactor'])
data['nTSETrain'] = data['Ny']/data['turboFactor']
data['Nc0'] = data['Nc']
data['Nc'] = 1 if readOneCoil is True else data['Nc0']
## Calculation of the number of valid k-space readouts and k-space data matrix dimensions
if data['PATMode'] is 1:
data['nReadout'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['Ny'])
elif (data['PATMode'] is 2) and (data['PATRefScanMode'] is 2):
if (data['Ny'] % 2) is 1:
data['NyPAT'] = (data['Ny'] - 1 + data['nRefLinesPE']*(data['AccelFactorPE'] - 1))/data['AccelFactorPE']
else:
data['NyPAT'] = np.floor((data['Ny'] + data['nRefLinesPE']*(data['AccelFactorPE'] - 1))/data['AccelFactorPE'])
data['nReadout'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['NyPAT'])
if removeOS is True:
data['kSpace'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast'],
data['Nsl'],
data['Nc'],
data['Nz'],
data['Nx'],
data['Ny']), dtype=np.complex64)
else:
data['kSpace'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast'],
data['Nsl'],
data['Nc'],
data['Nz'],
data['NxOS'],
data['Ny']), dtype=np.complex64)
if (readPhaseCorInfo is True) and (data['nPhCorScan'] > 0):
data['kPhaseCor'] = np.zeros((
data['nPhCorScan'],
data['nPhCorEcho'],
data['Nsl'],
data['nRepetition'],
data['Nc'],
data['NxOS']), dtype=np.float32)
if readNavigator is True:
data['nNavigator'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['nEPITrain'] \
*data['nNavEK'])
data['kNavigator'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast']*nNavEK,
data['Nsl'],
data['Nc'],
data['Nz'],
data['nEPITrain'],
data['NxOS']), dtype=np.float32)
if readTimeStamp is True:
data['nTimeStamp'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nz'])
data['timeStamp'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast']*nNavEK,
data['Nz']), dtype=np.float32)
## Data Readout and Reordering
# Read k-space data
data['noiseMeasCounter'] = 0
data['noiseMeas'] = np.zeros((data['NxOS'],data['Nc']),dtype=np.complex64)
data['navigatorPrep'] = 0
data['LineN'] = -1
data['Ndr'] = 1
for r in range(data['Ndr']):
data['xCoil'] = r
with open(filename,'rb') as f:
readFlag = True
skipField = 0
countNavigator = 0
navigatorDataON = False
temp1 = np.zeros(data['NxOS'])
data['dataField'] = np.fromfile(f,dtype=np.int32,count=1)[0]
f.seek(data['dataField'],os.SEEK_SET)
while readFlag is True:
if readTimeStamp is True:
f.seek(12,os.SEEK_CUR)
data['timeS'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
f.seek(4,os.SEEK_CUR)
else:
f.seek(20,os.SEEK_CUR)
data['evalMask1'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
data['evalMask2'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
flag = [(32 - m.start()) for m in re.finditer('1',np.binary_repr(data['evalMask1'],32))]
# Tuples: (key,dtype,afun)
vals = [ ('Nxr',None,None),
('Ncr',None,None),
('Line',None,None),
('Acquisition',None,None),
('Slice',None,None),
('Partition',None,None),
('Echo',None,None),
('Phase',None,None),
('Repetition',None,None),
('Set',None,None),
('CutOffDataPre',None,lambda:f.seek(12,os.SEEK_CUR)),
('CutOffDataPost',None,None),
('KSpaceCentreColumn',None,None),
('CoilMode',None,None),
('ReadOutOffCentre',np.float32,None),
('KSpaceCentreLineNo',None,lambda:f.seek(4,os.SEEK_CUR)),
('KSpaceCentrePartitionNo',None,None),
('Channel',None,lambda:f.seek(44,os.SEEK_CUR)) ]
for tup in vals:
t = np.uint16 if tup[1] is None else tup[1]
if hasattr(tup[2], '__call__'):
tup[2]()
data[tup[0]] = np.fromfile(f,dtype=t,count=1)[0]
f.seek(2,os.SEEK_CUR)
if 1 in flag:
break
if any([k for k in [2,22,26] if k in flag]):
if (22 in flag) and (readPhaseCorInfo is False):
skipField = data['nPhCorScan']*data['nPhCorEcho']*data['Ncr']*(localHeader + 8*data['Nxr']) - localHeader
if (22 in flag) and (readPhaseCorInfo is True):
skipField = -localHeader
f.seek(skipField,os.SEEK_CUR)
skipField = 0
for m in range(data['nPhCorScan']*data['nPhCorEcho']*data['Ncr']):
infoMDH_TimeStamp = readMDH_TimeStamp_VB13(f)
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
data['kPhaseCor'][data['Echo'],int(np.ceil(m/data['Ncr'])),data['Slice'],data['Repetition'],data['Channel'],:] = (temp[0::2] + 1j*temp[1::2]).astype(np.float32)
if (2 in flag) and (readNavigator is False):
skipField = data['Ncr']*(localHeader + 8*data['Nxr']) - localHeader
if (2 in flag) and (readNavigator is True):
if (countNavigator == 0) and (data['navigatorPrep'] == 0):
kNavigator = np.zeros((
data['Nxr'],
data['Ncr'],
data['nContrast']*nNavEK,
data['nEPITrain'],
data['Nz'],
data['Nsl'],
data['nAverage'],
data['nPhase'],
data['nRepetition']),dtype=np.float32)
kNavigatorTemp = np.zeros((
data['Nxr'],
data['Ncr'],
data['nContrast']*nNavEK),dtype=np.float32)
data['navigatorPrep'] = 1
skipField = -localHeader
f.seek(skipField,os.SEEK_CUR)
skipField = 0
for m in range(nNavEK*data['Ncr']):
infoMDH_TimeStamp = readMDH_TimeStamp_VB13(f)
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
kNavigatorTemp[:,data['Channel'],data['Echo']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
navigatorDataON = True
if 26 in flag:
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
data['noiseMeas'][:,data['Channel']] = temp[0::2] + 1j*temp[1::2]
f.seek(skipField,os.SEEK_CUR)
skipField = 0
else:
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
if 11 in flag:
temp = data['CorrFactor'][data['Channel']]*temp
temp = data['FFTCorrFactor'][data['Channel']]*temp
if readOneCoil is False:
if removeOS is True:
temp1[len(temp1) - data['Nxr']:] = temp[0::2] + 1j*temp[1::2]
# FFT to image space, crop the central Nx samples to drop the oversampled
# FOV, then inverse FFT back to k-space (fft/ifft, as in the MATLAB original)
tempX = np.fft.fftshift(np.fft.fft(np.fft.fftshift(temp1)))
idx0 = int(np.around((data['NxOS'] - data['Nx'])/2))
tempK = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(tempX[idx0:data['Nx'] + idx0])))
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],data['Channel'],data['Partition'],:,data['Line']] = tempK.astype(np.complex64)
else:
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],data['Channel'],data['Partition'],data['kSpace'].shape[7] - data['Nxr']:data['kSpace'].shape[7],data['Line']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
elif (readOneCoil is True) and (data['Channel']+1 == coilIndex):
if removeOS is True:
temp1[len(temp1) - data['Nxr']:] = temp[0::2] + 1j*temp[1::2]
tempX = np.fft.fftshift(np.fft.fft(np.fft.fftshift(temp1)))
idx0 = int(np.around((data['NxOS'] - data['Nx'])/2))
tempK = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(tempX[idx0:data['Nx'] + idx0])))
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],0,data['Partition'],:,data['Line']] = tempK.astype(np.complex64)
else:
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],0,data['Partition'],data['kSpace'].shape[7] - data['Nxr']:,data['Line']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
if (readTimeStamp is True) and (data['Channel'] == 0) and (navigatorDataON is True):
data['EPITrain'] = countNavigator % data['nEPITrain']
data['timeStamp'][data['Echo'],data['EPITrain'],data['Partition'],data['Slice'],data['Acquisition'],data['Phase'],data['Repetition']] = np.float32(0.0025*data['timeS'])  # MDH time stamps tick every 2.5 ms
if (readNavigator is True) and (data['Channel'] == 0) and (navigatorDataON is True):
data['EPITrain'] = countNavigator % data['nEPITrain']
kNavigator[:,:,:,data['EPITrain'],data['Partition'],data['Slice'],data['Acquisition'],data['Phase'],data['Repetition']] = kNavigatorTemp.astype(np.complex64)
navigatorDataON = False
countNavigator += 1
if 1 in flag:
break
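# Drop singleton dimensions, then reorder so the readout and phase-encode
# axes lead: (kx, ky, ...) with slice/coil/contrast axes following. The
# transpose tables below assume squeeze has removed the unused dimensions.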
data['kSpace'] = np.squeeze(data['kSpace'])
if len(data['kSpace'].shape) == 3:
data['kSpace'] = np.transpose(data['kSpace'],[1,2,0])
elif len(data['kSpace'].shape) == 4:
if data['flag3D'] is False:
data['kSpace'] = np.transpose(data['kSpace'],[2,3,0,1])
else:
data['kSpace'] = np.transpose(data['kSpace'],[2,3,1,0])
elif len(data['kSpace'].shape) == 5:
data['kSpace'] = np.transpose(data['kSpace'],[3,4,2,0,1])
if transformToImageSpace is True:
if data['flag3D'] is True:
data['imSpace'] = np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(data['kSpace'],axes=(0,1,2)),axes=(0,1,2)),axes=(0,1,2))
data['imSpace'] *= data['Nz']  # rescale for numpy's 1/N ifft factor along the partition axis
else:
data['imSpace'] = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(data['kSpace'],axes=(0,1)),axes=(0,1)),axes=(0,1))
data['imSpace'] *= data['NxOS']  # rescale for numpy's 1/N ifft factor along readout
data['imSpace'] *= data['Ny']  # and along phase encode
if (removeOSafter is True) and (removeOS is False):
# drop the oversampled quarter at each end of the readout (first) axis;
# Ellipsis indexing covers the 2-D through 5-D cases uniformly
nCut = data['NxOS']//4
data['imSpace'] = data['imSpace'][nCut:data['imSpace'].shape[0] - nCut,...]
if writeToFile is True:
if transformToImageSpace is True:
if npz:
np.savez_compressed(filenameOut,imSpace=data['imSpace'],timeStamp=data['timeStamp'])
else:
with h5py.File('%s.hdf5' % filenameOut,'w') as f:
dset = f.create_dataset('imSpace',data=data['imSpace'])
else:
if npz:
np.savez_compressed(filenameOut,kSpace=data['kSpace'],timeStamp=data['timeStamp'])
else:
with h5py.File('%s.hdf5' % filenameOut,'w') as f:
dset = f.create_dataset('kSpace',data=data['kSpace'])
return data | python | def readMeasDataVB15(filename,
resetFFTscale=False,
readOneCoil=False,
readPhaseCorInfo=False,
readNavigator=False,
readTimeStamp=True,
nNavEK=False,
removeOS=False,
removeOSafter=False,
transformToImageSpace=False,
writeToFile=False,
npz=False):
"""Read raw data from Siemens MRI scanners with IDEA VB15.
Returns an array of measured k-space data read from a raw data file
produced by a Siemens MRI scanner running IDEA VB15. If the option
'-I' is used, then image-space data is returned instead.
Usage:
readMeasDataVB15 filename [ -t ] [ -rfft ] [ -r1 ] [ -rp ] [ -rn ]
[ -skipts ] [ -nnavek ] [ -ros ]
[ -rosa ] [ -I ] [ -w ] [-npz]
Examples:
python3 -m rawdatarinator.readMeasDataVB15 raw.dat -w
or using the shortened alias...
python3 -m rawdatarinator.raw raw.dat -w
Command-line Options:
filename Filename of file containing raw measurements.
-rfft (resetFFTscale)
Resets FFTscale and DataCorrection for each coil
to 1.
-r1 (readOneCoil)
Read measurement data from from individual coil.
-rp (readPhaseCorInfo)
_
-rn (readNavigator)
_
-skipts (skip readTimeStamp)
_
-nnavek (nNavEK)
_
-ros (removeOS)
Flag to remove oversampling (OS) in the x
direction. removeOS=True is more efficient as it
processes each readout line independently,
reducing the memory required to hold all of the
measured data.
-rosa (removeOSafter)
Flag to remove oversampling (OS) in the x
direction. This works in image space, cutting FOV.
Not likely a good idea for radial.
-I (transformToImageSpace)
Produce image space representation. Note that
there is no correction for partial Fourier or
parallel imaging k-space undersampling. This
version of the code only uses numpy's FFT operations.
-w (writeToFile)
Save k-space or image space volume. Currently the
output filename is auto generated.
-npz (npz)
Save k-space or image space volume using the .npz
file extension. Default is to use hdf5 file
standard.
-h (help)
Displays this documentation.
"""
filename_temp = os.path.splitext(filename)[0]
if transformToImageSpace is False:
filenameOut = '%s_Kspace' % filename_temp
else:
filenameOut = '%s_imageSpace' % filename_temp
# Useful Parameters
globalHeader = 32
localHeader = 128
infoParser = InfoParser()
xmlstr = infoParser.raw2xml(filename)
# Start in MeasYaps. MeasYaps starts with <value mod="MeasYaps"> and ends at
# the next XProtocol mod='Phoenix'
startIdx = xmlstr.find('<value mod="MeasYaps">')
endIdx = xmlstr.find('<XProtocol mod="Phoenix">')
my_xmlstr = '<MeasYaps>' + xmlstr[startIdx:endIdx] + '</MeasYaps>' # add root
# Parse into XML
root = ET.fromstring(my_xmlstr)
# Used to decode Partial Fourier fractions (poor man's switch)
fractionDict = { 10: 1.0,
8 : 7./8.,
4 : 0.75,
2 : 5./8.,
1 : 0.5 }
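# Example: a stored sKSpace.PhasePartialFourier code of 4 decodes to 0.75,
# i.e. 6/8 partial Fourier coverage along the phase-encode direction.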
# vals are tuples: (key,search,default,lambda)
# key => key to the dictionary entry
# search => string to search the xml document with
# default => if not found in xml doc, use this value
# lambda => whatever value you end up with, operate on it with this
# anonymous function.
vals = [
('ScanDimension','sKSpace.ucDimension',None,lambda x:int(x,16)),
('flag3D',None,None,lambda _:data['ScanDimension'] == 4),
('NxAll','sKSpace.lBaseResolution',None,lambda x:int(x)),
('NyAll','sKSpace.lPhaseEncodingLines',None,lambda x:int(x)),
('OSfactorPE','sKSpace.dPhaseOversamplingForDialog',None,lambda x:1.0 if my_xmlstr.find('sKSpace.dPhaseOversamplingForDialog') < 0 else 1.0+float(x)),
('NzAll','sKSpace.lPartitions',None,lambda x:int(x)),
('OSfactor3D','sKSpace.dSliceOversamplingForDialog',None,lambda x:1.0 if my_xmlstr.find('sKSpace.dSliceOversamplingForDialog') < 0 else 1.0+float(x)),
('phaseResolution','sKSpace.dPhaseResolution',None,lambda x:float(x)),
('sliceResolution','sKSpace.dSliceResolution',None,lambda x:float(x)),
('Nsl','sSliceArray.lSize',None,lambda x:int(x)),
('Nconc','sSliceArray.lConc',None,lambda x:int(x)),
('Nc',None,None,lambda _:len(re.findall(r'\.lRxChannelConnected',my_xmlstr))),
('Nc',None,None,lambda _:data['Nc']-1 if my_xmlstr.find('AdjustSeq%/AdjCoilSensSeq') > 0 else data['Nc']),
('nContrast','lContrasts',1,lambda x:int(x)),
('nSet','lSets',1,lambda x:int(x)),
('nAverage','lAverages',1,lambda x:int(x)),
('nRepetition','lRepetitions',None,lambda x:1 if my_xmlstr.find('lRepetitions') < 0 else 1+int(x)),
('nPhase','sPhysioImaging.lPhases',1,lambda x:int(x)),
('fractionY','sKSpace.PhasePartialFourier',10,lambda x: fractionDict[int(x)]),
('fractionZ','sKSpace.SlicePartialFourier',10,lambda x: fractionDict[int(x)]),
('phasePartialFourierForSNR','sKSpace.dSeqPhasePartialFourierForSNR',1.0,lambda x:float(x)),
('EPIFactor','sFastImaging.lEPIFactor',None,lambda x:int(x)),
('turboFactor','sFastImaging.lTurboFactor',1,lambda x:int(x)),
('PATMode','sPat.ucPATMode',None,lambda x:int(x,16)),
('PATRefScanMode','sPat.ucRefScanMode',None,lambda x:int(x,16)),
('AccelFactorPE','sPat.lAccelFactPE',None,lambda x:int(x)),
('AccelFactor3D','sPat.lAccelFact3D',None,lambda x:int(x)),
('nRefLinesPE','sPat.lRefLinesPE',None,lambda x:0 if data['AccelFactorPE'] == 1 else int(x)),
('nRefLines3D','sPat.lRefLines3D',None,lambda x:0 if data['AccelFactor3D'] == 1 else int(x)) ]
# Evaluate all tuples
data = dict() # dictionary to store all values we want to compare with MATLAB
for tup in vals:
if tup[1] is not None:
idx = my_xmlstr.find(tup[1])
else:
idx = -1
if idx < 0:
val = tup[2] # Take the default if we can't find it
else:
val = get_val_by_text(root,tup[1]).text
afun = tup[3]
if afun is not None:
val = afun(val) # Evaluate anonymous function if provided
data[tup[0]] = val # Store the value in the dictionary
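# Worked example: for ('NxAll','sKSpace.lBaseResolution',None,int cast), the
# loop finds 'sKSpace.lBaseResolution' in the MeasYaps XML, takes its text,
# applies the lambda, and stores the integer under data['NxAll'].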
## Now use the whole xml document
root = ET.fromstring(xmlstr)
# Enforce a max value for Nc
Nct = get_yaps_by_name(root,'iMaxNoOfRxChannels',lambda x:int(x))
if Nct is not None:
data['Nct'] = Nct
if Nct < data['Nc']:
data['Nc'] = Nct
# If this exists, then we'll need it
nPhCorrScan = get_yaps_by_name(root,'lNoOfPhaseCorrScans',lambda x:int(x))
if nPhCorrScan is not None:
data['nPhCorScan'] = nPhCorrScan
# Define some more variables
if data['turboFactor'] > 1:
data['nPhCorEcho'] = 1
data['nPhCorScan'] = 1
if data['EPIFactor'] > 1:
data['nPhCorScan'] = 1
data['nPhCorEcho'] = 3
if data['AccelFactorPE'] == 1:
data['FirstFourierLine'] = 1
data['FirstRefLine'] = 1
else:
data['FirstFourierLine'] = get_yaps_by_name(root,'lFirstFourierLine',lambda x:1+int(x),1)
data['FirstRefLine'] = get_yaps_by_name(root,'lFirstRefLine',lambda x:1+int(x),1)
if data['AccelFactor3D'] == 1:
data['FirstFourierPartition'] = 1
data['FirstRefPartition'] = 1
else:
data['FirstFourierPartition'] = get_yaps_by_name(root,'lFirstFourierPartition',lambda x:1+int(x),1)
data['FirstRefPartition'] = get_yaps_by_name(root,'lFirstRefPartition',lambda x:1+int(x),1)
data['NxOS'] = get_yaps_by_name(root,'iNoOfFourierColumns',lambda x:int(x))
data['OSfactorRO'] = 2 # also available in YAPS, but the MATLAB implementation hard-codes it as a magic number
data['Nx'] = int(np.around(data['NxOS']/data['OSfactorRO']))
data['Ny'] = int(root.find("ParamFunctor[@name='FillMiniHeaderData']/ParamLong[@name='NoOfFourierLines']/value").text)
data['Nz'] = get_yaps_by_name(root,'iNoOfFourierPartitions',lambda x:int(x))
data['NxRecon'] = get_yaps_by_name(root,'iRoFTLength',lambda x:int(x))
data['NyRecon'] = get_yaps_by_name(root,'iPEFTLength',lambda x:int(x))
data['NzRecon'] = 1 if data['Nz'] == 1 else get_yaps_by_name(root,'i3DFTLength',lambda x:int(x))
data['NslicePerConcat'] = get_yaps_by_name(root,'ushSlicePerConcat',lambda x:int(x))
## Partial Fourier Mode and Parameters
data['PCAlgorithm'] = get_yaps_by_name(root,'lPCAlgorithm',lambda x:int(x))
data['NoOfPhaseCorrColumns'] = get_yaps_by_name(root,'lNoOfPhaseCorrColumns',lambda x:int(x),data['Nx']//2)
data['NoOfPhaseCorrLines'] = get_yaps_by_name(root,'lNoOfPhaseCorrLines',lambda x:int(x),data['NyAll']//2)
data['NoOfPhaseCorrPartitions'] = get_yaps_by_name(root,'lNoOfPhaseCorrPartitions',lambda x:int(x),data['NzAll']//2)
data['ColSlopeLength'] = get_yaps_by_name(root,'lColSlopeLength',lambda x:int(x),data['Nx']//4)
# There appears to be a mistake in the MATLAB code here: ColSlopeLength is
# defined a second time where LinSlopeLength was presumably intended. The
# commented line is how I think it should read; the value is never used, so
# the MATLAB behavior is preserved.
# data['LinSlopeLength'] = get_yaps_by_name(root,'lLinSlopeLength',lambda x:int(x),data['NyAll']//4)
data['ColSlopeLength'] = get_yaps_by_name(root,'lLinSlopeLength',lambda x:int(x),data['NyAll']//4)
data['ParSlopeLength'] = get_yaps_by_name(root,'lParSlopeLength',lambda x:int(x),data['NzAll']//4)
## Raw data correction factors, use the MeasYaps portion of the xml document
root = ET.fromstring(my_xmlstr)
data['CorrFactor'] = np.ones(data['Nc'],dtype=np.complex64)  # may acquire an imaginary part below
for c in range(data['Nc']):
text = 'axRawDataCorrectionFactor[0][%d].dRe' % c
data['CorrFactor'][c] = 1 if my_xmlstr.find(text) < 0 else float(get_val_by_text(root,text).text)
text = 'axRawDataCorrectionFactor[0][%d].dIm' % c
if my_xmlstr.find(text) >= 0:
data['CorrFactor'][c] = data['CorrFactor'][c] + 1j*float(get_val_by_text(root,text).text)
## FFT Correction Factors
data['FFTCorrFactor'] = np.ones(data['Nc'])
if resetFFTscale is False:
data['FFTCorrFactor'] = np.ones(data['Nc'])
for c in range(data['Nc']):
text = 'asCoilSelectMeas[0].aFFT_SCALE[%d].flFactor' % c
data['FFTCorrFactor'][c] = float(get_val_by_text(root,text).text)
## For PC Angio
data['Nset'] = 1
text = 'sAngio.ucPCFlowMode'
data['PCMRAFlag'] = int(get_val_by_text(root,text).text,16) if my_xmlstr.find(text) > 0 else 0
if data['PCMRAFlag'] == 1:
text = 'sAngio.sFlowArray.lSize'
if my_xmlstr.find(text) >= 0:
data['Nset'] = int(get_val_by_text(root,text).text)
## Recalculation of partial Fourier factors and EPI/turbo factor for EPI and TSE
data['fractionPE'] = float(data['Ny'])/float(data['NyAll'])
data['fraction3D'] = float(data['Nz'])/float(data['NzAll'])
# these feed array shapes and modulo arithmetic below, so keep them integers
data['EPIFactor'] = int(np.around(data['fractionPE']*data['EPIFactor']))
data['nEPITrain'] = data['Ny']//data['EPIFactor']
data['turboFactor'] = int(np.around(data['fractionPE']*data['turboFactor']))
data['nTSETrain'] = data['Ny']//data['turboFactor']
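# Example: Ny = 128 phase-encode lines with an effective EPIFactor of 64
# gives nEPITrain = 2 echo trains per image.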
data['Nc0'] = data['Nc']
data['Nc'] = 1 if readOneCoil is True else data['Nc0']
## Calculation of the number of valid k-space readouts and k-space data matrix dimensions
if data['PATMode'] == 1:
data['nReadout'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['Ny'])
elif (data['PATMode'] == 2) and (data['PATRefScanMode'] == 2):
if (data['Ny'] % 2) == 1:
data['NyPAT'] = (data['Ny'] - 1 + data['nRefLinesPE']*(data['AccelFactorPE'] - 1))//data['AccelFactorPE']
else:
data['NyPAT'] = int(np.floor((data['Ny'] + data['nRefLinesPE']*(data['AccelFactorPE'] - 1))/data['AccelFactorPE']))
data['nReadout'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['NyPAT'])
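# Example for the even-Ny branch above: Ny = 128, AccelFactorPE = 2 and
# nRefLinesPE = 24 give NyPAT = floor((128 + 24*(2-1))/2) = 76 acquired lines.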
if removeOS is True:
data['kSpace'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast'],
data['Nsl'],
data['Nc'],
data['Nz'],
data['Nx'],
data['Ny']), dtype=np.complex64)
else:
data['kSpace'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast'],
data['Nsl'],
data['Nc'],
data['Nz'],
data['NxOS'],
data['Ny']), dtype=np.complex64)
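# kSpace axis order, matching the zeros() calls above: (average, cardiac
# phase, repetition, contrast, slice, coil, partition, readout sample,
# phase-encode line).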
if (readPhaseCorInfo is True) and (data['nPhCorScan'] > 0):
data['kPhaseCor'] = np.zeros((
data['nPhCorScan'],
data['nPhCorEcho'],
data['Nsl'],
data['nRepetition'],
data['Nc'],
data['NxOS']), dtype=np.complex64)  # phase-correction lines are complex
if readNavigator is True:
data['nNavigator'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nsl']* \
data['Nz']* \
data['Nc']* \
data['nEPITrain']* \
nNavEK)
data['kNavigator'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast']*nNavEK,
data['Nsl'],
data['Nc'],
data['Nz'],
data['nEPITrain'],
data['NxOS']), dtype=np.complex64)  # navigator samples are complex (assumed; matches the buffers filled below)
if readTimeStamp is True:
data['nTimeStamp'] = (
data['nAverage']* \
data['nPhase']* \
data['nRepetition']* \
data['nContrast']* \
data['Nz'])
data['timeStamp'] = np.zeros((
data['nAverage'],
data['nPhase'],
data['nRepetition'],
data['nContrast']*nNavEK,
data['Nz']), dtype=np.float32)
## Data Readout and Reordering
# Read k-space data
data['noiseMeasCounter'] = 0
data['noiseMeas'] = np.zeros((data['NxOS'],data['Nc']),dtype=np.complex64)
data['navigatorPrep'] = 0
data['LineN'] = -1
data['Ndr'] = 1
for r in range(data['Ndr']):
data['xCoil'] = r
with open(filename,'rb') as f:
readFlag = True
skipField = 0
countNavigator = 0
navigatorDataON = False
temp1 = np.zeros(data['NxOS'],dtype=np.complex64)  # holds one complex readout line
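# The first int32 of the raw file is the byte offset at which measurement
# data begins (the length of the global file header); seek straight there.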
data['dataField'] = np.fromfile(f,dtype=np.int32,count=1)[0]
f.seek(data['dataField'],os.SEEK_SET)
while readFlag is True:
if readTimeStamp is True:
f.seek(12,os.SEEK_CUR)
data['timeS'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
f.seek(4,os.SEEK_CUR)
else:
f.seek(20,os.SEEK_CUR)
data['evalMask1'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
data['evalMask2'] = np.fromfile(f,dtype=np.uint32,count=1)[0]
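# evalMask1 is the 32-bit MDH evaluation mask for this readout. binary_repr
# lists bits MSB-first, so (32 - m.start()) turns each set bit into a 1-based
# flag number counted from the LSB (flag n corresponds to bit n-1). Under the
# usual Siemens VB MDH convention (an assumption here): flag 1 = ACQEND,
# flag 2 = real-time feedback/navigator, flag 22 = phase-correction scan,
# flag 25 = reflected (reversed) readout, flag 26 = noise-adjust scan.
# Example: evalMask1 == 0b10 sets only bit 1, so flag == [2].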
flag = [(32 - m.start()) for m in re.finditer('1',np.binary_repr(data['evalMask1'],32))]
# Tuples: (key,dtype,afun)
vals = [ ('Nxr',None,None),
('Ncr',None,None),
('Line',None,None),
('Acquisition',None,None),
('Slice',None,None),
('Partition',None,None),
('Echo',None,None),
('Phase',None,None),
('Repetition',None,None),
('Set',None,None),
('CutOffDataPre',None,lambda:f.seek(12,os.SEEK_CUR)),
('CutOffDataPost',None,None),
('KSpaceCentreColumn',None,None),
('CoilMode',None,None),
('ReadOutOffCentre',np.float32,None),
('KSpaceCentreLineNo',None,lambda:f.seek(4,os.SEEK_CUR)),
('KSpaceCentrePartitionNo',None,None),
('Channel',None,lambda:f.seek(44,os.SEEK_CUR)) ]
for tup in vals:
t = np.uint16 if tup[1] is None else tup[1]
if hasattr(tup[2], '__call__'):
tup[2]()
data[tup[0]] = np.fromfile(f,dtype=t,count=1)[0]
f.seek(2,os.SEEK_CUR)
if 1 in flag:
break
if any(k in flag for k in (2, 22, 26)):
if (22 in flag) and (readPhaseCorInfo is False):
skipField = data['nPhCorScan']*data['nPhCorEcho']*data['Ncr']*(localHeader + 8*data['Nxr']) - localHeader
if (22 in flag) and (readPhaseCorInfo is True):
skipField = -localHeader
f.seek(skipField,os.SEEK_CUR)
skipField = 0
for m in range(data['nPhCorScan']*data['nPhCorEcho']*data['Ncr']):
infoMDH_TimeStamp = readMDH_TimeStamp_VB13(f)
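# Each line is stored as 2*Nxr float32 values, real/imaginary interleaved;
# temp[0::2] + 1j*temp[1::2] reassembles the complex readout. Flag 25 marks
# lines acquired in the reverse readout direction, so both halves are flipped.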
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
data['kPhaseCor'][data['Echo'],m//data['Ncr'],data['Slice'],data['Repetition'],data['Channel'],:] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
if (2 in flag) and (readNavigator is False):
skipField = data['Ncr']*(localHeader + 8*data['Nxr']) - localHeader
if (2 in flag) and (readNavigator is True):
if (countNavigator == 0) and (data['navigatorPrep'] == 0):
kNavigator = np.zeros((
data['Nxr'],
data['Ncr'],
data['nContrast']*nNavEK,
data['nEPITrain'],
data['Nz'],
data['Nsl'],
data['nAverage'],
data['nPhase'],
data['nRepetition']),dtype=np.complex64)
kNavigatorTemp = np.zeros((
data['Nxr'],
data['Ncr'],
data['nContrast']*nNavEK),dtype=np.complex64)
data['navigatorPrep'] = 1
skipField = -localHeader
f.seek(skipField,os.SEEK_CUR)
skipField = 0
for m in range(nNavEK*data['Ncr']):
infoMDH_TimeStamp = readMDH_TimeStamp_VB13(f)
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
kNavigatorTemp[:,data['Channel'],data['Echo']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
navigatorDataON = True
if 26 in flag:
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
data['noiseMeas'][:,data['Channel']] = temp[0::2] + 1j*temp[1::2]
f.seek(skipField,os.SEEK_CUR)
skipField = 0
else:
temp = np.fromfile(f,dtype=np.float32,count=2*data['Nxr'])
if 25 in flag:
temp[0::2] = np.flipud(temp[0::2])
temp[1::2] = np.flipud(temp[1::2])
if data['CutOffDataPre'] > 0:
temp[0:2*data['CutOffDataPre']] = 0
if data['CutOffDataPost'] > 0:
temp[len(temp) - 2*data['CutOffDataPost']:] = 0
if 11 in flag:
temp = data['CorrFactor'][data['Channel']]*temp
temp = data['FFTCorrFactor'][data['Channel']]*temp
if readOneCoil is False:
if removeOS is True:
temp1[len(temp1) - data['Nxr']:] = temp[0::2] + 1j*temp[1::2]
# FFT to image space, crop the central Nx samples to drop the oversampled
# FOV, then inverse FFT back to k-space (fft/ifft, as in the MATLAB original)
tempX = np.fft.fftshift(np.fft.fft(np.fft.fftshift(temp1)))
idx0 = int(np.around((data['NxOS'] - data['Nx'])/2))
tempK = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(tempX[idx0:data['Nx'] + idx0])))
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],data['Channel'],data['Partition'],:,data['Line']] = tempK.astype(np.complex64)
else:
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],data['Channel'],data['Partition'],data['kSpace'].shape[7] - data['Nxr']:data['kSpace'].shape[7],data['Line']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
elif (readOneCoil is True) and (data['Channel']+1 == coilIndex):
if removeOS is True:
temp1[len(temp1) - data['Nxr']:] = temp[0::2] + 1j*temp[1::2]
tempX = np.fft.fftshift(np.fft.fft(np.fft.fftshift(temp1)))
idx0 = int(np.around((data['NxOS'] - data['Nx'])/2))
tempK = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(tempX[idx0:data['Nx'] + idx0])))
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],0,data['Partition'],:,data['Line']] = tempK.astype(np.complex64)
else:
data['kSpace'][data['Acquisition'],data['Phase'],data['Repetition'],data['Echo'],data['Slice'],0,data['Partition'],data['kSpace'].shape[7] - data['Nxr']:,data['Line']] = (temp[0::2] + 1j*temp[1::2]).astype(np.complex64)
if (readTimeStamp is True) and (data['Channel'] == 0) and (navigatorDataON is True):
data['EPITrain'] = countNavigator % data['nEPITrain']
data['timeStamp'][data['Echo'],data['EPITrain'],data['Partition'],data['Slice'],data['Acquisition'],data['Phase'],data['Repetition']] = np.float32(0.0025*data['timeS'])  # MDH time stamps tick every 2.5 ms
if (readNavigator is True) and (data['Channel'] == 0) and (navigatorDataON is True):
data['EPITrain'] = countNavigator % data['nEPITrain']
kNavigator[:,:,:,data['EPITrain'],data['Partition'],data['Slice'],data['Acquisition'],data['Phase'],data['Repetition']] = kNavigatorTemp.astype(np.complex64)
navigatorDataON = False
countNavigator += 1
if 1 in flag:
break
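# Drop singleton dimensions, then reorder so the readout and phase-encode
# axes lead: (kx, ky, ...) with slice/coil/contrast axes following. The
# transpose tables below assume squeeze has removed the unused dimensions.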
data['kSpace'] = np.squeeze(data['kSpace'])
if len(data['kSpace'].shape) == 3:
data['kSpace'] = np.transpose(data['kSpace'],[1,2,0])
elif len(data['kSpace'].shape) == 4:
if data['flag3D'] is False:
data['kSpace'] = np.transpose(data['kSpace'],[2,3,0,1])
else:
data['kSpace'] = np.transpose(data['kSpace'],[2,3,1,0])
elif len(data['kSpace'].shape) == 5:
data['kSpace'] = np.transpose(data['kSpace'],[3,4,2,0,1])
if transformToImageSpace is True:
if data['flag3D'] is True:
data['imSpace'] = np.fft.ifftshift(np.fft.ifftn(np.fft.fftshift(data['kSpace'],axes=(0,1,2)),axes=(0,1,2)),axes=(0,1,2))
data['imSpace'] *= data['Nz']  # rescale for numpy's 1/N ifft factor along the partition axis
else:
data['imSpace'] = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(data['kSpace'],axes=(0,1)),axes=(0,1)),axes=(0,1))
data['imSpace'] *= data['NxOS']  # rescale for numpy's 1/N ifft factor along readout
data['imSpace'] *= data['Ny']  # and along phase encode
if (removeOSafter is True) and (removeOS is False):
# drop the oversampled quarter at each end of the readout (first) axis;
# Ellipsis indexing covers the 2-D through 5-D cases uniformly
nCut = data['NxOS']//4
data['imSpace'] = data['imSpace'][nCut:data['imSpace'].shape[0] - nCut,...]
if writeToFile is True:
if transformToImageSpace is True:
if npz:
np.savez_compressed(filenameOut,imSpace=data['imSpace'],timeStamp=data['timeStamp'])
else:
with h5py.File('%s.hdf5' % filenameOut,'w') as f:
dset = f.create_dataset('imSpace',data=data['imSpace'])
else:
if npz:
np.savez_compressed(filenameOut,kSpace=data['kSpace'],timeStamp=data['timeStamp'])
else:
with h5py.File('%s.hdf5' % filenameOut,'w') as f:
dset = f.create_dataset('kSpace',data=data['kSpace'])
return data | [
"def",
"readMeasDataVB15",
"(",
"filename",
",",
"resetFFTscale",
"=",
"False",
",",
"readOneCoil",
"=",
"False",
",",
"readPhaseCorInfo",
"=",
"False",
",",
"readNavigator",
"=",
"False",
",",
"readTimeStamp",
"=",
"True",
",",
"nNavEK",
"=",
"False",
",",
"removeOS",
"=",
"False",
",",
"removeOSafter",
"=",
"False",
",",
"transformToImageSpace",
"=",
"False",
",",
"writeToFile",
"=",
"False",
",",
"npz",
"=",
"False",
")",
":",
"filename_temp",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"if",
"transformToImageSpace",
"is",
"False",
":",
"filenameOut",
"=",
"'%s_Kspace'",
"%",
"filename_temp",
"else",
":",
"filenameOut",
"=",
"'%s_imageSpace'",
"%",
"filename_temp",
"# Useful Parameters",
"globalHeader",
"=",
"32",
"localHeader",
"=",
"128",
"infoParser",
"=",
"InfoParser",
"(",
")",
"xmlstr",
"=",
"infoParser",
".",
"raw2xml",
"(",
"filename",
")",
"# Start in MeasYaps. MeasYaps starts with <value mod=\"MeasYaps\"> and ends at",
"# the next XProtocol mod='Phoenix'",
"startIdx",
"=",
"xmlstr",
".",
"find",
"(",
"'<value mod=\"MeasYaps\">'",
")",
"endIdx",
"=",
"xmlstr",
".",
"find",
"(",
"'<XProtocol mod=\"Phoenix\">'",
")",
"my_xmlstr",
"=",
"'<MeasYaps>'",
"+",
"xmlstr",
"[",
"startIdx",
":",
"endIdx",
"]",
"+",
"'</MeasYaps>'",
"# add root",
"# Parse into XML",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"my_xmlstr",
")",
"# Used to decode Partial Fourier fractions (poor man's switch)",
"fractionDict",
"=",
"{",
"10",
":",
"1.0",
",",
"8",
":",
"7.",
"/",
"8.",
",",
"4",
":",
"0.75",
",",
"2",
":",
"5.",
"/",
"8.",
",",
"1",
":",
"0.5",
"}",
"# vals are tuples: (key,search,default,lambda)",
"# key => key to the dictionary entry",
"# search => string to search the xml document with",
"# default => if not found in xml doc, use this value",
"# lambda => whatever value you end up with, operate on it with this",
"# anonymous function.",
"vals",
"=",
"[",
"(",
"'ScanDimension'",
",",
"'sKSpace.ucDimension'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
",",
"16",
")",
")",
",",
"(",
"'flag3D'",
",",
"None",
",",
"None",
",",
"lambda",
"_",
":",
"True",
"if",
"data",
"[",
"'ScanDimension'",
"]",
"is",
"4",
"else",
"False",
")",
",",
"(",
"'NxAll'",
",",
"'sKSpace.lBaseResolution'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'NyAll'",
",",
"'sKSpace.lPhaseEncodingLines'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'OSfactorPE'",
",",
"'sKSpace.dPhaseOversamplingForDialog'",
",",
"None",
",",
"lambda",
"x",
":",
"1.0",
"if",
"my_xmlstr",
".",
"find",
"(",
"'sKSpace.dPhaseOversamplingForDialog'",
")",
"<",
"0",
"else",
"1.0",
"+",
"float",
"(",
"x",
")",
")",
",",
"(",
"'NzAll'",
",",
"'sKSpace.lPartitions'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'OSfactor3D'",
",",
"'sKSpace.dSliceOversamplingForDialog'",
",",
"None",
",",
"lambda",
"x",
":",
"1.0",
"if",
"my_xmlstr",
".",
"find",
"(",
"'sKSpace.dSliceOversamplingForDialog'",
")",
"<",
"0",
"else",
"1.0",
"+",
"float",
"(",
"x",
")",
")",
",",
"(",
"'phaseResolution'",
",",
"'sKSpace.dPhaseResolution'",
",",
"None",
",",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
")",
",",
"(",
"'sliceResolution'",
",",
"'sKSpace.dSliceResolution'",
",",
"None",
",",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
")",
",",
"(",
"'Nsl'",
",",
"'sSliceArray.lSize'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'Nconc'",
",",
"'sSliceArray.lConc'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'Nc'",
",",
"None",
",",
"None",
",",
"lambda",
"_",
":",
"len",
"(",
"re",
".",
"findall",
"(",
"r'\\.lRxChannelConnected'",
",",
"my_xmlstr",
")",
")",
")",
",",
"(",
"'Nc'",
",",
"None",
",",
"None",
",",
"lambda",
"_",
":",
"data",
"[",
"'Nc'",
"]",
"-",
"1",
"if",
"my_xmlstr",
".",
"find",
"(",
"'AdjustSeq%/AdjCoilSensSeq'",
")",
">",
"0",
"else",
"data",
"[",
"'Nc'",
"]",
")",
",",
"(",
"'nContrast'",
",",
"'lContrasts'",
",",
"1",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nSet'",
",",
"'lSets'",
",",
"1",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nAverage'",
",",
"'lAverages'",
",",
"1",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nRepetition'",
",",
"'lRepetitions'",
",",
"None",
",",
"lambda",
"x",
":",
"1",
"if",
"my_xmlstr",
".",
"find",
"(",
"'lRepetitions'",
")",
"<",
"0",
"else",
"1",
"+",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nPhase'",
",",
"'sPhysioImaging.lPhases'",
",",
"1",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'fractionY'",
",",
"'sKSpace.PhasePartialFourier'",
",",
"10",
",",
"lambda",
"x",
":",
"fractionDict",
"[",
"int",
"(",
"x",
")",
"]",
")",
",",
"(",
"'fractionZ'",
",",
"'sKSpace.SlicePartialFourier'",
",",
"10",
",",
"lambda",
"x",
":",
"fractionDict",
"[",
"int",
"(",
"x",
")",
"]",
")",
",",
"(",
"'phasePartialFourierForSNR'",
",",
"'sKSpace.dSeqPhasePartialFourierForSNR'",
",",
"1.0",
",",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
")",
",",
"(",
"'EPIFactor'",
",",
"'sFastImaging.lEPIFactor'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'turboFactor'",
",",
"'sFastImaging.lTurboFactor'",
",",
"1",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'PATMode'",
",",
"'sPat.ucPATMode'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
",",
"16",
")",
")",
",",
"(",
"'PATRefScanMode'",
",",
"'sPat.ucRefScanMode'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
",",
"16",
")",
")",
",",
"(",
"'AccelFactorPE'",
",",
"'sPat.lAccelFactPE'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'AccelFactor3D'",
",",
"'sPat.lAccelFact3D'",
",",
"None",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nRefLinesPE'",
",",
"'sPat.lRefLinesPE'",
",",
"None",
",",
"lambda",
"x",
":",
"0",
"if",
"data",
"[",
"'AccelFactorPE'",
"]",
"is",
"1",
"else",
"int",
"(",
"x",
")",
")",
",",
"(",
"'nRefLines3D'",
",",
"'sPat.lRefLines3D'",
",",
"None",
",",
"lambda",
"x",
":",
"0",
"if",
"data",
"[",
"'AccelFactor3D'",
"]",
"is",
"1",
"else",
"int",
"(",
"x",
")",
")",
"]",
"# Evaluate all tuples",
"data",
"=",
"dict",
"(",
")",
"# dictionary to store all values we want to compre with MATLAB",
"for",
"tup",
"in",
"vals",
":",
"if",
"tup",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"idx",
"=",
"my_xmlstr",
".",
"find",
"(",
"tup",
"[",
"1",
"]",
")",
"else",
":",
"idx",
"=",
"-",
"1",
"if",
"idx",
"<",
"0",
":",
"val",
"=",
"tup",
"[",
"2",
"]",
"# Take the default if we can't find it",
"else",
":",
"val",
"=",
"get_val_by_text",
"(",
"root",
",",
"tup",
"[",
"1",
"]",
")",
".",
"text",
"afun",
"=",
"tup",
"[",
"3",
"]",
"if",
"afun",
"is",
"not",
"None",
":",
"val",
"=",
"afun",
"(",
"val",
")",
"# Evaluate anonymous function if provided",
"data",
"[",
"tup",
"[",
"0",
"]",
"]",
"=",
"val",
"# Store the value in the dictionary",
"## Now use the whole xml document",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"xmlstr",
")",
"# Enforce a max value for Nc",
"Nct",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'iMaxNoOfRxChannels'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"if",
"Nct",
"is",
"not",
"None",
":",
"data",
"[",
"'Nct'",
"]",
"=",
"Nct",
"if",
"Nct",
"<",
"data",
"[",
"'Nc'",
"]",
":",
"data",
"[",
"'Nc'",
"]",
"=",
"Nct",
"# If this exists, then we'll need it",
"nPhCorrScan",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lNoOfPhaseCorrScans'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"if",
"nPhCorrScan",
"is",
"not",
"None",
":",
"data",
"[",
"'nPhCorrScan'",
"]",
"=",
"nPhCorrScan",
"# Define some more variables",
"if",
"data",
"[",
"'turboFactor'",
"]",
">",
"1",
":",
"data",
"[",
"'nPhCorEcho'",
"]",
"=",
"1",
"data",
"[",
"'nPhCorScan'",
"]",
"=",
"1",
"if",
"data",
"[",
"'EPIFactor'",
"]",
">",
"1",
":",
"data",
"[",
"'nPhCorScan'",
"]",
"=",
"1",
"data",
"[",
"'nPhCorEcho'",
"]",
"=",
"3",
"if",
"data",
"[",
"'AccelFactorPE'",
"]",
"is",
"1",
":",
"data",
"[",
"'FirstFourierLine'",
"]",
"=",
"1",
"data",
"[",
"'FirstRefLine'",
"]",
"=",
"1",
"else",
":",
"data",
"[",
"'FirstFourierLine'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lFirstFourierLine'",
",",
"lambda",
"x",
":",
"1",
"+",
"int",
"(",
"x",
")",
",",
"1",
")",
"data",
"[",
"'FirstRefLine'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lFirstRefLine'",
",",
"lambda",
"x",
":",
"1",
"+",
"int",
"(",
"x",
")",
",",
"1",
")",
"if",
"data",
"[",
"'AccelFactor3D'",
"]",
"is",
"1",
":",
"data",
"[",
"'FirstFourierPar'",
"]",
"=",
"1",
"data",
"[",
"'FirstRefPartition'",
"]",
"=",
"1",
"else",
":",
"data",
"[",
"'FirstFourierPartition'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lFirstFourierPartition'",
",",
"lambda",
"x",
":",
"1",
"+",
"int",
"(",
"x",
")",
",",
"1",
")",
"data",
"[",
"'FirstRefPartition'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lFirstRefPartition'",
",",
"lambda",
"x",
":",
"1",
"+",
"int",
"(",
"x",
")",
",",
"1",
")",
"data",
"[",
"'NxOS'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'iNoOfFourierColumns'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'OSfactorRO'",
"]",
"=",
"2",
"# we can actually find this in YAPS, but MATLAB impl gives as magic num",
"data",
"[",
"'Nx'",
"]",
"=",
"int",
"(",
"np",
".",
"around",
"(",
"data",
"[",
"'NxOS'",
"]",
"/",
"data",
"[",
"'OSfactorRO'",
"]",
")",
")",
"data",
"[",
"'Ny'",
"]",
"=",
"int",
"(",
"root",
".",
"find",
"(",
"\"ParamFunctor[@name='FillMiniHeaderData']/ParamLong[@name='NoOfFourierLines']/value\"",
")",
".",
"text",
")",
"data",
"[",
"'Nz'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'iNoOfFourierPartitions'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'NxRecon'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'iRoFTLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'NyRecon'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'iPEFTLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'NzRecon'",
"]",
"=",
"1",
"if",
"data",
"[",
"'Nz'",
"]",
"is",
"1",
"else",
"get_yaps_by_name",
"(",
"root",
",",
"'i3DFTLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'NslicePerConcat'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'ushSlicePerConcat'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"## Partial Fourier Mode and Parameters",
"data",
"[",
"'PCAlgorithm'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lPCAlgorithm'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"data",
"[",
"'NoOfPhaseCorrColumns'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lNoOfPhaseCorrColumns'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'Nx'",
"]",
"/",
"2",
")",
"data",
"[",
"'NoOfPhaseCorrLines'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lNoOfPhaseCorrLines'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'NyAll'",
"]",
"/",
"2",
")",
"data",
"[",
"'NoOfPhaseCorrPartitions'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lNoOfPhaseCorrPartitions'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'NzAll'",
"]",
"/",
"2",
")",
"data",
"[",
"'ColSlopeLength'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lColSlopeLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'Nx'",
"]",
"/",
"4",
")",
"# I think there's a mistake here in the MATLAB code: ColSlopeLength defined again instead of LinSlopeLength.",
"# Commented is it how I think it should be and we'll go from there. The value is not actually used.",
"# data['LinSlopeLength'] = get_yaps_by_name(root,'lLinSlopeLength',lambda x:int(x),data['NyAll']/4)",
"data",
"[",
"'ColSlopeLength'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lLinSlopeLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'NyAll'",
"]",
"/",
"4",
")",
"data",
"[",
"'ParSlopeLength'",
"]",
"=",
"get_yaps_by_name",
"(",
"root",
",",
"'lParSlopeLength'",
",",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
",",
"data",
"[",
"'NzAll'",
"]",
"/",
"4",
")",
"## Raw data correction factors, use the MeasYaps portion of the xml document",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"my_xmlstr",
")",
"data",
"[",
"'CorrFactor'",
"]",
"=",
"np",
".",
"ones",
"(",
"data",
"[",
"'Nc'",
"]",
")",
"for",
"c",
"in",
"range",
"(",
"data",
"[",
"'Nc'",
"]",
")",
":",
"text",
"=",
"'axRawDataCorrectionFactor[0][%d].dRe'",
"%",
"c",
"data",
"[",
"'CorrFactor'",
"]",
"[",
"c",
"]",
"=",
"1",
"if",
"my_xmlstr",
".",
"find",
"(",
"text",
")",
"<",
"0",
"else",
"float",
"(",
"get_val_by_text",
"(",
"root",
",",
"text",
")",
".",
"text",
")",
"text",
"=",
"'axRawDataCorrectionFactor[0][%d].dIm'",
"%",
"c",
"if",
"my_xmlstr",
".",
"find",
"(",
"text",
")",
">=",
"0",
":",
"data",
"[",
"'CorrFactor'",
"]",
"[",
"c",
"]",
"=",
"data",
"[",
"'CorrFactor'",
"]",
"[",
"c",
"]",
"+",
"1j",
"*",
"float",
"(",
"get_val_by_text",
"(",
"root",
",",
"text",
")",
".",
"text",
")",
"## FFT Correction Factors",
"data",
"[",
"'FFTCorrFactor'",
"]",
"=",
"np",
".",
"ones",
"(",
"data",
"[",
"'Nc'",
"]",
")",
"if",
"resetFFTscale",
"is",
"False",
":",
"data",
"[",
"'FFTCorrFactor'",
"]",
"=",
"np",
".",
"ones",
"(",
"data",
"[",
"'Nc'",
"]",
")",
"for",
"c",
"in",
"range",
"(",
"data",
"[",
"'Nc'",
"]",
")",
":",
"text",
"=",
"'asCoilSelectMeas[0].aFFT_SCALE[%d].flFactor'",
"%",
"c",
"data",
"[",
"'FFTCorrFactor'",
"]",
"[",
"c",
"]",
"=",
"float",
"(",
"get_val_by_text",
"(",
"root",
",",
"text",
")",
".",
"text",
")",
"## For PC Angio",
"data",
"[",
"'Nset'",
"]",
"=",
"1",
"text",
"=",
"'sAngio.ucPCFlowMode'",
"data",
"[",
"'PCMRAFlag'",
"]",
"=",
"int",
"(",
"get_val_by_text",
"(",
"root",
",",
"text",
")",
".",
"text",
",",
"16",
")",
"if",
"my_xmlstr",
".",
"find",
"(",
"text",
")",
">",
"0",
"else",
"0",
"if",
"data",
"[",
"'PCMRAFlag'",
"]",
"is",
"1",
":",
"text",
"=",
"'sAngio.sFlowArray.lSize'",
"if",
"my_xmlstr",
".",
"find",
"(",
"text",
")",
"<",
"0",
":",
"data",
"[",
"'Nset'",
"]",
"=",
"int",
"(",
"get_val_by_text",
"(",
"root",
",",
"text",
")",
".",
"text",
")",
"## Recalculation of partial Fourier factors and EPI/turbo factor for EPI and TSE",
"data",
"[",
"'fractionPE'",
"]",
"=",
"float",
"(",
"data",
"[",
"'Ny'",
"]",
")",
"/",
"float",
"(",
"data",
"[",
"'NyAll'",
"]",
")",
"data",
"[",
"'fraction3D'",
"]",
"=",
"float",
"(",
"data",
"[",
"'Nz'",
"]",
")",
"/",
"float",
"(",
"data",
"[",
"'NzAll'",
"]",
")",
"data",
"[",
"'EPIFactor'",
"]",
"=",
"np",
".",
"around",
"(",
"data",
"[",
"'fractionPE'",
"]",
"*",
"data",
"[",
"'EPIFactor'",
"]",
")",
"data",
"[",
"'nEPITrain'",
"]",
"=",
"data",
"[",
"'Ny'",
"]",
"/",
"data",
"[",
"'EPIFactor'",
"]",
"data",
"[",
"'turboFactor'",
"]",
"=",
"np",
".",
"around",
"(",
"data",
"[",
"'fractionPE'",
"]",
"*",
"data",
"[",
"'turboFactor'",
"]",
")",
"data",
"[",
"'nTSETrain'",
"]",
"=",
"data",
"[",
"'Ny'",
"]",
"/",
"data",
"[",
"'turboFactor'",
"]",
"data",
"[",
"'Nc0'",
"]",
"=",
"data",
"[",
"'Nc'",
"]",
"data",
"[",
"'Nc'",
"]",
"=",
"1",
"if",
"readOneCoil",
"is",
"True",
"else",
"data",
"[",
"'Nc0'",
"]",
"## Calculation of the number of valid k-space readouts and k-space data matrix dimensions",
"if",
"data",
"[",
"'PATMode'",
"]",
"is",
"1",
":",
"data",
"[",
"'nReadout'",
"]",
"=",
"(",
"data",
"[",
"'nAverage'",
"]",
"*",
"data",
"[",
"'nPhase'",
"]",
"*",
"data",
"[",
"'nRepetition'",
"]",
"*",
"data",
"[",
"'nContrast'",
"]",
"*",
"data",
"[",
"'Nsl'",
"]",
"*",
"data",
"[",
"'Nz'",
"]",
"*",
"data",
"[",
"'Nc'",
"]",
"*",
"data",
"[",
"'Ny'",
"]",
")",
"elif",
"(",
"data",
"[",
"'PATMode'",
"]",
"is",
"2",
")",
"and",
"(",
"data",
"[",
"'PATRefScanMode'",
"]",
"is",
"2",
")",
":",
"if",
"(",
"data",
"[",
"'Ny'",
"]",
"%",
"2",
")",
"is",
"1",
":",
"data",
"[",
"'NyPAT'",
"]",
"=",
"(",
"data",
"[",
"'Ny'",
"]",
"-",
"1",
"+",
"data",
"[",
"'nRefLinesPE'",
"]",
"*",
"(",
"data",
"[",
"'AccelFactorPE'",
"]",
"-",
"1",
")",
")",
"/",
"data",
"[",
"'AccelFactorPE'",
"]",
"else",
":",
"data",
"[",
"'NyPAT'",
"]",
"=",
"np",
".",
"floor",
"(",
"(",
"data",
"[",
"'Ny'",
"]",
"+",
"data",
"[",
"'nRefLinesPE'",
"]",
"*",
"(",
"data",
"[",
"'AccelFactorPE'",
"]",
"-",
"1",
")",
")",
"/",
"data",
"[",
"'AccelFactorPE'",
"]",
")",
"data",
"[",
"'nReadout'",
"]",
"=",
"(",
"data",
"[",
"'nAverage'",
"]",
"*",
"data",
"[",
"'nPhase'",
"]",
"*",
"data",
"[",
"'nRepetition'",
"]",
"*",
"data",
"[",
"'nContrast'",
"]",
"*",
"data",
"[",
"'Nsl'",
"]",
"*",
"data",
"[",
"'Nz'",
"]",
"*",
"data",
"[",
"'Nc'",
"]",
"*",
"data",
"[",
"'NyPAT'",
"]",
")",
"if",
"removeOS",
"is",
"True",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'nAverage'",
"]",
",",
"data",
"[",
"'nPhase'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
",",
"data",
"[",
"'Nsl'",
"]",
",",
"data",
"[",
"'Nc'",
"]",
",",
"data",
"[",
"'Nz'",
"]",
",",
"data",
"[",
"'Nx'",
"]",
",",
"data",
"[",
"'Ny'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"complex64",
")",
"else",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'nAverage'",
"]",
",",
"data",
"[",
"'nPhase'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
",",
"data",
"[",
"'Nsl'",
"]",
",",
"data",
"[",
"'Nc'",
"]",
",",
"data",
"[",
"'Nz'",
"]",
",",
"data",
"[",
"'NxOS'",
"]",
",",
"data",
"[",
"'Ny'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"complex64",
")",
"if",
"(",
"readPhaseCorInfo",
"is",
"True",
")",
"and",
"(",
"data",
"[",
"'nPhCorScan'",
"]",
">",
"0",
")",
":",
"data",
"[",
"'kPhaseCor'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'nPhCorScan'",
"]",
",",
"data",
"[",
"'nPhCorEcho'",
"]",
",",
"data",
"[",
"'Nsl'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
",",
"data",
"[",
"'Nc'",
"]",
",",
"data",
"[",
"'NxOS'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"if",
"readNavigator",
"is",
"True",
":",
"data",
"[",
"'nNavigator'",
"]",
"=",
"(",
"data",
"[",
"'nAverage'",
"]",
"*",
"data",
"[",
"'nPhase'",
"]",
"*",
"data",
"[",
"'nRepetition'",
"]",
"*",
"data",
"[",
"'nContrast'",
"]",
"*",
"data",
"[",
"'Nsl'",
"]",
"*",
"data",
"[",
"'Nz'",
"]",
"*",
"data",
"[",
"'Nc'",
"]",
"*",
"data",
"[",
"'nEPITrain'",
"]",
"*",
"data",
"[",
"'nNavEK'",
"]",
")",
"data",
"[",
"'kNavigator'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'nAverage'",
"]",
",",
"data",
"[",
"'nPhase'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
"*",
"nNavEK",
",",
"data",
"[",
"'Nsl'",
"]",
",",
"data",
"[",
"'Nc'",
"]",
",",
"data",
"[",
"'Nz'",
"]",
",",
"data",
"[",
"'nEPITrain'",
"]",
",",
"data",
"[",
"'NxOS'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"if",
"readTimeStamp",
"is",
"True",
":",
"data",
"[",
"'nTimeStamp'",
"]",
"=",
"(",
"data",
"[",
"'nAverage'",
"]",
"*",
"data",
"[",
"'nPhase'",
"]",
"*",
"data",
"[",
"'nRepetition'",
"]",
"*",
"data",
"[",
"'nContrast'",
"]",
"*",
"data",
"[",
"'Nz'",
"]",
")",
"data",
"[",
"'timeStamp'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'nAverage'",
"]",
",",
"data",
"[",
"'nPhase'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
"*",
"nNavEK",
",",
"data",
"[",
"'Nz'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"## Data Readout and Reordering",
"# Read k-space data",
"data",
"[",
"'noiseMeasCounter'",
"]",
"=",
"0",
"data",
"[",
"'noiseMeas'",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'NxOS'",
"]",
",",
"data",
"[",
"'Nc'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"complex64",
")",
"data",
"[",
"'navigatorPrep'",
"]",
"=",
"0",
"data",
"[",
"'LineN'",
"]",
"=",
"-",
"1",
"data",
"[",
"'Ndr'",
"]",
"=",
"1",
"for",
"r",
"in",
"range",
"(",
"data",
"[",
"'Ndr'",
"]",
")",
":",
"data",
"[",
"'xCoil'",
"]",
"=",
"r",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"readFlag",
"=",
"True",
"skipField",
"=",
"0",
"countNavigator",
"=",
"0",
"navigatorDataON",
"=",
"False",
"temp1",
"=",
"np",
".",
"zeros",
"(",
"data",
"[",
"'NxOS'",
"]",
")",
"data",
"[",
"'dataField'",
"]",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"int32",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"f",
".",
"seek",
"(",
"data",
"[",
"'dataField'",
"]",
",",
"os",
".",
"SEEK_SET",
")",
"while",
"readFlag",
"is",
"True",
":",
"if",
"readTimeStamp",
"is",
"True",
":",
"f",
".",
"seek",
"(",
"12",
",",
"os",
".",
"SEEK_CUR",
")",
"data",
"[",
"'timeS'",
"]",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"f",
".",
"seek",
"(",
"4",
",",
"os",
".",
"SEEK_CUR",
")",
"else",
":",
"f",
".",
"seek",
"(",
"20",
",",
"os",
".",
"SEEK_CUR",
")",
"data",
"[",
"'evalMask1'",
"]",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"data",
"[",
"'evalMask2'",
"]",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"uint32",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"flag",
"=",
"[",
"(",
"32",
"-",
"m",
".",
"start",
"(",
")",
")",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"'1'",
",",
"np",
".",
"binary_repr",
"(",
"data",
"[",
"'evalMask1'",
"]",
",",
"32",
")",
")",
"]",
"# Tuples: (key,dtype,afun)",
"vals",
"=",
"[",
"(",
"'Nxr'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Ncr'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Line'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Acquisition'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Slice'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Partition'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Echo'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Phase'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Repetition'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Set'",
",",
"None",
",",
"None",
")",
",",
"(",
"'CutOffDataPre'",
",",
"None",
",",
"lambda",
":",
"f",
".",
"seek",
"(",
"12",
",",
"os",
".",
"SEEK_CUR",
")",
")",
",",
"(",
"'CutOffDataPost'",
",",
"None",
",",
"None",
")",
",",
"(",
"'KSpaceCentreColumn'",
",",
"None",
",",
"None",
")",
",",
"(",
"'CoilMode'",
",",
"None",
",",
"None",
")",
",",
"(",
"'ReadOutOffCentre'",
",",
"np",
".",
"float32",
",",
"None",
")",
",",
"(",
"'KSpaceCentreLineNo'",
",",
"None",
",",
"lambda",
":",
"f",
".",
"seek",
"(",
"4",
",",
"os",
".",
"SEEK_CUR",
")",
")",
",",
"(",
"'KSpaceCentrePartitionNo'",
",",
"None",
",",
"None",
")",
",",
"(",
"'Channel'",
",",
"None",
",",
"lambda",
":",
"f",
".",
"seek",
"(",
"44",
",",
"os",
".",
"SEEK_CUR",
")",
")",
"]",
"for",
"tup",
"in",
"vals",
":",
"t",
"=",
"np",
".",
"uint16",
"if",
"tup",
"[",
"1",
"]",
"is",
"None",
"else",
"tup",
"[",
"1",
"]",
"if",
"hasattr",
"(",
"tup",
"[",
"2",
"]",
",",
"'__call__'",
")",
":",
"tup",
"[",
"2",
"]",
"(",
")",
"data",
"[",
"tup",
"[",
"0",
"]",
"]",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"t",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"f",
".",
"seek",
"(",
"2",
",",
"os",
".",
"SEEK_CUR",
")",
"if",
"1",
"in",
"flag",
":",
"break",
"if",
"any",
"(",
"[",
"k",
"for",
"k",
"in",
"[",
"2",
",",
"22",
",",
"26",
"]",
"if",
"k",
"in",
"flag",
"]",
")",
":",
"if",
"(",
"22",
"in",
"flag",
")",
"and",
"(",
"readPhaseCorInfo",
"is",
"False",
")",
":",
"skipField",
"=",
"data",
"[",
"'nPhCorScan'",
"]",
"*",
"data",
"[",
"'nPhCorEcho'",
"]",
"*",
"data",
"[",
"'Ncr'",
"]",
"*",
"(",
"localHeader",
"+",
"8",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"-",
"localHeader",
"if",
"(",
"22",
"in",
"flag",
")",
"and",
"(",
"readPhaseCorInfo",
"is",
"True",
")",
":",
"skipField",
"=",
"-",
"localHeader",
"f",
".",
"seek",
"(",
"skipField",
",",
"os",
".",
"SEEK_CUR",
")",
"skipField",
"=",
"0",
"for",
"m",
"in",
"range",
"(",
"data",
"[",
"'nPhCorScan'",
"]",
"*",
"data",
"[",
"'nPhCorEcho'",
"]",
"*",
"data",
"[",
"'Ncr'",
"]",
")",
":",
"infoMDH_TimeStamp",
"=",
"readMDH_TimeStamp_VB13",
"(",
"f",
")",
"temp",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"count",
"=",
"2",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"if",
"25",
"in",
"flag",
":",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
")",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
"if",
"data",
"[",
"'CutOffDataPre'",
"]",
">",
"0",
":",
"temp",
"[",
"0",
":",
"2",
"*",
"data",
"[",
"'CutOffDataPre'",
"]",
"]",
"=",
"0",
"if",
"data",
"[",
"'CutOffDataPost'",
"]",
">",
"0",
":",
"temp",
"[",
"len",
"(",
"temp",
")",
"-",
"2",
"*",
"data",
"[",
"'CutOffDataPost'",
"]",
":",
"]",
"=",
"0",
"data",
"[",
"'kPhaseCor'",
"]",
"[",
"data",
"[",
"'Echo'",
"]",
",",
"np",
".",
"ceil",
"(",
"m",
"/",
"data",
"[",
"'Ncr'",
"]",
")",
",",
"data",
"[",
"'Slice'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
",",
"data",
"[",
"'Channel'",
"]",
",",
":",
"]",
"=",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"(",
"2",
"in",
"flag",
")",
"and",
"(",
"readNavigator",
"is",
"False",
")",
":",
"skipField",
"=",
"data",
"[",
"'Ncr'",
"]",
"*",
"(",
"localHeader",
"+",
"8",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"-",
"localHeader",
"if",
"(",
"2",
"in",
"flag",
")",
"and",
"(",
"readNavigator",
"is",
"True",
")",
":",
"if",
"(",
"countNavigator",
"is",
"False",
")",
"and",
"(",
"navigatorPrep",
"is",
"False",
")",
":",
"kNavigator",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'Nxr'",
"]",
",",
"data",
"[",
"'Ncr'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
"*",
"nNavEK",
",",
"data",
"[",
"'nEPITrain'",
"]",
",",
"data",
"[",
"'Nz'",
"]",
",",
"data",
"[",
"'Nsl'",
"]",
",",
"data",
"[",
"'nAverage'",
"]",
",",
"data",
"[",
"'nPhase'",
"]",
",",
"data",
"[",
"'nRepetition'",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"kNavigatorTemp",
"=",
"np",
".",
"zeros",
"(",
"(",
"data",
"[",
"'Nxr'",
"]",
",",
"data",
"[",
"'Ncr'",
"]",
",",
"data",
"[",
"'nContrast'",
"]",
"*",
"nNavEK",
")",
",",
"dtype",
"=",
"float32",
")",
"navigatorPrep",
"=",
"1",
"skipField",
"=",
"-",
"localHeader",
"f",
".",
"seek",
"(",
"skipField",
",",
"os",
".",
"SEEK_CUR",
")",
"skipField",
"=",
"0",
"for",
"m",
"in",
"range",
"(",
"nNavEK",
"*",
"data",
"[",
"'Ncr'",
"]",
")",
":",
"infoMDH_TimeStamp",
"=",
"readMDH_TimeStamp_VB13",
"(",
"f",
")",
"temp",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"count",
"=",
"2",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"if",
"25",
"in",
"flag",
":",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
")",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
"if",
"data",
"[",
"'CutOffDataPre'",
"]",
">",
"0",
":",
"temp",
"[",
"0",
":",
"2",
"*",
"data",
"[",
"'CutOffDataPre'",
"]",
"]",
"=",
"0",
"if",
"data",
"[",
"'CutOffDataPost'",
"]",
">",
"0",
":",
"temp",
"[",
"len",
"(",
"temp",
")",
"-",
"2",
"*",
"data",
"[",
"'CutOffDataPost'",
"]",
"-",
"1",
":",
"]",
"=",
"0",
"kNavigatorTemp",
"[",
":",
",",
"data",
"[",
"'Channel'",
"]",
",",
"data",
"[",
"'Echo'",
"]",
"]",
"=",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"navigatorDataON",
"=",
"True",
"if",
"26",
"in",
"flag",
":",
"temp",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"count",
"=",
"2",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"if",
"25",
"in",
"flag",
":",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
")",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
"data",
"[",
"'noiseMeas'",
"]",
"[",
":",
",",
"data",
"[",
"'Channel'",
"]",
"]",
"=",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"skipField",
"=",
"0",
"f",
".",
"seek",
"(",
"skipField",
",",
"os",
".",
"SEEK_CUR",
")",
"else",
":",
"temp",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"count",
"=",
"2",
"*",
"data",
"[",
"'Nxr'",
"]",
")",
"if",
"25",
"in",
"flag",
":",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
")",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"=",
"np",
".",
"flipud",
"(",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
"if",
"data",
"[",
"'CutOffDataPre'",
"]",
">",
"0",
":",
"temp",
"[",
"0",
":",
"2",
"*",
"data",
"[",
"'CutOffDataPre'",
"]",
"-",
"1",
"]",
"=",
"0",
"if",
"data",
"[",
"'CutOffDataPost'",
"]",
">",
"0",
":",
"temp",
"[",
"len",
"(",
"temp",
")",
"-",
"2",
"*",
"data",
"[",
"'CutOffDataPost'",
"]",
"-",
"1",
"]",
"=",
"0",
"if",
"11",
"in",
"flag",
":",
"temp",
"=",
"data",
"[",
"'CorrFactor'",
"]",
"[",
"data",
"[",
"'Channel'",
"]",
"]",
"*",
"temp",
"temp",
"=",
"data",
"[",
"'FFTCorrFactor'",
"]",
"[",
"data",
"[",
"'Channel'",
"]",
"]",
"*",
"temp",
"if",
"readOneCoil",
"is",
"False",
":",
"if",
"removeOS",
"is",
"True",
":",
"temp1",
"[",
"len",
"(",
"temp1",
")",
"-",
"data",
"[",
"'Nxr'",
"]",
":",
"]",
"=",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"tempX",
"=",
"np",
".",
"fftshift",
"(",
"np",
".",
"fft",
"(",
"np",
".",
"fftshift",
"(",
"temp1",
")",
")",
")",
"tempK",
"=",
"np",
".",
"fftshift",
"(",
"np",
".",
"ifftshift",
"(",
"np",
".",
"fftshift",
"(",
"tempX",
"[",
"np",
".",
"around",
"(",
"(",
"data",
"[",
"'NxOS'",
"]",
"-",
"data",
"[",
"'Nx'",
"]",
")",
"/",
"2",
")",
":",
"data",
"[",
"'Nx'",
"]",
"+",
"np",
".",
"around",
"(",
"(",
"data",
"[",
"'NxOS'",
"]",
"-",
"data",
"[",
"'Nx'",
"]",
")",
"/",
"2",
")",
"]",
")",
")",
")",
"data",
"[",
"'kSpace'",
"]",
"[",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
",",
"data",
"[",
"'Echo'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"data",
"[",
"'Channel'",
"]",
",",
"data",
"[",
"'Partition'",
"]",
",",
":",
",",
"data",
"[",
"'Line'",
"]",
"]",
"=",
"tempK",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"else",
":",
"data",
"[",
"'kSpace'",
"]",
"[",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
",",
"data",
"[",
"'Echo'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"data",
"[",
"'Channel'",
"]",
",",
"data",
"[",
"'Partition'",
"]",
",",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
"[",
"7",
"]",
"-",
"data",
"[",
"'Nxr'",
"]",
":",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
"[",
"7",
"]",
",",
"data",
"[",
"'Line'",
"]",
"]",
"=",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"elif",
"(",
"readOneCoil",
"is",
"True",
")",
"and",
"(",
"data",
"[",
"'Channel'",
"]",
"+",
"1",
"==",
"coilIndex",
")",
":",
"if",
"removeOS",
"is",
"True",
":",
"temp1",
"[",
"len",
"(",
"temp1",
")",
"-",
"data",
"[",
"'Nxr'",
"]",
":",
"]",
"=",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
"tempx",
"=",
"np",
".",
"fftshift",
"(",
"np",
".",
"fft",
"(",
"np",
".",
"fftshift",
"(",
"temp1",
")",
")",
")",
"tempK",
"=",
"np",
".",
"fftshift",
"(",
"np",
".",
"fft",
"(",
"np",
".",
"fftshift",
"(",
"tempX",
"[",
"np",
".",
"around",
"(",
"(",
"data",
"[",
"'NxOS'",
"]",
"-",
"data",
"[",
"'Nx'",
"]",
")",
"/",
"2",
")",
":",
"data",
"[",
"'Nx'",
"]",
"+",
"np",
".",
"around",
"(",
"(",
"data",
"[",
"'NxOS'",
"]",
"-",
"data",
"[",
"'Nx'",
"]",
")",
"/",
"2",
")",
"]",
")",
")",
")",
"data",
"[",
"'kSpace'",
"]",
"[",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
",",
"data",
"[",
"'Echo'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"0",
",",
"data",
"[",
"'Partition'",
"]",
",",
":",
",",
"data",
"[",
"'Line'",
"]",
"]",
"=",
"tempK",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"else",
":",
"data",
"[",
"'kSpace'",
"]",
"[",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
",",
"data",
"[",
"'Echo'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"0",
",",
"data",
"[",
"'Partition'",
"]",
",",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
"[",
"7",
"]",
"-",
"data",
"[",
"'Nxr'",
"]",
":",
",",
"data",
"[",
"'Line'",
"]",
"]",
"=",
"(",
"temp",
"[",
"0",
":",
":",
"2",
"]",
"+",
"1j",
"*",
"temp",
"[",
"1",
":",
":",
"2",
"]",
")",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"if",
"(",
"readTimeStamp",
"is",
"True",
")",
"and",
"(",
"data",
"[",
"'Channel'",
"]",
"==",
"0",
")",
"and",
"(",
"navigatorDataON",
"is",
"True",
")",
":",
"data",
"[",
"'EPITrain'",
"]",
"=",
"countNavigator",
"%",
"data",
"[",
"'nEPITrain'",
"]",
"data",
"[",
"'timeStamp'",
"]",
"[",
"data",
"[",
"'Echo'",
"]",
",",
"data",
"[",
"'EPITrain'",
"]",
",",
"data",
"[",
"'Partition'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"'Repetition'",
"]",
"]",
"=",
"(",
"0.0025",
"*",
"timeS",
")",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"if",
"(",
"readNavigator",
"is",
"True",
")",
"and",
"(",
"data",
"[",
"'Channel'",
"]",
"==",
"0",
")",
"and",
"(",
"navigatorDataON",
"is",
"True",
")",
":",
"data",
"[",
"'EPITrain'",
"]",
"=",
"countNavigator",
"%",
"data",
"[",
"'nEPITrain'",
"]",
"kNavigator",
"[",
":",
",",
":",
",",
":",
",",
"data",
"[",
"'EPITrain'",
"]",
",",
"data",
"[",
"'Partition'",
"]",
",",
"data",
"[",
"'Slice'",
"]",
",",
"data",
"[",
"'Acquisition'",
"]",
",",
"data",
"[",
"'Phase'",
"]",
",",
"data",
"[",
"Repetition",
"]",
"]",
"=",
"kNavigatorTemp",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
"navigatorDataON",
"=",
"False",
"countNavigator",
"+=",
"1",
"if",
"1",
"in",
"flag",
":",
"break",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"squeeze",
"(",
"data",
"[",
"'kSpace'",
"]",
")",
"if",
"len",
"(",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
")",
"==",
"3",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"transpose",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"[",
"1",
",",
"2",
",",
"0",
"]",
")",
"elif",
"len",
"(",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
")",
"==",
"4",
":",
"if",
"data",
"[",
"'flag3D'",
"]",
"is",
"False",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"transpose",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"[",
"2",
",",
"3",
",",
"0",
",",
"1",
"]",
")",
"else",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"transpose",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"[",
"2",
",",
"3",
",",
"1",
",",
"0",
"]",
")",
"elif",
"len",
"(",
"data",
"[",
"'kSpace'",
"]",
".",
"shape",
")",
"==",
"5",
":",
"data",
"[",
"'kSpace'",
"]",
"=",
"np",
".",
"transpose",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"[",
"3",
",",
"4",
",",
"2",
",",
"0",
",",
"1",
"]",
")",
"if",
"transformToImageSpace",
"is",
"True",
":",
"if",
"data",
"[",
"'flag3D'",
"]",
"is",
"True",
":",
"data",
"[",
"'imSpace'",
"]",
"=",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"np",
".",
"fft",
".",
"ifftn",
"(",
"np",
".",
"fft",
".",
"fftshift",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"axes",
"=",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
",",
"axes",
"=",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
",",
"axes",
"=",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
"data",
"[",
"'imSpace'",
"]",
"[",
"2",
"]",
"*=",
"data",
"[",
"'Nz'",
"]",
"else",
":",
"data",
"[",
"'imSpace'",
"]",
"=",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"np",
".",
"fft",
".",
"ifft2",
"(",
"np",
".",
"fft",
".",
"fftshift",
"(",
"data",
"[",
"'kSpace'",
"]",
",",
"axes",
"=",
"(",
"0",
",",
"1",
")",
")",
",",
"axes",
"=",
"(",
"0",
",",
"1",
")",
")",
",",
"axes",
"=",
"(",
"0",
",",
"1",
")",
")",
"data",
"[",
"'imSpace'",
"]",
"[",
"0",
"]",
"*=",
"data",
"[",
"'NxOS'",
"]",
"data",
"[",
"'imSpace'",
"]",
"[",
"1",
"]",
"*=",
"data",
"[",
"'Ny'",
"]",
"if",
"(",
"removeOSafter",
"is",
"True",
")",
"and",
"(",
"removeOS",
"is",
"False",
")",
":",
"if",
"len",
"(",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
")",
"==",
"2",
":",
"data",
"[",
"'imSpace'",
"]",
"[",
"0",
":",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
",",
":",
"]",
"=",
"[",
"]",
"data",
"[",
"'imSpace'",
"]",
"[",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
":",
",",
":",
"]",
"=",
"[",
"]",
"elif",
"len",
"(",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
")",
"==",
"3",
":",
"data",
"[",
"'imSpace'",
"]",
"[",
"0",
":",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"data",
"[",
"'imSpace'",
"]",
"[",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
":",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"elif",
"len",
"(",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
")",
"==",
"4",
":",
"data",
"[",
"'imSpace'",
"]",
"[",
"0",
":",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
",",
":",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"data",
"[",
"'imSpace'",
"]",
"[",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
":",
",",
":",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"elif",
"len",
"(",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
")",
"==",
"5",
":",
"data",
"[",
"'imSpace'",
"]",
"[",
"0",
":",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
",",
":",
",",
":",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"data",
"[",
"'imSpace'",
"]",
"[",
"data",
"[",
"'imSpace'",
"]",
".",
"shape",
"[",
"0",
"]",
"-",
"data",
"[",
"'NxOS'",
"]",
"/",
"4",
":",
",",
":",
",",
":",
",",
":",
",",
":",
"]",
"=",
"[",
"]",
"if",
"writeToFile",
"is",
"True",
":",
"if",
"transformToImageSpace",
"is",
"True",
":",
"if",
"npz",
":",
"np",
".",
"savez_compressed",
"(",
"filenameOut",
",",
"imSpace",
"=",
"data",
"[",
"'imSpace'",
"]",
",",
"timeStamp",
"=",
"data",
"[",
"'timeStamp'",
"]",
")",
"else",
":",
"with",
"h5py",
".",
"File",
"(",
"'%s.hdf5'",
"%",
"filenameOut",
",",
"'w'",
")",
"as",
"f",
":",
"dset",
"=",
"f",
".",
"create_dataset",
"(",
"'kSpace'",
",",
"data",
"=",
"data",
"[",
"'imSpace'",
"]",
")",
"else",
":",
"if",
"npz",
":",
"np",
".",
"savez_compressed",
"(",
"filenameOut",
",",
"kSpace",
"=",
"data",
"[",
"'kSpace'",
"]",
",",
"timeStamp",
"=",
"data",
"[",
"'timeStamp'",
"]",
")",
"else",
":",
"with",
"h5py",
".",
"File",
"(",
"'%s.hdf5'",
"%",
"filenameOut",
",",
"'w'",
")",
"as",
"f",
":",
"dset",
"=",
"f",
".",
"create_dataset",
"(",
"'kSpace'",
",",
"data",
"=",
"data",
"[",
"'kSpace'",
"]",
")",
"return",
"(",
"data",
")"
] | Read raw data from Siemens MRI scanners with IDEA VB15.
Will return an array of measured k-space data from raw data from
Siemens MRI scanners using IDEA VB15 (single value). If the option
'-I' is used, then image space data will be returned instead.
Usage:
readMeasDataVB15 filename [ -t ] [ -rfft ] [ -r1 ] [ -rp ] [ -rn ]
[ -skipts ] [ -nnavek ] [ -ros ]
[ -rosa ] [ -I ] [ -w ] [-npz]
Examples:
python3 -m rawdatarinator.readMeasDataVB15 raw.dat -w
or using the shortened alias...
python3 -m rawdatarinator.raw raw.dat -w
Command-line Options:
filename Filename of file containing raw measurements.
-rfft (resetFFTscale)
Resets FFTscale and DataCorrection for each coil
to 1.
-r1 (readOneCoil)
Read measurement data from an individual coil.
-rp (readPhaseCorInfo)
_
-rn (readNavigator)
_
-skipts (skip readTimeStamp)
_
-nnavek (nNavEK)
_
-ros (removeOS)
Flag to remove oversampling (OS) in the x
direction. removeOS=True is more efficient as it
processes each readout line independently,
reducing the required memory space to keep all
measured data.
-rosa (removeOSafter)
Flag to remove oversampling (OS) in the x
direction. This works in image space, cutting FOV.
Not likely a good idea for radial.
-I (transformToImageSpace)
Produce image space representation. Note that
there is no correction for partial Fourier or
parallel imaging k-space undersampling. The given
version of code only uses numpy's FFT operation.
-w (writeToFile)
Save k-space or image space volume. Currently the
output filename is auto generated.
-npz (npz)
Save k-space or image space volume using the .npz
file extension. Default is to use hdf5 file
standard.
-h (help)
Displays this documentation. | [
"Read",
"raw",
"data",
"from",
"Siemens",
"MRI",
"scanners",
"with",
"IDEA",
"VB15",
"."
] | train | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/readMeasDataVB15.py#L56-L649 |
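
A quick sketch of driving the reader from Python rather than the CLI. The keyword names mirror the options table above (main() below maps the flags onto exactly these names), but the full signature is not reproduced here, so treat the call shape as an assumption; 'raw.dat' is a placeholder path.

from rawdatarinator.readMeasDataVB15 import readMeasDataVB15

# Hypothetical direct call; keyword names taken from the options table.
data = readMeasDataVB15('raw.dat',
                        removeOS=True,                # drop x-direction oversampling per line
                        transformToImageSpace=False,  # keep k-space
                        writeToFile=False)
print(data['kSpace'].shape, data['kSpace'].dtype)     # complex64 k-space volume
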
mckib2/rawdatarinator | rawdatarinator/readMeasDataVB15.py | main | def main(args):
'''Function run when called from command line.'''
options = { '-rfft': ['resetFFTscale',False],
'-r1': ['readOneCoil',False],
'-rp': ['readPhaseCorInfo',False],
'-rn': ['readNavigator',False],
'-skipts': ['readTimeStamp',True],
'-nnavek': ['nNavEK',False],
'-ros': ['removeOS',False],
'-rosa': ['removeOSafter',False],
'-I': ['transformToImageSpace',False],
'-w': ['writeToFile',False],
'-npz': ['npz',False] }
decode_simple_opts(options,args,readMeasDataVB15) | python | def main(args):
'''Function run when called from command line.'''
options = { '-rfft': ['resetFFTscale',False],
'-r1': ['readOneCoil',False],
'-rp': ['readPhaseCorInfo',False],
'-rn': ['readNavigator',False],
'-skipts': ['readTimeStamp',True],
'-nnavek': ['nNavEK',False],
'-ros': ['removeOS',False],
'-rosa': ['removeOSafter',False],
'-I': ['transformToImageSpace',False],
'-w': ['writeToFile',False],
'-npz': ['npz',False] }
decode_simple_opts(options,args,readMeasDataVB15) | [
"def",
"main",
"(",
"args",
")",
":",
"options",
"=",
"{",
"'-rfft'",
":",
"[",
"'resetFFTscale'",
",",
"False",
"]",
",",
"'-r1'",
":",
"[",
"'readOneCoil'",
",",
"False",
"]",
",",
"'-rp'",
":",
"[",
"'readPhaseCorInfo'",
",",
"False",
"]",
",",
"'-rn'",
":",
"[",
"'readNavigator'",
",",
"False",
"]",
",",
"'-skipts'",
":",
"[",
"'readTimeStamp'",
",",
"True",
"]",
",",
"'-nnavek'",
":",
"[",
"'nNavEK'",
",",
"False",
"]",
",",
"'-ros'",
":",
"[",
"'removeOS'",
",",
"False",
"]",
",",
"'-rosa'",
":",
"[",
"'removeOSafter'",
",",
"False",
"]",
",",
"'-I'",
":",
"[",
"'transformToImageSpace'",
",",
"False",
"]",
",",
"'-w'",
":",
"[",
"'writeToFile'",
",",
"False",
"]",
",",
"'-npz'",
":",
"[",
"'npz'",
",",
"False",
"]",
"}",
"decode_simple_opts",
"(",
"options",
",",
"args",
",",
"readMeasDataVB15",
")"
] | Function run when called from command line. | [
"Function",
"run",
"when",
"called",
"from",
"command",
"line",
"."
] | train | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/readMeasDataVB15.py#L651-L666 |
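
Since main() is only a flag-to-keyword mapper, the CLI and direct calls should be interchangeable. A sketch, assuming decode_simple_opts forwards the positional filename and then invokes the target function (its contract is inferred from this call site; it is not shown here):

from rawdatarinator.readMeasDataVB15 import main

main(['raw.dat', '-ros', '-w'])  # ~ readMeasDataVB15('raw.dat', removeOS=True, writeToFile=True)
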
chrisjsewell/jsonextended | jsonextended/ejson.py | _get_keys_folder | def _get_keys_folder(jdir, key_path=None, in_memory=True,
ignore_prefix=('.', '_')):
""" get json keys from directory structure
e.g.
jdir
sub_dir1
data1.json
data2.json
sub_dir2
data.json
_get_keys_folder(jdir)
=> ['sub_dir1', 'sub_dir2']
_get_keys_folder(jdir,['sub_dir1'])
=> ['data1', 'data2']
NB: json files are identified with .json extension
files/directories beginning with '.' are ignored
"""
if not hasattr(jdir, 'iterdir'):
raise ValueError('jdir is not a path object; {}'.format(jdir))
key_path = [] if key_path is None else key_path
keys = []
key_found = False if key_path else True
search_key = key_path[0] if len(key_path) > 0 else None
for jsub in jdir.iterdir():
if jsub.is_file() and jsub.name[-5:] == '.json':
name, ext = os.path.splitext(jsub.name)
if name == search_key or not key_path:
key_found = True
if key_path:
return jkeys(jsub, key_path[1:], in_memory, ignore_prefix)
else:
keys.append(name)
elif (jsub.is_dir()
and not jsub.name.startswith(ignore_prefix)
and (jsub.name == search_key or not key_path)):
key_found = True
if jsub.name in keys:
raise IOError(
'directory has a sub-dir and file with same name: '
'{1} and {1}.json in {0}'.format(jdir, jsub.name))
if key_path:
return jkeys(jsub, key_path[1:], in_memory, ignore_prefix)
else:
keys.append(jsub.name)
if not key_found:
raise KeyError('key not found: {0}'.format(search_key))
return sorted(keys) | python | def _get_keys_folder(jdir, key_path=None, in_memory=True,
ignore_prefix=('.', '_')):
""" get json keys from directory structure
e.g.
jdir
sub_dir1
data1.json
data2.json
sub_dir2
data.json
_get_keys_folder(jdir)
=> ['sub_dir1', 'sub_dir2']
_get_keys_folder(jdir,['sub_dir1'])
=> ['data1', 'data2']
NB: json files are identified with .json extension
directories beginning with '.' or '_' (the ignore_prefix default) are ignored
"""
if not hasattr(jdir, 'iterdir'):
raise ValueError('jdir is not a path object; {}'.format(jdir))
key_path = [] if key_path is None else key_path
keys = []
key_found = False if key_path else True
search_key = key_path[0] if len(key_path) > 0 else None
for jsub in jdir.iterdir():
if jsub.is_file() and jsub.name[-5:] == '.json':
name, ext = os.path.splitext(jsub.name)
if name == search_key or not key_path:
key_found = True
if key_path:
return jkeys(jsub, key_path[1:], in_memory, ignore_prefix)
else:
keys.append(name)
elif (jsub.is_dir()
and not jsub.name.startswith(ignore_prefix)
and (jsub.name == search_key or not key_path)):
key_found = True
if jsub.name in keys:
raise IOError(
'directory has a sub-dir and file with same name: '
'{1} and {1}.json in {0}'.format(jdir, jsub.name))
if key_path:
return jkeys(jsub, key_path[1:], in_memory, ignore_prefix)
else:
keys.append(jsub.name)
if not key_found:
raise KeyError('key not found: {0}'.format(search_key))
return sorted(keys) | [
"def",
"_get_keys_folder",
"(",
"jdir",
",",
"key_path",
"=",
"None",
",",
"in_memory",
"=",
"True",
",",
"ignore_prefix",
"=",
"(",
"'.'",
",",
"'_'",
")",
")",
":",
"if",
"not",
"hasattr",
"(",
"jdir",
",",
"'iterdir'",
")",
":",
"raise",
"ValueError",
"(",
"'jdir is not a path object; {}'",
".",
"format",
"(",
"jdir",
")",
")",
"key_path",
"=",
"[",
"]",
"if",
"key_path",
"is",
"None",
"else",
"key_path",
"keys",
"=",
"[",
"]",
"key_found",
"=",
"False",
"if",
"key_path",
"else",
"True",
"search_key",
"=",
"key_path",
"[",
"0",
"]",
"if",
"len",
"(",
"key_path",
")",
">",
"0",
"else",
"None",
"for",
"jsub",
"in",
"jdir",
".",
"iterdir",
"(",
")",
":",
"if",
"jsub",
".",
"is_file",
"(",
")",
"and",
"jsub",
".",
"name",
"[",
"-",
"5",
":",
"]",
"==",
"'.json'",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"jsub",
".",
"name",
")",
"if",
"name",
"==",
"search_key",
"or",
"not",
"key_path",
":",
"key_found",
"=",
"True",
"if",
"key_path",
":",
"return",
"jkeys",
"(",
"jsub",
",",
"key_path",
"[",
"1",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
")",
"else",
":",
"keys",
".",
"append",
"(",
"name",
")",
"elif",
"(",
"jsub",
".",
"is_dir",
"(",
")",
"and",
"not",
"jsub",
".",
"name",
".",
"startswith",
"(",
"ignore_prefix",
")",
"and",
"(",
"jsub",
".",
"name",
"==",
"search_key",
"or",
"not",
"key_path",
")",
")",
":",
"key_found",
"=",
"True",
"if",
"jsub",
".",
"name",
"in",
"keys",
":",
"raise",
"IOError",
"(",
"'directory has a sub-dir and file with same name: '",
"'{1} and {1}.json in {0}'",
".",
"format",
"(",
"jdir",
",",
"jsub",
".",
"name",
")",
")",
"if",
"key_path",
":",
"return",
"jkeys",
"(",
"jsub",
",",
"key_path",
"[",
"1",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
")",
"else",
":",
"keys",
".",
"append",
"(",
"jsub",
".",
"name",
")",
"if",
"not",
"key_found",
":",
"raise",
"KeyError",
"(",
"'key not found: {0}'",
".",
"format",
"(",
"search_key",
")",
")",
"return",
"sorted",
"(",
"keys",
")"
] | get json keys from directory structure
e.g.
jdir
sub_dir1
data1.json
data2.json
sub_dir2
data.json
_get_keys_folder(jdir)
=> ['sub_dir1', 'sub_dir2']
_get_keys_folder(jdir,['sub_dir1'])
=> ['data1', 'data2']
NB: json files are identified with .json extension
directories beginning with '.' or '_' (the ignore_prefix default) are ignored | [
"get",
"json",
"keys",
"from",
"directory",
"structure"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L74-L134 |
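
_get_keys_folder is private, but jkeys (next) delegates directory arguments straight to it, so the traversal is easy to exercise with a throwaway directory. A self-contained sketch:

import json, pathlib, tempfile
from jsonextended.ejson import jkeys

root = pathlib.Path(tempfile.mkdtemp())
(root / 'sub_dir1').mkdir()
(root / 'sub_dir2').mkdir()
(root / 'sub_dir1' / 'data1.json').write_text(json.dumps({'a': 1}))
(root / 'sub_dir1' / 'data2.json').write_text(json.dumps({'b': 2}))
(root / 'sub_dir2' / 'data.json').write_text(json.dumps({'c': 3}))

print(jkeys(root))                # ['sub_dir1', 'sub_dir2']
print(jkeys(root, ['sub_dir1']))  # ['data1', 'data2']
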
chrisjsewell/jsonextended | jsonextended/ejson.py | jkeys | def jkeys(jfile, key_path=None, in_memory=True, ignore_prefix=('.', '_')):
""" get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units']
"""
key_path = [] if key_path is None else key_path
def eval_file(file_obj):
if not in_memory:
return _get_keys_ijson(file_obj, key_path)
else:
return _get_keys(file_obj, key_path)
if isinstance(jfile, basestring):
if not os.path.exists(jfile):
raise IOError('jfile does not exist: {}'.format(jfile))
if os.path.isdir(jfile):
jpath = pathlib.Path(jfile)
return _get_keys_folder(jpath, key_path, in_memory, ignore_prefix)
else:
with open(jfile, 'r') as file_obj:
return eval_file(file_obj)
elif hasattr(jfile, 'read'):
return eval_file(jfile)
elif hasattr(jfile, 'iterdir'):
if jfile.is_file():
with jfile.open('r') as file_obj:
return eval_file(file_obj)
else:
return _get_keys_folder(jfile, key_path, in_memory, ignore_prefix)
else:
raise ValueError(
'jfile should be a str, '
'file_like or path_like object: {}'.format(jfile)) | python | def jkeys(jfile, key_path=None, in_memory=True, ignore_prefix=('.', '_')):
""" get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units']
"""
key_path = [] if key_path is None else key_path
def eval_file(file_obj):
if not in_memory:
return _get_keys_ijson(file_obj, key_path)
else:
return _get_keys(file_obj, key_path)
if isinstance(jfile, basestring):
if not os.path.exists(jfile):
raise IOError('jfile does not exist: {}'.format(jfile))
if os.path.isdir(jfile):
jpath = pathlib.Path(jfile)
return _get_keys_folder(jpath, key_path, in_memory, ignore_prefix)
else:
with open(jfile, 'r') as file_obj:
return eval_file(file_obj)
elif hasattr(jfile, 'read'):
return eval_file(jfile)
elif hasattr(jfile, 'iterdir'):
if jfile.is_file():
with jfile.open('r') as file_obj:
return eval_file(file_obj)
else:
return _get_keys_folder(jfile, key_path, in_memory, ignore_prefix)
else:
raise ValueError(
'jfile should be a str, '
'file_like or path_like object: {}'.format(jfile)) | [
"def",
"jkeys",
"(",
"jfile",
",",
"key_path",
"=",
"None",
",",
"in_memory",
"=",
"True",
",",
"ignore_prefix",
"=",
"(",
"'.'",
",",
"'_'",
")",
")",
":",
"key_path",
"=",
"[",
"]",
"if",
"key_path",
"is",
"None",
"else",
"key_path",
"def",
"eval_file",
"(",
"file_obj",
")",
":",
"if",
"not",
"in_memory",
":",
"return",
"_get_keys_ijson",
"(",
"file_obj",
",",
"key_path",
")",
"else",
":",
"return",
"_get_keys",
"(",
"file_obj",
",",
"key_path",
")",
"if",
"isinstance",
"(",
"jfile",
",",
"basestring",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"jfile",
")",
":",
"raise",
"IOError",
"(",
"'jfile does not exist: {}'",
".",
"format",
"(",
"jfile",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"jfile",
")",
":",
"jpath",
"=",
"pathlib",
".",
"Path",
"(",
"jfile",
")",
"return",
"_get_keys_folder",
"(",
"jpath",
",",
"key_path",
",",
"in_memory",
",",
"ignore_prefix",
")",
"else",
":",
"with",
"open",
"(",
"jfile",
",",
"'r'",
")",
"as",
"file_obj",
":",
"return",
"eval_file",
"(",
"file_obj",
")",
"elif",
"hasattr",
"(",
"jfile",
",",
"'read'",
")",
":",
"return",
"eval_file",
"(",
"jfile",
")",
"elif",
"hasattr",
"(",
"jfile",
",",
"'iterdir'",
")",
":",
"if",
"jfile",
".",
"is_file",
"(",
")",
":",
"with",
"jfile",
".",
"open",
"(",
"'r'",
")",
"as",
"file_obj",
":",
"return",
"eval_file",
"(",
"file_obj",
")",
"else",
":",
"return",
"_get_keys_folder",
"(",
"jfile",
",",
"key_path",
",",
"in_memory",
",",
"ignore_prefix",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'jfile should be a str, '",
"'file_like or path_like object: {}'",
".",
"format",
"(",
"jfile",
")",
")"
] | get keys for initial json level, or at level after following key_path
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before returning keys
in_memory : bool
if true reads json into memory before finding keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e","f":"g"}
... }
... ''')
...
>>> jkeys(file_obj)
['a', 'b', 'c']
>>> jkeys(file_obj,["c"])
['d', 'f']
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jkeys(path)
['dir1', 'dir2', 'dir3']
>>> path = get_test_path()
>>> jkeys(path, ['dir1','file1'], in_memory=True)
['initial', 'meta', 'optimised', 'units'] | [
"get",
"keys",
"for",
"initial",
"json",
"level",
"or",
"at",
"level",
"after",
"following",
"key_path"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L137-L211 |
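
For files too large to parse eagerly, in_memory=False streams the document instead of json.load-ing it. A sketch, assuming the optional ijson dependency is installed (_get_keys_ijson is not reproduced above, but presumably mirrors the ijson use and fallback seen in _file_with_keys below):

import json, os, tempfile
from jsonextended.ejson import jkeys

path = os.path.join(tempfile.mkdtemp(), 'big.json')
with open(path, 'w') as f:
    json.dump({'results': {'run1': [1, 2], 'run2': [3, 4]}}, f)

print(jkeys(path, ['results'], in_memory=False))  # ['run1', 'run2']
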
chrisjsewell/jsonextended | jsonextended/ejson.py | _file_with_keys | def _file_with_keys(file_obj, key_path=None, parse_decimal=False):
"""read json with keys
Parameters
----------
file_obj : object
object with read method
key_path : list[str]
key to index before parsing
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Notes
-----
ijson outputs decimals as Decimal class (for arbitrary precision)
"""
key_path = [] if key_path is None else key_path
try:
objs = ijson.items(file_obj, '.'.join(key_path))
except NameError:
warnings.warn('ijson package not found in environment, \
please install for on-disk key indexing', ImportWarning)
data = json.load(
file_obj, parse_float=Decimal if parse_decimal else float,
object_hook=decode)
return indexes(data, key_path)
try:
data = next(objs) # .next()
except StopIteration:
raise KeyError('key path not available in json: {}'.format(key_path))
# by default ijson parses Decimal values
if not parse_decimal:
convert_type(data, Decimal, float, in_place=True)
datastr = json.dumps(data)
data = json.loads(datastr, object_hook=decode)
return data | python | def _file_with_keys(file_obj, key_path=None, parse_decimal=False):
"""read json with keys
Parameters
----------
file_obj : object
object with read method
key_path : list[str]
key to index before parsing
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Notes
-----
ijson outputs decimals as Decimal class (for arbitrary precision)
"""
key_path = [] if key_path is None else key_path
try:
objs = ijson.items(file_obj, '.'.join(key_path))
except NameError:
warnings.warn('ijson package not found in environment, \
please install for on-disk key indexing', ImportWarning)
data = json.load(
file_obj, parse_float=Decimal if parse_decimal else float,
object_hook=decode)
return indexes(data, key_path)
try:
data = next(objs) # .next()
except StopIteration:
raise KeyError('key path not available in json: {}'.format(key_path))
# by default ijson parses Decimal values
if not parse_decimal:
convert_type(data, Decimal, float, in_place=True)
datastr = json.dumps(data)
data = json.loads(datastr, object_hook=decode)
return data | [
"def",
"_file_with_keys",
"(",
"file_obj",
",",
"key_path",
"=",
"None",
",",
"parse_decimal",
"=",
"False",
")",
":",
"key_path",
"=",
"[",
"]",
"if",
"key_path",
"is",
"None",
"else",
"key_path",
"try",
":",
"objs",
"=",
"ijson",
".",
"items",
"(",
"file_obj",
",",
"'.'",
".",
"join",
"(",
"key_path",
")",
")",
"except",
"NameError",
":",
"warnings",
".",
"warn",
"(",
"'ijson package not found in environment, \\\n please install for on-disk key indexing'",
",",
"ImportWarning",
")",
"data",
"=",
"json",
".",
"load",
"(",
"file_obj",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
",",
"object_hook",
"=",
"decode",
")",
"return",
"indexes",
"(",
"data",
",",
"key_path",
")",
"try",
":",
"data",
"=",
"next",
"(",
"objs",
")",
"# .next()",
"except",
"StopIteration",
":",
"raise",
"KeyError",
"(",
"'key path not available in json: {}'",
".",
"format",
"(",
"key_path",
")",
")",
"# by default ijson parses Decimal values",
"if",
"not",
"parse_decimal",
":",
"convert_type",
"(",
"data",
",",
"Decimal",
",",
"float",
",",
"in_place",
"=",
"True",
")",
"datastr",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"datastr",
",",
"object_hook",
"=",
"decode",
")",
"return",
"data"
] | read json with keys
Parameters
----------
file_obj : object
object with read method
key_path : list[str]
key to index befor parsing
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Notes
-----
ijson outputs decimals as Decimal class (for arbitrary precision) | [
"read",
"json",
"with",
"keys"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L214-L254 |
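
The Decimal values that _file_with_keys converts away come from ijson itself, which parses every JSON number as decimal.Decimal to preserve exact precision. A minimal illustration (requires ijson; bytes input is used since recent ijson versions expect it):

import io
import ijson

objs = ijson.items(io.BytesIO(b'{"c": {"d": 1.1}}'), 'c')
print(next(objs))  # {'d': Decimal('1.1')}
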
chrisjsewell/jsonextended | jsonextended/ejson.py | _folder_to_json | def _folder_to_json(jdir, key_path=None, in_memory=True,
ignore_prefix=('.', '_'), dic={}, parse_decimal=False):
""" read in folder structure as json
e.g.
jdir
sub_dir1
data.json
sub_dir2
data.json
_folder_to_json(jdir)
=> {'sub_dir1':{'data':{...}},
'sub_dir2':{'data':{...}}}
NB: json files are identified with .json extension
"""
key_path = [] if key_path is None else key_path
if not hasattr(jdir, 'iterdir'):
raise ValueError('jdir is not a path object; {}'.format(jdir))
key_found = False if key_path else True
search_key = key_path[0] if len(key_path) > 0 else None
for jsub in jdir.iterdir():
if jsub.is_file() and jsub.name.endswith('.json'):
name, ext = os.path.splitext(jsub.name)
if name == search_key or not key_path:
key_found = True
if key_path:
data = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
if isinstance(data, dict):
dic.update(data)
else:
dic.update({_Terminus(): data})
else:
dic[name] = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
elif (jsub.is_dir()
and not jsub.name.startswith(ignore_prefix)
and (jsub.name == search_key or not key_path)):
key_found = True
if jsub.name in dic.keys():
raise IOError(
'directory has a sub-dir and file with same name: '
'{1} and {1}.json in {0}'.format(jdir, jsub.name))
if key_path:
sub_d = dic
else:
dic[jsub.name] = {}
sub_d = dic[jsub.name]
_folder_to_json(jsub, key_path[1:], in_memory, ignore_prefix,
sub_d, parse_decimal)
if not key_found:
raise KeyError('key not found: {0}'.format(search_key)) | python | def _folder_to_json(jdir, key_path=None, in_memory=True,
ignore_prefix=('.', '_'), dic={}, parse_decimal=False):
""" read in folder structure as json
e.g.
jdir
sub_dir1
data.json
sub_dir2
data.json
_folder_to_json(jdir)
=> {'sub_dir1':{'data':{...}},
'sub_dir2':{'data':{...}}}
NB: json files are identified with .json extension
"""
key_path = [] if key_path is None else key_path
if not hasattr(jdir, 'iterdir'):
raise ValueError('jdir is not a path object; {}'.format(jdir))
key_found = False if key_path else True
search_key = key_path[0] if len(key_path) > 0 else None
for jsub in jdir.iterdir():
if jsub.is_file() and jsub.name.endswith('.json'):
name, ext = os.path.splitext(jsub.name)
if name == search_key or not key_path:
key_found = True
if key_path:
data = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
if isinstance(data, dict):
dic.update(data)
else:
dic.update({_Terminus(): data})
else:
dic[name] = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
elif (jsub.is_dir()
and not jsub.name.startswith(ignore_prefix)
and (jsub.name == search_key or not key_path)):
key_found = True
if jsub.name in dic.keys():
raise IOError(
'directory has a sub-dir and file with same name: '
'{1} and {1}.json in {0}'.format(jdir, jsub.name))
if key_path:
sub_d = dic
else:
dic[jsub.name] = {}
sub_d = dic[jsub.name]
_folder_to_json(jsub, key_path[1:], in_memory, ignore_prefix,
sub_d, parse_decimal)
if not key_found:
raise KeyError('key not found: {0}'.format(search_key)) | [
"def",
"_folder_to_json",
"(",
"jdir",
",",
"key_path",
"=",
"None",
",",
"in_memory",
"=",
"True",
",",
"ignore_prefix",
"=",
"(",
"'.'",
",",
"'_'",
")",
",",
"dic",
"=",
"{",
"}",
",",
"parse_decimal",
"=",
"False",
")",
":",
"key_path",
"=",
"[",
"]",
"if",
"key_path",
"is",
"None",
"else",
"key_path",
"if",
"not",
"hasattr",
"(",
"jdir",
",",
"'iterdir'",
")",
":",
"raise",
"ValueError",
"(",
"'jdir is not a path object; {}'",
".",
"format",
"(",
"jdir",
")",
")",
"key_found",
"=",
"False",
"if",
"key_path",
"else",
"True",
"search_key",
"=",
"key_path",
"[",
"0",
"]",
"if",
"len",
"(",
"key_path",
")",
">",
"0",
"else",
"None",
"for",
"jsub",
"in",
"jdir",
".",
"iterdir",
"(",
")",
":",
"if",
"jsub",
".",
"is_file",
"(",
")",
"and",
"jsub",
".",
"name",
".",
"endswith",
"(",
"'.json'",
")",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"jsub",
".",
"name",
")",
"if",
"name",
"==",
"search_key",
"or",
"not",
"key_path",
":",
"key_found",
"=",
"True",
"if",
"key_path",
":",
"data",
"=",
"to_dict",
"(",
"jsub",
",",
"key_path",
"[",
"1",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
",",
"parse_decimal",
")",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"dic",
".",
"update",
"(",
"data",
")",
"else",
":",
"dic",
".",
"update",
"(",
"{",
"_Terminus",
"(",
")",
":",
"data",
"}",
")",
"else",
":",
"dic",
"[",
"name",
"]",
"=",
"to_dict",
"(",
"jsub",
",",
"key_path",
"[",
"1",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
",",
"parse_decimal",
")",
"elif",
"(",
"jsub",
".",
"is_dir",
"(",
")",
"and",
"not",
"jsub",
".",
"name",
".",
"startswith",
"(",
"ignore_prefix",
")",
"and",
"(",
"jsub",
".",
"name",
"==",
"search_key",
"or",
"not",
"key_path",
")",
")",
":",
"key_found",
"=",
"True",
"if",
"jsub",
".",
"name",
"in",
"dic",
".",
"keys",
"(",
")",
":",
"raise",
"IOError",
"(",
"'directory has a sub-dir and file with same name: '",
"'{1} and {1}.json in {0}'",
".",
"format",
"(",
"jdir",
",",
"jsub",
".",
"name",
")",
")",
"if",
"key_path",
":",
"sub_d",
"=",
"dic",
"else",
":",
"dic",
"[",
"jsub",
".",
"name",
"]",
"=",
"{",
"}",
"sub_d",
"=",
"dic",
"[",
"jsub",
".",
"name",
"]",
"_folder_to_json",
"(",
"jsub",
",",
"key_path",
"[",
"1",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
",",
"sub_d",
",",
"parse_decimal",
")",
"if",
"not",
"key_found",
":",
"raise",
"KeyError",
"(",
"'key not found: {0}'",
".",
"format",
"(",
"search_key",
")",
")"
] | read in folder structure as json
e.g.
jdir
sub_dir1
data.json
sub_dir2
data.json
_folder_to_json(jdir)
=> {'sub_dir1':{'data':{...}},
'sub_dir2':{'data':{...}}}
NB: json files are identified with .json extension | [
"read",
"in",
"folder",
"structure",
"as",
"json"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L267-L329 |
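
When key_path indexes all the way down to a scalar inside a folder tree, _folder_to_json parks the value under a _Terminus sentinel key so that the caller can unwrap it afterwards; to_dict (next) does exactly that. A sketch of the end-to-end behaviour:

import json, pathlib, tempfile
from jsonextended.ejson import to_dict

root = pathlib.Path(tempfile.mkdtemp())
(root / 'file1.json').write_text(json.dumps({'x': {'y': 5}}))

print(to_dict(root, ['file1', 'x', 'y']))  # 5 -- unwrapped from the _Terminus entry
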
chrisjsewell/jsonextended | jsonextended/ejson.py | to_dict | def to_dict(jfile, key_path=None, in_memory=True,
ignore_prefix=('.', '_'), parse_decimal=False):
""" input json to dict
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before parsing it
in_memory : bool
if true reads full json into memory before filtering keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Examples
--------
>>> from pprint import pformat
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e"}
... }
... ''')
...
>>> dstr = pformat(to_dict(file_obj))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [1.1, 2.1], 'c': {'d': 'e'}}
>>> dstr = pformat(to_dict(file_obj,parse_decimal=True))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [Decimal('1.1'), Decimal('2.1')], 'c': {'d': 'e'}}
>>> str(to_dict(file_obj,["c","d"]))
'e'
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jdict1 = to_dict(path)
>>> pprint(jdict1,depth=2)
dir1:
dir1_1: {...}
file1: {...}
file2: {...}
dir2:
file1: {...}
dir3:
>>> jdict2 = to_dict(path,['dir1','file1','initial'],in_memory=False)
>>> pprint(jdict2,depth=1)
crystallographic: {...}
primitive: {...}
"""
key_path = [] if key_path is None else key_path
if isinstance(jfile, basestring):
if not os.path.exists(jfile):
raise IOError('jfile does not exist: {}'.format(jfile))
if os.path.isdir(jfile):
data = {}
jpath = pathlib.Path(jfile)
_folder_to_json(jpath, key_path[:], in_memory, ignore_prefix,
data, parse_decimal)
if isinstance(list(data.keys())[0], _Terminus):
data = list(data.values())[0]
else:
with open(jfile, 'r') as file_obj:
if key_path and not in_memory:
data = _file_with_keys(file_obj, key_path, parse_decimal)
elif key_path:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
elif hasattr(jfile, 'read'):
if key_path and not in_memory:
data = _file_with_keys(jfile, key_path, parse_decimal)
elif key_path:
data = json.load(
jfile, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
jfile, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
elif hasattr(jfile, 'iterdir'):
if jfile.is_file():
with jfile.open() as file_obj:
if key_path and not in_memory:
data = _file_with_keys(file_obj, key_path, parse_decimal)
elif key_path:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
else:
data = {}
_folder_to_json(jfile, key_path[:], in_memory, ignore_prefix,
data, parse_decimal)
if isinstance(list(data.keys())[0], _Terminus):
data = list(data.values())[0]
else:
raise ValueError(
'jfile should be a str, '
'file_like or path_like object: {}'.format(jfile))
return data | python | def to_dict(jfile, key_path=None, in_memory=True,
ignore_prefix=('.', '_'), parse_decimal=False):
""" input json to dict
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before parsing it
in_memory : bool
if true reads full json into memory before filtering keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Examples
--------
>>> from pprint import pformat
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e"}
... }
... ''')
...
>>> dstr = pformat(to_dict(file_obj))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [1.1, 2.1], 'c': {'d': 'e'}}
>>> dstr = pformat(to_dict(file_obj,parse_decimal=True))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [Decimal('1.1'), Decimal('2.1')], 'c': {'d': 'e'}}
>>> str(to_dict(file_obj,["c","d"]))
'e'
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jdict1 = to_dict(path)
>>> pprint(jdict1,depth=2)
dir1:
dir1_1: {...}
file1: {...}
file2: {...}
dir2:
file1: {...}
dir3:
>>> jdict2 = to_dict(path,['dir1','file1','initial'],in_memory=False)
>>> pprint(jdict2,depth=1)
crystallographic: {...}
primitive: {...}
"""
key_path = [] if key_path is None else key_path
if isinstance(jfile, basestring):
if not os.path.exists(jfile):
raise IOError('jfile does not exist: {}'.format(jfile))
if os.path.isdir(jfile):
data = {}
jpath = pathlib.Path(jfile)
_folder_to_json(jpath, key_path[:], in_memory, ignore_prefix,
data, parse_decimal)
if isinstance(list(data.keys())[0], _Terminus):
data = list(data.values())[0]
else:
with open(jfile, 'r') as file_obj:
if key_path and not in_memory:
data = _file_with_keys(file_obj, key_path, parse_decimal)
elif key_path:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
elif hasattr(jfile, 'read'):
if key_path and not in_memory:
data = _file_with_keys(jfile, key_path, parse_decimal)
elif key_path:
data = json.load(
jfile, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
jfile, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
elif hasattr(jfile, 'iterdir'):
if jfile.is_file():
with jfile.open() as file_obj:
if key_path and not in_memory:
data = _file_with_keys(file_obj, key_path, parse_decimal)
elif key_path:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
data = indexes(data, key_path)
else:
data = json.load(
file_obj, object_hook=decode,
parse_float=Decimal if parse_decimal else float)
else:
data = {}
_folder_to_json(jfile, key_path[:], in_memory, ignore_prefix,
data, parse_decimal)
if isinstance(list(data.keys())[0], _Terminus):
data = list(data.values())[0]
else:
raise ValueError(
'jfile should be a str, '
'file_like or path_like object: {}'.format(jfile))
return data | [
"def",
"to_dict",
"(",
"jfile",
",",
"key_path",
"=",
"None",
",",
"in_memory",
"=",
"True",
",",
"ignore_prefix",
"=",
"(",
"'.'",
",",
"'_'",
")",
",",
"parse_decimal",
"=",
"False",
")",
":",
"key_path",
"=",
"[",
"]",
"if",
"key_path",
"is",
"None",
"else",
"key_path",
"if",
"isinstance",
"(",
"jfile",
",",
"basestring",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"jfile",
")",
":",
"raise",
"IOError",
"(",
"'jfile does not exist: {}'",
".",
"format",
"(",
"jfile",
")",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"jfile",
")",
":",
"data",
"=",
"{",
"}",
"jpath",
"=",
"pathlib",
".",
"Path",
"(",
"jfile",
")",
"_folder_to_json",
"(",
"jpath",
",",
"key_path",
"[",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
",",
"data",
",",
"parse_decimal",
")",
"if",
"isinstance",
"(",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
",",
"_Terminus",
")",
":",
"data",
"=",
"data",
".",
"values",
"(",
")",
"[",
"0",
"]",
"else",
":",
"with",
"open",
"(",
"jfile",
",",
"'r'",
")",
"as",
"file_obj",
":",
"if",
"key_path",
"and",
"not",
"in_memory",
":",
"data",
"=",
"_file_with_keys",
"(",
"file_obj",
",",
"key_path",
",",
"parse_decimal",
")",
"elif",
"key_path",
":",
"data",
"=",
"json",
".",
"load",
"(",
"file_obj",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"data",
"=",
"indexes",
"(",
"data",
",",
"key_path",
")",
"else",
":",
"data",
"=",
"json",
".",
"load",
"(",
"file_obj",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"elif",
"hasattr",
"(",
"jfile",
",",
"'read'",
")",
":",
"if",
"key_path",
"and",
"not",
"in_memory",
":",
"data",
"=",
"_file_with_keys",
"(",
"jfile",
",",
"key_path",
",",
"parse_decimal",
")",
"elif",
"key_path",
":",
"data",
"=",
"json",
".",
"load",
"(",
"jfile",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"data",
"=",
"indexes",
"(",
"data",
",",
"key_path",
")",
"else",
":",
"data",
"=",
"json",
".",
"load",
"(",
"jfile",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"elif",
"hasattr",
"(",
"jfile",
",",
"'iterdir'",
")",
":",
"if",
"jfile",
".",
"is_file",
"(",
")",
":",
"with",
"jfile",
".",
"open",
"(",
")",
"as",
"file_obj",
":",
"if",
"key_path",
"and",
"not",
"in_memory",
":",
"data",
"=",
"_file_with_keys",
"(",
"file_obj",
",",
"key_path",
",",
"parse_decimal",
")",
"elif",
"key_path",
":",
"data",
"=",
"json",
".",
"load",
"(",
"file_obj",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"data",
"=",
"indexes",
"(",
"data",
",",
"key_path",
")",
"else",
":",
"data",
"=",
"json",
".",
"load",
"(",
"file_obj",
",",
"object_hook",
"=",
"decode",
",",
"parse_float",
"=",
"Decimal",
"if",
"parse_decimal",
"else",
"float",
")",
"else",
":",
"data",
"=",
"{",
"}",
"_folder_to_json",
"(",
"jfile",
",",
"key_path",
"[",
":",
"]",
",",
"in_memory",
",",
"ignore_prefix",
",",
"data",
",",
"parse_decimal",
")",
"if",
"isinstance",
"(",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
",",
"_Terminus",
")",
":",
"data",
"=",
"data",
".",
"values",
"(",
")",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'jfile should be a str, '",
"'file_like or path_like object: {}'",
".",
"format",
"(",
"jfile",
")",
")",
"return",
"data"
] | input json to dict
Parameters
----------
jfile : str, file_like or path_like
if str, must be existing file or folder,
if file_like, must have 'read' method
if path_like, must have 'iterdir' method (see pathlib.Path)
key_path : list[str]
a list of keys to index into the json before parsing it
in_memory : bool
if true reads full json into memory before filtering keys
(this is faster but uses more memory)
ignore_prefix : list[str]
ignore folders beginning with these prefixes
parse_decimal : bool
whether to parse numbers as Decimal instances (retains exact precision)
Examples
--------
>>> from pprint import pformat
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,
... content='''
... {
... "a": 1,
... "b": [1.1,2.1],
... "c": {"d":"e"}
... }
... ''')
...
>>> dstr = pformat(to_dict(file_obj))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [1.1, 2.1], 'c': {'d': 'e'}}
>>> dstr = pformat(to_dict(file_obj,parse_decimal=True))
>>> print(dstr.replace("u'","'"))
{'a': 1, 'b': [Decimal('1.1'), Decimal('2.1')], 'c': {'d': 'e'}}
>>> str(to_dict(file_obj,["c","d"]))
'e'
>>> from jsonextended.utils import get_test_path
>>> path = get_test_path()
>>> jdict1 = to_dict(path)
>>> pprint(jdict1,depth=2)
dir1:
dir1_1: {...}
file1: {...}
file2: {...}
dir2:
file1: {...}
dir3:
>>> jdict2 = to_dict(path,['dir1','file1','initial'],in_memory=False)
>>> pprint(jdict2,depth=1)
crystallographic: {...}
primitive: {...} | [
"input",
"json",
"to",
"dict"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/ejson.py#L332-L459 |
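
One behaviour the doctests above do not show: during folder traversal, directories whose names start with an ignore_prefix entry are skipped entirely. A sketch:

import pathlib, tempfile
from jsonextended.ejson import to_dict

root = pathlib.Path(tempfile.mkdtemp())
(root / '_hidden').mkdir()
(root / '_hidden' / 'secret.json').write_text('{"s": 1}')
(root / 'visible').mkdir()
(root / 'visible' / 'data.json').write_text('{"v": 2}')

print(to_dict(root))  # {'visible': {'data': {'v': 2}}} -- '_hidden' is skipped
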
portfoliome/foil | foil/iteration.py | chunks | def chunks(items, chunksize):
"""Turn generator sequence into sequence of chunks."""
items = iter(items)
for first in items:
chunk = chain((first,), islice(items, chunksize - 1))
yield chunk
deque(chunk, 0) | python | def chunks(items, chunksize):
"""Turn generator sequence into sequence of chunks."""
items = iter(items)
for first in items:
chunk = chain((first,), islice(items, chunksize - 1))
yield chunk
deque(chunk, 0) | [
"def",
"chunks",
"(",
"items",
",",
"chunksize",
")",
":",
"items",
"=",
"iter",
"(",
"items",
")",
"for",
"first",
"in",
"items",
":",
"chunk",
"=",
"chain",
"(",
"(",
"first",
",",
")",
",",
"islice",
"(",
"items",
",",
"chunksize",
"-",
"1",
")",
")",
"yield",
"chunk",
"deque",
"(",
"chunk",
",",
"0",
")"
] | Turn generator sequence into sequence of chunks. | [
"Turn",
"generator",
"sequence",
"into",
"sequence",
"of",
"chunks",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/iteration.py#L5-L12 |
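
chunks() is fully lazy: each yielded chunk is itself an iterator over at most chunksize items, and the trailing deque(chunk, 0) drains whatever the consumer left unread so the next chunk starts at the right element. Usage:

from foil.iteration import chunks

for chunk in chunks(range(7), 3):
    print(list(chunk))
# [0, 1, 2]
# [3, 4, 5]
# [6]

Because every chunk shares the one underlying iterator, chunks must be consumed in order; materialising them first (e.g. list(chunks(...))) leaves each chunk already drained.
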
PolyJIT/benchbuild | benchbuild/utils/unionfs.py | unionfs | def unionfs(rw='rw', ro=None, union='union'):
"""
Decorator for the UnionFS feature.
This configures a unionfs for projects. The given base_dir and/or image_dir
are layered as follows:
image_dir=RW:base_dir=RO
All writes go to the image_dir, while base_dir delivers the (read-only)
versions of the rest of the filesystem.
The unified version will be provided in the project's builddir. Unmounting
is done as soon as the function completes.
Args:
rw: writeable storage area for the unified fuse filesystem.
ro: read-only storage area for the unified fuse filesystem.
union: mountpoint of the unified fuse filesystem.
"""
from functools import wraps
def wrap_in_union_fs(func):
"""
Function that wraps a given function inside the file system.
Args:
func: The function that needs to be wrapped inside the union fs.
Return:
The file system with the function wrapped inside.
"""
@wraps(func)
def wrap_in_union_fs_func(project, *args, **kwargs):
"""
Wrap the func in the UnionFS mount stack.
We make sure that the mount points all exist and stack up the
directories for the unionfs. All directories outside of the default
build environment are tracked for deletion.
"""
container = project.container
if container is None or in_container():
return func(project, *args, **kwargs)
build_dir = local.path(project.builddir)
LOG.debug("UnionFS - Project builddir: %s", project.builddir)
if __unionfs_is_active(root=build_dir):
LOG.debug(
"UnionFS already active in %s, nesting not supported.",
build_dir)
return func(project, *args, **kwargs)
ro_dir = local.path(container.local)
rw_dir = build_dir / rw
un_dir = build_dir / union
LOG.debug("UnionFS - RW: %s", rw_dir)
unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)
project_builddir_bak = project.builddir
project.builddir = un_dir
proc = unionfs_cmd.popen()
while (not __unionfs_is_active(root=un_dir)) and \
(proc.poll() is None):
pass
ret = None
if proc.poll() is None:
try:
with local.cwd(un_dir):
ret = func(project, *args, **kwargs)
finally:
project.builddir = project_builddir_bak
from signal import SIGINT
is_running = proc.poll() is None
while __unionfs_is_active(root=un_dir) and is_running:
try:
proc.send_signal(SIGINT)
proc.wait(timeout=3)
except subprocess.TimeoutExpired:
proc.kill()
is_running = False
LOG.debug("Unionfs shut down.")
if __unionfs_is_active(root=un_dir):
raise UnmountError()
return ret
return wrap_in_union_fs_func
return wrap_in_union_fs | python | def unionfs(rw='rw', ro=None, union='union'):
"""
Decorator for the UnionFS feature.
This configures a unionfs for projects. The given base_dir and/or image_dir
are layered as follows:
image_dir=RW:base_dir=RO
All writes go to the image_dir, while base_dir delivers the (read-only)
versions of the rest of the filesystem.
The unified version will be provided in the project's builddir. Unmounting
is done as soon as the function completes.
Args:
rw: writeable storage area for the unified fuse filesystem.
ro: read-only storage area for the unified fuse filesystem.
union: mountpoint of the unified fuse filesystem.
"""
from functools import wraps
def wrap_in_union_fs(func):
"""
Function that wraps a given function inside the file system.
Args:
func: The function that needs to be wrapped inside the union fs.
Return:
The file system with the function wrapped inside.
"""
@wraps(func)
def wrap_in_union_fs_func(project, *args, **kwargs):
"""
Wrap the func in the UnionFS mount stack.
We make sure that the mount points all exist and stack up the
directories for the unionfs. All directories outside of the default
build environment are tracked for deletion.
"""
container = project.container
if container is None or in_container():
return func(project, *args, **kwargs)
build_dir = local.path(project.builddir)
LOG.debug("UnionFS - Project builddir: %s", project.builddir)
if __unionfs_is_active(root=build_dir):
LOG.debug(
"UnionFS already active in %s, nesting not supported.",
build_dir)
return func(project, *args, **kwargs)
ro_dir = local.path(container.local)
rw_dir = build_dir / rw
un_dir = build_dir / union
LOG.debug("UnionFS - RW: %s", rw_dir)
unionfs_cmd = __unionfs_set_up(ro_dir, rw_dir, un_dir)
project_builddir_bak = project.builddir
project.builddir = un_dir
proc = unionfs_cmd.popen()
while (not __unionfs_is_active(root=un_dir)) and \
(proc.poll() is None):
pass
ret = None
if proc.poll() is None:
try:
with local.cwd(un_dir):
ret = func(project, *args, **kwargs)
finally:
project.builddir = project_builddir_bak
from signal import SIGINT
is_running = proc.poll() is None
while __unionfs_is_active(root=un_dir) and is_running:
try:
proc.send_signal(SIGINT)
proc.wait(timeout=3)
except subprocess.TimeoutExpired:
proc.kill()
is_running = False
LOG.debug("Unionfs shut down.")
if __unionfs_is_active(root=un_dir):
raise UnmountError()
return ret
return wrap_in_union_fs_func
return wrap_in_union_fs | [
"def",
"unionfs",
"(",
"rw",
"=",
"'rw'",
",",
"ro",
"=",
"None",
",",
"union",
"=",
"'union'",
")",
":",
"from",
"functools",
"import",
"wraps",
"def",
"wrap_in_union_fs",
"(",
"func",
")",
":",
"\"\"\"\n Function that wraps a given function inside the file system.\n\n Args:\n func: The function that needs to be wrapped inside the unions fs.\n Return:\n The file system with the function wrapped inside.\n \"\"\"",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrap_in_union_fs_func",
"(",
"project",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Wrap the func in the UnionFS mount stack.\n\n We make sure that the mount points all exist and stack up the\n directories for the unionfs. All directories outside of the default\n build environment are tracked for deletion.\n \"\"\"",
"container",
"=",
"project",
".",
"container",
"if",
"container",
"is",
"None",
"or",
"in_container",
"(",
")",
":",
"return",
"func",
"(",
"project",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"build_dir",
"=",
"local",
".",
"path",
"(",
"project",
".",
"builddir",
")",
"LOG",
".",
"debug",
"(",
"\"UnionFS - Project builddir: %s\"",
",",
"project",
".",
"builddir",
")",
"if",
"__unionfs_is_active",
"(",
"root",
"=",
"build_dir",
")",
":",
"LOG",
".",
"debug",
"(",
"\"UnionFS already active in %s, nesting not supported.\"",
",",
"build_dir",
")",
"return",
"func",
"(",
"project",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"ro_dir",
"=",
"local",
".",
"path",
"(",
"container",
".",
"local",
")",
"rw_dir",
"=",
"build_dir",
"/",
"rw",
"un_dir",
"=",
"build_dir",
"/",
"union",
"LOG",
".",
"debug",
"(",
"\"UnionFS - RW: %s\"",
",",
"rw_dir",
")",
"unionfs_cmd",
"=",
"__unionfs_set_up",
"(",
"ro_dir",
",",
"rw_dir",
",",
"un_dir",
")",
"project_builddir_bak",
"=",
"project",
".",
"builddir",
"project",
".",
"builddir",
"=",
"un_dir",
"proc",
"=",
"unionfs_cmd",
".",
"popen",
"(",
")",
"while",
"(",
"not",
"__unionfs_is_active",
"(",
"root",
"=",
"un_dir",
")",
")",
"and",
"(",
"proc",
".",
"poll",
"(",
")",
"is",
"None",
")",
":",
"pass",
"ret",
"=",
"None",
"if",
"proc",
".",
"poll",
"(",
")",
"is",
"None",
":",
"try",
":",
"with",
"local",
".",
"cwd",
"(",
"un_dir",
")",
":",
"ret",
"=",
"func",
"(",
"project",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"project",
".",
"builddir",
"=",
"project_builddir_bak",
"from",
"signal",
"import",
"SIGINT",
"is_running",
"=",
"proc",
".",
"poll",
"(",
")",
"is",
"None",
"while",
"__unionfs_is_active",
"(",
"root",
"=",
"un_dir",
")",
"and",
"is_running",
":",
"try",
":",
"proc",
".",
"send_signal",
"(",
"SIGINT",
")",
"proc",
".",
"wait",
"(",
"timeout",
"=",
"3",
")",
"except",
"subprocess",
".",
"TimeoutExpired",
":",
"proc",
".",
"kill",
"(",
")",
"is_running",
"=",
"False",
"LOG",
".",
"debug",
"(",
"\"Unionfs shut down.\"",
")",
"if",
"__unionfs_is_active",
"(",
"root",
"=",
"un_dir",
")",
":",
"raise",
"UnmountError",
"(",
")",
"return",
"ret",
"return",
"wrap_in_union_fs_func",
"return",
"wrap_in_union_fs"
] | Decorator for the UnionFS feature.
This configures a unionfs for projects. The given base_dir and/or image_dir
are layered as follows:
image_dir=RW:base_dir=RO
All writes go to the image_dir, while base_dir delivers the (read-only)
versions of the rest of the filesystem.
The unified version will be provided in the project's builddir. Unmouting
is done as soon as the function completes.
Args:
rw: writeable storage area for the unified fuse filesystem.
ro: read-only storage area for the unified fuse filesystem.
union: mountpoint of the unified fuse filesystem. | [
"Decorator",
"for",
"the",
"UnionFS",
"feature",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/unionfs.py#L14-L105 |
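
A sketch of how the decorator is applied. In benchbuild it wraps project steps; the class below is hypothetical, carrying only the attributes the wrapper actually reads (builddir, container and its .local), so treat the shape as an assumption:

from benchbuild.utils.unionfs import unionfs

class MyProject:
    builddir = '/tmp/bb/build'
    container = None  # container=None (or running inside one) bypasses the mount

    @unionfs(rw='rw', union='union')
    def compile(self, *args):
        # with a container configured, this body runs inside <builddir>/union
        pass

MyProject().compile()  # falls through to the plain call here
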
PolyJIT/benchbuild | benchbuild/utils/unionfs.py | __update_cleanup_paths | def __update_cleanup_paths(new_path):
"""
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
"""
cleanup_dirs = settings.CFG["cleanup_paths"].value
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
settings.CFG["cleanup_paths"] = cleanup_dirs | python | def __update_cleanup_paths(new_path):
"""
Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that need to be cleaned up.
"""
cleanup_dirs = settings.CFG["cleanup_paths"].value
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
settings.CFG["cleanup_paths"] = cleanup_dirs | [
"def",
"__update_cleanup_paths",
"(",
"new_path",
")",
":",
"cleanup_dirs",
"=",
"settings",
".",
"CFG",
"[",
"\"cleanup_paths\"",
"]",
".",
"value",
"cleanup_dirs",
"=",
"set",
"(",
"cleanup_dirs",
")",
"cleanup_dirs",
".",
"add",
"(",
"new_path",
")",
"cleanup_dirs",
"=",
"list",
"(",
"cleanup_dirs",
")",
"settings",
".",
"CFG",
"[",
"\"cleanup_paths\"",
"]",
"=",
"cleanup_dirs"
] | Add the new path to the list of paths to clean up afterwards.
Args:
new_path: Path to the directory that needs to be cleaned up. | [
"Add",
"the",
"new",
"path",
"to",
"the",
"list",
"of",
"paths",
"to",
"clean",
"up",
"afterwards",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/unionfs.py#L108-L119 |
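
The helper keeps the path list unique by round-tripping through a set; a self-contained sketch of that pattern with invented paths (note that the set conversion does not preserve order):

cleanup_dirs = ["/tmp/bb-run-1", "/tmp/bb-run-2", "/tmp/bb-run-1"]
new_path = "/tmp/bb-run-3"

# Same dedupe pattern as above: set() drops the duplicate before the
# new path is added and the collection is converted back to a list.
cleanup_dirs = set(cleanup_dirs)
cleanup_dirs.add(new_path)
cleanup_dirs = list(cleanup_dirs)
print(sorted(cleanup_dirs))
# ['/tmp/bb-run-1', '/tmp/bb-run-2', '/tmp/bb-run-3']
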
PolyJIT/benchbuild | benchbuild/utils/unionfs.py | __is_outside_of_builddir | def __is_outside_of_builddir(project, path_to_check):
"""Check if a project lies outside of its expected directory."""
bdir = project.builddir
cprefix = os.path.commonprefix([path_to_check, bdir])
return cprefix != bdir | python | def __is_outside_of_builddir(project, path_to_check):
"""Check if a project lies outside of its expected directory."""
bdir = project.builddir
cprefix = os.path.commonprefix([path_to_check, bdir])
return cprefix != bdir | [
"def",
"__is_outside_of_builddir",
"(",
"project",
",",
"path_to_check",
")",
":",
"bdir",
"=",
"project",
".",
"builddir",
"cprefix",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"[",
"path_to_check",
",",
"bdir",
"]",
")",
"return",
"cprefix",
"!=",
"bdir"
] | Check if a project lies outside of its expected directory. | [
"Check",
"if",
"a",
"project",
"lies",
"outside",
"of",
"its",
"expected",
"directory",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/unionfs.py#L122-L126 |
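
A self-contained sketch of the prefix test with invented paths; one caveat worth knowing is that `os.path.commonprefix` compares strings character by character, not path component by component:

import os

bdir = "/scratch/project/build"

# Inside the builddir: the common prefix equals bdir, so the check is False.
print(os.path.commonprefix(["/scratch/project/build/src", bdir]) != bdir)  # False

# Outside the builddir: the prefixes diverge, so the check is True.
print(os.path.commonprefix(["/home/user/src", bdir]) != bdir)  # True

# Character-wise caveat: "/scratch/project/build2" also passes as "inside".
print(os.path.commonprefix(["/scratch/project/build2/x", bdir]) != bdir)  # False
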
PolyJIT/benchbuild | benchbuild/utils/unionfs.py | __unionfs_set_up | def __unionfs_set_up(ro_dir, rw_dir, mount_dir):
"""
Setup a unionfs via unionfs-fuse.
Args:
ro_base: base_directory of the project
rw_image: virtual image of actual file system
mountpoint: location where ro_base and rw_image merge
"""
mount_dir.mkdir()
rw_dir.mkdir()
if not ro_dir.exists():
LOG.error("Base dir does not exist: '%s'", ro_dir)
raise ValueError("Base directory does not exist")
from benchbuild.utils.cmd import unionfs as unionfs_cmd
LOG.debug("Mounting UnionFS on %s with RO:%s RW:%s", mount_dir, ro_dir,
rw_dir)
return unionfs_cmd["-f", "-o", "auto_unmount,allow_other,cow", rw_dir +
"=RW:" + ro_dir + "=RO", mount_dir] | python | def __unionfs_set_up(ro_dir, rw_dir, mount_dir):
"""
Setup a unionfs via unionfs-fuse.
Args:
ro_base: base_directory of the project
rw_image: virtual image of actual file system
mountpoint: location where ro_base and rw_image merge
"""
mount_dir.mkdir()
rw_dir.mkdir()
if not ro_dir.exists():
LOG.error("Base dir does not exist: '%s'", ro_dir)
raise ValueError("Base directory does not exist")
from benchbuild.utils.cmd import unionfs as unionfs_cmd
LOG.debug("Mounting UnionFS on %s with RO:%s RW:%s", mount_dir, ro_dir,
rw_dir)
return unionfs_cmd["-f", "-o", "auto_unmount,allow_other,cow", rw_dir +
"=RW:" + ro_dir + "=RO", mount_dir] | [
"def",
"__unionfs_set_up",
"(",
"ro_dir",
",",
"rw_dir",
",",
"mount_dir",
")",
":",
"mount_dir",
".",
"mkdir",
"(",
")",
"rw_dir",
".",
"mkdir",
"(",
")",
"if",
"not",
"ro_dir",
".",
"exists",
"(",
")",
":",
"LOG",
".",
"error",
"(",
"\"Base dir does not exist: '%s'\"",
",",
"ro_dir",
")",
"raise",
"ValueError",
"(",
"\"Base directory does not exist\"",
")",
"from",
"benchbuild",
".",
"utils",
".",
"cmd",
"import",
"unionfs",
"as",
"unionfs_cmd",
"LOG",
".",
"debug",
"(",
"\"Mounting UnionFS on %s with RO:%s RW:%s\"",
",",
"mount_dir",
",",
"ro_dir",
",",
"rw_dir",
")",
"return",
"unionfs_cmd",
"[",
"\"-f\"",
",",
"\"-o\"",
",",
"\"auto_unmount,allow_other,cow\"",
",",
"rw_dir",
"+",
"\"=RW:\"",
"+",
"ro_dir",
"+",
"\"=RO\"",
",",
"mount_dir",
"]"
] | Setup a unionfs via unionfs-fuse.
Args:
ro_base: base_directory of the project
rw_image: virtual image of actual file system
mountpoint: location where ro_base and rw_image merge | [
"Setup",
"a",
"unionfs",
"via",
"unionfs",
"-",
"fuse",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/unionfs.py#L138-L157 |
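
In plumbum's syntax, the bound command the helper returns corresponds roughly to the shell invocation shown in the comment below; the paths are invented for illustration, and `unionfs` must be installed for plumbum to resolve it:

from plumbum import local

rw_dir, ro_dir, mount_dir = "/build/rw", "/project/base", "/build/union"

# Equivalent to: unionfs -f -o auto_unmount,allow_other,cow \
#                    /build/rw=RW:/project/base=RO /build/union
unionfs_cmd = local["unionfs"]["-f", "-o", "auto_unmount,allow_other,cow",
                               rw_dir + "=RW:" + ro_dir + "=RO", mount_dir]
print(unionfs_cmd)  # prints the fully expanded command line
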
BlueBrain/hpcbench | hpcbench/benchmark/hpl.py | get_precision_regex | def get_precision_regex():
"""Build regular expression used to extract precision
metric from command output"""
expr = re.escape(PRECISION_FORMULA)
expr += r'=\s*(\S*)\s.*\s([A-Z]*)'
return re.compile(expr) | python | def get_precision_regex():
"""Build regular expression used to extract precision
metric from command output"""
expr = re.escape(PRECISION_FORMULA)
expr += r'=\s*(\S*)\s.*\s([A-Z]*)'
return re.compile(expr) | [
"def",
"get_precision_regex",
"(",
")",
":",
"expr",
"=",
"re",
".",
"escape",
"(",
"PRECISION_FORMULA",
")",
"expr",
"+=",
"r'=\\s*(\\S*)\\s.*\\s([A-Z]*)'",
"return",
"re",
".",
"compile",
"(",
"expr",
")"
] | Build regular expression used to extract precision
metric from command output | [
"Build",
"regular",
"expression",
"used",
"to",
"extract",
"precision",
"metric",
"from",
"command",
"output"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/hpl.py#L19-L24 |
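
`PRECISION_FORMULA` itself is defined elsewhere in the module; assuming it is the literal residual formula that HPL prints (an assumption, since the constant is not shown in this record), the regex captures the residual value and the PASSED/FAILED verdict:

import re

# Assumed value of PRECISION_FORMULA -- the real constant lives in
# hpcbench/benchmark/hpl.py and is not shown in this record.
PRECISION_FORMULA = "||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)"

expr = re.compile(re.escape(PRECISION_FORMULA) + r'=\s*(\S*)\s.*\s([A-Z]*)')
line = PRECISION_FORMULA + "=        0.0051555 ...... PASSED"
print(expr.search(line).groups())  # ('0.0051555', 'PASSED')
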
BlueBrain/hpcbench | hpcbench/benchmark/hpl.py | HPL._build_data | def _build_data(self):
"""Build HPL data from basic parameters"""
def baseN(nodes, mpn):
return int(math.sqrt(mpn * 0.80 * nodes * 1024 * 1024 / 8))
def nFromNb(baseN, nb):
factor = int(baseN / nb)
if factor % 2 != 0:
factor -= 1
return nb * factor
def get_grid(nodes, ppn):
cores = nodes * ppn
sqrt = math.sqrt(cores)
factors = [
num for num in range(2, int(math.floor(sqrt) + 1)) if cores % num == 0
]
if len(factors) == 0:
factors = [1]
diff = 0
keep = 0
for factor in factors:
if diff == 0:
diff = cores - factor
if keep == 0:
keep = factor
tmp_diff = cores - factor
if tmp_diff < diff:
diff = tmp_diff
keep = factor
return [keep, int(cores / keep)]
properties = dict(
realN=nFromNb(baseN(self.nodes, self.memory_per_node), self.block_size),
nb=self.block_size,
pQ=get_grid(self.nodes, self.cores_per_node),
)
return self._data_from_jinja(**properties) | python | def _build_data(self):
"""Build HPL data from basic parameters"""
def baseN(nodes, mpn):
return int(math.sqrt(mpn * 0.80 * nodes * 1024 * 1024 / 8))
def nFromNb(baseN, nb):
factor = int(baseN / nb)
if factor % 2 != 0:
factor -= 1
return nb * factor
def get_grid(nodes, ppn):
cores = nodes * ppn
sqrt = math.sqrt(cores)
factors = [
num for num in range(2, int(math.floor(sqrt) + 1)) if cores % num == 0
]
if len(factors) == 0:
factors = [1]
diff = 0
keep = 0
for factor in factors:
if diff == 0:
diff = cores - factor
if keep == 0:
keep = factor
tmp_diff = cores - factor
if tmp_diff < diff:
diff = tmp_diff
keep = factor
return [keep, int(cores / keep)]
properties = dict(
realN=nFromNb(baseN(self.nodes, self.memory_per_node), self.block_size),
nb=self.block_size,
pQ=get_grid(self.nodes, self.cores_per_node),
)
return self._data_from_jinja(**properties) | [
"def",
"_build_data",
"(",
"self",
")",
":",
"def",
"baseN",
"(",
"nodes",
",",
"mpn",
")",
":",
"return",
"int",
"(",
"math",
".",
"sqrt",
"(",
"mpn",
"*",
"0.80",
"*",
"nodes",
"*",
"1024",
"*",
"1024",
"/",
"8",
")",
")",
"def",
"nFromNb",
"(",
"baseN",
",",
"nb",
")",
":",
"factor",
"=",
"int",
"(",
"baseN",
"/",
"nb",
")",
"if",
"factor",
"%",
"2",
"!=",
"0",
":",
"factor",
"-=",
"1",
"return",
"nb",
"*",
"factor",
"def",
"get_grid",
"(",
"nodes",
",",
"ppn",
")",
":",
"cores",
"=",
"nodes",
"*",
"ppn",
"sqrt",
"=",
"math",
".",
"sqrt",
"(",
"cores",
")",
"factors",
"=",
"[",
"num",
"for",
"num",
"in",
"range",
"(",
"2",
",",
"int",
"(",
"math",
".",
"floor",
"(",
"sqrt",
")",
"+",
"1",
")",
")",
"if",
"cores",
"%",
"num",
"==",
"0",
"]",
"if",
"len",
"(",
"factors",
")",
"==",
"0",
":",
"factors",
"=",
"[",
"1",
"]",
"diff",
"=",
"0",
"keep",
"=",
"0",
"for",
"factor",
"in",
"factors",
":",
"if",
"diff",
"==",
"0",
":",
"diff",
"=",
"cores",
"-",
"factor",
"if",
"keep",
"==",
"0",
":",
"keep",
"=",
"factor",
"tmp_diff",
"=",
"cores",
"-",
"factor",
"if",
"tmp_diff",
"<",
"diff",
":",
"diff",
"=",
"tmp_diff",
"keep",
"=",
"factor",
"return",
"[",
"keep",
",",
"int",
"(",
"cores",
"/",
"keep",
")",
"]",
"properties",
"=",
"dict",
"(",
"realN",
"=",
"nFromNb",
"(",
"baseN",
"(",
"self",
".",
"nodes",
",",
"self",
".",
"memory_per_node",
")",
",",
"self",
".",
"block_size",
")",
",",
"nb",
"=",
"self",
".",
"block_size",
",",
"pQ",
"=",
"get_grid",
"(",
"self",
".",
"nodes",
",",
"self",
".",
"cores_per_node",
")",
",",
")",
"return",
"self",
".",
"_data_from_jinja",
"(",
"*",
"*",
"properties",
")"
] | Build HPL data from basic parameters | [
"Build",
"HPL",
"data",
"from",
"basic",
"parameters"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/hpl.py#L212-L251 |
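
The `get_grid` factor loop above converges on the largest divisor of the core count that does not exceed its square root, which yields the most nearly square P x Q process grid; an equivalent self-contained sketch:

import math

def most_square_grid(cores):
    # Largest divisor of `cores` not exceeding sqrt(cores); falls back
    # to 1 when cores is prime, matching the factors = [1] branch above.
    best = 1
    for num in range(2, int(math.sqrt(cores)) + 1):
        if cores % num == 0:
            best = num
    return [best, cores // best]

print(most_square_grid(4 * 16))  # [8, 8]  (4 nodes x 16 cores/node)
print(most_square_grid(6 * 8))   # [6, 8]  (48 cores)
print(most_square_grid(7))       # [1, 7]  (prime core count)
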
BlueBrain/hpcbench | hpcbench/benchmark/hpl.py | HPL.mpirun | def mpirun(self):
"""Additional options passed as a list to the ``mpirun`` command"""
cmd = self.attributes['mpirun']
if cmd and cmd[0] != 'mpirun':
cmd = ['mpirun']
return [str(e) for e in cmd] | python | def mpirun(self):
"""Additional options passed as a list to the ``mpirun`` command"""
cmd = self.attributes['mpirun']
if cmd and cmd[0] != 'mpirun':
cmd = ['mpirun']
return [str(e) for e in cmd] | [
"def",
"mpirun",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
".",
"attributes",
"[",
"'mpirun'",
"]",
"if",
"cmd",
"and",
"cmd",
"[",
"0",
"]",
"!=",
"'mpirun'",
":",
"cmd",
"=",
"[",
"'mpirun'",
"]",
"return",
"[",
"str",
"(",
"e",
")",
"for",
"e",
"in",
"cmd",
"]"
] | Additional options passed as a list to the ``mpirun`` command | [
"Additional",
"options",
"passed",
"as",
"a",
"list",
"to",
"the",
"mpirun",
"command"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/hpl.py#L263-L268 |
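
Restating the guard above as a standalone function makes its behavior visible: any attribute value whose first element is not `mpirun` is replaced wholesale, which discards the extra options rather than prepending the command:

def normalize_mpirun(cmd):
    # Same logic as the property body above.
    if cmd and cmd[0] != 'mpirun':
        cmd = ['mpirun']
    return [str(e) for e in cmd]

print(normalize_mpirun(['mpirun', '-np', 4]))  # ['mpirun', '-np', '4']
print(normalize_mpirun(['srun', '-n', 4]))     # ['mpirun'] -- options discarded
print(normalize_mpirun([]))                    # []
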
BlueBrain/hpcbench | hpcbench/toolbox/env.py | expandvars | def expandvars(s, vars=None):
"""Perform variable substitution on the given string
Supported syntax:
* $VARIABLE
* ${VARIABLE}
* ${#VARIABLE}
* ${VARIABLE:-default}
:param s: message to expand
:type s: str
:param vars: dictionary of variables. Default is ``os.environ``
:type vars: dict
:return: expanded string
:rtype: str
"""
tpl = TemplateWithDefaults(s)
return tpl.substitute(vars or os.environ) | python | def expandvars(s, vars=None):
"""Perform variable substitution on the given string
Supported syntax:
* $VARIABLE
* ${VARIABLE}
* ${#VARIABLE}
* ${VARIABLE:-default}
:param s: message to expand
:type s: str
:param vars: dictionary of variables. Default is ``os.environ``
:type vars: dict
:return: expanded string
:rtype: str
"""
tpl = TemplateWithDefaults(s)
return tpl.substitute(vars or os.environ) | [
"def",
"expandvars",
"(",
"s",
",",
"vars",
"=",
"None",
")",
":",
"tpl",
"=",
"TemplateWithDefaults",
"(",
"s",
")",
"return",
"tpl",
".",
"substitute",
"(",
"vars",
"or",
"os",
".",
"environ",
")"
] | Perform variable substitution on the given string
Supported syntax:
* $VARIABLE
* ${VARIABLE}
* ${#VARIABLE}
* ${VARIABLE:-default}
:param s: message to expand
:type s: str
:param vars: dictionary of variables. Default is ``os.environ``
:type vars: dict
:return: expanded string
:rtype: str | [
"Perform",
"variable",
"substitution",
"on",
"the",
"given",
"string"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/env.py#L35-L54 |
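
A usage sketch, assuming the module is importable as `hpcbench.toolbox.env` (matching the path in this record); the `${#VARIABLE}` form presumably expands to the value's length, following the shell convention, but that is an assumption and is left out of the sketch:

from hpcbench.toolbox.env import expandvars  # assumed import path

env = {'USER': 'alice', 'HOME': '/home/alice'}
print(expandvars('$USER', env))               # alice
print(expandvars('${HOME}/.cache', env))      # /home/alice/.cache
print(expandvars('${SHELL:-/bin/sh}', env))   # /bin/sh (default applies)
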
BlueBrain/hpcbench | hpcbench/toolbox/edsl.py | kwargsql.and_ | def and_(cls, obj, **kwargs):
"""Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if all `kwargs` expressions are `True`, `False` otherwise.
:rtype: bool
"""
return cls.__eval_seqexp(obj, operator.and_, **kwargs) | python | def and_(cls, obj, **kwargs):
"""Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if all `kwargs` expressions are `True`, `False` otherwise.
:rtype: bool
"""
return cls.__eval_seqexp(obj, operator.and_, **kwargs) | [
"def",
"and_",
"(",
"cls",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cls",
".",
"__eval_seqexp",
"(",
"obj",
",",
"operator",
".",
"and_",
",",
"*",
"*",
"kwargs",
")"
] | Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if all `kwargs` expressions are `True`, `False` otherwise.
:rtype: bool | [
"Query",
"an",
"object"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L127-L139 |
BlueBrain/hpcbench | hpcbench/toolbox/edsl.py | kwargsql.or_ | def or_(cls, obj, **kwargs):
"""Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if at least one `kwargs` expression is `True`,
`False` otherwise.
:rtype: bool
"""
return cls.__eval_seqexp(obj, operator.or_, **kwargs) | python | def or_(cls, obj, **kwargs):
"""Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if at least one `kwargs` expression is `True`,
`False` otherwise.
:rtype: bool
"""
return cls.__eval_seqexp(obj, operator.or_, **kwargs) | [
"def",
"or_",
"(",
"cls",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cls",
".",
"__eval_seqexp",
"(",
"obj",
",",
"operator",
".",
"or_",
",",
"*",
"*",
"kwargs",
")"
] | Query an object
:param obj:
object to test
:param kwargs: query specified in kwargsql
:return:
`True` if at least one `kwargs` expression is `True`,
`False` otherwise.
:rtype: bool | [
"Query",
"an",
"object"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L142-L155 |
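
A hypothetical query sketch covering both `and_` and `or_`; plain keyword equality is the simplest kwargsql expression, and the richer operator-suffix syntax the module supports is not shown in these records, so only equality is demonstrated:

from hpcbench.toolbox.edsl import kwargsql  # assumed import path

class Run:
    benchmark = 'hpl'
    nodes = 4

print(kwargsql.and_(Run, benchmark='hpl', nodes=4))     # True: both hold
print(kwargsql.or_(Run, benchmark='stream', nodes=4))   # True: one holds
print(kwargsql.and_(Run, benchmark='stream', nodes=4))  # False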