repository_name (string, 7-55) | func_path_in_repository (string, 4-223) | func_name (string, 1-134) | whole_func_string (string, 75-104k) | language (string, 1 class) | func_code_string (string, 75-104k) | func_code_tokens (sequence, 19-28.4k) | func_documentation_string (string, 1-46.9k) | func_documentation_tokens (sequence, 1-1.97k) | split_name (string, 1 class) | func_code_url (string, 87-315)
---|---|---|---|---|---|---|---|---|---|---|
OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.strip_number | def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this property always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_number(
self._handle) | python | def strip_number(self):
"""The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this property always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_number(
self._handle) | [
"def",
"strip_number",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"TABLET_PAD_STRIP",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_pad_get_strip_number",
"(",
"self",
".",
"_handle",
")"
] | The number of the strip that has changed state,
with 0 being the first strip.
On tablets with only one strip, this property always returns 0.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
int: The index of the strip that changed state.
Raises:
AttributeError | [
"The",
"number",
"of",
"the",
"strip",
"that",
"has",
"changed",
"state",
"with",
"0",
"being",
"the",
"first",
"strip",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1548-L1567 |
OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.strip_source | def strip_source(self):
"""The source of the interaction with the strip.
If the source is
:attr:`~libinput.constant.TabletPadStripAxisSource.FINGER`, libinput
sends a strip position value of -1 to terminate the current interaction.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.TabletPadStripAxisSource: The source of
the strip interaction.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_source(
self._handle) | python | def strip_source(self):
"""The source of the interaction with the strip.
If the source is
:attr:`~libinput.constant.TabletPadStripAxisSource.FINGER`, libinput
sends a strip position value of -1 to terminate the current interaction.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.TabletPadStripAxisSource: The source of
the strip interaction.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_STRIP:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_strip_source(
self._handle) | [
"def",
"strip_source",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"TABLET_PAD_STRIP",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_pad_get_strip_source",
"(",
"self",
".",
"_handle",
")"
] | The source of the interaction with the strip.
If the source is
:attr:`~libinput.constant.TabletPadStripAxisSource.FINGER`, libinput
sends a strip position value of -1 to terminate the current interaction.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_STRIP`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.TabletPadStripAxisSource: The source of
the strip interaction.
Raises:
AttributeError | [
"The",
"source",
"of",
"the",
"interaction",
"with",
"the",
"strip",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1570-L1591 |
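A minimal usage sketch for the two strip properties above. Everything here is illustrative rather than from the source: it assumes an already-configured libinput context `li` that yields `TabletPadEvent` instances (the exact accessor name may differ), and it only reads the properties on events of the matching type, since any other type raises `AttributeError`.

```python
from libinput.constant import EventType, TabletPadStripAxisSource

# `li` is an assumed, already-initialized libinput context.
for event in li.get_event():
    if event.type == EventType.TABLET_PAD_STRIP:
        print('strip', event.strip_number, 'source', event.strip_source)
        if event.strip_source == TabletPadStripAxisSource.FINGER:
            # libinput terminates a finger interaction with position -1.
            print('finger interaction ended')
```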
OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.button_number | def button_number(self):
"""The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_number(
self._handle) | python | def button_number(self):
"""The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_number(
self._handle) | [
"def",
"button_number",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"TABLET_PAD_BUTTON",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_pad_get_button_number",
"(",
"self",
".",
"_handle",
")"
] | The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError | [
"The",
"button",
"number",
"that",
"triggered",
"this",
"event",
"starting",
"at",
"0",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1594-L1614 |
OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.button_state | def button_state(self):
"""The button state of the event.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering
this event.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_state(
self._handle) | python | def button_state(self):
"""The button state of the event.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering
this event.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_state(
self._handle) | [
"def",
"button_state",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"TABLET_PAD_BUTTON",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_pad_get_button_state",
"(",
"self",
".",
"_handle",
")"
] | The button state of the event.
For events not of type
:attr:`~libinput.constant.EventType.TABLET_PAD_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering
this event.
Raises:
AttributeError | [
"The",
"button",
"state",
"of",
"the",
"event",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1617-L1634 |
OzymandiasTheGreat/python-libinput | libinput/event.py | TabletPadEvent.mode_group | def mode_group(self):
"""The mode group that the button, ring, or strip that
triggered this event is considered in.
The mode is a virtual grouping of functionality, usually based on some
visual feedback like LEDs on the pad. See `Tablet pad modes`_
for details.
Returns:
~libinput.define.TabletPadModeGroup: The mode group of the button,
ring or strip that caused this event.
"""
hmodegroup = self._libinput.libinput_event_tablet_pad_get_mode_group(
self._handle)
return TabletPadModeGroup(hmodegroup, self._libinput) | python | def mode_group(self):
"""The mode group that the button, ring, or strip that
triggered this event is considered in.
The mode is a virtual grouping of functionality, usually based on some
visual feedback like LEDs on the pad. See `Tablet pad modes`_
for details.
Returns:
~libinput.define.TabletPadModeGroup: The mode group of the button,
ring or strip that caused this event.
"""
hmodegroup = self._libinput.libinput_event_tablet_pad_get_mode_group(
self._handle)
return TabletPadModeGroup(hmodegroup, self._libinput) | [
"def",
"mode_group",
"(",
"self",
")",
":",
"hmodegroup",
"=",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_pad_get_mode_group",
"(",
"self",
".",
"_handle",
")",
"return",
"TabletPadModeGroup",
"(",
"hmodegroup",
",",
"self",
".",
"_libinput",
")"
] | The mode group that the button, ring, or strip that
triggered this event is considered in.
The mode is a virtual grouping of functionality, usually based on some
visual feedback like LEDs on the pad. See `Tablet pad modes`_
for details.
Returns:
~libinput.define.TabletPadModeGroup: The mode group of the button,
ring or strip that caused this event. | [
"The",
"mode",
"group",
"that",
"the",
"button",
"ring",
"or",
"strip",
"that",
"triggered",
"this",
"event",
"is",
"considered",
"in",
"."
] | train | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L1665-L1680 |
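The button and mode-group properties follow the same guard pattern as the strip properties. A hedged sketch of a handler that combines them; `ButtonState.PRESSED` and the mode group's `mode` attribute are assumed spellings, not confirmed by this excerpt.

```python
from libinput.constant import EventType, ButtonState

def handle_pad_button(event):
    """Print pad button transitions together with their mode group."""
    if event.type != EventType.TABLET_PAD_BUTTON:
        return
    group = event.mode_group  # valid for button, ring, and strip events
    state = 'pressed' if event.button_state == ButtonState.PRESSED else 'released'
    print(f'button {event.button_number} {state} (mode {group.mode})')
```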
sci-bots/svg-model | svg_model/bin/detect_connections.py | parse_args | def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='''
Attempt to automatically find "adjacent" shapes in a SVG layer, and on a second
SVG layer, draw each detected connection between the center points of the
corresponding shapes.'''.strip())
parser.add_argument('svg_input_file', type=path, default=None)
parser.add_argument('svg_output_file', type=path, default="-",
help='Output file path ("-" for stdout)', nargs='?')
parser.add_argument('-f', '--overwrite', action='store_true')
args = parser.parse_args()
if not args.overwrite and (args.svg_input_file == args.svg_output_file):
parser.error('Input and output file are the same. Use `-f` to force '
'overwrite.')
return args | python | def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='''
Attempt to automatically find "adjacent" shapes in a SVG layer, and on a second
SVG layer, draw each detected connection between the center points of the
corresponding shapes.'''.strip())
parser.add_argument('svg_input_file', type=path, default=None)
parser.add_argument('svg_output_file', type=path, default="-",
help='Output file path ("-" for stdout)', nargs='?')
parser.add_argument('-f', '--overwrite', action='store_true')
args = parser.parse_args()
if not args.overwrite and (args.svg_input_file == args.svg_output_file):
parser.error('Input and output file are the same. Use `-f` to force '
'overwrite.')
return args | [
"def",
"parse_args",
"(",
"args",
"=",
"None",
")",
":",
"from",
"argparse",
"import",
"ArgumentParser",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"'''\nAttempt to automatically find \"adjacent\" shapes in a SVG layer, and on a second\nSVG layer, draw each detected connection between the center points of the\ncorresponding shapes.'''",
".",
"strip",
"(",
")",
")",
"parser",
".",
"add_argument",
"(",
"'svg_input_file'",
",",
"type",
"=",
"path",
",",
"default",
"=",
"None",
")",
"parser",
".",
"add_argument",
"(",
"'svg_output_file'",
",",
"type",
"=",
"path",
",",
"default",
"=",
"\"-\"",
",",
"help",
"=",
"'Output file path (\"-\" for stdout)'",
",",
"nargs",
"=",
"'?'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--overwrite'",
",",
"action",
"=",
"'store_true'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"args",
".",
"overwrite",
"and",
"(",
"args",
".",
"svg_input_file",
"==",
"args",
".",
"svg_output_file",
")",
":",
"parser",
".",
"error",
"(",
"'Input and output file are the same. Use `-f` to force '",
"'overwrite.'",
")",
"return",
"args"
] | Parses arguments and returns the parsed namespace. | [
"Parses",
"arguments",
"returns",
"(",
"options",
"args",
")",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/bin/detect_connections.py#L13-L35 |
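Two observations on `parse_args` above: it normalizes its `args` parameter but then calls `parser.parse_args()` with no arguments, so the parameter is effectively ignored and `sys.argv` is always parsed; and the `-f` guard only matters when input and output paths coincide. A hedged invocation sketch (file names invented):

```python
import sys

# Equivalent command line:
#   detect_connections input.svg output.svg -f
sys.argv = ['detect_connections', 'input.svg', 'output.svg', '-f']
args = parse_args()
print(args.svg_input_file, args.svg_output_file, args.overwrite)
```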
KelSolaar/Manager | setup.py | get_long_description | def get_long_description():
"""
Returns the Package long description.
:return: Package long description.
:rtype: unicode
"""
description = []
with open("README.rst") as file:
for line in file:
if ".. code:: python" in line and len(description) >= 2:
blockLine = description[-2]
if re.search(r":$", blockLine) and not re.search(r"::$", blockLine):
description[-2] = "::".join(blockLine.rsplit(":", 1))
continue
description.append(line)
return "".join(description) | python | def get_long_description():
"""
Returns the Package long description.
:return: Package long description.
:rtype: unicode
"""
description = []
with open("README.rst") as file:
for line in file:
if ".. code:: python" in line and len(description) >= 2:
blockLine = description[-2]
if re.search(r":$", blockLine) and not re.search(r"::$", blockLine):
description[-2] = "::".join(blockLine.rsplit(":", 1))
continue
description.append(line)
return "".join(description) | [
"def",
"get_long_description",
"(",
")",
":",
"description",
"=",
"[",
"]",
"with",
"open",
"(",
"\"README.rst\"",
")",
"as",
"file",
":",
"for",
"line",
"in",
"file",
":",
"if",
"\".. code:: python\"",
"in",
"line",
"and",
"len",
"(",
"description",
")",
">=",
"2",
":",
"blockLine",
"=",
"description",
"[",
"-",
"2",
"]",
"if",
"re",
".",
"search",
"(",
"r\":$\"",
",",
"blockLine",
")",
"and",
"not",
"re",
".",
"search",
"(",
"r\"::$\"",
",",
"blockLine",
")",
":",
"description",
"[",
"-",
"2",
"]",
"=",
"\"::\"",
".",
"join",
"(",
"blockLine",
".",
"rsplit",
"(",
"\":\"",
",",
"1",
")",
")",
"continue",
"description",
".",
"append",
"(",
"line",
")",
"return",
"\"\"",
".",
"join",
"(",
"description",
")"
] | Returns the Package long description.
:return: Package long description.
:rtype: unicode | [
"Returns",
"the",
"Package",
"long",
"description",
"."
] | train | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/setup.py#L35-L53 |
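The loop above rewrites a lead-in line ending in `:` to reST's literal-block marker `::` whenever a `.. code:: python` directive follows within two lines. The core string transformation, shown standalone with an invented sample:

```python
import re

block_line = "Usage example:"
# Mirrors the rewrite in get_long_description: turn a trailing ':'
# into '::' unless it is already '::'.
if re.search(r":$", block_line) and not re.search(r"::$", block_line):
    block_line = "::".join(block_line.rsplit(":", 1))
print(block_line)  # -> Usage example::
```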
portfoliome/foil | foil/ftp.py | ftp_listing_paths | def ftp_listing_paths(ftpconn: FTP, root: str) -> Iterable[str]:
"""Generate the full file paths from a root path."""
for current_path, dirs, files in ftp_walk(ftpconn, root):
yield from (os.path.join(current_path, file) for file in files) | python | def ftp_listing_paths(ftpconn: FTP, root: str) -> Iterable[str]:
"""Generate the full file paths from a root path."""
for current_path, dirs, files in ftp_walk(ftpconn, root):
yield from (os.path.join(current_path, file) for file in files) | [
"def",
"ftp_listing_paths",
"(",
"ftpconn",
":",
"FTP",
",",
"root",
":",
"str",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"for",
"current_path",
",",
"dirs",
",",
"files",
"in",
"ftp_walk",
"(",
"ftpconn",
",",
"root",
")",
":",
"yield",
"from",
"(",
"os",
".",
"path",
".",
"join",
"(",
"current_path",
",",
"file",
")",
"for",
"file",
"in",
"files",
")"
] | Generate the full file paths from a root path. | [
"Generate",
"the",
"full",
"file",
"paths",
"from",
"a",
"root",
"path",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L35-L39 |
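A hedged sketch of driving `ftp_listing_paths` (which walks the tree via `ftp_walk` below) against a live server; the host, credentials, and root path are invented:

```python
from ftplib import FTP

with FTP('ftp.example.com') as conn:  # hypothetical host
    conn.login()  # anonymous login
    for file_path in ftp_listing_paths(conn, '/pub'):
        print(file_path)
```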
portfoliome/foil | foil/ftp.py | ftp_walk | def ftp_walk(ftpconn: FTP, rootpath=''):
"""Recursively traverse an ftp directory to discovery directory listing."""
current_directory = rootpath
try:
directories, files = directory_listing(ftpconn, current_directory)
except ftplib.error_perm:
return
# Yield before recursion
yield current_directory, directories, files
# Recurse into sub-directories
for name in directories:
new_path = os.path.join(current_directory, name)
for entry in ftp_walk(ftpconn, rootpath=new_path):
yield entry
else:
return | python | def ftp_walk(ftpconn: FTP, rootpath=''):
"""Recursively traverse an ftp directory to discovery directory listing."""
current_directory = rootpath
try:
directories, files = directory_listing(ftpconn, current_directory)
except ftplib.error_perm:
return
# Yield before recursion
yield current_directory, directories, files
# Recurse into sub-directories
for name in directories:
new_path = os.path.join(current_directory, name)
for entry in ftp_walk(ftpconn, rootpath=new_path):
yield entry
else:
return | [
"def",
"ftp_walk",
"(",
"ftpconn",
":",
"FTP",
",",
"rootpath",
"=",
"''",
")",
":",
"current_directory",
"=",
"rootpath",
"try",
":",
"directories",
",",
"files",
"=",
"directory_listing",
"(",
"ftpconn",
",",
"current_directory",
")",
"except",
"ftplib",
".",
"error_perm",
":",
"return",
"# Yield before recursion",
"yield",
"current_directory",
",",
"directories",
",",
"files",
"# Recurse into sub-directories",
"for",
"name",
"in",
"directories",
":",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_directory",
",",
"name",
")",
"for",
"entry",
"in",
"ftp_walk",
"(",
"ftpconn",
",",
"rootpath",
"=",
"new_path",
")",
":",
"yield",
"entry",
"else",
":",
"return"
] | Recursively traverse an ftp directory to discover directory listings. | [
"Recursively",
"traverse",
"an",
"ftp",
"directory",
"to",
"discovery",
"directory",
"listing",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L42-L62 |
portfoliome/foil | foil/ftp.py | directory_listing | def directory_listing(conn: FTP, path: str) -> Tuple[List, List]:
"""Return the directories and files for single FTP listing."""
entries = deque()
conn.dir(path, entries.append)
entries = map(parse_line, entries)
grouped_entries = defaultdict(list)
for key, value in entries:
grouped_entries[key].append(value)
directories = grouped_entries[ListingType.directory]
files = grouped_entries[ListingType.file]
return directories, files | python | def directory_listing(conn: FTP, path: str) -> Tuple[List, List]:
"""Return the directories and files for single FTP listing."""
entries = deque()
conn.dir(path, entries.append)
entries = map(parse_line, entries)
grouped_entries = defaultdict(list)
for key, value in entries:
grouped_entries[key].append(value)
directories = grouped_entries[ListingType.directory]
files = grouped_entries[ListingType.file]
return directories, files | [
"def",
"directory_listing",
"(",
"conn",
":",
"FTP",
",",
"path",
":",
"str",
")",
"->",
"Tuple",
"[",
"List",
",",
"List",
"]",
":",
"entries",
"=",
"deque",
"(",
")",
"conn",
".",
"dir",
"(",
"path",
",",
"entries",
".",
"append",
")",
"entries",
"=",
"map",
"(",
"parse_line",
",",
"entries",
")",
"grouped_entries",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"key",
",",
"value",
"in",
"entries",
":",
"grouped_entries",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"directories",
"=",
"grouped_entries",
"[",
"ListingType",
".",
"directory",
"]",
"files",
"=",
"grouped_entries",
"[",
"ListingType",
".",
"file",
"]",
"return",
"directories",
",",
"files"
] | Return the directories and files for a single FTP listing. | [
"Return",
"the",
"directories",
"and",
"files",
"for",
"single",
"FTP",
"listing",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L65-L79 |
portfoliome/foil | foil/ftp.py | parse_line | def parse_line(line: str, char_index=0) -> Tuple[ListingType, str]:
"""Parse FTP directory listing into (type, filename)."""
entry_name = str.rpartition(line, ' ')[-1]
entry_type = LISTING_FLAG_MAP.get(line[char_index], ListingType.other)
return entry_type, entry_name | python | def parse_line(line: str, char_index=0) -> Tuple[ListingType, str]:
"""Parse FTP directory listing into (type, filename)."""
entry_name = str.rpartition(line, ' ')[-1]
entry_type = LISTING_FLAG_MAP.get(line[char_index], ListingType.other)
return entry_type, entry_name | [
"def",
"parse_line",
"(",
"line",
":",
"str",
",",
"char_index",
"=",
"0",
")",
"->",
"Tuple",
"[",
"ListingType",
",",
"str",
"]",
":",
"entry_name",
"=",
"str",
".",
"rpartition",
"(",
"line",
",",
"' '",
")",
"[",
"-",
"1",
"]",
"entry_type",
"=",
"LISTING_FLAG_MAP",
".",
"get",
"(",
"line",
"[",
"char_index",
"]",
",",
"ListingType",
".",
"other",
")",
"return",
"entry_type",
",",
"entry_name"
] | Parse FTP directory listing into (type, filename). | [
"Parse",
"FTP",
"directory",
"listing",
"into",
"(",
"type",
"filename",
")",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L82-L87 |
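`parse_line` (fed by `directory_listing` above) looks only at the first permission character and the last whitespace-separated field, so it can be exercised offline. A sketch with invented Unix-style `LIST` output; it assumes the module's `ListingType` enum has `directory` and `file` members, as the grouping code above implies:

```python
samples = [
    'drwxr-xr-x   2 ftp ftp 4096 Jan 01 12:00 pub',
    '-rw-r--r--   1 ftp ftp  512 Jan 01 12:00 readme.txt',
]
for line in samples:
    entry_type, name = parse_line(line)
    print(entry_type, name)
# Expected: ListingType.directory pub / ListingType.file readme.txt
```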
portfoliome/foil | foil/ftp.py | download_ftp_url | def download_ftp_url(source_url, target_uri, buffer_size=8192):
"""Uses urllib. thread safe?"""
ensure_file_directory(target_uri)
with urllib.request.urlopen(source_url) as source_file:
with open(target_uri, 'wb') as target_file:
shutil.copyfileobj(source_file, target_file, buffer_size) | python | def download_ftp_url(source_url, target_uri, buffer_size=8192):
"""Uses urllib. thread safe?"""
ensure_file_directory(target_uri)
with urllib.request.urlopen(source_url) as source_file:
with open(target_uri, 'wb') as target_file:
shutil.copyfileobj(source_file, target_file, buffer_size) | [
"def",
"download_ftp_url",
"(",
"source_url",
",",
"target_uri",
",",
"buffer_size",
"=",
"8192",
")",
":",
"ensure_file_directory",
"(",
"target_uri",
")",
"with",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"source_url",
")",
"as",
"source_file",
":",
"with",
"open",
"(",
"target_uri",
",",
"'wb'",
")",
"as",
"target_file",
":",
"shutil",
".",
"copyfileobj",
"(",
"source_file",
",",
"target_file",
",",
"buffer_size",
")"
] | Uses urllib. thread safe? | [
"Uses",
"urllib",
".",
"thread",
"safe?"
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/ftp.py#L90-L97 |
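`download_ftp_url` streams the remote file in 8 KiB chunks via `shutil.copyfileobj` after making sure the target directory exists. A one-line usage sketch with an invented URL and target path:

```python
download_ftp_url('ftp://ftp.example.com/pub/data.csv', '/tmp/data.csv')
```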
BlueBrain/hpcbench | hpcbench/benchmark/babelstream.py | BabelStream.devices | def devices(self):
"""List of devices to test
"""
eax = self.attributes.get('devices')
if eax is None:
eax = self._all_devices
if not isinstance(eax, list):
eax = [eax]
return [str(dev) for dev in eax] | python | def devices(self):
"""List of devices to test
"""
eax = self.attributes.get('devices')
if eax is None:
eax = self._all_devices
if not isinstance(eax, list):
eax = [eax]
return [str(dev) for dev in eax] | [
"def",
"devices",
"(",
"self",
")",
":",
"eax",
"=",
"self",
".",
"attributes",
".",
"get",
"(",
"'devices'",
")",
"if",
"eax",
"is",
"None",
":",
"eax",
"=",
"self",
".",
"_all_devices",
"if",
"not",
"isinstance",
"(",
"eax",
",",
"list",
")",
":",
"eax",
"=",
"[",
"eax",
"]",
"return",
"[",
"str",
"(",
"dev",
")",
"for",
"dev",
"in",
"eax",
"]"
] | List of devices to test | [
"List",
"of",
"devices",
"to",
"test"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/babelstream.py#L69-L77 |
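The property above turns a benchmark attribute that may be absent, scalar, or a list into a uniform list of strings. The same normalization, extracted into a standalone sketch with invented inputs:

```python
def normalize_devices(attributes, all_devices):
    devices = attributes.get('devices')
    if devices is None:
        devices = all_devices      # default to every detected device
    if not isinstance(devices, list):
        devices = [devices]        # accept a bare scalar
    return [str(dev) for dev in devices]

print(normalize_devices({'devices': 0}, [0, 1]))  # ['0']
print(normalize_devices({}, [0, 1]))              # ['0', '1']
```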
PolyJIT/benchbuild | benchbuild/utils/schema.py | exceptions | def exceptions(error_is_fatal=True, error_messages=None):
"""
Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception? If False,
the user is asked whether to continue; declining re-raises the exception.
error_messages: A dictionary that assigns an exception class to a
customized error message.
"""
def exception_decorator(func):
nonlocal error_messages
@functools.wraps(func)
def exc_wrapper(*args, **kwargs):
nonlocal error_messages
try:
result = func(*args, **kwargs)
except sa.exc.SQLAlchemyError as err:
result = None
details = None
err_type = err.__class__
if error_messages and err_type in error_messages:
details = error_messages[err_type]
if details:
LOG.error(details)
LOG.error("For developers: (%s) %s", err.__class__, str(err))
if error_is_fatal:
sys.exit("Abort, SQL operation failed.")
if not ui.ask(
"I can continue at your own risk, do you want that?"):
raise err
return result
return exc_wrapper
return exception_decorator | python | def exceptions(error_is_fatal=True, error_messages=None):
"""
Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception? If False,
the user is asked whether to continue; declining re-raises the exception.
error_messages: A dictionary that assigns an exception class to a
customized error message.
"""
def exception_decorator(func):
nonlocal error_messages
@functools.wraps(func)
def exc_wrapper(*args, **kwargs):
nonlocal error_messages
try:
result = func(*args, **kwargs)
except sa.exc.SQLAlchemyError as err:
result = None
details = None
err_type = err.__class__
if error_messages and err_type in error_messages:
details = error_messages[err_type]
if details:
LOG.error(details)
LOG.error("For developers: (%s) %s", err.__class__, str(err))
if error_is_fatal:
sys.exit("Abort, SQL operation failed.")
if not ui.ask(
"I can continue at your own risk, do you want that?"):
raise err
return result
return exc_wrapper
return exception_decorator | [
"def",
"exceptions",
"(",
"error_is_fatal",
"=",
"True",
",",
"error_messages",
"=",
"None",
")",
":",
"def",
"exception_decorator",
"(",
"func",
")",
":",
"nonlocal",
"error_messages",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"exc_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nonlocal",
"error_messages",
"try",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"sa",
".",
"exc",
".",
"SQLAlchemyError",
"as",
"err",
":",
"result",
"=",
"None",
"details",
"=",
"None",
"err_type",
"=",
"err",
".",
"__class__",
"if",
"error_messages",
"and",
"err_type",
"in",
"error_messages",
":",
"details",
"=",
"error_messages",
"[",
"err_type",
"]",
"if",
"details",
":",
"LOG",
".",
"error",
"(",
"details",
")",
"LOG",
".",
"error",
"(",
"\"For developers: (%s) %s\"",
",",
"err",
".",
"__class__",
",",
"str",
"(",
"err",
")",
")",
"if",
"error_is_fatal",
":",
"sys",
".",
"exit",
"(",
"\"Abort, SQL operation failed.\"",
")",
"if",
"not",
"ui",
".",
"ask",
"(",
"\"I can continue at your own risk, do you want that?\"",
")",
":",
"raise",
"err",
"return",
"result",
"return",
"exc_wrapper",
"return",
"exception_decorator"
] | Handle SQLAlchemy exceptions in a sane way.
Args:
func: An arbitrary function to wrap.
error_is_fatal: Should we exit the program on exception? If False,
the user is asked whether to continue; declining re-raises the exception.
error_messages: A dictionary that assigns an exception class to a
customized error message. | [
"Handle",
"SQLAlchemy",
"exceptions",
"in",
"a",
"sane",
"way",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L50-L89 |
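A hedged sketch of applying the `exceptions` decorator factory above; the wrapped function and the error message are invented, and `sa` is SQLAlchemy as imported by the module:

```python
import sqlalchemy as sa

@exceptions(error_is_fatal=False,
            error_messages={sa.exc.IntegrityError: 'Duplicate run entry.'})
def store_run(session, run):
    session.add(run)
    session.commit()
```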
PolyJIT/benchbuild | benchbuild/utils/schema.py | get_version_data | def get_version_data():
"""Retreive migration information."""
connect_str = str(settings.CFG["db"]["connect_string"])
repo_url = path.template_path("../db/")
return (connect_str, repo_url) | python | def get_version_data():
"""Retreive migration information."""
connect_str = str(settings.CFG["db"]["connect_string"])
repo_url = path.template_path("../db/")
return (connect_str, repo_url) | [
"def",
"get_version_data",
"(",
")",
":",
"connect_str",
"=",
"str",
"(",
"settings",
".",
"CFG",
"[",
"\"db\"",
"]",
"[",
"\"connect_string\"",
"]",
")",
"repo_url",
"=",
"path",
".",
"template_path",
"(",
"\"../db/\"",
")",
"return",
"(",
"connect_str",
",",
"repo_url",
")"
] | Retrieve migration information. | [
"Retreive",
"migration",
"information",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L339-L343 |
PolyJIT/benchbuild | benchbuild/utils/schema.py | enforce_versioning | def enforce_versioning(force=False):
"""Install versioning on the db."""
connect_str, repo_url = get_version_data()
LOG.warning("Your database uses an unversioned benchbuild schema.")
if not force and not ui.ask(
"Should I enforce version control on your schema?"):
LOG.error("User declined schema versioning.")
return None
repo_version = migrate.version(repo_url, url=connect_str)
migrate.version_control(connect_str, repo_url, version=repo_version)
return repo_version | python | def enforce_versioning(force=False):
"""Install versioning on the db."""
connect_str, repo_url = get_version_data()
LOG.warning("Your database uses an unversioned benchbuild schema.")
if not force and not ui.ask(
"Should I enforce version control on your schema?"):
LOG.error("User declined schema versioning.")
return None
repo_version = migrate.version(repo_url, url=connect_str)
migrate.version_control(connect_str, repo_url, version=repo_version)
return repo_version | [
"def",
"enforce_versioning",
"(",
"force",
"=",
"False",
")",
":",
"connect_str",
",",
"repo_url",
"=",
"get_version_data",
"(",
")",
"LOG",
".",
"warning",
"(",
"\"Your database uses an unversioned benchbuild schema.\"",
")",
"if",
"not",
"force",
"and",
"not",
"ui",
".",
"ask",
"(",
"\"Should I enforce version control on your schema?\"",
")",
":",
"LOG",
".",
"error",
"(",
"\"User declined schema versioning.\"",
")",
"return",
"None",
"repo_version",
"=",
"migrate",
".",
"version",
"(",
"repo_url",
",",
"url",
"=",
"connect_str",
")",
"migrate",
".",
"version_control",
"(",
"connect_str",
",",
"repo_url",
",",
"version",
"=",
"repo_version",
")",
"return",
"repo_version"
] | Install versioning on the db. | [
"Install",
"versioning",
"on",
"the",
"db",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L351-L361 |
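A sketch of how the two migration helpers combine; `force=True` skips the interactive prompt and stamps the schema at the migration repository's current version:

```python
connect_str, repo_url = get_version_data()  # from the benchbuild config
version = enforce_versioning(force=True)
print(f'{connect_str} is now version-controlled at {version}')
```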
PolyJIT/benchbuild | benchbuild/utils/schema.py | init_functions | def init_functions(connection):
"""Initialize all SQL functions in the database."""
if settings.CFG["db"]["create_functions"]:
print("Refreshing SQL functions...")
for file in path.template_files("../sql/", exts=[".sql"]):
func = sa.DDL(path.template_str(file))
LOG.info("Loading: '%s' into database", file)
connection.execute(func)
connection.commit() | python | def init_functions(connection):
"""Initialize all SQL functions in the database."""
if settings.CFG["db"]["create_functions"]:
print("Refreshing SQL functions...")
for file in path.template_files("../sql/", exts=[".sql"]):
func = sa.DDL(path.template_str(file))
LOG.info("Loading: '%s' into database", file)
connection.execute(func)
connection.commit() | [
"def",
"init_functions",
"(",
"connection",
")",
":",
"if",
"settings",
".",
"CFG",
"[",
"\"db\"",
"]",
"[",
"\"create_functions\"",
"]",
":",
"print",
"(",
"\"Refreshing SQL functions...\"",
")",
"for",
"file",
"in",
"path",
".",
"template_files",
"(",
"\"../sql/\"",
",",
"exts",
"=",
"[",
"\".sql\"",
"]",
")",
":",
"func",
"=",
"sa",
".",
"DDL",
"(",
"path",
".",
"template_str",
"(",
"file",
")",
")",
"LOG",
".",
"info",
"(",
"\"Loading: '%s' into database\"",
",",
"file",
")",
"connection",
".",
"execute",
"(",
"func",
")",
"connection",
".",
"commit",
"(",
")"
] | Initialize all SQL functions in the database. | [
"Initialize",
"all",
"SQL",
"functions",
"in",
"the",
"database",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L494-L502 |
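A hedged sketch of refreshing the bundled SQL functions; it assumes `CFG['db']['create_functions']` is enabled and that the connection object supports `commit()`, as the function body requires:

```python
import sqlalchemy as sa

engine = sa.create_engine(str(settings.CFG['db']['connect_string']))
with engine.connect() as connection:
    init_functions(connection)  # executes every bundled *.sql template
```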
PolyJIT/benchbuild | benchbuild/utils/schema.py | SessionManager.connect_engine | def connect_engine(self):
"""
Establish a connection to the database.
Provides simple error handling for fatal errors.
Returns:
True, if we could establish a connection, else False.
"""
try:
self.connection = self.engine.connect()
return True
except sa.exc.OperationalError as opex:
LOG.fatal("Could not connect to the database. The error was: '%s'",
str(opex))
return False | python | def connect_engine(self):
"""
Establish a connection to the database.
Provides simple error handling for fatal errors.
Returns:
True, if we could establish a connection, else False.
"""
try:
self.connection = self.engine.connect()
return True
except sa.exc.OperationalError as opex:
LOG.fatal("Could not connect to the database. The error was: '%s'",
str(opex))
return False | [
"def",
"connect_engine",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"connection",
"=",
"self",
".",
"engine",
".",
"connect",
"(",
")",
"return",
"True",
"except",
"sa",
".",
"exc",
".",
"OperationalError",
"as",
"opex",
":",
"LOG",
".",
"fatal",
"(",
"\"Could not connect to the database. The error was: '%s'\"",
",",
"str",
"(",
"opex",
")",
")",
"return",
"False"
] | Establish a connection to the database.
Provides simple error handling for fatal errors.
Returns:
True, if we could establish a connection, else False. | [
"Establish",
"a",
"connection",
"to",
"the",
"database",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L410-L425 |
PolyJIT/benchbuild | benchbuild/utils/schema.py | SessionManager.configure_engine | def configure_engine(self):
"""
Configure the database connection.
Sets appropriate transaction isolation levels and handles errors.
Returns:
True, if we did not encounter any unrecoverable errors, else False.
"""
try:
self.connection.execution_options(isolation_level="SERIALIZABLE")
except sa.exc.ArgumentError:
LOG.debug("Unable to set isolation level to SERIALIZABLE")
return True | python | def configure_engine(self):
"""
Configure the database connection.
Sets appropriate transaction isolation levels and handles errors.
Returns:
True, if we did not encounter any unrecoverable errors, else False.
"""
try:
self.connection.execution_options(isolation_level="SERIALIZABLE")
except sa.exc.ArgumentError:
LOG.debug("Unable to set isolation level to SERIALIZABLE")
return True | [
"def",
"configure_engine",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"connection",
".",
"execution_options",
"(",
"isolation_level",
"=",
"\"SERIALIZABLE\"",
")",
"except",
"sa",
".",
"exc",
".",
"ArgumentError",
":",
"LOG",
".",
"debug",
"(",
"\"Unable to set isolation level to SERIALIZABLE\"",
")",
"return",
"True"
] | Configure the database connection.
Sets appropriate transaction isolation levels and handles errors.
Returns:
True, if we did not encounter any unrecoverable errors, else False. | [
"Configure",
"the",
"databse",
"connection",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L427-L440 |
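`connect_engine` and `configure_engine` are meant to run in sequence: connect first, then request SERIALIZABLE isolation, which is silently skipped on dialects that reject the option. A hedged sketch (constructor arguments are not shown in this excerpt):

```python
manager = SessionManager()  # constructor details omitted; see the class
if manager.connect_engine():
    manager.configure_engine()
```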
portfoliome/foil | foil/dates.py | parse_date | def parse_date(date_str: str, pattern=_RE_DATE) -> dt.date:
"""Parse datetime.date from YYYY-MM-DD format."""
groups = re.match(pattern, date_str)
return dt.date(*_date_to_tuple(groups.groupdict())) | python | def parse_date(date_str: str, pattern=_RE_DATE) -> dt.date:
"""Parse datetime.date from YYYY-MM-DD format."""
groups = re.match(pattern, date_str)
return dt.date(*_date_to_tuple(groups.groupdict())) | [
"def",
"parse_date",
"(",
"date_str",
":",
"str",
",",
"pattern",
"=",
"_RE_DATE",
")",
"->",
"dt",
".",
"date",
":",
"groups",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"date_str",
")",
"return",
"dt",
".",
"date",
"(",
"*",
"_date_to_tuple",
"(",
"groups",
".",
"groupdict",
"(",
")",
")",
")"
] | Parse datetime.date from YYYY-MM-DD format. | [
"Parse",
"datetime",
".",
"date",
"from",
"YYYY",
"-",
"MM",
"-",
"DD",
"format",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/dates.py#L47-L52 |
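A quick check of `parse_date` with an invented value:

```python
import datetime as dt

assert parse_date('2014-07-23') == dt.date(2014, 7, 23)
```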
portfoliome/foil | foil/dates.py | _datetime_to_tuple | def _datetime_to_tuple(dt_dict):
"""datetime.datetime components from dictionary to tuple.
Example
-------
dt_dict = {'year': '2014','month': '07','day': '23',
'hour': '13','minute': '12','second': '45','microsecond': '321'}
_datetime_to_tuple(dt_dict) -> (2014, 7, 23, 13, 12, 45, 321)
"""
year, month, day = _date_to_tuple(dt_dict)
hour, minute, second, microsecond = _time_to_tuple(dt_dict)
return year, month, day, hour, minute, second, microsecond | python | def _datetime_to_tuple(dt_dict):
"""datetime.datetime components from dictionary to tuple.
Example
-------
dt_dict = {'year': '2014','month': '07','day': '23',
'hour': '13','minute': '12','second': '45','microsecond': '321'}
_datetime_to_tuple(dt_dict) -> (2014, 7, 23, 13, 12, 45, 321)
"""
year, month, day = _date_to_tuple(dt_dict)
hour, minute, second, microsecond = _time_to_tuple(dt_dict)
return year, month, day, hour, minute, second, microsecond | [
"def",
"_datetime_to_tuple",
"(",
"dt_dict",
")",
":",
"year",
",",
"month",
",",
"day",
"=",
"_date_to_tuple",
"(",
"dt_dict",
")",
"hour",
",",
"minute",
",",
"second",
",",
"microsecond",
"=",
"_time_to_tuple",
"(",
"dt_dict",
")",
"return",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
",",
"microsecond"
] | datetime.datetime components from dictionary to tuple.
Example
-------
dt_dict = {'year': '2014','month': '07','day': '23',
'hour': '13','minute': '12','second': '45','microsecond': '321'}
_datetime_to_tuple(dt_dict) -> (2014, 7, 23, 13, 12, 45, 321) | [
"datetime",
".",
"datetime",
"components",
"from",
"dictionary",
"to",
"tuple",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/dates.py#L81-L95 |
portfoliome/foil | foil/dates.py | DateTimeParser.convert_2_utc | def convert_2_utc(self, datetime_, timezone):
"""convert to datetime to UTC offset."""
datetime_ = self.tz_mapper[timezone].localize(datetime_)
return datetime_.astimezone(pytz.UTC) | python | def convert_2_utc(self, datetime_, timezone):
"""convert to datetime to UTC offset."""
datetime_ = self.tz_mapper[timezone].localize(datetime_)
return datetime_.astimezone(pytz.UTC) | [
"def",
"convert_2_utc",
"(",
"self",
",",
"datetime_",
",",
"timezone",
")",
":",
"datetime_",
"=",
"self",
".",
"tz_mapper",
"[",
"timezone",
"]",
".",
"localize",
"(",
"datetime_",
")",
"return",
"datetime_",
".",
"astimezone",
"(",
"pytz",
".",
"UTC",
")"
] | Convert datetime to UTC. | [
"convert",
"to",
"datetime",
"to",
"UTC",
"offset",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/dates.py#L74-L78 |
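`convert_2_utc` relies on the standard pytz two-step: `localize` a naive datetime (correctly handling DST), then `astimezone` to UTC. The same pattern standalone, with an invented timestamp:

```python
import datetime as dt
import pytz

eastern = pytz.timezone('US/Eastern')
aware = eastern.localize(dt.datetime(2014, 7, 23, 13, 12, 45))
print(aware.astimezone(pytz.UTC).isoformat())  # 2014-07-23T17:12:45+00:00
```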
jfear/sramongo | sramongo/services/entrez.py | esearch | def esearch(database, query, userhistory=True, webenv=False, query_key=False, retstart=False, retmax=False,
api_key=False, email=False, **kwargs) -> Optional[EsearchResult]:
"""Search for a query using the Entrez ESearch API.
Parameters
----------
database : str
Entrez database to search.
query : str
Query string
userhistory : bool
Tells the API to return a WebEnv and query_key.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A user's API key, which allows more requests per second
email : str
A user's email, which is required if not using an API key.
Returns
-------
EsearchResult
A named tuple with values [ids, count, webenv, query_key]
"""
cleaned_query = urllib.parse.quote_plus(query, safe='/+')
url = BASE_URL + f'esearch.fcgi?db={database}&term={cleaned_query}&retmode=json'
url = check_userhistory(userhistory, url)
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_retstart(retstart, url)
url = check_retmax(retmax, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
time.sleep(PAUSE)
resp = requests.get(url)
if resp.status_code != 200:
print('There was a server error')
return
text = resp.json()
time.sleep(.5)
return EsearchResult(
text['esearchresult'].get('idlist', []),
make_number(text['esearchresult'].get('count', ''), int),
text['esearchresult'].get('webenv', ''),
text['esearchresult'].get('querykey', '')
) | python | def esearch(database, query, userhistory=True, webenv=False, query_key=False, retstart=False, retmax=False,
api_key=False, email=False, **kwargs) -> Optional[EsearchResult]:
"""Search for a query using the Entrez ESearch API.
Parameters
----------
database : str
Entrez database to search.
query : str
Query string
userhistory : bool
Tells the API to return a WebEnv and query_key.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A user's API key, which allows more requests per second
email : str
A user's email, which is required if not using an API key.
Returns
-------
EsearchResult
A named tuple with values [ids, count, webenv, query_key]
"""
cleaned_query = urllib.parse.quote_plus(query, safe='/+')
url = BASE_URL + f'esearch.fcgi?db={database}&term={cleaned_query}&retmode=json'
url = check_userhistory(userhistory, url)
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_retstart(retstart, url)
url = check_retmax(retmax, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
time.sleep(PAUSE)
resp = requests.get(url)
if resp.status_code != 200:
print('There was a server error')
return
text = resp.json()
time.sleep(.5)
return EsearchResult(
text['esearchresult'].get('idlist', []),
make_number(text['esearchresult'].get('count', ''), int),
text['esearchresult'].get('webenv', ''),
text['esearchresult'].get('querykey', '')
) | [
"def",
"esearch",
"(",
"database",
",",
"query",
",",
"userhistory",
"=",
"True",
",",
"webenv",
"=",
"False",
",",
"query_key",
"=",
"False",
",",
"retstart",
"=",
"False",
",",
"retmax",
"=",
"False",
",",
"api_key",
"=",
"False",
",",
"email",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"EsearchResult",
"]",
":",
"cleaned_query",
"=",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"query",
",",
"safe",
"=",
"'/+'",
")",
"url",
"=",
"BASE_URL",
"+",
"f'esearch.fcgi?db={database}&term={cleaned_query}&retmode=json'",
"url",
"=",
"check_userhistory",
"(",
"userhistory",
",",
"url",
")",
"url",
"=",
"check_webenv",
"(",
"webenv",
",",
"url",
")",
"url",
"=",
"check_query_key",
"(",
"query_key",
",",
"url",
")",
"url",
"=",
"check_retstart",
"(",
"retstart",
",",
"url",
")",
"url",
"=",
"check_retmax",
"(",
"retmax",
",",
"url",
")",
"url",
"=",
"check_api_key",
"(",
"api_key",
",",
"url",
")",
"url",
"=",
"check_email",
"(",
"email",
",",
"url",
")",
"time",
".",
"sleep",
"(",
"PAUSE",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"print",
"(",
"'There was a server error'",
")",
"return",
"text",
"=",
"resp",
".",
"json",
"(",
")",
"time",
".",
"sleep",
"(",
".5",
")",
"return",
"EsearchResult",
"(",
"text",
"[",
"'esearchresult'",
"]",
".",
"get",
"(",
"'idlist'",
",",
"[",
"]",
")",
",",
"make_number",
"(",
"text",
"[",
"'esearchresult'",
"]",
".",
"get",
"(",
"'count'",
",",
"''",
")",
",",
"int",
")",
",",
"text",
"[",
"'esearchresult'",
"]",
".",
"get",
"(",
"'webenv'",
",",
"''",
")",
",",
"text",
"[",
"'esearchresult'",
"]",
".",
"get",
"(",
"'querykey'",
",",
"''",
")",
")"
] | Search for a query using the Entrez ESearch API.
Parameters
----------
database : str
Entrez database to search.
query : str
Query string
userhistory : bool
Tells the API to return a WebEnv and query_key.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A user's API key, which allows more requests per second
email : str
A user's email, which is required if not using an API key.
Returns
-------
EsearchResult
A named tuple with values [ids, count, webenv, query_key] | [
"Search",
"for",
"a",
"query",
"using",
"the",
"Entrez",
"ESearch",
"API",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L28-L83 |
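A hedged sketch of calling `esearch`; the query and email are invented and a live NCBI connection is required. The returned history tokens feed the helpers below:

```python
result = esearch('sra', 'Drosophila melanogaster[Orgn]',
                 email='user@example.com')
if result is not None:
    print(result.count, 'hits; first ids:', result.ids[:5])
    # result.webenv and result.query_key can be passed to
    # epost/elink/esummary for history-based follow-up calls.
```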
jfear/sramongo | sramongo/services/entrez.py | epost | def epost(database, ids: List[str], webenv=False, api_key=False, email=False, **kwargs) -> Optional[EpostResult]:
"""Post IDs using the Entrez ESearch API.
Parameters
----------
database : str
Entez database to search.
ids : list
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to post ids to.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
requests.Response
"""
url = BASE_URL + f'epost.fcgi'
id = ','.join(ids)
url_params = f'db={database}&id={id}'
url_params = check_webenv(webenv, url_params)
url_params = check_api_key(api_key, url_params)
url_params = check_email(email, url_params)
resp = entrez_try_put_multiple_times(url, url_params, num_tries=3)
time.sleep(.5)
return parse_epost(resp.text) | python | def epost(database, ids: List[str], webenv=False, api_key=False, email=False, **kwargs) -> Optional[EpostResult]:
"""Post IDs using the Entrez ESearch API.
Parameters
----------
database : str
Entez database to search.
ids : list
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to post ids to.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
requests.Response
"""
url = BASE_URL + f'epost.fcgi'
id = ','.join(ids)
url_params = f'db={database}&id={id}'
url_params = check_webenv(webenv, url_params)
url_params = check_api_key(api_key, url_params)
url_params = check_email(email, url_params)
resp = entrez_try_put_multiple_times(url, url_params, num_tries=3)
time.sleep(.5)
return parse_epost(resp.text) | [
"def",
"epost",
"(",
"database",
",",
"ids",
":",
"List",
"[",
"str",
"]",
",",
"webenv",
"=",
"False",
",",
"api_key",
"=",
"False",
",",
"email",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"EpostResult",
"]",
":",
"url",
"=",
"BASE_URL",
"+",
"f'epost.fcgi'",
"id",
"=",
"','",
".",
"join",
"(",
"ids",
")",
"url_params",
"=",
"f'db={database}&id={id}'",
"url_params",
"=",
"check_webenv",
"(",
"webenv",
",",
"url_params",
")",
"url_params",
"=",
"check_api_key",
"(",
"api_key",
",",
"url_params",
")",
"url_params",
"=",
"check_email",
"(",
"email",
",",
"url_params",
")",
"resp",
"=",
"entrez_try_put_multiple_times",
"(",
"url",
",",
"url_params",
",",
"num_tries",
"=",
"3",
")",
"time",
".",
"sleep",
"(",
".5",
")",
"return",
"parse_epost",
"(",
"resp",
".",
"text",
")"
] | Post IDs using the Entrez EPost API.
Parameters
----------
database : str
Entrez database the IDs belong to.
ids : list
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to post ids to.
api_key : str
A user's API key, which allows more requests per second
email : str
A user's email, which is required if not using an API key.
Returns
-------
EpostResult | [
"Post",
"IDs",
"using",
"the",
"Entrez",
"ESearch",
"API",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L86-L115 |
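`epost` uploads explicit IDs to the Entrez history server instead of running a query. A hedged sketch with invented IDs; the structure of `EpostResult` comes from `parse_epost`, which this excerpt does not show:

```python
posted = epost('sra', ['256222', '256223'], email='user@example.com')
if posted is not None:
    print(posted)  # EpostResult parsed from the XML response
```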
jfear/sramongo | sramongo/services/entrez.py | elink | def elink(db: str, dbfrom: str, ids=False, webenv=False, query_key=False, api_key=False, email=False,
**kwargs) -> Optional[ElinkResult]:
"""Get document summaries using the Entrez ESearch API.
Parameters
----------
db : str
Entez database to get ids from.
dbfrom : str
Entez database the provided ids are from.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
list
A list of ElinkResult with values [id, srx, create_date, update_date]
"""
url = BASE_URL + f'elink.fcgi?dbfrom={dbfrom}&db={db}&retmode=json&cmd=neighbor_history'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
time.sleep(PAUSE)
resp = requests.get(url)
if resp.status_code != 200:
print('There was a server error')
return
text = resp.json()
time.sleep(.5)
return ElinkResult(
text['linksets'][0].get('dbfrom', ''),
text['linksets'][0].get('linksetdbhistories', [{'dbto': ''}])[0].get('dbto', ''),
text['linksets'][0].get('webenv', ''),
text['linksets'][0].get('linksetdbhistories', [{'querykey': ''}])[0].get('querykey', ''),
) | python | def elink(db: str, dbfrom: str, ids=False, webenv=False, query_key=False, api_key=False, email=False,
**kwargs) -> Optional[ElinkResult]:
"""Get document summaries using the Entrez ESearch API.
Parameters
----------
db : str
Entez database to get ids from.
dbfrom : str
Entez database the provided ids are from.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
list
A list of ElinkResult with values [id, srx, create_date, update_date]
"""
url = BASE_URL + f'elink.fcgi?dbfrom={dbfrom}&db={db}&retmode=json&cmd=neighbor_history'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
time.sleep(PAUSE)
resp = requests.get(url)
if resp.status_code != 200:
print('There was a server error')
return
text = resp.json()
time.sleep(.5)
return ElinkResult(
text['linksets'][0].get('dbfrom', ''),
text['linksets'][0].get('linksetdbhistories', [{'dbto': ''}])[0].get('dbto', ''),
text['linksets'][0].get('webenv', ''),
text['linksets'][0].get('linksetdbhistories', [{'querykey': ''}])[0].get('querykey', ''),
) | [
"def",
"elink",
"(",
"db",
":",
"str",
",",
"dbfrom",
":",
"str",
",",
"ids",
"=",
"False",
",",
"webenv",
"=",
"False",
",",
"query_key",
"=",
"False",
",",
"api_key",
"=",
"False",
",",
"email",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"ElinkResult",
"]",
":",
"url",
"=",
"BASE_URL",
"+",
"f'elink.fcgi?dbfrom={dbfrom}&db={db}&retmode=json&cmd=neighbor_history'",
"url",
"=",
"check_webenv",
"(",
"webenv",
",",
"url",
")",
"url",
"=",
"check_query_key",
"(",
"query_key",
",",
"url",
")",
"url",
"=",
"check_api_key",
"(",
"api_key",
",",
"url",
")",
"url",
"=",
"check_email",
"(",
"email",
",",
"url",
")",
"if",
"ids",
":",
"if",
"isinstance",
"(",
"ids",
",",
"str",
")",
":",
"id",
"=",
"ids",
"else",
":",
"id",
"=",
"','",
".",
"join",
"(",
"ids",
")",
"url",
"+=",
"f'&id={id}'",
"time",
".",
"sleep",
"(",
"PAUSE",
")",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"print",
"(",
"'There was a server error'",
")",
"return",
"text",
"=",
"resp",
".",
"json",
"(",
")",
"time",
".",
"sleep",
"(",
".5",
")",
"return",
"ElinkResult",
"(",
"text",
"[",
"'linksets'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'dbfrom'",
",",
"''",
")",
",",
"text",
"[",
"'linksets'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'linksetdbhistories'",
",",
"[",
"{",
"'dbto'",
":",
"''",
"}",
"]",
")",
"[",
"0",
"]",
".",
"get",
"(",
"'dbto'",
",",
"''",
")",
",",
"text",
"[",
"'linksets'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'webenv'",
",",
"''",
")",
",",
"text",
"[",
"'linksets'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'linksetdbhistories'",
",",
"[",
"{",
"'querykey'",
":",
"''",
"}",
"]",
")",
"[",
"0",
"]",
".",
"get",
"(",
"'querykey'",
",",
"''",
")",
",",
")"
] | Get linked records using the Entrez ELink API.
Parameters
----------
db : str
Entrez database to get ids from.
dbfrom : str
Entrez database the provided ids are from.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
api_key : str
A user's API key, which allows more requests per second
email : str
A user's email, which is required if not using an API key.
Returns
-------
ElinkResult
A named tuple with values [dbfrom, dbto, webenv, query_key] | [
"Get",
"document",
"summaries",
"using",
"the",
"Entrez",
"ESearch",
"API",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L118-L171 |
jfear/sramongo | sramongo/services/entrez.py | esummary | def esummary(database: str, ids=False, webenv=False, query_key=False, count=False, retstart=False, retmax=False,
api_key=False, email=False, **kwargs) -> Optional[List[EsummaryResult]]:
"""Get document summaries using the Entrez ESearch API.
Parameters
----------
database : str
Entez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
list
A list of EsummaryResults with values [id, srx, create_date, update_date]
"""
url = BASE_URL + f'esummary.fcgi?db={database}'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
count = len(id.split(','))
for resp in entrez_sets_of_results(url, retstart, retmax, count):
yield resp.text | python | def esummary(database: str, ids=False, webenv=False, query_key=False, count=False, retstart=False, retmax=False,
api_key=False, email=False, **kwargs) -> Optional[List[EsummaryResult]]:
"""Get document summaries using the Entrez ESearch API.
Parameters
----------
database : str
Entez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A users API key which allows more requests per second
email : str
A users email which is required if not using API.
Returns
-------
list
A list of EsummaryResults with values [id, srx, create_date, update_date]
"""
url = BASE_URL + f'esummary.fcgi?db={database}'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
count = len(id.split(','))
for resp in entrez_sets_of_results(url, retstart, retmax, count):
yield resp.text | [
"def",
"esummary",
"(",
"database",
":",
"str",
",",
"ids",
"=",
"False",
",",
"webenv",
"=",
"False",
",",
"query_key",
"=",
"False",
",",
"count",
"=",
"False",
",",
"retstart",
"=",
"False",
",",
"retmax",
"=",
"False",
",",
"api_key",
"=",
"False",
",",
"email",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"Optional",
"[",
"List",
"[",
"EsummaryResult",
"]",
"]",
":",
"url",
"=",
"BASE_URL",
"+",
"f'esummary.fcgi?db={database}'",
"url",
"=",
"check_webenv",
"(",
"webenv",
",",
"url",
")",
"url",
"=",
"check_query_key",
"(",
"query_key",
",",
"url",
")",
"url",
"=",
"check_api_key",
"(",
"api_key",
",",
"url",
")",
"url",
"=",
"check_email",
"(",
"email",
",",
"url",
")",
"if",
"ids",
":",
"if",
"isinstance",
"(",
"ids",
",",
"str",
")",
":",
"id",
"=",
"ids",
"else",
":",
"id",
"=",
"','",
".",
"join",
"(",
"ids",
")",
"url",
"+=",
"f'&id={id}'",
"count",
"=",
"len",
"(",
"id",
".",
"split",
"(",
"','",
")",
")",
"for",
"resp",
"in",
"entrez_sets_of_results",
"(",
"url",
",",
"retstart",
",",
"retmax",
",",
"count",
")",
":",
"yield",
"resp",
".",
"text"
] | Get document summaries using the Entrez ESummary API.
Parameters
----------
database : str
Entrez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
api_key : str
A user's API key which allows more requests per second
email : str
A user's email which is required if not using an API key.
Returns
-------
list
A list of EsummaryResults with values [id, srx, create_date, update_date] | [
"Get",
"document",
"summaries",
"using",
"the",
"Entrez",
"ESearch",
"API",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L174-L220 |
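Note that, despite the Optional[List[EsummaryResult]] annotation above, esummary is a generator that yields the raw text of each HTTP response. A minimal usage sketch under that reading; the database, IDs, and email are made up for illustration:

    # Stream ESummary XML for two hypothetical SRA experiment IDs.
    for chunk in esummary('sra', ids=['SRX000001', 'SRX000002'],
                          email='user@example.com'):
        print(chunk[:120])  # each chunk is one raw response body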
jfear/sramongo | sramongo/services/entrez.py | efetch | def efetch(database, ids=False, webenv=False, query_key=False, count=False, retstart=False, retmax=False,
rettype='full', retmode='xml', api_key=False, email=False, **kwargs) -> str:
"""Get documents using the Entrez ESearch API.gg
Parameters
----------
database : str
Entrez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
rettype : str
The type of document to return. Refer to link for valid return types for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
retmode : str
The format of document to return. Refer to link for valid formats for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
api_key : str
A user's API key which allows more requests per second
email : str
A user's email which is required if not using an API key.
Yields
------
str
Text from efetch results. Format depends on parameters passed to retmode
"""
url = BASE_URL + f'efetch.fcgi?db={database}&retmode={retmode}&rettype={rettype}'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
count = len(id.split(','))
for resp in entrez_sets_of_results(url, retstart, retmax, count):
yield resp.text | python | def efetch(database, ids=False, webenv=False, query_key=False, count=False, retstart=False, retmax=False,
rettype='full', retmode='xml', api_key=False, email=False, **kwargs) -> str:
"""Get documents using the Entrez ESearch API.gg
Parameters
----------
database : str
Entrez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
rettype : str
The type of document to return. Refer to link for valid return types for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
retmode : str
The format of document to return. Refer to link for valid formats for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
api_key : str
A user's API key which allows more requests per second
email : str
A user's email which is required if not using an API key.
Yields
------
str
Text from efetch results. Format depends on parameters passed to retmode
"""
url = BASE_URL + f'efetch.fcgi?db={database}&retmode={retmode}&rettype={rettype}'
url = check_webenv(webenv, url)
url = check_query_key(query_key, url)
url = check_api_key(api_key, url)
url = check_email(email, url)
if ids:
if isinstance(ids, str):
id = ids
else:
id = ','.join(ids)
url += f'&id={id}'
count = len(id.split(','))
for resp in entrez_sets_of_results(url, retstart, retmax, count):
yield resp.text | [
"def",
"efetch",
"(",
"database",
",",
"ids",
"=",
"False",
",",
"webenv",
"=",
"False",
",",
"query_key",
"=",
"False",
",",
"count",
"=",
"False",
",",
"retstart",
"=",
"False",
",",
"retmax",
"=",
"False",
",",
"rettype",
"=",
"'full'",
",",
"retmode",
"=",
"'xml'",
",",
"api_key",
"=",
"False",
",",
"email",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"->",
"str",
":",
"url",
"=",
"BASE_URL",
"+",
"f'efetch.fcgi?db={database}&retmode={retmode}&rettype={rettype}'",
"url",
"=",
"check_webenv",
"(",
"webenv",
",",
"url",
")",
"url",
"=",
"check_query_key",
"(",
"query_key",
",",
"url",
")",
"url",
"=",
"check_api_key",
"(",
"api_key",
",",
"url",
")",
"url",
"=",
"check_email",
"(",
"email",
",",
"url",
")",
"if",
"ids",
":",
"if",
"isinstance",
"(",
"ids",
",",
"str",
")",
":",
"id",
"=",
"ids",
"else",
":",
"id",
"=",
"','",
".",
"join",
"(",
"ids",
")",
"url",
"+=",
"f'&id={id}'",
"count",
"=",
"len",
"(",
"id",
".",
"split",
"(",
"','",
")",
")",
"for",
"resp",
"in",
"entrez_sets_of_results",
"(",
"url",
",",
"retstart",
",",
"retmax",
",",
"count",
")",
":",
"yield",
"resp",
".",
"text"
] | Get documents using the Entrez EFetch API.
Parameters
----------
database : str
Entrez database to search.
ids : list or str
List of IDs to submit to the server.
webenv : str
An Entrez WebEnv to use saved history.
query_key : str
An Entrez query_key to use saved history.
count : int
Number of records in the webenv
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
rettype : str
The type of document to return. Refer to link for valid return types for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
retmode : str
The format of document to return. Refer to link for valid formats for each database.
https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
api_key : str
A user's API key which allows more requests per second
email : str
A user's email which is required if not using an API key.
Yields
------
str
Text from efetch results. Format depends on parameters passed to retmode | [
"Get",
"documents",
"using",
"the",
"Entrez",
"ESearch",
"API",
".",
"gg"
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L223-L275 |
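efetch follows the same generator pattern. A hedged sketch of driving it from a saved Entrez history instead of explicit IDs; the webenv and query_key values are placeholders that would come from an earlier esearch or elink call:

    # Page through full XML records, 500 at a time, from a saved history.
    for page in efetch('sra', webenv='NCID_1_0000000.000', query_key='1',
                       count=1200, email='user@example.com'):
        handle_page(page)  # handle_page is a hypothetical caller-side parser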
jfear/sramongo | sramongo/services/entrez.py | entrez_sets_of_results | def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
"""Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response
"""
if not retstart:
retstart = 0
if not retmax:
retmax = 500
if not count:
count = retmax
retmax = 500 # Entrez can return a max of 500
while retstart < count:
diff = count - retstart
if diff < 500:
retmax = diff
_url = url + f'&retstart={retstart}&retmax={retmax}'
resp = entrez_try_get_multiple_times(_url)
if resp is None:
return
retstart += retmax
yield resp | python | def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
"""Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response
"""
if not retstart:
retstart = 0
if not retmax:
retmax = 500
if not count:
count = retmax
retmax = 500 # Entrez can return a max of 500
while retstart < count:
diff = count - retstart
if diff < 500:
retmax = diff
_url = url + f'&retstart={retstart}&retmax={retmax}'
resp = entrez_try_get_multiple_times(_url)
if resp is None:
return
retstart += retmax
yield resp | [
"def",
"entrez_sets_of_results",
"(",
"url",
",",
"retstart",
"=",
"False",
",",
"retmax",
"=",
"False",
",",
"count",
"=",
"False",
")",
"->",
"Optional",
"[",
"List",
"[",
"requests",
".",
"Response",
"]",
"]",
":",
"if",
"not",
"retstart",
":",
"retstart",
"=",
"0",
"if",
"not",
"retmax",
":",
"retmax",
"=",
"500",
"if",
"not",
"count",
":",
"count",
"=",
"retmax",
"retmax",
"=",
"500",
"# Entrez can return a max of 500",
"while",
"retstart",
"<",
"count",
":",
"diff",
"=",
"count",
"-",
"retstart",
"if",
"diff",
"<",
"500",
":",
"retmax",
"=",
"diff",
"_url",
"=",
"url",
"+",
"f'&retstart={retstart}&retmax={retmax}'",
"resp",
"=",
"entrez_try_get_multiple_times",
"(",
"_url",
")",
"if",
"resp",
"is",
"None",
":",
"return",
"retstart",
"+=",
"retmax",
"yield",
"resp"
] | Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response | [
"Gets",
"sets",
"of",
"results",
"back",
"from",
"Entrez",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L358-L401 |
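The retstart/retmax loop above pages through results in blocks of at most 500: with count=1200 it issues three requests, at retstart=0&retmax=500, retstart=500&retmax=500, and retstart=1000&retmax=200. A standalone sketch of the same windowing arithmetic, with the HTTP call factored out:

    def paging_windows(count, retstart=0, retmax=500):
        # Yield (retstart, retmax) pairs the way entrez_sets_of_results does.
        while retstart < count:
            step = min(retmax, count - retstart)
            yield retstart, step
            retstart += step

    assert list(paging_windows(1200)) == [(0, 500), (500, 500), (1000, 200)]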
PolyJIT/benchbuild | benchbuild/cli/log.py | print_runs | def print_runs(query):
""" Print all rows in this result query. """
if query is None:
return
for tup in query:
print(("{0} @ {1} - {2} id: {3} group: {4}".format(
tup.end, tup.experiment_name, tup.project_name,
tup.experiment_group, tup.run_group))) | python | def print_runs(query):
""" Print all rows in this result query. """
if query is None:
return
for tup in query:
print(("{0} @ {1} - {2} id: {3} group: {4}".format(
tup.end, tup.experiment_name, tup.project_name,
tup.experiment_group, tup.run_group))) | [
"def",
"print_runs",
"(",
"query",
")",
":",
"if",
"query",
"is",
"None",
":",
"return",
"for",
"tup",
"in",
"query",
":",
"print",
"(",
"(",
"\"{0} @ {1} - {2} id: {3} group: {4}\"",
".",
"format",
"(",
"tup",
".",
"end",
",",
"tup",
".",
"experiment_name",
",",
"tup",
".",
"project_name",
",",
"tup",
".",
"experiment_group",
",",
"tup",
".",
"run_group",
")",
")",
")"
] | Print all rows in this result query. | [
"Print",
"all",
"rows",
"in",
"this",
"result",
"query",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/cli/log.py#L9-L18 |
PolyJIT/benchbuild | benchbuild/cli/log.py | print_logs | def print_logs(query, types=None):
""" Print status logs. """
if query is None:
return
for run, log in query:
print(("{0} @ {1} - {2} id: {3} group: {4} status: {5}".format(
run.end, run.experiment_name, run.project_name,
run.experiment_group, run.run_group, log.status)))
print(("command: {0}".format(run.command)))
if "stderr" in types:
print("StdErr:")
print((log.stderr))
if "stdout" in types:
print("StdOut:")
print((log.stdout))
print() | python | def print_logs(query, types=None):
""" Print status logs. """
if query is None:
return
for run, log in query:
print(("{0} @ {1} - {2} id: {3} group: {4} status: {5}".format(
run.end, run.experiment_name, run.project_name,
run.experiment_group, run.run_group, log.status)))
print(("command: {0}".format(run.command)))
if "stderr" in types:
print("StdErr:")
print((log.stderr))
if "stdout" in types:
print("StdOut:")
print((log.stdout))
print() | [
"def",
"print_logs",
"(",
"query",
",",
"types",
"=",
"None",
")",
":",
"if",
"query",
"is",
"None",
":",
"return",
"for",
"run",
",",
"log",
"in",
"query",
":",
"print",
"(",
"(",
"\"{0} @ {1} - {2} id: {3} group: {4} status: {5}\"",
".",
"format",
"(",
"run",
".",
"end",
",",
"run",
".",
"experiment_name",
",",
"run",
".",
"project_name",
",",
"run",
".",
"experiment_group",
",",
"run",
".",
"run_group",
",",
"log",
".",
"status",
")",
")",
")",
"print",
"(",
"(",
"\"command: {0}\"",
".",
"format",
"(",
"run",
".",
"command",
")",
")",
")",
"if",
"\"stderr\"",
"in",
"types",
":",
"print",
"(",
"\"StdErr:\"",
")",
"print",
"(",
"(",
"log",
".",
"stderr",
")",
")",
"if",
"\"stdout\"",
"in",
"types",
":",
"print",
"(",
"\"StdOut:\"",
")",
"print",
"(",
"(",
"log",
".",
"stdout",
")",
")",
"print",
"(",
")"
] | Print status logs. | [
"Print",
"status",
"logs",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/cli/log.py#L21-L37 |
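A small driver sketch for the two logging helpers above. print_logs tests membership with `in types`, so the None default would raise a TypeError if used as-is; callers apparently pass a container. The session object and the Run/Log model names are assumptions about the benchbuild schema:

    # session is assumed to be a SQLAlchemy session bound to the results DB.
    print_runs(session.query(Run))
    print_logs(session.query(Run, Log).join(Log), types=('stderr', 'stdout'))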
sci-bots/svg-model | svg_model/merge.py | get_svg_layers | def get_svg_layers(svg_sources):
'''
Collect layers from input svg sources.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
Returns
-------
(width, height), layers : (int, int), list
The first item in the tuple is the shape of the largest layer, and the
second item is a list of ``Element`` objects (from :mod:`lxml.etree`
module), one per SVG layer.
'''
layers = []
width, height = None, None
def extract_length(attr):
'Extract length in pixels.'
match = CRE_MM_LENGTH.match(attr)
if match:
# Length is specified in millimeters.
return INKSCAPE_PPmm.magnitude * float(match.group('length'))
else:
return float(attr)
for svg_source_i in svg_sources:
# Parse input file.
xml_root = etree.parse(svg_source_i)
svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
width = max(extract_length(svg_root.attrib['width']), width)
height = max(extract_length(svg_root.attrib['height']), height)
layers += svg_root.xpath('//svg:g[@inkscape:groupmode="layer"]',
namespaces=INKSCAPE_NSMAP)
for i, layer_i in enumerate(layers):
layer_i.attrib['id'] = 'layer%d' % (i + 1)
return (width, height), layers | python | def get_svg_layers(svg_sources):
'''
Collect layers from input svg sources.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
Returns
-------
(width, height), layers : (int, int), list
The first item in the tuple is the shape of the largest layer, and the
second item is a list of ``Element`` objects (from :mod:`lxml.etree`
module), one per SVG layer.
'''
layers = []
width, height = None, None
def extract_length(attr):
'Extract length in pixels.'
match = CRE_MM_LENGTH.match(attr)
if match:
# Length is specified in millimeters.
return INKSCAPE_PPmm.magnitude * float(match.group('length'))
else:
return float(attr)
for svg_source_i in svg_sources:
# Parse input file.
xml_root = etree.parse(svg_source_i)
svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
width = max(extract_length(svg_root.attrib['width']), width)
height = max(extract_length(svg_root.attrib['height']), height)
layers += svg_root.xpath('//svg:g[@inkscape:groupmode="layer"]',
namespaces=INKSCAPE_NSMAP)
for i, layer_i in enumerate(layers):
layer_i.attrib['id'] = 'layer%d' % (i + 1)
return (width, height), layers | [
"def",
"get_svg_layers",
"(",
"svg_sources",
")",
":",
"layers",
"=",
"[",
"]",
"width",
",",
"height",
"=",
"None",
",",
"None",
"def",
"extract_length",
"(",
"attr",
")",
":",
"'Extract length in pixels.'",
"match",
"=",
"CRE_MM_LENGTH",
".",
"match",
"(",
"attr",
")",
"if",
"match",
":",
"# Length is specified in millimeters.",
"return",
"INKSCAPE_PPmm",
".",
"magnitude",
"*",
"float",
"(",
"match",
".",
"group",
"(",
"'length'",
")",
")",
"else",
":",
"return",
"float",
"(",
"attr",
")",
"for",
"svg_source_i",
"in",
"svg_sources",
":",
"# Parse input file.",
"xml_root",
"=",
"etree",
".",
"parse",
"(",
"svg_source_i",
")",
"svg_root",
"=",
"xml_root",
".",
"xpath",
"(",
"'/svg:svg'",
",",
"namespaces",
"=",
"INKSCAPE_NSMAP",
")",
"[",
"0",
"]",
"width",
"=",
"max",
"(",
"extract_length",
"(",
"svg_root",
".",
"attrib",
"[",
"'width'",
"]",
")",
",",
"width",
")",
"height",
"=",
"max",
"(",
"extract_length",
"(",
"svg_root",
".",
"attrib",
"[",
"'height'",
"]",
")",
",",
"height",
")",
"layers",
"+=",
"svg_root",
".",
"xpath",
"(",
"'//svg:g[@inkscape:groupmode=\"layer\"]'",
",",
"namespaces",
"=",
"INKSCAPE_NSMAP",
")",
"for",
"i",
",",
"layer_i",
"in",
"enumerate",
"(",
"layers",
")",
":",
"layer_i",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'layer%d'",
"%",
"(",
"i",
"+",
"1",
")",
"return",
"(",
"width",
",",
"height",
")",
",",
"layers"
] | Collect layers from input svg sources.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
Returns
-------
(width, height), layers : (int, int), list
The first item in the tuple is the shape of the largest layer, and the
second item is a list of ``Element`` objects (from :mod:`lxml.etree`
module), one per SVG layer. | [
"Collect",
"layers",
"from",
"input",
"svg",
"sources",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/merge.py#L14-L53 |
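A usage sketch for the layer collection above, assuming two Inkscape-produced SVG files on disk. Note that the function seeds width and height with None and then calls max() against them, which relies on Python 2 comparison semantics; under Python 3 the first max() call would raise a TypeError:

    with open('device.svg', 'rb') as f1, open('mask.svg', 'rb') as f2:
        (width, height), layers = get_svg_layers([f1, f2])
    print(width, height, [layer.attrib['id'] for layer in layers])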
sci-bots/svg-model | svg_model/merge.py | merge_svg_layers | def merge_svg_layers(svg_sources, share_transform=True):
'''
Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object containing the merged XML document.
'''
# Get list of XML layers.
(width, height), layers = get_svg_layers(svg_sources)
if share_transform:
transforms = [layer_i.attrib['transform'] for layer_i in layers
if 'transform' in layer_i.attrib]
if len(transforms) > 1:
raise ValueError('Transform can only be shared if *exactly one* '
'layer has a transform ({} layers have '
'`transform` attributes)'.format(len(transforms)))
elif transforms:
# Apply single common transform to all layers.
for layer_i in layers:
layer_i.attrib['transform'] = transforms[0]
# Create blank XML output document.
dwg = svgwrite.Drawing(profile='tiny', debug=False, size=(width, height))
# Append layers to output XML root element.
output_svg_root = etree.fromstring(dwg.tostring())
output_svg_root.extend(layers)
# Write merged XML document to output file-like object.
output = StringIO.StringIO()
output.write(etree.tostring(output_svg_root))
output.seek(0)
return output | python | def merge_svg_layers(svg_sources, share_transform=True):
'''
Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object containing the merged XML document.
'''
# Get list of XML layers.
(width, height), layers = get_svg_layers(svg_sources)
if share_transform:
transforms = [layer_i.attrib['transform'] for layer_i in layers
if 'transform' in layer_i.attrib]
if len(transforms) > 1:
raise ValueError('Transform can only be shared if *exactly one* '
'layer has a transform ({} layers have '
'`transform` attributes)'.format(len(transforms)))
elif transforms:
# Apply single common transform to all layers.
for layer_i in layers:
layer_i.attrib['transform'] = transforms[0]
# Create blank XML output document.
dwg = svgwrite.Drawing(profile='tiny', debug=False, size=(width, height))
# Append layers to output XML root element.
output_svg_root = etree.fromstring(dwg.tostring())
output_svg_root.extend(layers)
# Write merged XML document to output file-like object.
output = StringIO.StringIO()
output.write(etree.tostring(output_svg_root))
output.seek(0)
return output | [
"def",
"merge_svg_layers",
"(",
"svg_sources",
",",
"share_transform",
"=",
"True",
")",
":",
"# Get list of XML layers.",
"(",
"width",
",",
"height",
")",
",",
"layers",
"=",
"get_svg_layers",
"(",
"svg_sources",
")",
"if",
"share_transform",
":",
"transforms",
"=",
"[",
"layer_i",
".",
"attrib",
"[",
"'transform'",
"]",
"for",
"layer_i",
"in",
"layers",
"if",
"'transform'",
"in",
"layer_i",
".",
"attrib",
"]",
"if",
"len",
"(",
"transforms",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Transform can only be shared if *exactly one* '",
"'layer has a transform ({} layers have '",
"'`transform` attributes)'",
".",
"format",
"(",
"len",
"(",
"transforms",
")",
")",
")",
"elif",
"transforms",
":",
"# Apply single common transform to all layers.",
"for",
"layer_i",
"in",
"layers",
":",
"layer_i",
".",
"attrib",
"[",
"'transform'",
"]",
"=",
"transforms",
"[",
"0",
"]",
"# Create blank XML output document.",
"dwg",
"=",
"svgwrite",
".",
"Drawing",
"(",
"profile",
"=",
"'tiny'",
",",
"debug",
"=",
"False",
",",
"size",
"=",
"(",
"width",
",",
"height",
")",
")",
"# Add append layers to output XML root element.",
"output_svg_root",
"=",
"etree",
".",
"fromstring",
"(",
"dwg",
".",
"tostring",
"(",
")",
")",
"output_svg_root",
".",
"extend",
"(",
"layers",
")",
"# Write merged XML document to output file-like object.",
"output",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"output",
".",
"write",
"(",
"etree",
".",
"tostring",
"(",
"output_svg_root",
")",
")",
"output",
".",
"seek",
"(",
"0",
")",
"return",
"output"
] | Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object containing the merged XML document. | [
"Merge",
"layers",
"from",
"input",
"svg",
"sources",
"into",
"a",
"single",
"XML",
"document",
"."
] | train | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/merge.py#L56-L97 |
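Building on get_svg_layers, a sketch of merging two documents into one file. StringIO here is the Python 2 module the function imports, so the returned buffer holds the serialized document as a str:

    with open('device.svg', 'rb') as f1, open('mask.svg', 'rb') as f2:
        merged = merge_svg_layers([f1, f2])
    with open('merged.svg', 'w') as out:
        out.write(merged.read())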
BlueBrain/hpcbench | hpcbench/cli/bensh.py | main | def main(argv=None):
"""ben-sh entry point"""
arguments = cli_common(__doc__, argv=argv)
campaign_file = arguments['CAMPAIGN_FILE']
if arguments['-g']:
if osp.exists(campaign_file):
raise Exception('Campaign file already exists')
with open(campaign_file, 'w') as ostr:
Generator().write(ostr)
else:
node = arguments.get('-n')
output_dir = arguments.get('--output-dir')
exclude_nodes = arguments.get('--exclude-nodes')
srun_tag = arguments.get('--srun')
driver = CampaignDriver(
campaign_file,
node=node,
output_dir=output_dir,
srun=srun_tag,
exclude_nodes=exclude_nodes,
)
driver()
if argv is not None:
return driver
campaign_fd = int(arguments.get('--campaign-path-fd') or 1)
message = (osp.abspath(driver.campaign_path) + '\n').encode()
os.write(campaign_fd, message) | python | def main(argv=None):
"""ben-sh entry point"""
arguments = cli_common(__doc__, argv=argv)
campaign_file = arguments['CAMPAIGN_FILE']
if arguments['-g']:
if osp.exists(campaign_file):
raise Exception('Campaign file already exists')
with open(campaign_file, 'w') as ostr:
Generator().write(ostr)
else:
node = arguments.get('-n')
output_dir = arguments.get('--output-dir')
exclude_nodes = arguments.get('--exclude-nodes')
srun_tag = arguments.get('--srun')
driver = CampaignDriver(
campaign_file,
node=node,
output_dir=output_dir,
srun=srun_tag,
exclude_nodes=exclude_nodes,
)
driver()
if argv is not None:
return driver
campaign_fd = int(arguments.get('--campaign-path-fd') or 1)
message = (osp.abspath(driver.campaign_path) + '\n').encode()
os.write(campaign_fd, message) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"campaign_file",
"=",
"arguments",
"[",
"'CAMPAIGN_FILE'",
"]",
"if",
"arguments",
"[",
"'-g'",
"]",
":",
"if",
"osp",
".",
"exists",
"(",
"campaign_file",
")",
":",
"raise",
"Exception",
"(",
"'Campaign file already exists'",
")",
"with",
"open",
"(",
"campaign_file",
",",
"'w'",
")",
"as",
"ostr",
":",
"Generator",
"(",
")",
".",
"write",
"(",
"ostr",
")",
"else",
":",
"node",
"=",
"arguments",
".",
"get",
"(",
"'-n'",
")",
"output_dir",
"=",
"arguments",
".",
"get",
"(",
"'--output-dir'",
")",
"exclude_nodes",
"=",
"arguments",
".",
"get",
"(",
"'--exclude-nodes'",
")",
"srun_tag",
"=",
"arguments",
".",
"get",
"(",
"'--srun'",
")",
"driver",
"=",
"CampaignDriver",
"(",
"campaign_file",
",",
"node",
"=",
"node",
",",
"output_dir",
"=",
"output_dir",
",",
"srun",
"=",
"srun_tag",
",",
"exclude_nodes",
"=",
"exclude_nodes",
",",
")",
"driver",
"(",
")",
"if",
"argv",
"is",
"not",
"None",
":",
"return",
"driver",
"campaign_fd",
"=",
"int",
"(",
"arguments",
".",
"get",
"(",
"'--campaign-path-fd'",
")",
"or",
"1",
")",
"message",
"=",
"(",
"osp",
".",
"abspath",
"(",
"driver",
".",
"campaign_path",
")",
"+",
"'\\n'",
")",
".",
"encode",
"(",
")",
"os",
".",
"write",
"(",
"campaign_fd",
",",
"message",
")"
] | ben-sh entry point | [
"ben",
"-",
"sh",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bensh.py#L35-L61 |
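A sketch of calling the ben-sh entry point programmatically; when argv is given, the CampaignDriver is returned instead of the campaign path being written to a file descriptor. The positional argument and flag spellings are inferred from the docopt lookups above:

    driver = main(argv=['campaign.yaml', '--output-dir', '/tmp/results'])
    print(driver.campaign_path)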
BlueBrain/hpcbench | hpcbench/cli/benumb.py | main | def main(argv=None):
"""ben-umb entry point"""
arguments = cli_common(__doc__, argv=argv)
driver = CampaignDriver(arguments['CAMPAIGN-DIR'], expandcampvars=False)
driver(no_exec=True)
if argv is not None:
return driver | python | def main(argv=None):
"""ben-umb entry point"""
arguments = cli_common(__doc__, argv=argv)
driver = CampaignDriver(arguments['CAMPAIGN-DIR'], expandcampvars=False)
driver(no_exec=True)
if argv is not None:
return driver | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"driver",
"=",
"CampaignDriver",
"(",
"arguments",
"[",
"'CAMPAIGN-DIR'",
"]",
",",
"expandcampvars",
"=",
"False",
")",
"driver",
"(",
"no_exec",
"=",
"True",
")",
"if",
"argv",
"is",
"not",
"None",
":",
"return",
"driver"
] | ben-umb entry point | [
"ben",
"-",
"umb",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/benumb.py#L19-L25 |
portfoliome/foil | foil/counting.py | count_by | def count_by(records: Sequence[Dict], field_name: str) -> defaultdict:
"""
Frequency with which each value occurs in a record sequence for a given field name.
"""
counter = defaultdict(int)
for record in records:
name = record[field_name]
counter[name] += 1
return counter | python | def count_by(records: Sequence[Dict], field_name: str) -> defaultdict:
"""
Frequency with which each value occurs in a record sequence for a given field name.
"""
counter = defaultdict(int)
for record in records:
name = record[field_name]
counter[name] += 1
return counter | [
"def",
"count_by",
"(",
"records",
":",
"Sequence",
"[",
"Dict",
"]",
",",
"field_name",
":",
"str",
")",
"->",
"defaultdict",
":",
"counter",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"record",
"in",
"records",
":",
"name",
"=",
"record",
"[",
"field_name",
"]",
"counter",
"[",
"name",
"]",
"+=",
"1",
"return",
"counter"
] | Frequency with which each value occurs in a record sequence for a given field name. | [
"Frequency",
"each",
"value",
"occurs",
"in",
"a",
"record",
"sequence",
"for",
"a",
"given",
"field",
"name",
"."
] | train | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/counting.py#L5-L16 |
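A quick worked example of the frequency counter:

    records = [{'city': 'Oslo'}, {'city': 'Bergen'}, {'city': 'Oslo'}]
    counts = count_by(records, 'city')
    assert counts['Oslo'] == 2 and counts['Bergen'] == 1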
PolyJIT/benchbuild | benchbuild/reports/status.py | FullDump.generate | def generate(self):
"""
Fetch all rows associated with this experiment.
This will generate a huge .csv.
"""
exp_name = self.exp_name()
fname = os.path.basename(self.out_path)
fname = "{exp}_{prefix}_{name}{ending}".format(
exp=exp_name,
prefix=os.path.splitext(fname)[0],
ending=os.path.splitext(fname)[-1],
name="full")
first = True
for chunk in self.report():
print("Writing chunk to :'{0}'".format(fname))
chunk.to_csv(fname, header=first, mode='a')
first = False | python | def generate(self):
"""
Fetch all rows associated with this experiment.
This will generate a huge .csv.
"""
exp_name = self.exp_name()
fname = os.path.basename(self.out_path)
fname = "{exp}_{prefix}_{name}{ending}".format(
exp=exp_name,
prefix=os.path.splitext(fname)[0],
ending=os.path.splitext(fname)[-1],
name="full")
first = True
for chunk in self.report():
print("Writing chunk to :'{0}'".format(fname))
chunk.to_csv(fname, header=first, mode='a')
first = False | [
"def",
"generate",
"(",
"self",
")",
":",
"exp_name",
"=",
"self",
".",
"exp_name",
"(",
")",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"out_path",
")",
"fname",
"=",
"\"{exp}_{prefix}_{name}{ending}\"",
".",
"format",
"(",
"exp",
"=",
"exp_name",
",",
"prefix",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"0",
"]",
",",
"ending",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"-",
"1",
"]",
",",
"name",
"=",
"\"full\"",
")",
"first",
"=",
"True",
"for",
"chunk",
"in",
"self",
".",
"report",
"(",
")",
":",
"print",
"(",
"\"Writing chunk to :'{0}'\"",
".",
"format",
"(",
"fname",
")",
")",
"chunk",
".",
"to_csv",
"(",
"fname",
",",
"header",
"=",
"first",
",",
"mode",
"=",
"'a'",
")",
"first",
"=",
"False"
] | Fetch all rows associated with this experiment.
This will generate a huge .csv. | [
"Fetch",
"all",
"rows",
"associated",
"with",
"this",
"experiment",
"."
] | train | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/reports/status.py#L82-L100 |
Metatab/metatab | metatab/appurl.py | MetatabUrl.doc | def doc(self):
"""Return the metatab document for the URL"""
from metatab import MetatabDoc
t = self.get_resource().get_target()
return MetatabDoc(t.inner) | python | def doc(self):
"""Return the metatab document for the URL"""
from metatab import MetatabDoc
t = self.get_resource().get_target()
return MetatabDoc(t.inner) | [
"def",
"doc",
"(",
"self",
")",
":",
"from",
"metatab",
"import",
"MetatabDoc",
"t",
"=",
"self",
".",
"get_resource",
"(",
")",
".",
"get_target",
"(",
")",
"return",
"MetatabDoc",
"(",
"t",
".",
"inner",
")"
] | Return the metatab document for the URL | [
"Return",
"the",
"metatab",
"document",
"for",
"the",
"URL"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/appurl.py#L92-L96 |
BlueBrain/hpcbench | hpcbench/cli/bendoc.py | main | def main(argv=None):
"""ben-doc entry point"""
arguments = cli_common(__doc__, argv=argv)
campaign_path = arguments['CAMPAIGN-DIR']
driver = CampaignDriver(campaign_path, expandcampvars=False)
with pushd(campaign_path):
render(
template=arguments['--template'],
ostr=arguments['--output'],
campaign=driver,
)
if argv is not None:
return driver | python | def main(argv=None):
"""ben-doc entry point"""
arguments = cli_common(__doc__, argv=argv)
campaign_path = arguments['CAMPAIGN-DIR']
driver = CampaignDriver(campaign_path, expandcampvars=False)
with pushd(campaign_path):
render(
template=arguments['--template'],
ostr=arguments['--output'],
campaign=driver,
)
if argv is not None:
return driver | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"arguments",
"=",
"cli_common",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
")",
"campaign_path",
"=",
"arguments",
"[",
"'CAMPAIGN-DIR'",
"]",
"driver",
"=",
"CampaignDriver",
"(",
"campaign_path",
",",
"expandcampvars",
"=",
"False",
")",
"with",
"pushd",
"(",
"campaign_path",
")",
":",
"render",
"(",
"template",
"=",
"arguments",
"[",
"'--template'",
"]",
",",
"ostr",
"=",
"arguments",
"[",
"'--output'",
"]",
",",
"campaign",
"=",
"driver",
",",
")",
"if",
"argv",
"is",
"not",
"None",
":",
"return",
"driver"
] | ben-doc entry point | [
"ben",
"-",
"doc",
"entry",
"point"
] | train | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bendoc.py#L24-L36 |
chrisjsewell/jsonextended | jsonextended/utils.py | class_to_str | def class_to_str(obj):
""" get class string from object
Examples
--------
>>> class_to_str(list).split('.')[1]
'list'
"""
mod_str = obj.__module__
name_str = obj.__name__
if mod_str == '__main__':
return name_str
else:
return '.'.join([mod_str, name_str]) | python | def class_to_str(obj):
""" get class string from object
Examples
--------
>>> class_to_str(list).split('.')[1]
'list'
"""
mod_str = obj.__module__
name_str = obj.__name__
if mod_str == '__main__':
return name_str
else:
return '.'.join([mod_str, name_str]) | [
"def",
"class_to_str",
"(",
"obj",
")",
":",
"mod_str",
"=",
"obj",
".",
"__module__",
"name_str",
"=",
"obj",
".",
"__name__",
"if",
"mod_str",
"==",
"'__main__'",
":",
"return",
"name_str",
"else",
":",
"return",
"'.'",
".",
"join",
"(",
"[",
"mod_str",
",",
"name_str",
"]",
")"
] | get class string from object
Examples
--------
>>> class_to_str(list).split('.')[1]
'list' | [
"get",
"class",
"string",
"from",
"object"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L22-L37 |
chrisjsewell/jsonextended | jsonextended/utils.py | get_module_path | def get_module_path(module):
"""return a directory path to a module"""
return pathlib.Path(
os.path.dirname(os.path.abspath(inspect.getfile(module)))) | python | def get_module_path(module):
"""return a directory path to a module"""
return pathlib.Path(
os.path.dirname(os.path.abspath(inspect.getfile(module)))) | [
"def",
"get_module_path",
"(",
"module",
")",
":",
"return",
"pathlib",
".",
"Path",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"inspect",
".",
"getfile",
"(",
"module",
")",
")",
")",
")"
] | return a directory path to a module | [
"return",
"a",
"directory",
"path",
"to",
"a",
"module"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L40-L43 |
chrisjsewell/jsonextended | jsonextended/utils.py | get_data_path | def get_data_path(data, module, check_exists=True):
"""return a directory path to data within a module
Parameters
----------
data : str or list[str]
file name or list of sub-directories
and file name (e.g. ['lammps','data.txt'])
"""
basepath = os.path.dirname(os.path.abspath(inspect.getfile(module)))
if isinstance(data, basestring):
data = [data]
dirpath = os.path.join(basepath, *data)
if check_exists:
assert os.path.exists(dirpath), '{0} does not exist'.format(dirpath)
return pathlib.Path(dirpath) | python | def get_data_path(data, module, check_exists=True):
"""return a directory path to data within a module
Parameters
----------
data : str or list[str]
file name or list of sub-directories
and file name (e.g. ['lammps','data.txt'])
"""
basepath = os.path.dirname(os.path.abspath(inspect.getfile(module)))
if isinstance(data, basestring):
data = [data]
dirpath = os.path.join(basepath, *data)
if check_exists:
assert os.path.exists(dirpath), '{0} does not exist'.format(dirpath)
return pathlib.Path(dirpath) | [
"def",
"get_data_path",
"(",
"data",
",",
"module",
",",
"check_exists",
"=",
"True",
")",
":",
"basepath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"inspect",
".",
"getfile",
"(",
"module",
")",
")",
")",
"if",
"isinstance",
"(",
"data",
",",
"basestring",
")",
":",
"data",
"=",
"[",
"data",
"]",
"dirpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"*",
"data",
")",
"if",
"check_exists",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"dirpath",
")",
",",
"'{0} does not exist'",
".",
"format",
"(",
"dirpath",
")",
"return",
"pathlib",
".",
"Path",
"(",
"dirpath",
")"
] | return a directory path to data within a module
Parameters
----------
data : str or list[str]
file name or list of sub-directories
and file name (e.g. ['lammps','data.txt']) | [
"return",
"a",
"directory",
"path",
"to",
"data",
"within",
"a",
"module"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L59-L79 |
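Mirroring the docstring's example, a sketch that resolves bundled data relative to an importable module; mypackage is a stand-in name:

    import mypackage
    path = get_data_path(['lammps', 'data.txt'], mypackage, check_exists=False)
    print(path)  # e.g. .../site-packages/mypackage/lammps/data.txt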
chrisjsewell/jsonextended | jsonextended/utils.py | memory_usage | def memory_usage():
"""return memory usage of python process in MB
from
http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
psutil is quicker
>>> isinstance(memory_usage(),float)
True
"""
try:
import psutil
import os
except ImportError:
return _memory_usage_ps()
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
return mem | python | def memory_usage():
"""return memory usage of python process in MB
from
http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
psutil is quicker
>>> isinstance(memory_usage(),float)
True
"""
try:
import psutil
import os
except ImportError:
return _memory_usage_ps()
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
return mem | [
"def",
"memory_usage",
"(",
")",
":",
"try",
":",
"import",
"psutil",
"import",
"os",
"except",
"ImportError",
":",
"return",
"_memory_usage_ps",
"(",
")",
"process",
"=",
"psutil",
".",
"Process",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"mem",
"=",
"process",
".",
"memory_info",
"(",
")",
"[",
"0",
"]",
"/",
"float",
"(",
"2",
"**",
"20",
")",
"return",
"mem"
] | return memory usage of python process in MB
from
http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
psutil is quicker
>>> isinstance(memory_usage(),float)
True | [
"return",
"memory",
"usage",
"of",
"python",
"process",
"in",
"MB"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L110-L129 |
chrisjsewell/jsonextended | jsonextended/utils.py | _memory_usage_ps | def _memory_usage_ps():
"""return memory usage of python process in MB
>>> isinstance(_memory_usage_ps(),float)
True
"""
out = subprocess.Popen(
['ps', 'v', '-p', str(os.getpid())],
stdout=subprocess.PIPE).communicate()[0].split(b'\n')
vsz_index = out[0].split().index(b'RSS')
mem = float(out[1].split()[vsz_index]) / 1024
return mem | python | def _memory_usage_ps():
"""return memory usage of python process in MB
>>> isinstance(_memory_usage_ps(),float)
True
"""
out = subprocess.Popen(
['ps', 'v', '-p', str(os.getpid())],
stdout=subprocess.PIPE).communicate()[0].split(b'\n')
vsz_index = out[0].split().index(b'RSS')
mem = float(out[1].split()[vsz_index]) / 1024
return mem | [
"def",
"_memory_usage_ps",
"(",
")",
":",
"out",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'ps'",
",",
"'v'",
",",
"'-p'",
",",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"b'\\n'",
")",
"vsz_index",
"=",
"out",
"[",
"0",
"]",
".",
"split",
"(",
")",
".",
"index",
"(",
"b'RSS'",
")",
"mem",
"=",
"float",
"(",
"out",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"vsz_index",
"]",
")",
"/",
"1024",
"return",
"mem"
] | return memory usage of python process in MB
>>> isinstance(_memory_usage_ps(),float)
True | [
"return",
"memory",
"usage",
"of",
"python",
"process",
"in",
"MB"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L132-L144 |
chrisjsewell/jsonextended | jsonextended/utils.py | load_memit | def load_memit():
"""load memory usage ipython magic,
requires the memory_profiler package to be installed
to get usage: %memit?
Author: Vlad Niculae <[email protected]>
Makes use of memory_profiler from Fabian Pedregosa
available at https://github.com/fabianp/memory_profiler
"""
from IPython.core.magic import Magics, line_magic, magics_class
from memory_profiler import memory_usage as _mu
try:
ip = get_ipython()
except NameError as err:
raise Exception('not in ipython/jupyter kernel:\n {}'.format(err))
@magics_class
class MemMagics(Magics):
@line_magic
def memit(self, line='', setup='pass'):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-ir<R>t<T>] statement
Options:
-r<R>: repeat the loop iteration <R> times
and take the best result. Default: 3
-i: run the code in the current environment, without forking a new
process.
This is required on some MacOS versions of Accelerate if your
line contains a call to `np.dot`.
-t<T>: timeout after <T> seconds. Unused if `-i` is active.
Default: None
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 3: 76.402344 MB per loop
In [3]: %memit np.ones(1e6)
maximum of 3: 7.820312 MB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MB per loop
In [5]: memit -t 3 while True: pass;
Subprocess timed out.
Subprocess timed out.
Subprocess timed out.
ERROR: all subprocesses exited unsuccessfully. Try again with the
`-i` option.
maximum of 3: -inf MB per loop
"""
opts, stmt = self.parse_options(line, 'r:t:i', posix=False,
strict=False)
repeat = int(getattr(opts, 'r', 3))
if repeat < 1:
repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
run_in_place = hasattr(opts, 'i')
# Don't depend on multiprocessing:
try:
import multiprocessing as pr
from multiprocessing.queues import SimpleQueue
q = SimpleQueue()
except ImportError:
class ListWithPut(list):
"""Just a list,
where the `append` method is aliased to `put`."""
def put(self, x):
self.append(x)
q = ListWithPut()
print(
'WARNING: cannot import module `multiprocessing`. Forcing '
'the `-i` option.')
run_in_place = True
ns = self.shell.user_ns
def _get_usage(q, stmt, setup='pass', ns={}):
try:
exec(setup, ns)
_mu0 = _mu()[0]
exec(stmt, ns)
_mu1 = _mu()[0]
q.put(_mu1 - _mu0)
except Exception as e:
q.put(float('-inf'))
raise e
if run_in_place:
for _ in range(repeat):
_get_usage(q, stmt, ns=ns)
else:
# run in consecutive subprocesses
at_least_one_worked = False
for _ in range(repeat):
p = pr.Process(
target=_get_usage, args=(q, stmt, 'pass', ns))
p.start()
p.join(timeout=timeout)
if p.exitcode == 0:
at_least_one_worked = True
else:
p.terminate()
if p.exitcode is None:
print('Subprocess timed out.')
else:
print(
'Subprocess exited with code %d.' % p.exitcode)
q.put(float('-inf'))
if not at_least_one_worked:
print('ERROR: all subprocesses exited unsuccessfully. Try '
'again with the `-i` option.')
usages = [q.get() for _ in range(repeat)]
usage = max(usages)
print("maximum of %d: %f MB per loop" % (repeat, usage))
ip.register_magics(MemMagics) | python | def load_memit():
"""load memory usage ipython magic,
requires the memory_profiler package to be installed
to get usage: %memit?
Author: Vlad Niculae <[email protected]>
Makes use of memory_profiler from Fabian Pedregosa
available at https://github.com/fabianp/memory_profiler
"""
from IPython.core.magic import Magics, line_magic, magics_class
from memory_profiler import memory_usage as _mu
try:
ip = get_ipython()
except NameError as err:
raise Exception('not in ipython/jupyter kernel:\n {}'.format(err))
@magics_class
class MemMagics(Magics):
@line_magic
def memit(self, line='', setup='pass'):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-ir<R>t<T>] statement
Options:
-r<R>: repeat the loop iteration <R> times
and take the best result. Default: 3
-i: run the code in the current environment, without forking a new
process.
This is required on some MacOS versions of Accelerate if your
line contains a call to `np.dot`.
-t<T>: timeout after <T> seconds. Unused if `-i` is active.
Default: None
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 3: 76.402344 MB per loop
In [3]: %memit np.ones(1e6)
maximum of 3: 7.820312 MB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MB per loop
In [5]: memit -t 3 while True: pass;
Subprocess timed out.
Subprocess timed out.
Subprocess timed out.
ERROR: all subprocesses exited unsuccessfully. Try again with the
`-i` option.
maximum of 3: -inf MB per loop
"""
opts, stmt = self.parse_options(line, 'r:t:i', posix=False,
strict=False)
repeat = int(getattr(opts, 'r', 3))
if repeat < 1:
repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
run_in_place = hasattr(opts, 'i')
# Don't depend on multiprocessing:
try:
import multiprocessing as pr
from multiprocessing.queues import SimpleQueue
q = SimpleQueue()
except ImportError:
class ListWithPut(list):
"""Just a list,
where the `append` method is aliased to `put`."""
def put(self, x):
self.append(x)
q = ListWithPut()
print(
'WARNING: cannot import module `multiprocessing`. Forcing '
'the `-i` option.')
run_in_place = True
ns = self.shell.user_ns
def _get_usage(q, stmt, setup='pass', ns={}):
try:
exec(setup, ns)
_mu0 = _mu()[0]
exec(stmt, ns)
_mu1 = _mu()[0]
q.put(_mu1 - _mu0)
except Exception as e:
q.put(float('-inf'))
raise e
if run_in_place:
for _ in range(repeat):
_get_usage(q, stmt, ns=ns)
else:
# run in consecutive subprocesses
at_least_one_worked = False
for _ in range(repeat):
p = pr.Process(
target=_get_usage, args=(q, stmt, 'pass', ns))
p.start()
p.join(timeout=timeout)
if p.exitcode == 0:
at_least_one_worked = True
else:
p.terminate()
if p.exitcode is None:
print('Subprocess timed out.')
else:
print(
'Subprocess exited with code %d.' % p.exitcode)
q.put(float('-inf'))
if not at_least_one_worked:
print('ERROR: all subprocesses exited unsuccessfully. Try '
'again with the `-i` option.')
usages = [q.get() for _ in range(repeat)]
usage = max(usages)
print("maximum of %d: %f MB per loop" % (repeat, usage))
ip.register_magics(MemMagics) | [
"def",
"load_memit",
"(",
")",
":",
"from",
"IPython",
".",
"core",
".",
"magic",
"import",
"Magics",
",",
"line_magic",
",",
"magics_class",
"from",
"memory_profiler",
"import",
"memory_usage",
"as",
"_mu",
"try",
":",
"ip",
"=",
"get_ipython",
"(",
")",
"except",
"NameError",
"as",
"err",
":",
"raise",
"Exception",
"(",
"'not in ipython/jupyter kernel:\\n {}'",
".",
"format",
"(",
"err",
")",
")",
"@",
"magics_class",
"class",
"MemMagics",
"(",
"Magics",
")",
":",
"@",
"line_magic",
"def",
"memit",
"(",
"self",
",",
"line",
"=",
"''",
",",
"setup",
"=",
"'pass'",
")",
":",
"\"\"\"Measure memory usage of a Python statement\n\n Usage, in line mode:\n %memit [-ir<R>t<T>] statement\n\n Options:\n -r<R>: repeat the loop iteration <R> times\n and take the best result. Default: 3\n\n -i: run the code in the current environment, without forking a new\n process.\n This is required on some MacOS versions of Accelerate if your\n line contains a call to `np.dot`.\n\n -t<T>: timeout after <T> seconds. Unused if `-i` is active.\n Default: None\n\n Examples\n --------\n ::\n\n In [1]: import numpy as np\n\n In [2]: %memit np.zeros(1e7)\n maximum of 3: 76.402344 MB per loop\n\n In [3]: %memit np.ones(1e6)\n maximum of 3: 7.820312 MB per loop\n\n In [4]: %memit -r 10 np.empty(1e8)\n maximum of 10: 0.101562 MB per loop\n\n In [5]: memit -t 3 while True: pass;\n Subprocess timed out.\n Subprocess timed out.\n Subprocess timed out.\n ERROR: all subprocesses exited unsuccessfully. Try again with the\n `-i` option.\n maximum of 3: -inf MB per loop\n\n \"\"\"",
"opts",
",",
"stmt",
"=",
"self",
".",
"parse_options",
"(",
"line",
",",
"'r:t:i'",
",",
"posix",
"=",
"False",
",",
"strict",
"=",
"False",
")",
"repeat",
"=",
"int",
"(",
"getattr",
"(",
"opts",
",",
"'r'",
",",
"3",
")",
")",
"if",
"repeat",
"<",
"1",
":",
"repeat",
"==",
"1",
"timeout",
"=",
"int",
"(",
"getattr",
"(",
"opts",
",",
"'t'",
",",
"0",
")",
")",
"if",
"timeout",
"<=",
"0",
":",
"timeout",
"=",
"None",
"run_in_place",
"=",
"hasattr",
"(",
"opts",
",",
"'i'",
")",
"# Don't depend on multiprocessing:",
"try",
":",
"import",
"multiprocessing",
"as",
"pr",
"from",
"multiprocessing",
".",
"queues",
"import",
"SimpleQueue",
"q",
"=",
"SimpleQueue",
"(",
")",
"except",
"ImportError",
":",
"class",
"ListWithPut",
"(",
"list",
")",
":",
"\"\"\"Just a list,\n where the `append` method is aliased to `put`.\"\"\"",
"def",
"put",
"(",
"self",
",",
"x",
")",
":",
"self",
".",
"append",
"(",
"x",
")",
"q",
"=",
"ListWithPut",
"(",
")",
"print",
"(",
"'WARNING: cannot import module `multiprocessing`. Forcing '",
"'the `-i` option.'",
")",
"run_in_place",
"=",
"True",
"ns",
"=",
"self",
".",
"shell",
".",
"user_ns",
"def",
"_get_usage",
"(",
"q",
",",
"stmt",
",",
"setup",
"=",
"'pass'",
",",
"ns",
"=",
"{",
"}",
")",
":",
"try",
":",
"exec",
"(",
"setup",
")",
"in",
"ns",
"_mu0",
"=",
"_mu",
"(",
")",
"[",
"0",
"]",
"exec",
"(",
"stmt",
")",
"in",
"ns",
"_mu1",
"=",
"_mu",
"(",
")",
"[",
"0",
"]",
"q",
".",
"put",
"(",
"_mu1",
"-",
"_mu0",
")",
"except",
"Exception",
"as",
"e",
":",
"q",
".",
"put",
"(",
"float",
"(",
"'-inf'",
")",
")",
"raise",
"e",
"if",
"run_in_place",
":",
"for",
"_",
"in",
"range",
"(",
"repeat",
")",
":",
"_get_usage",
"(",
"q",
",",
"stmt",
",",
"ns",
"=",
"ns",
")",
"else",
":",
"# run in consecutive subprocesses",
"at_least_one_worked",
"=",
"False",
"for",
"_",
"in",
"range",
"(",
"repeat",
")",
":",
"p",
"=",
"pr",
".",
"Process",
"(",
"target",
"=",
"_get_usage",
",",
"args",
"=",
"(",
"q",
",",
"stmt",
",",
"'pass'",
",",
"ns",
")",
")",
"p",
".",
"start",
"(",
")",
"p",
".",
"join",
"(",
"timeout",
"=",
"timeout",
")",
"if",
"p",
".",
"exitcode",
"==",
"0",
":",
"at_least_one_worked",
"=",
"True",
"else",
":",
"p",
".",
"terminate",
"(",
")",
"if",
"p",
".",
"exitcode",
"is",
"None",
":",
"print",
"(",
"'Subprocess timed out.'",
")",
"else",
":",
"print",
"(",
"'Subprocess exited with code %d.'",
"%",
"p",
".",
"exitcode",
")",
"q",
".",
"put",
"(",
"float",
"(",
"'-inf'",
")",
")",
"if",
"not",
"at_least_one_worked",
":",
"print",
"(",
"'ERROR: all subprocesses exited unsuccessfully. Try '",
"'again with the `-i` option.'",
")",
"usages",
"=",
"[",
"q",
".",
"get",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"repeat",
")",
"]",
"usage",
"=",
"max",
"(",
"usages",
")",
"print",
"(",
"\"maximum of %d: %f MB per loop\"",
"%",
"(",
"repeat",
",",
"usage",
")",
")",
"ip",
".",
"register_magics",
"(",
"MemMagics",
")"
] | load memory usage ipython magic,
requires the memory_profiler package to be installed
to get usage: %memit?
Author: Vlad Niculae <[email protected]>
Makes use of memory_profiler from Fabian Pedregosa
available at https://github.com/fabianp/memory_profiler | [
"load",
"memory",
"usage",
"ipython",
"magic",
"require",
"memory_profiler",
"package",
"to",
"be",
"installed"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/utils.py#L147-L282 |
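The magic is meant to be registered once inside an IPython or Jupyter kernel, after which %memit becomes available. An illustrative transcript; the reported number will vary by machine:

    In [1]: load_memit()
    In [2]: import numpy as np
    In [3]: %memit np.zeros(int(1e7))
    maximum of 3: 76.402344 MB per loop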
jfear/sramongo | sramongo/xml_helpers.py | parse_tree_from_dict | def parse_tree_from_dict(node, locs):
"""Processes key locations.
Parameters
----------
node: xml.etree.ElementTree.Element
Current node.
locs: dict
A dictionary mapping key to a tuple. The tuple can either be 2 or 3
elements long. The first element maps to the location in the
current node. The second element gives a processing hint. Possible
values are:
* 'text': assumes the text element of the path is wanted.
* 'child': assumes that the child of the given path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path.
If 'child' is given, then a third element needs to be given
indicating the type of processing. Possible values are:
* 'text': assumes the text element of the path is wanted.
* 'tag': assumes the class tag of the path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path.
"""
d = dict()
for n, l in locs.items():
try:
if l[1] == 'text':
d[n] = node.find(l[0]).text
elif l[1] == 'child':
child = node.find(l[0]).getchildren()
if len(child) > 1:
raise AmbiguousElementException(
'There are too many elements')
elif l[2] == 'text':
d[n] = child[0].text
elif l[2] == 'tag':
d[n] = child[0].tag
else:
d[n] = node.find(l[0]).get(l[1])
except:
pass
return d | python | def parse_tree_from_dict(node, locs):
"""Processes key locations.
Parameters
----------
node: xml.etree.ElementTree.Element
Current node.
locs: dict
A dictionary mapping key to a tuple. The tuple can either be 2 or 3
elements long. The first element maps to the location in the
current node. The second element gives a processing hint. Possible
values are:
* 'text': assumes the text element of the path is wanted.
* 'child': assumes that the child of the given path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path.
If 'child' is given, then a third element needs to be given
indicating the type of processing. Possible values are:
* 'text': assumes the text element of the path is wanted.
* 'tag': assumes the class tag of the path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path.
"""
d = dict()
for n, l in locs.items():
try:
if l[1] == 'text':
d[n] = node.find(l[0]).text
elif l[1] == 'child':
child = node.find(l[0]).getchildren()
if len(child) > 1:
raise AmbiguousElementException(
'There are too many elements')
elif l[2] == 'text':
d[n] = child[0].text
elif l[2] == 'tag':
d[n] = child[0].tag
else:
d[n] = node.find(l[0]).get(l[1])
except:
pass
return d | [
"def",
"parse_tree_from_dict",
"(",
"node",
",",
"locs",
")",
":",
"d",
"=",
"dict",
"(",
")",
"for",
"n",
",",
"l",
"in",
"locs",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"l",
"[",
"1",
"]",
"==",
"'text'",
":",
"d",
"[",
"n",
"]",
"=",
"node",
".",
"find",
"(",
"l",
"[",
"0",
"]",
")",
".",
"text",
"elif",
"l",
"[",
"1",
"]",
"==",
"'child'",
":",
"child",
"=",
"node",
".",
"find",
"(",
"l",
"[",
"0",
"]",
")",
".",
"getchildren",
"(",
")",
"if",
"len",
"(",
"child",
")",
">",
"1",
":",
"raise",
"AmbiguousElementException",
"(",
"'There are too many elements'",
")",
"elif",
"l",
"[",
"2",
"]",
"==",
"'text'",
":",
"d",
"[",
"n",
"]",
"=",
"child",
"[",
"0",
"]",
".",
"text",
"elif",
"l",
"[",
"2",
"]",
"==",
"'tag'",
":",
"d",
"[",
"n",
"]",
"=",
"child",
"[",
"0",
"]",
".",
"tag",
"else",
":",
"d",
"[",
"n",
"]",
"=",
"node",
".",
"find",
"(",
"l",
"[",
"0",
"]",
")",
".",
"get",
"(",
"l",
"[",
"1",
"]",
")",
"except",
":",
"pass",
"return",
"d"
] | Processes key locations.
Parameters
----------
node: xml.etree.ElementTree.Element
Current node.
locs: dict
A dictionary mapping key to a tuple. The tuple can either be 2 or 3
elements long. The first element maps to the location in the
current node. The second element gives a processing hint. Possible
values are:
* 'text': assumes the text element of the path is wanted.
* 'child': assumes that the child of the given path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path.
If 'child' is given, then a third element needs to be given
indicating the type of processing. Possible values are:
* 'text': assumes the text element of the path is wanted.
* 'tag': assumes the class tag of the path is wanted.
* str: Any other string will be treated as an attribute lookup
of the path. | [
"Processes",
"key",
"locations",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/xml_helpers.py#L26-L72 |
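A worked example of the locs mapping against a made-up XML node, covering both the attribute lookup and the 'text' processing hint:

    from xml.etree import ElementTree

    node = ElementTree.fromstring(
        '<run accession="SRR0000001"><title>demo</title></run>')
    locs = {
        'accession': ('.', 'accession'),  # attribute lookup on the node itself
        'title': ('title', 'text'),       # text of the <title> child
    }
    assert parse_tree_from_dict(node, locs) == {'accession': 'SRR0000001',
                                                'title': 'demo'}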
jfear/sramongo | sramongo/xml_helpers.py | xml_to_root | def xml_to_root(xml: Union[str, IO]) -> ElementTree.Element:
"""Parse XML into an ElemeTree object.
Parameters
----------
xml : str or file-like object
A filename, file object or string version of xml can be passed.
Returns
-------
ElementTree.Element
"""
if isinstance(xml, str):
if '<' in xml:
return ElementTree.fromstring(xml)
else:
with open(xml) as fh:
return xml_to_root(fh)
tree = ElementTree.parse(xml)
return tree.getroot() | python | def xml_to_root(xml: Union[str, IO]) -> ElementTree.Element:
"""Parse XML into an ElemeTree object.
Parameters
----------
xml : str or file-like object
A filename, file object or string version of xml can be passed.
Returns
-------
ElementTree.Element
"""
if isinstance(xml, str):
if '<' in xml:
return ElementTree.fromstring(xml)
else:
with open(xml) as fh:
return xml_to_root(fh)
tree = ElementTree.parse(xml)
return tree.getroot() | [
"def",
"xml_to_root",
"(",
"xml",
":",
"Union",
"[",
"str",
",",
"IO",
"]",
")",
"->",
"ElementTree",
".",
"Element",
":",
"if",
"isinstance",
"(",
"xml",
",",
"str",
")",
":",
"if",
"'<'",
"in",
"xml",
":",
"return",
"ElementTree",
".",
"fromstring",
"(",
"xml",
")",
"else",
":",
"with",
"open",
"(",
"xml",
")",
"as",
"fh",
":",
"xml_to_root",
"(",
"fh",
")",
"tree",
"=",
"ElementTree",
".",
"parse",
"(",
"xml",
")",
"return",
"tree",
".",
"getroot",
"(",
")"
] | Parse XML into an ElementTree object.
Parameters
----------
xml : str or file-like object
A filename, file object or string version of xml can be passed.
Returns
-------
ElementTree.Element | [
"Parse",
"XML",
"into",
"an",
"ElemeTree",
"object",
"."
] | train | https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/xml_helpers.py#L75-L95 |
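A minimal sketch of the string dispatch described above: raw markup (anything containing '<') is parsed directly, while other strings are treated as paths. The helper below mirrors the function instead of importing it, so it runs standalone.

import xml.etree.ElementTree as ElementTree

def xml_to_root_demo(xml):
    # Raw XML if the string contains markup; otherwise hand it to
    # ElementTree.parse as a filename or file object.
    if isinstance(xml, str) and '<' in xml:
        return ElementTree.fromstring(xml)
    return ElementTree.parse(xml).getroot()

root = xml_to_root_demo('<doc><item>1</item></doc>')
print(root.tag, root.find('item').text)  # doc 1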
elkiwy/paynter | paynter/image.py | Image.newLayer | def newLayer(self, effect=''):
"""
Creates a new :py:class:`Layer` and sets it as the active one.
:param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
"""
self.layers.append(Layer(effect = effect))
self.activeLayer = len(self.layers)-1 | python | def newLayer(self, effect=''):
"""
Creates a new :py:class:`Layer` and sets it as the active one.
:param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing.
"""
self.layers.append(Layer(effect = effect))
self.activeLayer = len(self.layers)-1 | [
"def",
"newLayer",
"(",
"self",
",",
"effect",
"=",
"''",
")",
":",
"self",
".",
"layers",
".",
"append",
"(",
"Layer",
"(",
"effect",
"=",
"effect",
")",
")",
"self",
".",
"activeLayer",
"=",
"len",
"(",
"self",
".",
"layers",
")",
"-",
"1"
] | Creates a new :py:class:`Layer` and sets it as the active one.
:param effect: A string with the blend mode for that layer that will be used during the rendering process. The accepted values are: :code:`'soft_light','lighten','screen','dodge','addition','darken','multiply','hard_light','difference','subtract','grain_extract','grain_merge','divide','overlay'`.
:rtype: Nothing. | [
"Creates",
"a",
"new",
":",
"py",
":",
"class",
":",
"Layer",
"and",
"set",
"that",
"as",
"the",
"active",
".",
":",
"param",
"effect",
":",
"A",
"string",
"with",
"the",
"blend",
"mode",
"for",
"that",
"layer",
"that",
"will",
"be",
"used",
"when",
"during",
"the",
"rendering",
"process",
".",
"The",
"accepted",
"values",
"are",
":",
":",
"code",
":",
"soft_light",
"lighten",
"screen",
"dodge",
"addition",
"darken",
"multiply",
"hard_light",
"difference",
"subtract",
"grain_extract",
"grain_merge",
"divide",
"overlay",
".",
":",
"rtype",
":",
"Nothing",
"."
] | train | https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/image.py#L54-L62 |
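A sketch of the layer-stack bookkeeping only: newLayer appends a layer and points activeLayer at the new index. _Layer and _ImageStub are stand-ins, not the real paynter classes.

class _Layer:
    def __init__(self, effect=''):
        self.effect = effect

class _ImageStub:
    # Reproduces just the stack bookkeeping shown above.
    def __init__(self):
        self.layers = []
        self.activeLayer = -1

    def newLayer(self, effect=''):
        self.layers.append(_Layer(effect=effect))
        self.activeLayer = len(self.layers) - 1

img = _ImageStub()
img.newLayer()                 # base layer, default blending
img.newLayer(effect='screen')  # one of the accepted blend modes
print(img.activeLayer, img.layers[img.activeLayer].effect)  # 1 screen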
elkiwy/paynter | paynter/image.py | Image.duplicateActiveLayer | def duplicateActiveLayer(self):
"""
Duplicates the current active :py:class:`Layer`.
:rtype: Nothing.
"""
activeLayer = self.layers[self.activeLayer]
newLayer = Layer(data=activeLayer.data, effect=activeLayer.effect)
self.layers.append(newLayer)
self.activeLayer = len(self.layers)-1 | python | def duplicateActiveLayer(self):
"""
Duplicates the current active :py:class:`Layer`.
:rtype: Nothing.
"""
activeLayer = self.layers[self.activeLayer]
newLayer = Layer(data=activeLayer.data, effect=activeLayer.effect)
self.layers.append(newLayer)
self.activeLayer = len(self.layers)-1 | [
"def",
"duplicateActiveLayer",
"(",
"self",
")",
":",
"activeLayer",
"=",
"self",
".",
"layers",
"[",
"self",
".",
"activeLayer",
"]",
"newLayer",
"=",
"Layer",
"(",
"data",
"=",
"activeLayer",
".",
"data",
",",
"effect",
"=",
"activeLayer",
".",
"effect",
")",
"self",
".",
"layers",
".",
"append",
"(",
"newLayer",
")",
"self",
".",
"activeLayer",
"=",
"len",
"(",
"self",
".",
"layers",
")",
"-",
"1"
] | Duplicates the current active :py:class:`Layer`.
:rtype: Nothing. | [
"Duplicates",
"the",
"current",
"active",
":",
"py",
":",
"class",
":",
"Layer",
".",
":",
"rtype",
":",
"Nothing",
"."
] | train | https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/image.py#L65-L74 |
elkiwy/paynter | paynter/image.py | Image.mergeAllLayers | def mergeAllLayers(self):
"""
Merge all the layers together.
:rtype: The result :py:class:`Layer` object.
"""
start = time.time()
while(len(self.layers)>1):
self.mergeBottomLayers()
print('merge time:'+str(time.time()-start))
return self.layers[0] | python | def mergeAllLayers(self):
"""
Merge all the layers together.
:rtype: The result :py:class:`Layer` object.
"""
start = time.time()
while(len(self.layers)>1):
self.mergeBottomLayers()
print('merge time:'+str(time.time()-start))
return self.layers[0] | [
"def",
"mergeAllLayers",
"(",
"self",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"(",
"len",
"(",
"self",
".",
"layers",
")",
">",
"1",
")",
":",
"self",
".",
"mergeBottomLayers",
"(",
")",
"print",
"(",
"'merge time:'",
"+",
"str",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"return",
"self",
".",
"layers",
"[",
"0",
"]"
] | Merge all the layers together.
:rtype: The result :py:class:`Layer` object. | [
"Merge",
"all",
"the",
"layers",
"together",
"."
] | train | https://github.com/elkiwy/paynter/blob/f73cb5bb010a6b32ee41640a50396ed0bae8d496/paynter/image.py#L77-L87 |
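The loop above merges the stack pairwise from the bottom until a single layer remains. A sketch of that contract, with strings standing in for layers (the real mergeBottomLayers composites pixel data using each layer's blend effect):

import time

layers = ['background', 'shapes', 'highlights']

start = time.time()
while len(layers) > 1:
    # stand-in for mergeBottomLayers(): composite layers 0 and 1
    layers[0:2] = ['(' + layers[0] + '+' + layers[1] + ')']
print('merge time:' + str(time.time() - start))
print(layers[0])  # ((background+shapes)+highlights)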
Metatab/metatab | metatab/parser.py | TermParser.synonyms | def synonyms(self):
"""Return a dict of term synonyms"""
syns = {}
for k, v in self._declared_terms.items():
k = k.strip()
if v.get('synonym'):
syns[k.lower()] = v['synonym']
if not '.' in k:
syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
return syns | python | def synonyms(self):
"""Return a dict of term synonyms"""
syns = {}
for k, v in self._declared_terms.items():
k = k.strip()
if v.get('synonym'):
syns[k.lower()] = v['synonym']
if not '.' in k:
syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
return syns | [
"def",
"synonyms",
"(",
"self",
")",
":",
"syns",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_declared_terms",
".",
"items",
"(",
")",
":",
"k",
"=",
"k",
".",
"strip",
"(",
")",
"if",
"v",
".",
"get",
"(",
"'synonym'",
")",
":",
"syns",
"[",
"k",
".",
"lower",
"(",
")",
"]",
"=",
"v",
"[",
"'synonym'",
"]",
"if",
"not",
"'.'",
"in",
"k",
":",
"syns",
"[",
"ROOT_TERM",
"+",
"'.'",
"+",
"k",
".",
"lower",
"(",
")",
"]",
"=",
"v",
"[",
"'synonym'",
"]",
"return",
"syns"
] | Return a dict of term synonyms | [
"Return",
"a",
"dict",
"of",
"term",
"synonyms"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L107-L119 |
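A self-contained replay of the synonym-table construction with invented declared terms. Two assumptions: metatab's ROOT_TERM constant is taken to be 'root', and the dotless-key check is nested under the synonym check (the dump flattens indentation, so the nesting is inferred).

ROOT_TERM = 'root'  # assumed value of metatab's ROOT_TERM constant

declared_terms = {
    'Title ': {'synonym': 'Root.Description'},  # whitespace is stripped
    'Root.Name': {},                            # no synonym -> skipped
}

syns = {}
for k, v in declared_terms.items():
    k = k.strip()
    if v.get('synonym'):
        syns[k.lower()] = v['synonym']
        if '.' not in k:
            syns[ROOT_TERM + '.' + k.lower()] = v['synonym']
print(syns)  # {'title': 'Root.Description', 'root.title': 'Root.Description'}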
Metatab/metatab | metatab/parser.py | TermParser.super_terms | def super_terms(self):
"""Return a dictionary mapping term names to their super terms"""
# If the doc already has super terms, we've already parsed something, so
# assume we parsed the declaration, and can re-use the old decls.
if self.doc and self.doc.super_terms:
return self.doc.super_terms
return {k.lower(): v['inheritsfrom'].lower()
for k, v in self._declared_terms.items() if 'inheritsfrom' in v} | python | def super_terms(self):
"""Return a dictionary mapping term names to their super terms"""
# If the doc already has super terms, we've already parsed something, so
# assume we parsed the declaration, and can re-use the old decls.
if self.doc and self.doc.super_terms:
return self.doc.super_terms
return {k.lower(): v['inheritsfrom'].lower()
for k, v in self._declared_terms.items() if 'inheritsfrom' in v} | [
"def",
"super_terms",
"(",
"self",
")",
":",
"# If the doc already has super terms, we've already parsed something, so",
"# assume we parsed the declaration, and can use re-use the old decls.",
"if",
"self",
".",
"doc",
"and",
"self",
".",
"doc",
".",
"super_terms",
":",
"return",
"self",
".",
"doc",
".",
"super_terms",
"return",
"{",
"k",
".",
"lower",
"(",
")",
":",
"v",
"[",
"'inheritsfrom'",
"]",
".",
"lower",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_declared_terms",
".",
"items",
"(",
")",
"if",
"'inheritsfrom'",
"in",
"v",
"}"
] | Return a dictionary mapping term names to their super terms | [
"Return",
"a",
"dictionary",
"mapping",
"term",
"names",
"to",
"their",
"super",
"terms"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L123-L133 |
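The mapping itself is a filtered dict comprehension over the declared terms; with invented input:

declared_terms = {
    'Root.SubTable': {'inheritsfrom': 'Root.Table'},
    'Root.Table': {},
}

super_terms = {k.lower(): v['inheritsfrom'].lower()
               for k, v in declared_terms.items() if 'inheritsfrom' in v}
print(super_terms)  # {'root.subtable': 'root.table'}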
Metatab/metatab | metatab/parser.py | TermParser.declare_dict | def declare_dict(self):
"""Return declared sections, terms and synonyms as a dict"""
# Run the parser, if it has not been run yet.
if not self.root:
for _ in self: pass
return {
'sections': self._declared_sections,
'terms': self._declared_terms,
'synonyms': self.synonyms
} | python | def declare_dict(self):
"""Return declared sections, terms and synonyms as a dict"""
# Run the parser, if it has not been run yet.
if not self.root:
for _ in self: pass
return {
'sections': self._declared_sections,
'terms': self._declared_terms,
'synonyms': self.synonyms
} | [
"def",
"declare_dict",
"(",
"self",
")",
":",
"# Run the parser, if it has not been run yet.",
"if",
"not",
"self",
".",
"root",
":",
"for",
"_",
"in",
"self",
":",
"pass",
"return",
"{",
"'sections'",
":",
"self",
".",
"_declared_sections",
",",
"'terms'",
":",
"self",
".",
"_declared_terms",
",",
"'synonyms'",
":",
"self",
".",
"synonyms",
"}"
] | Return declared sections, terms and synonyms as a dict | [
"Return",
"declared",
"sections",
"terms",
"and",
"synonyms",
"as",
"a",
"dict"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L136-L146 |
Metatab/metatab | metatab/parser.py | TermParser.substitute_synonym | def substitute_synonym(self, nt):
"""
Replace the record_term and parent_term with a synonym
:param nt:
:return:
"""
if nt.join_lc in self.synonyms:
nt.parent_term, nt.record_term = Term.split_term_lower(self.synonyms[nt.join_lc]) | python | def substitute_synonym(self, nt):
"""
Replace the record_term and parent_term with a synonym
:param nt:
:return:
"""
if nt.join_lc in self.synonyms:
nt.parent_term, nt.record_term = Term.split_term_lower(self.synonyms[nt.join_lc]) | [
"def",
"substitute_synonym",
"(",
"self",
",",
"nt",
")",
":",
"if",
"nt",
".",
"join_lc",
"in",
"self",
".",
"synonyms",
":",
"nt",
".",
"parent_term",
",",
"nt",
".",
"record_term",
"=",
"Term",
".",
"split_term_lower",
"(",
"self",
".",
"synonyms",
"[",
"nt",
".",
"join_lc",
"]",
")"
] | Replace the record_term and parent_term with a synonym
:param nt:
:return: | [
"Replace",
"the",
"record_term",
"and",
"parent_term",
"with",
"a",
"synonym",
":",
"param",
"nt",
":",
":",
"return",
":"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L167-L175 |
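What the substitution does, sketched without the Term class: look up the joined lowercase name and split the replacement back into parent and record parts. Term.split_term_lower is assumed to lower-case both halves; the synonym entry is invented.

synonyms = {'root.author': 'Root.Creator'}  # made-up synonym entry

join_lc = 'root.author'
if join_lc in synonyms:
    # stand-in for Term.split_term_lower(...)
    parent_term, record_term = synonyms[join_lc].lower().split('.', 1)
print(parent_term, record_term)  # root creator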
Metatab/metatab | metatab/parser.py | TermParser.errors_as_dict | def errors_as_dict(self):
"""Return parse errors as a dict"""
errors = []
for e in self.errors:
errors.append({
'file': e.term.file_name if e.term else '<unknown>',
'row': e.term.row if e.term else '<unknown>',
'col': e.term.col if e.term else '<unknown>',
'term': e.term.join if e.term else '<unknown>',
'error': str(e)
})
return errors | python | def errors_as_dict(self):
"""Return parse errors as a dict"""
errors = []
for e in self.errors:
errors.append({
'file': e.term.file_name if e.term else '<unknown>',
'row': e.term.row if e.term else '<unknown>',
'col': e.term.col if e.term else '<unknown>',
'term': e.term.join if e.term else '<unknown>',
'error': str(e)
})
return errors | [
"def",
"errors_as_dict",
"(",
"self",
")",
":",
"errors",
"=",
"[",
"]",
"for",
"e",
"in",
"self",
".",
"errors",
":",
"errors",
".",
"append",
"(",
"{",
"'file'",
":",
"e",
".",
"term",
".",
"file_name",
",",
"'row'",
":",
"e",
".",
"term",
".",
"row",
"if",
"e",
".",
"term",
"else",
"'<unknown>'",
",",
"'col'",
":",
"e",
".",
"term",
".",
"col",
"if",
"e",
".",
"term",
"else",
"'<unknown>'",
",",
"'term'",
":",
"e",
".",
"term",
".",
"join",
"if",
"e",
".",
"term",
"else",
"'<unknown>'",
",",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
"return",
"errors"
] | Return parse errors as a dict | [
"Return",
"parse",
"errors",
"as",
"a",
"dict"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L220-L234 |
Metatab/metatab | metatab/parser.py | TermParser.find_declare_doc | def find_declare_doc(self, d, name):
"""Given a name, try to resolve the name to a path or URL to
a declaration document. It will try:
* The name as a filesystem path
* The name as a file name in the directory d
* The name + '.csv' as a name in the directory d
* The name as a URL
* The name as a key in the standard_declares dict
* The name as a path in this module's metatab.declarations package
"""
path = None
while True:
if exists(name):
path =name
break
try:
# Look for a local document
path = declaration_path(name)
break
except IncludeError:
pass
for fn in (join(d, name), join(d, name + '.csv')):
if exists(fn):
path = fn
break
if path:
break
if name.startswith('http'):
path = name.strip('/') # Look for the file on the web
break
elif exists(name):
path = name
break
else:
path = self.resolver.find_decl_doc(name)
break
raise IncludeError("No local declaration file for '{}'".format(name))
return parse_app_url(path) | python | def find_declare_doc(self, d, name):
"""Given a name, try to resolve the name to a path or URL to
a declaration document. It will try:
* The name as a filesystem path
* The name as a file name in the directory d
* The name + '.csv' as a name in the directory d
* The name as a URL
* The name as a key in the standard_declares dict
* The name as a path in this module's metatab.declarations package
"""
path = None
while True:
if exists(name):
path =name
break
try:
# Look for a local document
path = declaration_path(name)
break
except IncludeError:
pass
for fn in (join(d, name), join(d, name + '.csv')):
if exists(fn):
path = fn
break
if path:
break
if name.startswith('http'):
path = name.strip('/') # Look for the file on the web
break
elif exists(name):
path = name
break
else:
path = self.resolver.find_decl_doc(name)
break
raise IncludeError("No local declaration file for '{}'".format(name))
return parse_app_url(path) | [
"def",
"find_declare_doc",
"(",
"self",
",",
"d",
",",
"name",
")",
":",
"path",
"=",
"None",
"while",
"True",
":",
"if",
"exists",
"(",
"name",
")",
":",
"path",
"=",
"name",
"break",
"try",
":",
"# Look for a local document",
"path",
"=",
"declaration_path",
"(",
"name",
")",
"break",
"except",
"IncludeError",
":",
"pass",
"for",
"fn",
"in",
"(",
"join",
"(",
"d",
",",
"name",
")",
",",
"join",
"(",
"d",
",",
"name",
"+",
"'.csv'",
")",
")",
":",
"if",
"exists",
"(",
"fn",
")",
":",
"path",
"=",
"fn",
"break",
"if",
"path",
":",
"break",
"if",
"name",
".",
"startswith",
"(",
"'http'",
")",
":",
"path",
"=",
"name",
".",
"strip",
"(",
"'/'",
")",
"# Look for the file on the web",
"break",
"elif",
"exists",
"(",
"name",
")",
":",
"path",
"=",
"name",
"break",
"else",
":",
"path",
"=",
"self",
".",
"resolver",
".",
"find_decl_doc",
"(",
"name",
")",
"break",
"raise",
"IncludeError",
"(",
"\"No local declaration file for '{}'\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"parse_app_url",
"(",
"path",
")"
] | Given a name, try to resolve the name to a path or URL to
a declaration document. It will try:
* The name as a filesystem path
* The name as a file name in the directory d
* The name + '.csv' as a name in the directory d
* The name as a URL
* The name as a key in the standard_declares dict
* The name as a path in this module's metatab.declarations package | [
"Given",
"a",
"name",
"try",
"to",
"resolve",
"the",
"name",
"to",
"a",
"path",
"or",
"URL",
"to",
"a",
"declaration",
"document",
".",
"It",
"will",
"try",
":"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L236-L281 |
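The resolution cascade reduced to plain functions; the package-level declaration lookup and the resolver fallback are stubbed out since they need metatab installed.

from os.path import exists, join

def resolve_decl_name(d, name):
    # Same order of attempts as the docstring: literal path, then the
    # directory d with and without a '.csv' suffix, then a URL.
    for candidate in (name, join(d, name), join(d, name + '.csv')):
        if exists(candidate):
            return candidate
    if name.startswith('http'):
        return name.strip('/')
    return None  # real code falls back to declaration_path()/resolver

print(resolve_decl_name('/tmp', 'metatab-latest'))  # likely None here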
Metatab/metatab | metatab/parser.py | TermParser.find_include_doc | def find_include_doc(self, d, name):
"""Resolve a name or path for an include doc to a an absolute path or url
:param name:
"""
from metatab import parse_app_url
include_ref = name.strip('/')
if include_ref.startswith('http'):
path = include_ref
else:
if not d:
raise IncludeError("Can't include '{}' because don't know current path "
.format(name))
path = join(d, include_ref)
return parse_app_url(path) | python | def find_include_doc(self, d, name):
"""Resolve a name or path for an include doc to a an absolute path or url
:param name:
"""
from metatab import parse_app_url
include_ref = name.strip('/')
if include_ref.startswith('http'):
path = include_ref
else:
if not d:
raise IncludeError("Can't include '{}' because don't know current path "
.format(name))
path = join(d, include_ref)
return parse_app_url(path) | [
"def",
"find_include_doc",
"(",
"self",
",",
"d",
",",
"name",
")",
":",
"from",
"metatab",
"import",
"parse_app_url",
"include_ref",
"=",
"name",
".",
"strip",
"(",
"'/'",
")",
"if",
"include_ref",
".",
"startswith",
"(",
"'http'",
")",
":",
"path",
"=",
"include_ref",
"else",
":",
"if",
"not",
"d",
":",
"raise",
"IncludeError",
"(",
"\"Can't include '{}' because don't know current path \"",
".",
"format",
"(",
"name",
")",
")",
"path",
"=",
"join",
"(",
"d",
",",
"include_ref",
")",
"return",
"parse_app_url",
"(",
"path",
")"
] | Resolve a name or path for an include doc to an absolute path or url
:param name: | [
"Resolve",
"a",
"name",
"or",
"path",
"for",
"an",
"include",
"doc",
"to",
"a",
"an",
"absolute",
"path",
"or",
"url",
":",
"param",
"name",
":"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L283-L300 |
Metatab/metatab | metatab/parser.py | TermParser.generate_terms | def generate_terms(self, ref, root, file_type=None):
"""An generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
"""
last_section = root
t = None
if isinstance(ref, Source):
row_gen = ref
ref_path = row_gen.__class__.__name__
else:
row_gen = get_generator(ref)
ref_path = ref.path
try:
for line_n, row in enumerate(row_gen, 1):
if not row or not row[0] or not row[0].strip() or row[0].strip().startswith('#'):
continue
tt = Term(row[0], None) # Just to get the qualified name constructed property
term_class = self.get_term_class(tt.join_lc)
t = term_class(tt.join_lc,
row[1] if len(row) > 1 else '',
row[2:] if len(row) > 2 else [],
row=line_n,
col=1,
file_name=ref_path, file_type=file_type, doc=self.doc)
# Why did we remove comments from values? It strips out Markdown
#if t.value and str(t.value).startswith('#'): # Comments are ignored
# continue
if t.term_is('include') or t.term_is('declare'):
if t.term_is('include'):
resolved = self.find_include_doc(dirname(ref_path), t.value.strip())
else:
resolved = self.find_declare_doc(dirname(ref_path), t.value.strip())
if row_gen.ref == resolved:
raise IncludeError("Include loop for '{}' ".format(resolved))
yield t
try:
sub_gen = get_generator(resolved.get_resource().get_target())
for t in self.generate_terms(sub_gen, root, file_type=t.record_term_lc):
yield t
if last_section:
yield last_section # Re-assert the last section
except IncludeError as e:
e.term = t
raise
except (OSError, FileNotFoundError, GenerateError, DownloadError) as e:
e = IncludeError("Failed to Include; {}".format(e))
e.term = t
raise e
continue # Already yielded the include/declare term, and includes can't have children
elif t.term_is('section'):
# If there is already a section in the document, emit the existing section,
# rather than a new one.
try:
last_section = self.doc[t.name]
t = last_section
except (KeyError, TypeError): # TypeError -> self.doc is None
last_section = t
yield t
# Yield any child terms, from the term row arguments
if not t.term_is('section') and not t.term_is('header'):
for col, value in enumerate(t.args, 0):
if str(value).strip():
term_name = t.record_term_lc + '.' + str(col)
term_class = self.get_term_class(term_name)
yield term_class(term_name, str(value), [],
row=line_n,
col=col + 2, # The 0th argument starts in col 2
file_name=ref_path,
file_type=file_type,
parent=t) #,
#doc=None,
#section=last_section)
except IncludeError as e:
exc = IncludeError(str(e) + "; in '{}' ".format(ref_path))
exc.term = e.term if hasattr(e, 'term') else None
raise exc | python | def generate_terms(self, ref, root, file_type=None):
"""An generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref:
"""
last_section = root
t = None
if isinstance(ref, Source):
row_gen = ref
ref_path = row_gen.__class__.__name__
else:
row_gen = get_generator(ref)
ref_path = ref.path
try:
for line_n, row in enumerate(row_gen, 1):
if not row or not row[0] or not row[0].strip() or row[0].strip().startswith('#'):
continue
tt = Term(row[0], None) # Just to get the qualified name constructed property
term_class = self.get_term_class(tt.join_lc)
t = term_class(tt.join_lc,
row[1] if len(row) > 1 else '',
row[2:] if len(row) > 2 else [],
row=line_n,
col=1,
file_name=ref_path, file_type=file_type, doc=self.doc)
# Why did we remove comments from values? It strips out Markdown
#if t.value and str(t.value).startswith('#'): # Comments are ignored
# continue
if t.term_is('include') or t.term_is('declare'):
if t.term_is('include'):
resolved = self.find_include_doc(dirname(ref_path), t.value.strip())
else:
resolved = self.find_declare_doc(dirname(ref_path), t.value.strip())
if row_gen.ref == resolved:
raise IncludeError("Include loop for '{}' ".format(resolved))
yield t
try:
sub_gen = get_generator(resolved.get_resource().get_target())
for t in self.generate_terms(sub_gen, root, file_type=t.record_term_lc):
yield t
if last_section:
yield last_section # Re-assert the last section
except IncludeError as e:
e.term = t
raise
except (OSError, FileNotFoundError, GenerateError, DownloadError) as e:
e = IncludeError("Failed to Include; {}".format(e))
e.term = t
raise e
continue # Already yielded the include/declare term, and includes can't have children
elif t.term_is('section'):
# If there is already a section in the document, emit the existing section,
# rather than a new one.
try:
last_section = self.doc[t.name]
t = last_section
except (KeyError, TypeError): # TypeError -> self.doc is None
last_section = t
yield t
# Yield any child terms, from the term row arguments
if not t.term_is('section') and not t.term_is('header'):
for col, value in enumerate(t.args, 0):
if str(value).strip():
term_name = t.record_term_lc + '.' + str(col)
term_class = self.get_term_class(term_name)
yield term_class(term_name, str(value), [],
row=line_n,
col=col + 2, # The 0th argument starts in col 2
file_name=ref_path,
file_type=file_type,
parent=t) #,
#doc=None,
#section=last_section)
except IncludeError as e:
exc = IncludeError(str(e) + "; in '{}' ".format(ref_path))
exc.term = e.term if hasattr(e, 'term') else None
raise exc | [
"def",
"generate_terms",
"(",
"self",
",",
"ref",
",",
"root",
",",
"file_type",
"=",
"None",
")",
":",
"last_section",
"=",
"root",
"t",
"=",
"None",
"if",
"isinstance",
"(",
"ref",
",",
"Source",
")",
":",
"row_gen",
"=",
"ref",
"ref_path",
"=",
"row_gen",
".",
"__class__",
".",
"__name__",
"else",
":",
"row_gen",
"=",
"get_generator",
"(",
"ref",
")",
"ref_path",
"=",
"ref",
".",
"path",
"try",
":",
"for",
"line_n",
",",
"row",
"in",
"enumerate",
"(",
"row_gen",
",",
"1",
")",
":",
"if",
"not",
"row",
"or",
"not",
"row",
"[",
"0",
"]",
"or",
"not",
"row",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"or",
"row",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"tt",
"=",
"Term",
"(",
"row",
"[",
"0",
"]",
",",
"None",
")",
"# Just to get the qualified name constructed property",
"term_class",
"=",
"self",
".",
"get_term_class",
"(",
"tt",
".",
"join_lc",
")",
"t",
"=",
"term_class",
"(",
"tt",
".",
"join_lc",
",",
"row",
"[",
"1",
"]",
"if",
"len",
"(",
"row",
")",
">",
"1",
"else",
"''",
",",
"row",
"[",
"2",
":",
"]",
"if",
"len",
"(",
"row",
")",
">",
"2",
"else",
"[",
"]",
",",
"row",
"=",
"line_n",
",",
"col",
"=",
"1",
",",
"file_name",
"=",
"ref_path",
",",
"file_type",
"=",
"file_type",
",",
"doc",
"=",
"self",
".",
"doc",
")",
"# Why did we remove comments from values? It strips out Markdown",
"#if t.value and str(t.value).startswith('#'): # Comments are ignored",
"# continue",
"if",
"t",
".",
"term_is",
"(",
"'include'",
")",
"or",
"t",
".",
"term_is",
"(",
"'declare'",
")",
":",
"if",
"t",
".",
"term_is",
"(",
"'include'",
")",
":",
"resolved",
"=",
"self",
".",
"find_include_doc",
"(",
"dirname",
"(",
"ref_path",
")",
",",
"t",
".",
"value",
".",
"strip",
"(",
")",
")",
"else",
":",
"resolved",
"=",
"self",
".",
"find_declare_doc",
"(",
"dirname",
"(",
"ref_path",
")",
",",
"t",
".",
"value",
".",
"strip",
"(",
")",
")",
"if",
"row_gen",
".",
"ref",
"==",
"resolved",
":",
"raise",
"IncludeError",
"(",
"\"Include loop for '{}' \"",
".",
"format",
"(",
"resolved",
")",
")",
"yield",
"t",
"try",
":",
"sub_gen",
"=",
"get_generator",
"(",
"resolved",
".",
"get_resource",
"(",
")",
".",
"get_target",
"(",
")",
")",
"for",
"t",
"in",
"self",
".",
"generate_terms",
"(",
"sub_gen",
",",
"root",
",",
"file_type",
"=",
"t",
".",
"record_term_lc",
")",
":",
"yield",
"t",
"if",
"last_section",
":",
"yield",
"last_section",
"# Re-assert the last section",
"except",
"IncludeError",
"as",
"e",
":",
"e",
".",
"term",
"=",
"t",
"raise",
"except",
"(",
"OSError",
",",
"FileNotFoundError",
",",
"GenerateError",
",",
"DownloadError",
")",
"as",
"e",
":",
"e",
"=",
"IncludeError",
"(",
"\"Failed to Include; {}\"",
".",
"format",
"(",
"e",
")",
")",
"e",
".",
"term",
"=",
"t",
"raise",
"e",
"continue",
"# Already yielded the include/declare term, and includes can't have children",
"elif",
"t",
".",
"term_is",
"(",
"'section'",
")",
":",
"# If there is already a section in the document, emit the existing section,",
"# rather than a new one.",
"try",
":",
"last_section",
"=",
"self",
".",
"doc",
"[",
"t",
".",
"name",
"]",
"t",
"=",
"last_section",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"# TypeError -> self.doc is None",
"last_section",
"=",
"t",
"yield",
"t",
"# Yield any child terms, from the term row arguments",
"if",
"not",
"t",
".",
"term_is",
"(",
"'section'",
")",
"and",
"not",
"t",
".",
"term_is",
"(",
"'header'",
")",
":",
"for",
"col",
",",
"value",
"in",
"enumerate",
"(",
"t",
".",
"args",
",",
"0",
")",
":",
"if",
"str",
"(",
"value",
")",
".",
"strip",
"(",
")",
":",
"term_name",
"=",
"t",
".",
"record_term_lc",
"+",
"'.'",
"+",
"str",
"(",
"col",
")",
"term_class",
"=",
"self",
".",
"get_term_class",
"(",
"term_name",
")",
"yield",
"term_class",
"(",
"term_name",
",",
"str",
"(",
"value",
")",
",",
"[",
"]",
",",
"row",
"=",
"line_n",
",",
"col",
"=",
"col",
"+",
"2",
",",
"# The 0th argument starts in col 2",
"file_name",
"=",
"ref_path",
",",
"file_type",
"=",
"file_type",
",",
"parent",
"=",
"t",
")",
"#,",
"#doc=None,",
"#section=last_section)",
"except",
"IncludeError",
"as",
"e",
":",
"exc",
"=",
"IncludeError",
"(",
"str",
"(",
"e",
")",
"+",
"\"; in '{}' \"",
".",
"format",
"(",
"ref_path",
")",
")",
"exc",
".",
"term",
"=",
"e",
".",
"term",
"if",
"hasattr",
"(",
"e",
",",
"'term'",
")",
"else",
"None",
"raise",
"exc"
] | A generator that yields term objects, handling includes and argument
children.
:param file_type:
:param doc:
:param root:
:param ref: | [
"An",
"generator",
"that",
"yields",
"term",
"objects",
"handling",
"includes",
"and",
"argument",
"children",
".",
":",
"param",
"file_type",
":",
":",
"param",
"doc",
":",
":",
"param",
"root",
":",
":",
"param",
"ref",
":"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L302-L410 |
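One detail worth isolating from the generator above: extra row cells become child terms named <record term>.<argument position>. A sketch with an invented Metatab row:

row = ['Table.Column', 'county', 'integer', 'County FIPS code']

record_term_lc = row[0].split('.')[-1].lower()   # 'column'
children = []
for col, value in enumerate(row[2:], 0):         # 0th argument is column 2
    if str(value).strip():
        children.append((record_term_lc + '.' + str(col), str(value)))
print(children)  # [('column.0', 'integer'), ('column.1', 'County FIPS code')]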
Metatab/metatab | metatab/parser.py | TermParser.inherited_children | def inherited_children(self, t):
"""Generate inherited children based on a terms InhertsFrom property.
The input term must have both an InheritsFrom property and a defined Section
:param t: A subclassed terms -- has an InheritsFrom value
"""
if not t.get('inheritsfrom'):
return
if not 'section' in t:
raise DeclarationError("DeclareTerm for '{}' must specify a section to use InheritsFrom"
.format(t['term']))
t_p, t_r = Term.split_term(t['term'])
ih_p, ih_r = Term.split_term(t['inheritsfrom'])
# The inherited terms must come from the same section
section_terms = self._declared_sections[t['section'].lower()]['terms']
# For each of the terms in the section, look for terms that are children
# of the term that the input term inherits from. Then yield each of those terms
# after changing the term name to be a child of the input term.
for st_name in section_terms:
if st_name.lower().startswith(ih_r.lower() + '.'):
st_p, st_r = Term.split_term(st_name)
# Yield the term, but replace the parent part
subtype_name = t_r + '.' + st_r
subtype_d = dict(self._declared_terms[st_name.lower()].items())
subtype_d['inheritsfrom'] = '';
subtype_d['term'] = subtype_name
yield subtype_d | python | def inherited_children(self, t):
"""Generate inherited children based on a terms InhertsFrom property.
The input term must have both an InheritsFrom property and a defined Section
:param t: A subclassed terms -- has an InheritsFrom value
"""
if not t.get('inheritsfrom'):
return
if not 'section' in t:
raise DeclarationError("DeclareTerm for '{}' must specify a section to use InheritsFrom"
.format(t['term']))
t_p, t_r = Term.split_term(t['term'])
ih_p, ih_r = Term.split_term(t['inheritsfrom'])
# The inherited terms must come from the same section
section_terms = self._declared_sections[t['section'].lower()]['terms']
# For each of the terms in the section, look for terms that are children
# of the term that the input term inherits from. Then yield each of those terms
# after changing the term name to be a child of the input term.
for st_name in section_terms:
if st_name.lower().startswith(ih_r.lower() + '.'):
st_p, st_r = Term.split_term(st_name)
# Yield the term, but replace the parent part
subtype_name = t_r + '.' + st_r
subtype_d = dict(self._declared_terms[st_name.lower()].items())
subtype_d['inheritsfrom'] = '';
subtype_d['term'] = subtype_name
yield subtype_d | [
"def",
"inherited_children",
"(",
"self",
",",
"t",
")",
":",
"if",
"not",
"t",
".",
"get",
"(",
"'inheritsfrom'",
")",
":",
"return",
"if",
"not",
"'section'",
"in",
"t",
":",
"raise",
"DeclarationError",
"(",
"\"DeclareTerm for '{}' must specify a section to use InheritsFrom\"",
".",
"format",
"(",
"t",
"[",
"'term'",
"]",
")",
")",
"t_p",
",",
"t_r",
"=",
"Term",
".",
"split_term",
"(",
"t",
"[",
"'term'",
"]",
")",
"ih_p",
",",
"ih_r",
"=",
"Term",
".",
"split_term",
"(",
"t",
"[",
"'inheritsfrom'",
"]",
")",
"# The inherited terms must come from the same section",
"section_terms",
"=",
"self",
".",
"_declared_sections",
"[",
"t",
"[",
"'section'",
"]",
".",
"lower",
"(",
")",
"]",
"[",
"'terms'",
"]",
"# For each of the terms in the section, look for terms that are children",
"# of the term that the input term inherits from. Then yield each of those terms",
"# after chang the term name to be a child of the input term.",
"for",
"st_name",
"in",
"section_terms",
":",
"if",
"st_name",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"ih_r",
".",
"lower",
"(",
")",
"+",
"'.'",
")",
":",
"st_p",
",",
"st_r",
"=",
"Term",
".",
"split_term",
"(",
"st_name",
")",
"# Yield the term, but replace the parent part",
"subtype_name",
"=",
"t_r",
"+",
"'.'",
"+",
"st_r",
"subtype_d",
"=",
"dict",
"(",
"self",
".",
"_declared_terms",
"[",
"st_name",
".",
"lower",
"(",
")",
"]",
".",
"items",
"(",
")",
")",
"subtype_d",
"[",
"'inheritsfrom'",
"]",
"=",
"''",
"subtype_d",
"[",
"'term'",
"]",
"=",
"subtype_name",
"yield",
"subtype_d"
] | Generate inherited children based on a term's InheritsFrom property.
The input term must have both an InheritsFrom property and a defined Section
:param t: A subclassed terms -- has an InheritsFrom value | [
"Generate",
"inherited",
"children",
"based",
"on",
"a",
"terms",
"InhertsFrom",
"property",
".",
"The",
"input",
"term",
"must",
"have",
"both",
"an",
"InheritsFrom",
"property",
"and",
"a",
"defined",
"Section"
] | train | https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/parser.py#L548-L581 |
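The renaming step in isolation: children declared under the inherited-from term are re-keyed under the inheriting term. All term names below are invented.

declared_terms = {
    'parentterm.note': {'term': 'ParentTerm.Note'},
}
t = {'term': 'Root.ChildTerm', 'inheritsfrom': 'Root.ParentTerm'}

t_r = t['term'].split('.')[-1]             # 'ChildTerm'
ih_r = t['inheritsfrom'].split('.')[-1]    # 'ParentTerm'

for st_name in declared_terms:
    if st_name.lower().startswith(ih_r.lower() + '.'):
        st_r = st_name.split('.')[-1]
        sub = dict(declared_terms[st_name])
        sub['term'] = t_r + '.' + st_r     # 'ChildTerm.note'
        sub['inheritsfrom'] = ''
        print(sub)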
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.get_remote_peer_list | def get_remote_peer_list(self):
'''
listPeer returns a list:
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList'] | python | def get_remote_peer_list(self):
'''
listPeer returns a list:
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
}
'''
params = {
'type': 0,
'v': DEFAULT_V,
'ct': 2
}
res = self._get('listPeer', params=params)
return res['peerList'] | [
"def",
"get_remote_peer_list",
"(",
"self",
")",
":",
"params",
"=",
"{",
"'type'",
":",
"0",
",",
"'v'",
":",
"DEFAULT_V",
",",
"'ct'",
":",
"2",
"}",
"res",
"=",
"self",
".",
"_get",
"(",
"'listPeer'",
",",
"params",
"=",
"params",
")",
"return",
"res",
"[",
"'peerList'",
"]"
] | listPeer returns a list:
{
"rtn":0,
"peerList": [{
"category": "",
"status": 0,
"name": "GUNNER_HOME",
"vodPort": 43566,
"company": "XUNLEI_MIPS_BE_MIPS32",
"pid": "8498352EB4F5208X0001",
"lastLoginTime": 1412053233,
"accesscode": "",
"localIP": "",
"location": "\u6d59\u6c5f\u7701 \u8054\u901a",
"online": 1,
"path_list": "C:/",
"type": 30,
"deviceVersion": 22083310
}]
} | [
"listPeer",
"返回列表",
"{",
"rtn",
":",
"0",
"peerList",
":",
"[",
"{",
"category",
":",
"status",
":",
"0",
"name",
":",
"GUNNER_HOME",
"vodPort",
":",
"43566",
"company",
":",
"XUNLEI_MIPS_BE_MIPS32",
"pid",
":",
"8498352EB4F5208X0001",
"lastLoginTime",
":",
"1412053233",
"accesscode",
":",
"localIP",
":",
"location",
":",
"\\",
"u6d59",
"\\",
"u6c5f",
"\\",
"u7701",
"\\",
"u8054",
"\\",
"u901a",
"online",
":",
"1",
"path_list",
":",
"C",
":",
"/",
"type",
":",
"30",
"deviceVersion",
":",
"22083310",
"}",
"]",
"}"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L78-L108 |
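Consuming the documented listPeer payload; the response dict below is trimmed from the docstring example above.

res = {
    'rtn': 0,
    'peerList': [{
        'pid': '8498352EB4F5208X0001',
        'name': 'GUNNER_HOME',
        'online': 1,
    }],
}

for peer in res['peerList']:
    state = 'online' if peer['online'] else 'offline'
    print('{} ({}): {}'.format(peer['name'], peer['pid'], state))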
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.get_remote_task_list | def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list returns a list:
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks'] | python | def get_remote_task_list(
self, peer_id, list_type=ListType.downloading, pos=0, number=10):
'''
list returns a list:
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
}
'''
params = {
'pid': peer_id,
'type': list_type,
'pos': pos,
'number': number,
'needUrl': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('list', params=params)
return res['tasks'] | [
"def",
"get_remote_task_list",
"(",
"self",
",",
"peer_id",
",",
"list_type",
"=",
"ListType",
".",
"downloading",
",",
"pos",
"=",
"0",
",",
"number",
"=",
"10",
")",
":",
"params",
"=",
"{",
"'pid'",
":",
"peer_id",
",",
"'type'",
":",
"list_type",
",",
"'pos'",
":",
"pos",
",",
"'number'",
":",
"number",
",",
"'needUrl'",
":",
"1",
",",
"'v'",
":",
"DEFAULT_V",
",",
"'ct'",
":",
"DEFAULT_CT",
"}",
"res",
"=",
"self",
".",
"_get",
"(",
"'list'",
",",
"params",
"=",
"params",
")",
"return",
"res",
"[",
"'tasks'",
"]"
] | list returns a list:
{
"recycleNum": 0,
"serverFailNum": 0,
"rtn": 0,
"completeNum": 34,
"sync": 0,
"tasks": [{
"failCode": 15414,
"vipChannel": {
"available": 0,
"failCode": 0,
"opened": 0,
"type": 0,
"dlBytes": 0,
"speed": 0
},
"name": "Blablaba",
"url": "magnet:?xt=urn:btih:5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed": 0,
"lixianChannel": {
"failCode": 0,
"serverProgress": 0,
"dlBytes": 0,
"state": 0,
"serverSpeed": 0,
"speed": 0
},
"downTime": 0,
"subList": [],
"createTime": 1412217010,
"state": 8,
"remainTime": 0,
"progress": 0,
"path": "/tmp/thunder/volumes/C:/TDDOWNLOAD/",
"type": 2,
"id": "39",
"completeTime": 0,
"size": 0
},
...
]
} | [
"list",
"返回列表",
"{",
"recycleNum",
":",
"0",
"serverFailNum",
":",
"0",
"rtn",
":",
"0",
"completeNum",
":",
"34",
"sync",
":",
"0",
"tasks",
":",
"[",
"{",
"failCode",
":",
"15414",
"vipChannel",
":",
"{",
"available",
":",
"0",
"failCode",
":",
"0",
"opened",
":",
"0",
"type",
":",
"0",
"dlBytes",
":",
"0",
"speed",
":",
"0",
"}",
"name",
":",
"Blablaba",
"url",
":",
"magnet",
":",
"?xt",
"=",
"urn",
":",
"btih",
":",
"5DF6B321CCBDEBE1D52E8E15CBFC6F002",
"speed",
":",
"0",
"lixianChannel",
":",
"{",
"failCode",
":",
"0",
"serverProgress",
":",
"0",
"dlBytes",
":",
"0",
"state",
":",
"0",
"serverSpeed",
":",
"0",
"speed",
":",
"0",
"}",
"downTime",
":",
"0",
"subList",
":",
"[]",
"createTime",
":",
"1412217010",
"state",
":",
"8",
"remainTime",
":",
"0",
"progress",
":",
"0",
"path",
":",
"/",
"tmp",
"/",
"thunder",
"/",
"volumes",
"/",
"C",
":",
"/",
"TDDOWNLOAD",
"/",
"type",
":",
"2",
"id",
":",
"39",
"completeTime",
":",
"0",
"size",
":",
"0",
"}",
"...",
"]",
"}"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L118-L176 |
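A minimal consumer for the documented 'tasks' payload, splitting active downloads from stalled ones by the reported speed (the second entry is invented).

res = {'rtn': 0, 'tasks': [
    {'id': '39', 'name': 'Blablaba', 'state': 8, 'speed': 0, 'size': 0},
    {'id': '40', 'name': 'demo.mkv', 'state': 0, 'speed': 1024,
     'size': 512807020},
]}

active = [t for t in res['tasks'] if t['speed'] > 0]
print('%d tasks, %d actively downloading' % (len(res['tasks']), len(active)))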
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.check_url | def check_url(self, pid, url_list):
'''
urlCheck returns data:
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
task_info = res['taskInfo']  # read up front so the failure branch can report failCode
if res['rtn'] == 0:
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.' % (url, task_info['failCode'])
)
return task_list | python | def check_url(self, pid, url_list):
'''
urlCheck returns data:
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
}
'''
task_list = []
for url in url_list:
params = {
'pid': pid,
'url': url,
'type': 1,
'v': DEFAULT_V,
'ct': DEFAULT_CT
}
res = self._get('urlCheck', params=params)
task_info = res['taskInfo']  # read up front so the failure branch can report failCode
if res['rtn'] == 0:
task_list.append({
'url': task_info['url'],
'name': task_info['name'],
'filesize': task_info['size'],
'gcid': '',
'cid': ''
})
else:
print(
'url [%s] check failed, code:%s.' % (url, task_info['failCode'])
)
return task_list | [
"def",
"check_url",
"(",
"self",
",",
"pid",
",",
"url_list",
")",
":",
"task_list",
"=",
"[",
"]",
"for",
"url",
"in",
"url_list",
":",
"params",
"=",
"{",
"'pid'",
":",
"pid",
",",
"'url'",
":",
"url",
",",
"'type'",
":",
"1",
",",
"'v'",
":",
"DEFAULT_V",
",",
"'ct'",
":",
"DEFAULT_CT",
"}",
"res",
"=",
"self",
".",
"_get",
"(",
"'urlCheck'",
",",
"params",
"=",
"params",
")",
"if",
"res",
"[",
"'rtn'",
"]",
"==",
"0",
":",
"task_info",
"=",
"res",
"[",
"'taskInfo'",
"]",
"task_list",
".",
"append",
"(",
"{",
"'url'",
":",
"task_info",
"[",
"'url'",
"]",
",",
"'name'",
":",
"task_info",
"[",
"'name'",
"]",
",",
"'filesize'",
":",
"task_info",
"[",
"'size'",
"]",
",",
"'gcid'",
":",
"''",
",",
"'cid'",
":",
"''",
"}",
")",
"else",
":",
"print",
"(",
"'url [%s] check failed, code:%s.'",
",",
"url",
",",
"task_info",
"[",
"'failCode'",
"]",
")",
"return",
"task_list"
] | urlCheck returns data:
{
"rtn": 0,
"taskInfo": {
"failCode": 0,
"name": ".HDTVrip.1024X576.mkv",
"url": "ed2k://|file|%E6%B0%",
"type": 1,
"id": "0",
"size": 505005442
}
} | [
"urlCheck",
"返回数据",
"{",
"rtn",
":",
"0",
"taskInfo",
":",
"{",
"failCode",
":",
"0",
"name",
":",
".",
"HDTVrip",
".",
"1024X576",
".",
"mkv",
"url",
":",
"ed2k",
":",
"//",
"|file|%E6%B0%",
"type",
":",
"1",
"id",
":",
"0",
"size",
":",
"505005442",
"}",
"}"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L178-L220 |
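The taskInfo-to-task mapping from the loop above, applied to the docstring's sample payload:

task_info = {
    'name': '.HDTVrip.1024X576.mkv',
    'url': 'ed2k://|file|%E6%B0%',
    'size': 505005442,
}

task = {'url': task_info['url'], 'name': task_info['name'],
        'filesize': task_info['size'], 'gcid': '', 'cid': ''}
print(task['name'], task['filesize'])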
lazygunner/xunleipy | xunleipy/remote.py | XunLeiRemote.add_tasks_to_remote | def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res | python | def add_tasks_to_remote(self, pid, path='C:/TDDOWNLOAD/', task_list=[]):
'''
post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
}
'''
if len(task_list) == 0:
return []
params = {
'pid': pid,
'v': DEFAULT_V,
'ct': DEFAULT_CT,
}
data = {
'path': path,
'tasks': task_list
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = json.dumps(data)
data = quote(data)
data = 'json=' + data
res = self._post(
'createTask',
params=params,
data=data,
headers=headers
)
return res | [
"def",
"add_tasks_to_remote",
"(",
"self",
",",
"pid",
",",
"path",
"=",
"'C:/TDDOWNLOAD/'",
",",
"task_list",
"=",
"[",
"]",
")",
":",
"if",
"len",
"(",
"task_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"params",
"=",
"{",
"'pid'",
":",
"pid",
",",
"'v'",
":",
"DEFAULT_V",
",",
"'ct'",
":",
"DEFAULT_CT",
",",
"}",
"data",
"=",
"{",
"'path'",
":",
"path",
",",
"'tasks'",
":",
"task_list",
"}",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
"}",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"data",
"=",
"quote",
"(",
"data",
")",
"data",
"=",
"'json='",
"+",
"data",
"res",
"=",
"self",
".",
"_post",
"(",
"'createTask'",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"return",
"res"
] | post data:
{
"path":"C:/TDDOWNLOAD/",
"tasks":[{
"url":"ed2k://|file|%E6%B0%B8%E6%81%92.Forever...",
"name":"永恒.Forever.S01E02.中英字幕.WEB-HR.mkv",
"gcid":"",
"cid":"",
"filesize":512807020
}]
}
return data:
{
"tasks": [{
"name": "\u6c38\u6052.Fore76.x264.mkv",
"url": "ed2k://|file|%E6%B0%B8%E6%81%92",
"result": 202,
"taskid": "48",
"msg": "repeate_taskid:48",
"id": 1
}],
"rtn": 0
} | [
"post",
"data",
":",
"{",
"path",
":",
"C",
":",
"/",
"TDDOWNLOAD",
"/",
"tasks",
":",
"[",
"{",
"url",
":",
"ed2k",
":",
"//",
"|file|%E6%B0%B8%E6%81%92",
".",
"Forever",
"...",
"name",
":",
"永恒",
".",
"Forever",
".",
"S01E02",
".",
"中英字幕",
".",
"WEB",
"-",
"HR",
".",
"mkv",
"gcid",
":",
"cid",
":",
"filesize",
":",
"512807020",
"}",
"]",
"}"
] | train | https://github.com/lazygunner/xunleipy/blob/cded7598a7bf04495156bae2d747883d1eacb3f4/xunleipy/remote.py#L233-L290 |
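How the request body is built: the payload is JSON-serialised, percent-quoted, and prefixed with 'json=' before being posted as a form body. The snippet runs without xunleipy; the Python 2 import fallback is an assumption about the module's environment.

import json
try:
    from urllib.parse import quote   # Python 3
except ImportError:
    from urllib import quote         # Python 2 fallback (assumption)

payload = {'path': 'C:/TDDOWNLOAD/', 'tasks': [
    {'url': 'ed2k://|file|...', 'name': 'demo.mkv',
     'gcid': '', 'cid': '', 'filesize': 512807020},
]}
body = 'json=' + quote(json.dumps(payload))
print(body[:40])  # json=%7B%22path%22%3A%20%22C%3A/TDDOW...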
Capitains/Nautilus | capitains_nautilus/manager.py | read_levels | def read_levels(text):
""" Read text and get there reffs
:param text: Collection (Readable)
:return:
"""
x = []
for i in range(0, len(NAUTILUSRESOLVER.getMetadata(text).citation)):
x.append(NAUTILUSRESOLVER.getReffs(text, level=i))
return x | python | def read_levels(text):
""" Read text and get there reffs
:param text: Collection (Readable)
:return:
"""
x = []
for i in range(0, len(NAUTILUSRESOLVER.getMetadata(text).citation)):
x.append(NAUTILUSRESOLVER.getReffs(text, level=i))
return x | [
"def",
"read_levels",
"(",
"text",
")",
":",
"x",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"NAUTILUSRESOLVER",
".",
"getMetadata",
"(",
"text",
")",
".",
"citation",
")",
")",
":",
"x",
".",
"append",
"(",
"NAUTILUSRESOLVER",
".",
"getReffs",
"(",
"text",
",",
"level",
"=",
"i",
")",
")",
"return",
"x"
] | Read text and get their reffs
:param text: Collection (Readable)
:return: | [
"Read",
"text",
"and",
"get",
"there",
"reffs"
] | train | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/manager.py#L15-L24 |
Capitains/Nautilus | capitains_nautilus/manager.py | FlaskNautilusManager | def FlaskNautilusManager(resolver, flask_nautilus):
""" Provides a manager for flask scripts to perform specific maintenance operations
:param resolver: Nautilus Extension Instance
:type resolver: NautilusCtsResolver
:param flask_nautilus: Flask Application
:type flask_nautilus: FlaskNautilus
:return: CLI
:rtype: click.group
Import with
.. code-block:: python
:linenos:
from capitains_nautilus.manager import FlaskNautilusManager
manager = FlaskNautilusManager(resolver, flask_nautilus)
if __name__ == "__main__":
manager()
"""
global NAUTILUSRESOLVER
NAUTILUSRESOLVER = resolver
@click.group()
@click.option('--verbose', default=False)
def CLI(verbose):
""" CLI for Flask Nautilus """
click.echo("Command Line Interface of Flask")
resolver.logger.disabled = not verbose
@CLI.command()
def flush_resolver():
""" Flush the resolver cache system """
if resolver.clear() is True:
click.echo("Caching of Resolver Cleared")
@CLI.command()
def flush_http_cache():
""" Flush the http cache
Warning : Might flush other Flask Caching data !
"""
flask_nautilus.flaskcache.clear()
@CLI.command()
def flush_both():
""" Flush all caches
"""
if resolver.cache.clear() is True:
click.echo("Caching of Resolver Cleared")
if flask_nautilus.flaskcache.clear() is True:
click.echo("Caching of HTTP Cleared")
@CLI.command()
def parse():
""" Preprocess the inventory and cache it """
ret = resolver.parse()
click.echo("Preprocessed %s texts" % len(ret.readableDescendants))
@CLI.command()
@click.option('--threads', default=0, type=int)
def process_reffs(threads):
""" Preprocess the inventory and cache it """
if threads < 1:
threads = THREADS
texts = list(resolver.getMetadata().readableDescendants)
click.echo("Using {} processes to parse references of {} texts".format(threads, len(texts)))
with Pool(processes=threads) as executor:
for future in executor.imap_unordered(read_levels, [t.id for t in texts]):
del future
click.echo("References parsed")
return CLI | python | def FlaskNautilusManager(resolver, flask_nautilus):
""" Provides a manager for flask scripts to perform specific maintenance operations
:param resolver: Nautilus Extension Instance
:type resolver: NautilusCtsResolver
:param flask_nautilus: Flask Application
:type flask_nautilus: FlaskNautilus
:return: CLI
:rtype: click.group
Import with
.. code-block:: python
:linenos:
from capitains_nautilus.manager import FlaskNautilusManager
manager = FlaskNautilusManager(resolver, flask_nautilus)
if __name__ == "__main__":
manager()
"""
global NAUTILUSRESOLVER
NAUTILUSRESOLVER = resolver
@click.group()
@click.option('--verbose', default=False)
def CLI(verbose):
""" CLI for Flask Nautilus """
click.echo("Command Line Interface of Flask")
resolver.logger.disabled = not verbose
@CLI.command()
def flush_resolver():
""" Flush the resolver cache system """
if resolver.clear() is True:
click.echo("Caching of Resolver Cleared")
@CLI.command()
def flush_http_cache():
""" Flush the http cache
Warning : Might flush other Flask Caching data !
"""
flask_nautilus.flaskcache.clear()
@CLI.command()
def flush_both():
""" Flush all caches
"""
if resolver.cache.clear() is True:
click.echo("Caching of Resolver Cleared")
if flask_nautilus.flaskcache.clear() is True:
click.echo("Caching of HTTP Cleared")
@CLI.command()
def parse():
""" Preprocess the inventory and cache it """
ret = resolver.parse()
click.echo("Preprocessed %s texts" % len(ret.readableDescendants))
@CLI.command()
@click.option('--threads', default=0, type=int)
def process_reffs(threads):
""" Preprocess the inventory and cache it """
if threads < 1:
threads = THREADS
texts = list(resolver.getMetadata().readableDescendants)
click.echo("Using {} processes to parse references of {} texts".format(threads, len(texts)))
with Pool(processes=threads) as executor:
for future in executor.imap_unordered(read_levels, [t.id for t in texts]):
del future
click.echo("References parsed")
return CLI | [
"def",
"FlaskNautilusManager",
"(",
"resolver",
",",
"flask_nautilus",
")",
":",
"global",
"NAUTILUSRESOLVER",
"NAUTILUSRESOLVER",
"=",
"resolver",
"@",
"click",
".",
"group",
"(",
")",
"@",
"click",
".",
"option",
"(",
"'--verbose'",
",",
"default",
"=",
"False",
")",
"def",
"CLI",
"(",
"verbose",
")",
":",
"\"\"\" CLI for Flask Nautilus \"\"\"",
"click",
".",
"echo",
"(",
"\"Command Line Interface of Flask\"",
")",
"resolver",
".",
"logger",
".",
"disabled",
"=",
"not",
"verbose",
"@",
"CLI",
".",
"command",
"(",
")",
"def",
"flush_resolver",
"(",
")",
":",
"\"\"\" Flush the resolver cache system \"\"\"",
"if",
"resolver",
".",
"clear",
"(",
")",
"is",
"True",
":",
"click",
".",
"echo",
"(",
"\"Caching of Resolver Cleared\"",
")",
"@",
"CLI",
".",
"command",
"(",
")",
"def",
"flush_http_cache",
"(",
")",
":",
"\"\"\" Flush the http cache\n\n Warning : Might flush other Flask Caching data !\n \"\"\"",
"flask_nautilus",
".",
"flaskcache",
".",
"clear",
"(",
")",
"@",
"CLI",
".",
"command",
"(",
")",
"def",
"flush_both",
"(",
")",
":",
"\"\"\" Flush all caches\n\n \"\"\"",
"if",
"resolver",
".",
"cache",
".",
"clear",
"(",
")",
"is",
"True",
":",
"click",
".",
"echo",
"(",
"\"Caching of Resolver Cleared\"",
")",
"if",
"flask_nautilus",
".",
"flaskcache",
".",
"clear",
"(",
")",
"is",
"True",
":",
"click",
".",
"echo",
"(",
"\"Caching of HTTP Cleared\"",
")",
"@",
"CLI",
".",
"command",
"(",
")",
"def",
"parse",
"(",
")",
":",
"\"\"\" Preprocess the inventory and cache it \"\"\"",
"ret",
"=",
"resolver",
".",
"parse",
"(",
")",
"click",
".",
"echo",
"(",
"\"Preprocessed %s texts\"",
"%",
"len",
"(",
"ret",
".",
"readableDescendants",
")",
")",
"@",
"CLI",
".",
"command",
"(",
")",
"@",
"click",
".",
"option",
"(",
"'--threads'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
")",
"def",
"process_reffs",
"(",
"threads",
")",
":",
"\"\"\" Preprocess the inventory and cache it \"\"\"",
"if",
"threads",
"<",
"1",
":",
"threads",
"=",
"THREADS",
"texts",
"=",
"list",
"(",
"resolver",
".",
"getMetadata",
"(",
")",
".",
"readableDescendants",
")",
"click",
".",
"echo",
"(",
"\"Using {} processes to parse references of {} texts\"",
".",
"format",
"(",
"threads",
",",
"len",
"(",
"texts",
")",
")",
")",
"with",
"Pool",
"(",
"processes",
"=",
"threads",
")",
"as",
"executor",
":",
"for",
"future",
"in",
"executor",
".",
"imap_unordered",
"(",
"read_levels",
",",
"[",
"t",
".",
"id",
"for",
"t",
"in",
"texts",
"]",
")",
":",
"del",
"future",
"click",
".",
"echo",
"(",
"\"References parsed\"",
")",
"return",
"CLI"
] | Provides a manager for flask scripts to perform specific maintenance operations
:param resolver: Nautilus Extension Instance
:type resolver: NautilusCtsResolver
:param flask_nautilus: Flask Application
:type flask_nautilus: FlaskNautilus
:return: CLI
:rtype: click.group
Import with
.. code-block:: python
:linenos:
from capitains_nautilus.manager import FlaskNautilusManager
manager = FlaskNautilusManager(resolver, flask_nautilus)
if __name__ == "__main__":
manager() | [
"Provides",
"a",
"manager",
"for",
"flask",
"scripts",
"to",
"perform",
"specific",
"maintenance",
"operations"
] | train | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/manager.py#L27-L102 |
chrisjsewell/jsonextended | jsonextended/edict.py | is_iter_non_string | def is_iter_non_string(obj):
"""test if object is a list or tuple"""
if isinstance(obj, list) or isinstance(obj, tuple):
return True
return False | python | def is_iter_non_string(obj):
"""test if object is a list or tuple"""
if isinstance(obj, list) or isinstance(obj, tuple):
return True
return False | [
"def",
"is_iter_non_string",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
"or",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
":",
"return",
"True",
"return",
"False"
] | test if object is a list or tuple | [
"test",
"if",
"object",
"is",
"a",
"list",
"or",
"tuple"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L45-L49 |
chrisjsewell/jsonextended | jsonextended/edict.py | is_dict_like | def is_dict_like(obj, attr=('keys', 'items')):
"""test if object is dict like"""
for a in attr:
if not hasattr(obj, a):
return False
return True | python | def is_dict_like(obj, attr=('keys', 'items')):
"""test if object is dict like"""
for a in attr:
if not hasattr(obj, a):
return False
return True | [
"def",
"is_dict_like",
"(",
"obj",
",",
"attr",
"=",
"(",
"'keys'",
",",
"'items'",
")",
")",
":",
"for",
"a",
"in",
"attr",
":",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"a",
")",
":",
"return",
"False",
"return",
"True"
] | test if object is dict like | [
"test",
"if",
"object",
"is",
"dict",
"like"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L58-L63 |
chrisjsewell/jsonextended | jsonextended/edict.py | is_list_of_dict_like | def is_list_of_dict_like(obj, attr=('keys', 'items')):
"""test if object is a list only containing dict like items """
try:
if len(obj) == 0:
return False
return all([is_dict_like(i, attr) for i in obj])
except Exception:
return False | python | def is_list_of_dict_like(obj, attr=('keys', 'items')):
"""test if object is a list only containing dict like items """
try:
if len(obj) == 0:
return False
return all([is_dict_like(i, attr) for i in obj])
except Exception:
return False | [
"def",
"is_list_of_dict_like",
"(",
"obj",
",",
"attr",
"=",
"(",
"'keys'",
",",
"'items'",
")",
")",
":",
"try",
":",
"if",
"len",
"(",
"obj",
")",
"==",
"0",
":",
"return",
"False",
"return",
"all",
"(",
"[",
"is_dict_like",
"(",
"i",
",",
"attr",
")",
"for",
"i",
"in",
"obj",
"]",
")",
"except",
"Exception",
":",
"return",
"False"
] | test if object is a list only containing dict like items | [
"test",
"if",
"object",
"is",
"a",
"list",
"only",
"containing",
"dict",
"like",
"items"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L66-L73 |
chrisjsewell/jsonextended | jsonextended/edict.py | is_path_like | def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')):
"""test if object is pathlib.Path like"""
for a in attr:
if not hasattr(obj, a):
return False
return True | python | def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')):
"""test if object is pathlib.Path like"""
for a in attr:
if not hasattr(obj, a):
return False
return True | [
"def",
"is_path_like",
"(",
"obj",
",",
"attr",
"=",
"(",
"'name'",
",",
"'is_file'",
",",
"'is_dir'",
",",
"'iterdir'",
")",
")",
":",
"for",
"a",
"in",
"attr",
":",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"a",
")",
":",
"return",
"False",
"return",
"True"
] | test if object is pathlib.Path like | [
"test",
"if",
"object",
"is",
"pathlib",
".",
"Path",
"like"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L76-L81 |
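Taken together, the four predicates above gate most of edict's traversal logic. A quick doctest-style sketch, assuming they are importable from jsonextended.edict:

>>> from jsonextended.edict import (is_iter_non_string, is_dict_like,
...     is_list_of_dict_like, is_path_like)
>>> is_iter_non_string((1, 2)), is_iter_non_string('abc')
(True, False)
>>> is_dict_like({'a': 1})
True
>>> is_list_of_dict_like([{'a': 1}, {'b': 2}]), is_list_of_dict_like([])
(True, False)
>>> import pathlib
>>> is_path_like(pathlib.Path('.'))
True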
chrisjsewell/jsonextended | jsonextended/edict.py | convert_type | def convert_type(d, intype, outtype, convert_list=True, in_place=True):
""" convert all values of one type to another
Parameters
----------
d : dict
intype : type_class
outtype : type_class
convert_list : bool
whether to convert instances inside lists and tuples
in_place : bool
if True, applies conversions to original dict, else returns copy
Examples
--------
>>> from pprint import pprint
>>> d = {'a':'1','b':'2'}
>>> pprint(convert_type(d,str,float))
{'a': 1.0, 'b': 2.0}
>>> d = {'a':['1','2']}
>>> pprint(convert_type(d,str,float))
{'a': [1.0, 2.0]}
>>> d = {'a':[('1','2'),[3,4]]}
>>> pprint(convert_type(d,str,float))
{'a': [(1.0, 2.0), [3, 4]]}
"""
if not in_place:
out_dict = copy.deepcopy(d)
else:
out_dict = d
def _convert(obj):
if isinstance(obj, intype):
try:
obj = outtype(obj)
except Exception:
pass
elif isinstance(obj, list) and convert_list:
obj = _traverse_iter(obj)
elif isinstance(obj, tuple) and convert_list:
obj = tuple(_traverse_iter(obj))
return obj
def _traverse_dict(dic):
for key in dic.keys():
if is_dict_like(dic[key]):
_traverse_dict(dic[key])
else:
dic[key] = _convert(dic[key])
def _traverse_iter(iter):
new_iter = []
for key in iter:
if is_dict_like(key):
_traverse_dict(key)
new_iter.append(key)
else:
new_iter.append(_convert(key))
return new_iter
if is_dict_like(out_dict):
_traverse_dict(out_dict)
else:
_convert(out_dict)
return out_dict | python | def convert_type(d, intype, outtype, convert_list=True, in_place=True):
""" convert all values of one type to another
Parameters
----------
d : dict
intype : type_class
outtype : type_class
convert_list : bool
whether to convert instances inside lists and tuples
in_place : bool
if True, applies conversions to original dict, else returns copy
Examples
--------
>>> from pprint import pprint
>>> d = {'a':'1','b':'2'}
>>> pprint(convert_type(d,str,float))
{'a': 1.0, 'b': 2.0}
>>> d = {'a':['1','2']}
>>> pprint(convert_type(d,str,float))
{'a': [1.0, 2.0]}
>>> d = {'a':[('1','2'),[3,4]]}
>>> pprint(convert_type(d,str,float))
{'a': [(1.0, 2.0), [3, 4]]}
"""
if not in_place:
out_dict = copy.deepcopy(d)
else:
out_dict = d
def _convert(obj):
if isinstance(obj, intype):
try:
obj = outtype(obj)
except Exception:
pass
elif isinstance(obj, list) and convert_list:
obj = _traverse_iter(obj)
elif isinstance(obj, tuple) and convert_list:
obj = tuple(_traverse_iter(obj))
return obj
def _traverse_dict(dic):
for key in dic.keys():
if is_dict_like(dic[key]):
_traverse_dict(dic[key])
else:
dic[key] = _convert(dic[key])
def _traverse_iter(iter):
new_iter = []
for key in iter:
if is_dict_like(key):
_traverse_dict(key)
new_iter.append(key)
else:
new_iter.append(_convert(key))
return new_iter
if is_dict_like(out_dict):
_traverse_dict(out_dict)
else:
_convert(out_dict)
return out_dict | [
"def",
"convert_type",
"(",
"d",
",",
"intype",
",",
"outtype",
",",
"convert_list",
"=",
"True",
",",
"in_place",
"=",
"True",
")",
":",
"if",
"not",
"in_place",
":",
"out_dict",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"else",
":",
"out_dict",
"=",
"d",
"def",
"_convert",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"intype",
")",
":",
"try",
":",
"obj",
"=",
"outtype",
"(",
"obj",
")",
"except",
"Exception",
":",
"pass",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
"and",
"convert_list",
":",
"obj",
"=",
"_traverse_iter",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
"and",
"convert_list",
":",
"obj",
"=",
"tuple",
"(",
"_traverse_iter",
"(",
"obj",
")",
")",
"return",
"obj",
"def",
"_traverse_dict",
"(",
"dic",
")",
":",
"for",
"key",
"in",
"dic",
".",
"keys",
"(",
")",
":",
"if",
"is_dict_like",
"(",
"dic",
"[",
"key",
"]",
")",
":",
"_traverse_dict",
"(",
"dic",
"[",
"key",
"]",
")",
"else",
":",
"dic",
"[",
"key",
"]",
"=",
"_convert",
"(",
"dic",
"[",
"key",
"]",
")",
"def",
"_traverse_iter",
"(",
"iter",
")",
":",
"new_iter",
"=",
"[",
"]",
"for",
"key",
"in",
"iter",
":",
"if",
"is_dict_like",
"(",
"key",
")",
":",
"_traverse_dict",
"(",
"key",
")",
"new_iter",
".",
"append",
"(",
"key",
")",
"else",
":",
"new_iter",
".",
"append",
"(",
"_convert",
"(",
"key",
")",
")",
"return",
"new_iter",
"if",
"is_dict_like",
"(",
"out_dict",
")",
":",
"_traverse_dict",
"(",
"out_dict",
")",
"else",
":",
"_convert",
"(",
"out_dict",
")",
"return",
"out_dict"
] | convert all values of one type to another
Parameters
----------
d : dict
intype : type_class
outtype : type_class
convert_list : bool
whether to convert instances inside lists and tuples
in_place : bool
if True, applies conversions to original dict, else returns copy
Examples
--------
>>> from pprint import pprint
>>> d = {'a':'1','b':'2'}
>>> pprint(convert_type(d,str,float))
{'a': 1.0, 'b': 2.0}
>>> d = {'a':['1','2']}
>>> pprint(convert_type(d,str,float))
{'a': [1.0, 2.0]}
>>> d = {'a':[('1','2'),[3,4]]}
>>> pprint(convert_type(d,str,float))
{'a': [(1.0, 2.0), [3, 4]]} | [
"convert",
"all",
"values",
"of",
"one",
"type",
"to",
"another"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L84-L156 |
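The docstring examples above mutate d in place (the default); with in_place=False the input survives untouched. A short sketch, assuming the same import path:

>>> from jsonextended.edict import convert_type
>>> d = {'a': '1'}
>>> convert_type(d, str, int, in_place=False)
{'a': 1}
>>> d
{'a': '1'}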
chrisjsewell/jsonextended | jsonextended/edict.py | pprint | def pprint(d, lvlindent=2, initindent=0, delim=':',
max_width=80, depth=3, no_values=False,
align_vals=True, print_func=None,
keycolor=None, compress_lists=None,
round_floats=None, _dlist=False):
""" print a nested dict in readable format
(- denotes an element in a list of dictionaries)
Parameters
----------
d : object
lvlindent : int
additional indentation spaces for each level
initindent : int
initial indentation spaces
delim : str
delimiter between key and value nodes
max_width : int
max character width of each line
depth : int or None
maximum levels to display
no_values : bool
whether to print values
align_vals : bool
whether to align values for each level
print_func : callable or None
function to print strings (print if None)
keycolor : None or str
if str, color keys by this color,
allowed: red, green, yellow, blue, magenta, cyan, white
compress_lists : int
compress lists/tuples longer than this,
e.g. with a value of 2, [1,1,1,1,1,1] -> [1, 1, ...(x4)]
round_floats : int
significant figures for floats
Examples
--------
>>> d = {'a':{'b':{'c':'Å','de':[4,5,[7,'x'],9]}}}
>>> pprint(d,depth=None)
a:
b:
c: Å
de: [4, 5, [7, x], 9]
>>> pprint(d,max_width=17,depth=None)
a:
b:
c: Å
de: [4, 5,
[7, x],
9]
>>> pprint(d,no_values=True,depth=None)
a:
b:
c:
de:
>>> pprint(d,depth=2)
a:
b: {...}
>>> pprint({'a':[1,1,1,1,1,1,1,1]},
... compress_lists=3)
a: [1, 1, 1, ...(x5)]
"""
if print_func is None:
print_func = _default_print_func
if not is_dict_like(d):
d = {'': d}
# print_func('{}'.format(d))
# return
extra = lvlindent if _dlist else 0
def decode_to_str(obj):
val_string = obj
if isinstance(obj, list):
if compress_lists is not None:
if len(obj) > compress_lists:
diff = str(len(obj) - compress_lists)
obj = obj[:compress_lists] + ['...(x{})'.format(diff)]
val_string = '[' + ', '.join([decode_to_str(o) for o in obj]) + ']'
elif isinstance(obj, tuple):
if compress_lists is not None:
if len(obj) > compress_lists:
diff = str(len(obj) - compress_lists)
obj = list(
obj[:compress_lists]) + ['...(x{})'.format(diff)]
val_string = '(' + ', '.join([decode_to_str(o) for o in obj]) + ')'
elif isinstance(obj, float) and round_floats is not None:
round_str = '{0:.' + str(round_floats - 1) + 'E}'
val_string = str(float(round_str.format(obj)))
else:
try:
val_string = encode(obj, outtype='str')
except (TypeError, UnicodeError):
pass
# convert unicode to str (so no u'' prefix in python 2)
try:
return str(val_string)
except Exception:
return unicode(val_string)
if align_vals:
key_width = 0
for key, val in d.items():
if not is_dict_like(val):
key_str = decode_to_str(key)
key_width = max(key_width, len(key_str))
max_depth = depth
for i, key in enumerate(natural_sort(d.keys())):
value = d[key]
if _dlist and i == 0:
key_str = '- ' + decode_to_str(key)
elif _dlist:
key_str = ' ' + decode_to_str(key)
else:
key_str = decode_to_str(key)
if keycolor is not None:
key_str = colortxt(key_str, keycolor)
if align_vals:
key_str = '{0: <{1}} '.format(
key_str + delim, key_width + len(delim))
else:
key_str = '{0}{1} '.format(key_str, delim)
depth = max_depth if max_depth is not None else 2
if keycolor is not None:
key_length = len(_strip_ansi(key_str))
else:
key_length = len(key_str)
key_line = ' ' * initindent + key_str
new_line = ' ' * initindent + ' ' * key_length
if depth <= 0:
continue
if is_dict_like(value):
if depth <= 1:
print_func(' ' * initindent + key_str + '{...}')
else:
print_func(' ' * initindent + key_str)
pprint(value, lvlindent, initindent + lvlindent + extra, delim,
max_width,
depth=max_depth - 1 if max_depth is not None else None,
no_values=no_values, align_vals=align_vals,
print_func=print_func, keycolor=keycolor,
compress_lists=compress_lists,
round_floats=round_floats)
continue
if isinstance(value, list):
if all([is_dict_like(o) for o in value]) and value:
if depth <= 1:
print_func(key_line + '[...]')
continue
print_func(key_line)
for obj in value:
pprint(
obj, lvlindent, initindent + lvlindent + extra, delim,
max_width,
depth=max_depth - 1 if max_depth is not None else None,
no_values=no_values, align_vals=align_vals,
print_func=print_func, keycolor=keycolor,
compress_lists=compress_lists,
round_floats=round_floats, _dlist=True)
continue
val_string_all = decode_to_str(value) if not no_values else ''
for i, val_string in enumerate(val_string_all.split('\n')):
if max_width is not None:
if len(key_line) + 1 > max_width:
raise Exception(
'cannot fit keys and data within set max_width')
# divide into chunks and join by same indentation
val_indent = ' ' * (initindent + key_length)
n = max_width - len(val_indent)
val_string = val_indent.join(
[s + '\n' for s in textwrap.wrap(val_string, n)])[:-1]
if i == 0:
print_func(key_line + val_string)
else:
print_func(new_line + val_string) | python | def pprint(d, lvlindent=2, initindent=0, delim=':',
max_width=80, depth=3, no_values=False,
align_vals=True, print_func=None,
keycolor=None, compress_lists=None,
round_floats=None, _dlist=False):
""" print a nested dict in readable format
(- denotes an element in a list of dictionaries)
Parameters
----------
d : object
lvlindent : int
additional indentation spaces for each level
initindent : int
initial indentation spaces
delim : str
delimiter between key and value nodes
max_width : int
max character width of each line
depth : int or None
maximum levels to display
no_values : bool
whether to print values
align_vals : bool
whether to align values for each level
print_func : callable or None
function to print strings (print if None)
keycolor : None or str
if str, color keys by this color,
allowed: red, green, yellow, blue, magenta, cyan, white
compress_lists : int
compress lists/tuples longer than this,
e.g. with a value of 2, [1,1,1,1,1,1] -> [1, 1, ...(x4)]
round_floats : int
significant figures for floats
Examples
--------
>>> d = {'a':{'b':{'c':'Å','de':[4,5,[7,'x'],9]}}}
>>> pprint(d,depth=None)
a:
b:
c: Å
de: [4, 5, [7, x], 9]
>>> pprint(d,max_width=17,depth=None)
a:
b:
c: Å
de: [4, 5,
[7, x],
9]
>>> pprint(d,no_values=True,depth=None)
a:
b:
c:
de:
>>> pprint(d,depth=2)
a:
b: {...}
>>> pprint({'a':[1,1,1,1,1,1,1,1]},
... compress_lists=3)
a: [1, 1, 1, ...(x5)]
"""
if print_func is None:
print_func = _default_print_func
if not is_dict_like(d):
d = {'': d}
# print_func('{}'.format(d))
# return
extra = lvlindent if _dlist else 0
def decode_to_str(obj):
val_string = obj
if isinstance(obj, list):
if compress_lists is not None:
if len(obj) > compress_lists:
diff = str(len(obj) - compress_lists)
obj = obj[:compress_lists] + ['...(x{})'.format(diff)]
val_string = '[' + ', '.join([decode_to_str(o) for o in obj]) + ']'
elif isinstance(obj, tuple):
if compress_lists is not None:
if len(obj) > compress_lists:
diff = str(len(obj) - compress_lists)
obj = list(
obj[:compress_lists]) + ['...(x{})'.format(diff)]
val_string = '(' + ', '.join([decode_to_str(o) for o in obj]) + ')'
elif isinstance(obj, float) and round_floats is not None:
round_str = '{0:.' + str(round_floats - 1) + 'E}'
val_string = str(float(round_str.format(obj)))
else:
try:
val_string = encode(obj, outtype='str')
except (TypeError, UnicodeError):
pass
# convert unicode to str (so no u'' prefix in python 2)
try:
return str(val_string)
except Exception:
return unicode(val_string)
if align_vals:
key_width = 0
for key, val in d.items():
if not is_dict_like(val):
key_str = decode_to_str(key)
key_width = max(key_width, len(key_str))
max_depth = depth
for i, key in enumerate(natural_sort(d.keys())):
value = d[key]
if _dlist and i == 0:
key_str = '- ' + decode_to_str(key)
elif _dlist:
key_str = ' ' + decode_to_str(key)
else:
key_str = decode_to_str(key)
if keycolor is not None:
key_str = colortxt(key_str, keycolor)
if align_vals:
key_str = '{0: <{1}} '.format(
key_str + delim, key_width + len(delim))
else:
key_str = '{0}{1} '.format(key_str, delim)
depth = max_depth if max_depth is not None else 2
if keycolor is not None:
key_length = len(_strip_ansi(key_str))
else:
key_length = len(key_str)
key_line = ' ' * initindent + key_str
new_line = ' ' * initindent + ' ' * key_length
if depth <= 0:
continue
if is_dict_like(value):
if depth <= 1:
print_func(' ' * initindent + key_str + '{...}')
else:
print_func(' ' * initindent + key_str)
pprint(value, lvlindent, initindent + lvlindent + extra, delim,
max_width,
depth=max_depth - 1 if max_depth is not None else None,
no_values=no_values, align_vals=align_vals,
print_func=print_func, keycolor=keycolor,
compress_lists=compress_lists,
round_floats=round_floats)
continue
if isinstance(value, list):
if all([is_dict_like(o) for o in value]) and value:
if depth <= 1:
print_func(key_line + '[...]')
continue
print_func(key_line)
for obj in value:
pprint(
obj, lvlindent, initindent + lvlindent + extra, delim,
max_width,
depth=max_depth - 1 if max_depth is not None else None,
no_values=no_values, align_vals=align_vals,
print_func=print_func, keycolor=keycolor,
compress_lists=compress_lists,
round_floats=round_floats, _dlist=True)
continue
val_string_all = decode_to_str(value) if not no_values else ''
for i, val_string in enumerate(val_string_all.split('\n')):
if max_width is not None:
if len(key_line) + 1 > max_width:
raise Exception(
'cannot fit keys and data within set max_width')
# divide into chunks and join by same indentation
val_indent = ' ' * (initindent + key_length)
n = max_width - len(val_indent)
val_string = val_indent.join(
[s + '\n' for s in textwrap.wrap(val_string, n)])[:-1]
if i == 0:
print_func(key_line + val_string)
else:
print_func(new_line + val_string) | [
"def",
"pprint",
"(",
"d",
",",
"lvlindent",
"=",
"2",
",",
"initindent",
"=",
"0",
",",
"delim",
"=",
"':'",
",",
"max_width",
"=",
"80",
",",
"depth",
"=",
"3",
",",
"no_values",
"=",
"False",
",",
"align_vals",
"=",
"True",
",",
"print_func",
"=",
"None",
",",
"keycolor",
"=",
"None",
",",
"compress_lists",
"=",
"None",
",",
"round_floats",
"=",
"None",
",",
"_dlist",
"=",
"False",
")",
":",
"if",
"print_func",
"is",
"None",
":",
"print_func",
"=",
"_default_print_func",
"if",
"not",
"is_dict_like",
"(",
"d",
")",
":",
"d",
"=",
"{",
"''",
":",
"d",
"}",
"# print_func('{}'.format(d))",
"# return",
"extra",
"=",
"lvlindent",
"if",
"_dlist",
"else",
"0",
"def",
"decode_to_str",
"(",
"obj",
")",
":",
"val_string",
"=",
"obj",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"if",
"compress_lists",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"obj",
")",
">",
"compress_lists",
":",
"diff",
"=",
"str",
"(",
"len",
"(",
"obj",
")",
"-",
"compress_lists",
")",
"obj",
"=",
"obj",
"[",
":",
"compress_lists",
"]",
"+",
"[",
"'...(x{})'",
".",
"format",
"(",
"diff",
")",
"]",
"val_string",
"=",
"'['",
"+",
"', '",
".",
"join",
"(",
"[",
"decode_to_str",
"(",
"o",
")",
"for",
"o",
"in",
"obj",
"]",
")",
"+",
"']'",
"elif",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
":",
"if",
"compress_lists",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"obj",
")",
">",
"compress_lists",
":",
"diff",
"=",
"str",
"(",
"len",
"(",
"obj",
")",
"-",
"compress_lists",
")",
"obj",
"=",
"list",
"(",
"obj",
"[",
":",
"compress_lists",
"]",
")",
"+",
"[",
"'...(x{})'",
".",
"format",
"(",
"diff",
")",
"]",
"val_string",
"=",
"'('",
"+",
"', '",
".",
"join",
"(",
"[",
"decode_to_str",
"(",
"o",
")",
"for",
"o",
"in",
"obj",
"]",
")",
"+",
"')'",
"elif",
"isinstance",
"(",
"obj",
",",
"float",
")",
"and",
"round_floats",
"is",
"not",
"None",
":",
"round_str",
"=",
"'{0:.'",
"+",
"str",
"(",
"round_floats",
"-",
"1",
")",
"+",
"'E}'",
"val_string",
"=",
"str",
"(",
"float",
"(",
"round_str",
".",
"format",
"(",
"obj",
")",
")",
")",
"else",
":",
"try",
":",
"val_string",
"=",
"encode",
"(",
"obj",
",",
"outtype",
"=",
"'str'",
")",
"except",
"(",
"TypeError",
",",
"UnicodeError",
")",
":",
"pass",
"# convert unicode to str (so no u'' prefix in python 2)",
"try",
":",
"return",
"str",
"(",
"val_string",
")",
"except",
"Exception",
":",
"return",
"unicode",
"(",
"val_string",
")",
"if",
"align_vals",
":",
"key_width",
"=",
"0",
"for",
"key",
",",
"val",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"not",
"is_dict_like",
"(",
"val",
")",
":",
"key_str",
"=",
"decode_to_str",
"(",
"key",
")",
"key_width",
"=",
"max",
"(",
"key_width",
",",
"len",
"(",
"key_str",
")",
")",
"max_depth",
"=",
"depth",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"natural_sort",
"(",
"d",
".",
"keys",
"(",
")",
")",
")",
":",
"value",
"=",
"d",
"[",
"key",
"]",
"if",
"_dlist",
"and",
"i",
"==",
"0",
":",
"key_str",
"=",
"'- '",
"+",
"decode_to_str",
"(",
"key",
")",
"elif",
"_dlist",
":",
"key_str",
"=",
"' '",
"+",
"decode_to_str",
"(",
"key",
")",
"else",
":",
"key_str",
"=",
"decode_to_str",
"(",
"key",
")",
"if",
"keycolor",
"is",
"not",
"None",
":",
"key_str",
"=",
"colortxt",
"(",
"key_str",
",",
"keycolor",
")",
"if",
"align_vals",
":",
"key_str",
"=",
"'{0: <{1}} '",
".",
"format",
"(",
"key_str",
"+",
"delim",
",",
"key_width",
"+",
"len",
"(",
"delim",
")",
")",
"else",
":",
"key_str",
"=",
"'{0}{1} '",
".",
"format",
"(",
"key_str",
",",
"delim",
")",
"depth",
"=",
"max_depth",
"if",
"max_depth",
"is",
"not",
"None",
"else",
"2",
"if",
"keycolor",
"is",
"not",
"None",
":",
"key_length",
"=",
"len",
"(",
"_strip_ansi",
"(",
"key_str",
")",
")",
"else",
":",
"key_length",
"=",
"len",
"(",
"key_str",
")",
"key_line",
"=",
"' '",
"*",
"initindent",
"+",
"key_str",
"new_line",
"=",
"' '",
"*",
"initindent",
"+",
"' '",
"*",
"key_length",
"if",
"depth",
"<=",
"0",
":",
"continue",
"if",
"is_dict_like",
"(",
"value",
")",
":",
"if",
"depth",
"<=",
"1",
":",
"print_func",
"(",
"' '",
"*",
"initindent",
"+",
"key_str",
"+",
"'{...}'",
")",
"else",
":",
"print_func",
"(",
"' '",
"*",
"initindent",
"+",
"key_str",
")",
"pprint",
"(",
"value",
",",
"lvlindent",
",",
"initindent",
"+",
"lvlindent",
"+",
"extra",
",",
"delim",
",",
"max_width",
",",
"depth",
"=",
"max_depth",
"-",
"1",
"if",
"max_depth",
"is",
"not",
"None",
"else",
"None",
",",
"no_values",
"=",
"no_values",
",",
"align_vals",
"=",
"align_vals",
",",
"print_func",
"=",
"print_func",
",",
"keycolor",
"=",
"keycolor",
",",
"compress_lists",
"=",
"compress_lists",
",",
"round_floats",
"=",
"round_floats",
")",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"all",
"(",
"[",
"is_dict_like",
"(",
"o",
")",
"for",
"o",
"in",
"value",
"]",
")",
"and",
"value",
":",
"if",
"depth",
"<=",
"1",
":",
"print_func",
"(",
"key_line",
"+",
"'[...]'",
")",
"continue",
"print_func",
"(",
"key_line",
")",
"for",
"obj",
"in",
"value",
":",
"pprint",
"(",
"obj",
",",
"lvlindent",
",",
"initindent",
"+",
"lvlindent",
"+",
"extra",
",",
"delim",
",",
"max_width",
",",
"depth",
"=",
"max_depth",
"-",
"1",
"if",
"max_depth",
"is",
"not",
"None",
"else",
"None",
",",
"no_values",
"=",
"no_values",
",",
"align_vals",
"=",
"align_vals",
",",
"print_func",
"=",
"print_func",
",",
"keycolor",
"=",
"keycolor",
",",
"compress_lists",
"=",
"compress_lists",
",",
"round_floats",
"=",
"round_floats",
",",
"_dlist",
"=",
"True",
")",
"continue",
"val_string_all",
"=",
"decode_to_str",
"(",
"value",
")",
"if",
"not",
"no_values",
"else",
"''",
"for",
"i",
",",
"val_string",
"in",
"enumerate",
"(",
"val_string_all",
".",
"split",
"(",
"'\\n'",
")",
")",
":",
"if",
"max_width",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"key_line",
")",
"+",
"1",
">",
"max_width",
":",
"raise",
"Exception",
"(",
"'cannot fit keys and data within set max_width'",
")",
"# divide into chuncks and join by same indentation",
"val_indent",
"=",
"' '",
"*",
"(",
"initindent",
"+",
"key_length",
")",
"n",
"=",
"max_width",
"-",
"len",
"(",
"val_indent",
")",
"val_string",
"=",
"val_indent",
".",
"join",
"(",
"[",
"s",
"+",
"'\\n'",
"for",
"s",
"in",
"textwrap",
".",
"wrap",
"(",
"val_string",
",",
"n",
")",
"]",
")",
"[",
":",
"-",
"1",
"]",
"if",
"i",
"==",
"0",
":",
"print_func",
"(",
"key_line",
"+",
"val_string",
")",
"else",
":",
"print_func",
"(",
"new_line",
"+",
"val_string",
")"
] | print a nested dict in readable format
(- denotes an element in a list of dictionaries)
Parameters
----------
d : object
lvlindent : int
additional indentation spaces for each level
initindent : int
initial indentation spaces
delim : str
delimiter between key and value nodes
max_width : int
max character width of each line
depth : int or None
maximum levels to display
no_values : bool
whether to print values
align_vals : bool
whether to align values for each level
print_func : callable or None
function to print strings (print if None)
keycolor : None or str
if str, color keys by this color,
allowed: red, green, yellow, blue, magenta, cyan, white
compress_lists : int
compress lists/tuples longer than this,
e.g. with a value of 2, [1,1,1,1,1,1] -> [1, 1, ...(x4)]
round_floats : int
significant figures for floats
Examples
--------
>>> d = {'a':{'b':{'c':'Å','de':[4,5,[7,'x'],9]}}}
>>> pprint(d,depth=None)
a:
b:
c: Å
de: [4, 5, [7, x], 9]
>>> pprint(d,max_width=17,depth=None)
a:
b:
c: Å
de: [4, 5,
[7, x],
9]
>>> pprint(d,no_values=True,depth=None)
a:
b:
c:
de:
>>> pprint(d,depth=2)
a:
b: {...}
>>> pprint({'a':[1,1,1,1,1,1,1,1]},
... compress_lists=3)
a: [1, 1, 1, ...(x5)] | [
"print",
"a",
"nested",
"dict",
"in",
"readable",
"format",
"(",
"-",
"denotes",
"an",
"element",
"in",
"a",
"list",
"of",
"dictionaries",
")"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L178-L364 |
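One option the docstring examples do not exercise is round_floats, which reduces floats to the given number of significant figures. A minimal sketch, assuming the same import path (aliased to avoid shadowing the stdlib pprint):

>>> from jsonextended.edict import pprint as epprint
>>> epprint({'pi': 3.14159, 'e': 2.71828}, round_floats=3)
e:  2.72
pi: 3.14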
chrisjsewell/jsonextended | jsonextended/edict.py | extract | def extract(d, path=None):
""" extract section of dictionary
Parameters
----------
d : dict
path : list[str]
keys to section
Returns
-------
new_dict : dict
original, without extracted section
extract_dict : dict
extracted section
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"},2:{"b":"B",'c':'C'}}
>>> pprint(extract(d,[2,'b']))
({1: {'a': 'A'}, 2: {'c': 'C'}}, {'b': 'B'})
"""
path = [] if path is None else path
d_new = copy.deepcopy(d)
d_sub = d_new
for key in path[:-1]:
d_sub = d_sub[key]
key = path[-1]
d_extract = {key: d_sub[key]}
d_sub.pop(key)
return d_new, d_extract | python | def extract(d, path=None):
""" extract section of dictionary
Parameters
----------
d : dict
path : list[str]
keys to section
Returns
-------
new_dict : dict
original, without extracted section
extract_dict : dict
extracted section
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"},2:{"b":"B",'c':'C'}}
>>> pprint(extract(d,[2,'b']))
({1: {'a': 'A'}, 2: {'c': 'C'}}, {'b': 'B'})
"""
path = [] if path is None else path
d_new = copy.deepcopy(d)
d_sub = d_new
for key in path[:-1]:
d_sub = d_sub[key]
key = path[-1]
d_extract = {key: d_sub[key]}
d_sub.pop(key)
return d_new, d_extract | [
"def",
"extract",
"(",
"d",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"[",
"]",
"if",
"path",
"is",
"None",
"else",
"path",
"d_new",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"d_sub",
"=",
"d_new",
"for",
"key",
"in",
"path",
"[",
":",
"-",
"1",
"]",
":",
"d_sub",
"=",
"d_sub",
"[",
"key",
"]",
"key",
"=",
"path",
"[",
"-",
"1",
"]",
"d_extract",
"=",
"{",
"key",
":",
"d_sub",
"[",
"key",
"]",
"}",
"d_sub",
".",
"pop",
"(",
"key",
")",
"return",
"d_new",
",",
"d_extract"
] | extract section of dictionary
Parameters
----------
d : dict
path : list[str]
keys to section
Returns
-------
new_dict : dict
original, without extracted section
extract_dict : dict
extracted section
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"},2:{"b":"B",'c':'C'}}
>>> pprint(extract(d,[2,'b']))
({1: {'a': 'A'}, 2: {'c': 'C'}}, {'b': 'B'}) | [
"extract",
"section",
"of",
"dictionary"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L367-L403 |
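Worth noting alongside the docstring example: extract deep-copies its input, so the original dictionary is left untouched. A minimal sketch, same import path assumed:

>>> from jsonextended.edict import extract
>>> d = {1: {'a': 'A'}}
>>> extract(d, [1, 'a'])
({1: {}}, {'a': 'A'})
>>> d
{1: {'a': 'A'}}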
chrisjsewell/jsonextended | jsonextended/edict.py | indexes | def indexes(dic, keys=None):
""" index dictionary by multiple keys
Parameters
----------
dic : dict
keys : list
Examples
--------
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> indexes(d,[1,'a'])
'A'
"""
keys = [] if keys is None else keys
assert hasattr(dic, 'keys')
new = dic.copy()
old_key = None
for key in keys:
if not hasattr(new, 'keys'):
raise KeyError('No indexes after: {}'.format(old_key))
old_key = key
new = new[key]
return new | python | def indexes(dic, keys=None):
""" index dictionary by multiple keys
Parameters
----------
dic : dict
keys : list
Examples
--------
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> indexes(d,[1,'a'])
'A'
"""
keys = [] if keys is None else keys
assert hasattr(dic, 'keys')
new = dic.copy()
old_key = None
for key in keys:
if not hasattr(new, 'keys'):
raise KeyError('No indexes after: {}'.format(old_key))
old_key = key
new = new[key]
return new | [
"def",
"indexes",
"(",
"dic",
",",
"keys",
"=",
"None",
")",
":",
"keys",
"=",
"[",
"]",
"if",
"keys",
"is",
"None",
"else",
"keys",
"assert",
"hasattr",
"(",
"dic",
",",
"'keys'",
")",
"new",
"=",
"dic",
".",
"copy",
"(",
")",
"old_key",
"=",
"None",
"for",
"key",
"in",
"keys",
":",
"if",
"not",
"hasattr",
"(",
"new",
",",
"'keys'",
")",
":",
"raise",
"KeyError",
"(",
"'No indexes after: {}'",
".",
"format",
"(",
"old_key",
")",
")",
"old_key",
"=",
"key",
"new",
"=",
"new",
"[",
"key",
"]",
"return",
"new"
] | index dictionary by multiple keys
Parameters
----------
dic : dict
keys : list
Examples
--------
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> indexes(d,[1,'a'])
'A' | [
"index",
"dictionary",
"by",
"multiple",
"keys"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L406-L432 |
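The KeyError branch above names the last key that did resolve, which helps locate where indexing ran off the end of the tree. A short sketch of that failure mode, same import path assumed:

>>> from jsonextended.edict import indexes
>>> indexes({1: {'a': 'A'}}, [1, 'a', 'x'])
Traceback (most recent call last):
...
KeyError: 'No indexes after: a'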
chrisjsewell/jsonextended | jsonextended/edict.py | flatten | def flatten(d, key_as_tuple=True, sep='.', list_of_dicts=None, all_iters=None):
""" get nested dict as flat {key:val,...},
where key is tuple/string of all nested keys
Parameters
----------
d : object
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
sep : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
all_iters: str or None
if not None, flatten all lists and tuples using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"}, 2:{"b":"B"}}
>>> pprint(flatten(d))
{(1, 'a'): 'A', (2, 'b'): 'B'}
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> pprint(flatten(d,key_as_tuple=False))
{'1.a': 'A', '2.b': 'B'}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,list_of_dicts='__list__'))
{('__list__0', 'a'): 1, ('__list__1', 'b'): [1, 2]}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,all_iters='__iter__'))
{('__iter__0', 'a'): 1,
('__iter__1', 'b', '__iter__0'): 1,
('__iter__1', 'b', '__iter__1'): 2}
"""
def expand(key, value):
if is_dict_like(value):
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_iter_non_string(value) and all_iters is not None:
value = {'{0}{1}'.format(all_iters, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_list_of_dict_like(value) and list_of_dicts is not None:
value = {'{0}{1}'.format(list_of_dicts, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
else:
return [(key, value)]
if is_iter_non_string(d) and all_iters is not None:
d = {'{0}{1}'.format(all_iters, i): v for i, v in enumerate(d)}
elif is_list_of_dict_like(d) and list_of_dicts is not None:
d = {'{0}{1}'.format(list_of_dicts, i): v for i, v in enumerate(d)}
elif not is_dict_like(d):
raise TypeError('d is not dict like: {}'.format(d))
if key_as_tuple:
items = [item for k, v in d.items() for item in expand((k,), v)]
else:
items = [item for k, v in d.items() for item in expand(k, v)]
return dict(items) | python | def flatten(d, key_as_tuple=True, sep='.', list_of_dicts=None, all_iters=None):
""" get nested dict as flat {key:val,...},
where key is tuple/string of all nested keys
Parameters
----------
d : object
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
sep : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
all_iters: str or None
if not None, flatten all lists and tuples using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"}, 2:{"b":"B"}}
>>> pprint(flatten(d))
{(1, 'a'): 'A', (2, 'b'): 'B'}
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> pprint(flatten(d,key_as_tuple=False))
{'1.a': 'A', '2.b': 'B'}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,list_of_dicts='__list__'))
{('__list__0', 'a'): 1, ('__list__1', 'b'): [1, 2]}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,all_iters='__iter__'))
{('__iter__0', 'a'): 1,
('__iter__1', 'b', '__iter__0'): 1,
('__iter__1', 'b', '__iter__1'): 2}
"""
def expand(key, value):
if is_dict_like(value):
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_iter_non_string(value) and all_iters is not None:
value = {'{0}{1}'.format(all_iters, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
elif is_list_of_dict_like(value) and list_of_dicts is not None:
value = {'{0}{1}'.format(list_of_dicts, i): v
for i, v in enumerate(value)}
flatten_dict = flatten(value, key_as_tuple, sep,
list_of_dicts, all_iters)
if key_as_tuple:
return [(key + k, v) for k, v in flatten_dict.items()]
else:
return [(str(key) + sep + k, v)
for k, v in flatten_dict.items()]
else:
return [(key, value)]
if is_iter_non_string(d) and all_iters is not None:
d = {'{0}{1}'.format(all_iters, i): v for i, v in enumerate(d)}
elif is_list_of_dict_like(d) and list_of_dicts is not None:
d = {'{0}{1}'.format(list_of_dicts, i): v for i, v in enumerate(d)}
elif not is_dict_like(d):
raise TypeError('d is not dict like: {}'.format(d))
if key_as_tuple:
items = [item for k, v in d.items() for item in expand((k,), v)]
else:
items = [item for k, v in d.items() for item in expand(k, v)]
return dict(items) | [
"def",
"flatten",
"(",
"d",
",",
"key_as_tuple",
"=",
"True",
",",
"sep",
"=",
"'.'",
",",
"list_of_dicts",
"=",
"None",
",",
"all_iters",
"=",
"None",
")",
":",
"def",
"expand",
"(",
"key",
",",
"value",
")",
":",
"if",
"is_dict_like",
"(",
"value",
")",
":",
"flatten_dict",
"=",
"flatten",
"(",
"value",
",",
"key_as_tuple",
",",
"sep",
",",
"list_of_dicts",
",",
"all_iters",
")",
"if",
"key_as_tuple",
":",
"return",
"[",
"(",
"key",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"else",
":",
"return",
"[",
"(",
"str",
"(",
"key",
")",
"+",
"sep",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"elif",
"is_iter_non_string",
"(",
"value",
")",
"and",
"all_iters",
"is",
"not",
"None",
":",
"value",
"=",
"{",
"'{0}{1}'",
".",
"format",
"(",
"all_iters",
",",
"i",
")",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"value",
")",
"}",
"flatten_dict",
"=",
"flatten",
"(",
"value",
",",
"key_as_tuple",
",",
"sep",
",",
"list_of_dicts",
",",
"all_iters",
")",
"if",
"key_as_tuple",
":",
"return",
"[",
"(",
"key",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"else",
":",
"return",
"[",
"(",
"str",
"(",
"key",
")",
"+",
"sep",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"elif",
"is_list_of_dict_like",
"(",
"value",
")",
"and",
"list_of_dicts",
"is",
"not",
"None",
":",
"value",
"=",
"{",
"'{0}{1}'",
".",
"format",
"(",
"list_of_dicts",
",",
"i",
")",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"value",
")",
"}",
"flatten_dict",
"=",
"flatten",
"(",
"value",
",",
"key_as_tuple",
",",
"sep",
",",
"list_of_dicts",
",",
"all_iters",
")",
"if",
"key_as_tuple",
":",
"return",
"[",
"(",
"key",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"else",
":",
"return",
"[",
"(",
"str",
"(",
"key",
")",
"+",
"sep",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatten_dict",
".",
"items",
"(",
")",
"]",
"else",
":",
"return",
"[",
"(",
"key",
",",
"value",
")",
"]",
"if",
"is_iter_non_string",
"(",
"d",
")",
"and",
"all_iters",
"is",
"not",
"None",
":",
"d",
"=",
"{",
"'{0}{1}'",
".",
"format",
"(",
"all_iters",
",",
"i",
")",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"d",
")",
"}",
"elif",
"is_list_of_dict_like",
"(",
"d",
")",
"and",
"list_of_dicts",
"is",
"not",
"None",
":",
"d",
"=",
"{",
"'{0}{1}'",
".",
"format",
"(",
"list_of_dicts",
",",
"i",
")",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"d",
")",
"}",
"elif",
"not",
"is_dict_like",
"(",
"d",
")",
":",
"raise",
"TypeError",
"(",
"'d is not dict like: {}'",
".",
"format",
"(",
"d",
")",
")",
"if",
"key_as_tuple",
":",
"items",
"=",
"[",
"item",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"for",
"item",
"in",
"expand",
"(",
"(",
"k",
",",
")",
",",
"v",
")",
"]",
"else",
":",
"items",
"=",
"[",
"item",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"for",
"item",
"in",
"expand",
"(",
"k",
",",
"v",
")",
"]",
"return",
"dict",
"(",
"items",
")"
] | get nested dict as flat {key:val,...},
where key is tuple/string of all nested keys
Parameters
----------
d : object
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
sep : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
all_iters: str or None
if not None, flatten all lists and tuples using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"}, 2:{"b":"B"}}
>>> pprint(flatten(d))
{(1, 'a'): 'A', (2, 'b'): 'B'}
>>> d = {1:{"a":"A"},2:{"b":"B"}}
>>> pprint(flatten(d,key_as_tuple=False))
{'1.a': 'A', '2.b': 'B'}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,list_of_dicts='__list__'))
{('__list__0', 'a'): 1, ('__list__1', 'b'): [1, 2]}
>>> d = [{'a':1},{'b':[1, 2]}]
>>> pprint(flatten(d,all_iters='__iter__'))
{('__iter__0', 'a'): 1,
('__iter__1', 'b', '__iter__0'): 1,
('__iter__1', 'b', '__iter__1'): 2} | [
"get",
"nested",
"dict",
"as",
"flat",
"{",
"key",
":",
"val",
"...",
"}",
"where",
"key",
"is",
"tuple",
"/",
"string",
"of",
"all",
"nested",
"keys"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L435-L520 |
chrisjsewell/jsonextended | jsonextended/edict.py | unflatten | def unflatten(d, key_as_tuple=True, delim='.',
list_of_dicts=None, deepcopy=True):
r""" unflatten dictionary with keys as tuples or delimited strings
Parameters
----------
d : dict
key_as_tuple : bool
if true, keys are tuples, else, keys are delimited strings
delim : str
if keys are strings, then split by delim
list_of_dicts: str or None
if a key starts with this prefix, treat it as a list
Examples
--------
>>> from pprint import pprint
>>> d = {('a','b'):1,('a','c'):2}
>>> pprint(unflatten(d))
{'a': {'b': 1, 'c': 2}}
>>> d2 = {'a.b':1,'a.c':2}
>>> pprint(unflatten(d2,key_as_tuple=False))
{'a': {'b': 1, 'c': 2}}
>>> d3 = {('a','__list__1', 'a'): 1, ('a','__list__0', 'b'): 2}
>>> pprint(unflatten(d3,list_of_dicts='__list__'))
{'a': [{'b': 2}, {'a': 1}]}
>>> unflatten({('a','b','c'):1,('a','b'):2})
Traceback (most recent call last):
...
KeyError: "child conflict for path: ('a', 'b'); 2 and {'c': 1}"
"""
if not d:
return d
if deepcopy:
try:
d = copy.deepcopy(d)
except Exception:
warnings.warn(
'error in deepcopy, so using references to input dict')
if key_as_tuple:
result = d.pop(()) if () in d else {}
else:
result = d.pop('') if '' in d else {}
for key, value in d.items():
if not isinstance(key, tuple) and key_as_tuple:
raise ValueError(
'key not tuple and key_as_tuple set to True: {}'.format(key))
elif not isinstance(key, basestring) and not key_as_tuple:
raise ValueError(
'key not string and key_as_tuple set to False: {}'.format(key))
elif isinstance(key, basestring) and not key_as_tuple:
parts = key.split(delim)
else:
parts = key
d = result
for part in parts[:-1]:
if part not in d:
d[part] = {}
d = d[part]
if not is_dict_like(d):
v1, v2 = sorted([str(d), str({parts[-1]: value})])
raise KeyError("child conflict for path: "
"{0}; {1} and {2}".format(parts[:-1], v1, v2))
elif parts[-1] in d:
try:
value = merge([d[parts[-1]], value])
except Exception:
v1, v2 = sorted([str(value), str(d[parts[-1]])])
raise KeyError("child conflict for path: "
"{0}; {1} and {2}".format(parts, v1, v2))
d[parts[-1]] = value
if list_of_dicts is not None:
result = _recreate_lists(result, list_of_dicts)
# if is_dict_like(result):
# if all([str(k).startswith(list_of_dicts) for k in result.keys()]):
# result = [result[k] for k in sorted(list(result.keys()),
# key=lambda x: int(x.replace(list_of_dicts, '')))]
return result | python | def unflatten(d, key_as_tuple=True, delim='.',
list_of_dicts=None, deepcopy=True):
r""" unflatten dictionary with keys as tuples or delimited strings
Parameters
----------
d : dict
key_as_tuple : bool
if true, keys are tuples, else, keys are delimited strings
delim : str
if keys are strings, then split by delim
list_of_dicts: str or None
if a key starts with this prefix, treat it as a list
Examples
--------
>>> from pprint import pprint
>>> d = {('a','b'):1,('a','c'):2}
>>> pprint(unflatten(d))
{'a': {'b': 1, 'c': 2}}
>>> d2 = {'a.b':1,'a.c':2}
>>> pprint(unflatten(d2,key_as_tuple=False))
{'a': {'b': 1, 'c': 2}}
>>> d3 = {('a','__list__1', 'a'): 1, ('a','__list__0', 'b'): 2}
>>> pprint(unflatten(d3,list_of_dicts='__list__'))
{'a': [{'b': 2}, {'a': 1}]}
>>> unflatten({('a','b','c'):1,('a','b'):2})
Traceback (most recent call last):
...
KeyError: "child conflict for path: ('a', 'b'); 2 and {'c': 1}"
"""
if not d:
return d
if deepcopy:
try:
d = copy.deepcopy(d)
except Exception:
warnings.warn(
'error in deepcopy, so using references to input dict')
if key_as_tuple:
result = d.pop(()) if () in d else {}
else:
result = d.pop('') if '' in d else {}
for key, value in d.items():
if not isinstance(key, tuple) and key_as_tuple:
raise ValueError(
'key not tuple and key_as_tuple set to True: {}'.format(key))
elif not isinstance(key, basestring) and not key_as_tuple:
raise ValueError(
'key not string and key_as_tuple set to False: {}'.format(key))
elif isinstance(key, basestring) and not key_as_tuple:
parts = key.split(delim)
else:
parts = key
d = result
for part in parts[:-1]:
if part not in d:
d[part] = {}
d = d[part]
if not is_dict_like(d):
v1, v2 = sorted([str(d), str({parts[-1]: value})])
raise KeyError("child conflict for path: "
"{0}; {1} and {2}".format(parts[:-1], v1, v2))
elif parts[-1] in d:
try:
value = merge([d[parts[-1]], value])
except Exception:
v1, v2 = sorted([str(value), str(d[parts[-1]])])
raise KeyError("child conflict for path: "
"{0}; {1} and {2}".format(parts, v1, v2))
d[parts[-1]] = value
if list_of_dicts is not None:
result = _recreate_lists(result, list_of_dicts)
# if is_dict_like(result):
# if all([str(k).startswith(list_of_dicts) for k in result.keys()]):
# result = [result[k] for k in sorted(list(result.keys()),
# key=lambda x: int(x.replace(list_of_dicts, '')))]
return result | [
"def",
"unflatten",
"(",
"d",
",",
"key_as_tuple",
"=",
"True",
",",
"delim",
"=",
"'.'",
",",
"list_of_dicts",
"=",
"None",
",",
"deepcopy",
"=",
"True",
")",
":",
"if",
"not",
"d",
":",
"return",
"d",
"if",
"deepcopy",
":",
"try",
":",
"d",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"except",
"Exception",
":",
"warnings",
".",
"warn",
"(",
"'error in deepcopy, so using references to input dict'",
")",
"if",
"key_as_tuple",
":",
"result",
"=",
"d",
".",
"pop",
"(",
"(",
")",
")",
"if",
"(",
")",
"in",
"d",
"else",
"{",
"}",
"else",
":",
"result",
"=",
"d",
".",
"pop",
"(",
"''",
")",
"if",
"''",
"in",
"d",
"else",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"tuple",
")",
"and",
"key_as_tuple",
":",
"raise",
"ValueError",
"(",
"'key not tuple and key_as_tuple set to True: {}'",
".",
"format",
"(",
"key",
")",
")",
"elif",
"not",
"isinstance",
"(",
"key",
",",
"basestring",
")",
"and",
"not",
"key_as_tuple",
":",
"raise",
"ValueError",
"(",
"'key not string and key_as_tuple set to False: {}'",
".",
"format",
"(",
"key",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"basestring",
")",
"and",
"not",
"key_as_tuple",
":",
"parts",
"=",
"key",
".",
"split",
"(",
"delim",
")",
"else",
":",
"parts",
"=",
"key",
"d",
"=",
"result",
"for",
"part",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"if",
"part",
"not",
"in",
"d",
":",
"d",
"[",
"part",
"]",
"=",
"{",
"}",
"d",
"=",
"d",
"[",
"part",
"]",
"if",
"not",
"is_dict_like",
"(",
"d",
")",
":",
"v1",
",",
"v2",
"=",
"sorted",
"(",
"[",
"str",
"(",
"d",
")",
",",
"str",
"(",
"{",
"parts",
"[",
"-",
"1",
"]",
":",
"value",
"}",
")",
"]",
")",
"raise",
"KeyError",
"(",
"\"child conflict for path: \"",
"\"{0}; {1} and {2}\"",
".",
"format",
"(",
"parts",
"[",
":",
"-",
"1",
"]",
",",
"v1",
",",
"v2",
")",
")",
"elif",
"parts",
"[",
"-",
"1",
"]",
"in",
"d",
":",
"try",
":",
"value",
"=",
"merge",
"(",
"[",
"d",
"[",
"parts",
"[",
"-",
"1",
"]",
"]",
",",
"value",
"]",
")",
"except",
"Exception",
":",
"v1",
",",
"v2",
"=",
"sorted",
"(",
"[",
"str",
"(",
"value",
")",
",",
"str",
"(",
"d",
"[",
"parts",
"[",
"-",
"1",
"]",
"]",
")",
"]",
")",
"raise",
"KeyError",
"(",
"\"child conflict for path: \"",
"\"{0}; {1} and {2}\"",
".",
"format",
"(",
"parts",
",",
"v1",
",",
"v2",
")",
")",
"d",
"[",
"parts",
"[",
"-",
"1",
"]",
"]",
"=",
"value",
"if",
"list_of_dicts",
"is",
"not",
"None",
":",
"result",
"=",
"_recreate_lists",
"(",
"result",
",",
"list_of_dicts",
")",
"# if is_dict_like(result):",
"# if all([str(k).startswith(list_of_dicts) for k in result.keys()]):",
"# result = [result[k] for k in sorted(list(result.keys()),",
"# key=lambda x: int(x.replace(list_of_dicts, '')))]",
"return",
"result"
] | r""" unflatten dictionary with keys as tuples or delimited strings
Parameters
----------
d : dict
key_as_tuple : bool
if true, keys are tuples, else, keys are delimited strings
delim : str
if keys are strings, then split by delim
list_of_dicts: str or None
if a key starts with this prefix, treat it as a list
Examples
--------
>>> from pprint import pprint
>>> d = {('a','b'):1,('a','c'):2}
>>> pprint(unflatten(d))
{'a': {'b': 1, 'c': 2}}
>>> d2 = {'a.b':1,'a.c':2}
>>> pprint(unflatten(d2,key_as_tuple=False))
{'a': {'b': 1, 'c': 2}}
>>> d3 = {('a','__list__1', 'a'): 1, ('a','__list__0', 'b'): 2}
>>> pprint(unflatten(d3,list_of_dicts='__list__'))
{'a': [{'b': 2}, {'a': 1}]}
>>> unflatten({('a','b','c'):1,('a','b'):2})
Traceback (most recent call last):
...
KeyError: "child conflict for path: ('a', 'b'); 2 and {'c': 1}" | [
"r",
"unflatten",
"dictionary",
"with",
"keys",
"as",
"tuples",
"or",
"delimited",
"strings"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L543-L634 |
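For nested dicts, flatten and unflatten are inverses, including the list_of_dicts variant. A brief round-trip sketch, same imports assumed:

>>> from jsonextended.edict import flatten, unflatten
>>> d = {1: {'a': 'A'}, 2: {'b': 'B'}}
>>> unflatten(flatten(d)) == d
True
>>> d2 = {'a': [{'b': 1}, {'c': 2}]}
>>> unflatten(flatten(d2, list_of_dicts='__list__'),
...           list_of_dicts='__list__') == d2
True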
chrisjsewell/jsonextended | jsonextended/edict.py | _single_merge | def _single_merge(a, b, error_path=None, overwrite=False,
append=False, list_of_dicts=False):
"""merges b into a
"""
if error_path is None:
error_path = []
if list_of_dicts and is_list_of_dict_like(a) and is_list_of_dict_like(b):
if len(a) != len(b):
raise ValueError(
'list of dicts are of different lengths at '
'"{0}": old: {1}, new: {2}'.format('.'.join(error_path), a, b))
return [_single_merge(a_item, b_item,
error_path + ["iter_{}".format(i)],
overwrite, append, list_of_dicts)
for i, (a_item, b_item) in enumerate(zip(a, b))]
for key in b:
if key in a:
if is_dict_like(a[key]) and is_dict_like(b[key]):
_single_merge(a[key], b[key], error_path +
[str(key)], overwrite, append, list_of_dicts)
elif (isinstance(a[key], list)
and isinstance(b[key], list) and append):
a[key] += b[key]
elif (list_of_dicts
and is_list_of_dict_like(a[key])
and is_list_of_dict_like(b[key])):
if len(a[key]) != len(b[key]):
raise ValueError(
'list of dicts are of different lengths at '
'"{0}": old: {1}, new: {2}'.format(
'.'.join(error_path + [str(key)]), a[key], b[key]))
for i, (a_item, b_item) in enumerate(zip(a[key], b[key])):
_single_merge(a_item, b_item,
error_path + [str(key), "iter_{}".format(i)],
overwrite, append, list_of_dicts)
elif a[key] == b[key]:
pass # same leaf value
elif overwrite:
a[key] = b[key]
else:
raise ValueError(
'different data already exists at '
'"{0}": old: {1}, new: {2}'.format(
'.'.join(error_path + [str(key)]), a[key], b[key]))
else:
a[key] = b[key]
return a | python | def _single_merge(a, b, error_path=None, overwrite=False,
append=False, list_of_dicts=False):
"""merges b into a
"""
if error_path is None:
error_path = []
if list_of_dicts and is_list_of_dict_like(a) and is_list_of_dict_like(b):
if len(a) != len(b):
raise ValueError(
'list of dicts are of different lengths at '
'"{0}": old: {1}, new: {2}'.format('.'.join(error_path), a, b))
return [_single_merge(a_item, b_item,
error_path + ["iter_{}".format(i)],
overwrite, append, list_of_dicts)
for i, (a_item, b_item) in enumerate(zip(a, b))]
for key in b:
if key in a:
if is_dict_like(a[key]) and is_dict_like(b[key]):
_single_merge(a[key], b[key], error_path +
[str(key)], overwrite, append, list_of_dicts)
elif (isinstance(a[key], list)
and isinstance(b[key], list) and append):
a[key] += b[key]
elif (list_of_dicts
and is_list_of_dict_like(a[key])
and is_list_of_dict_like(b[key])):
if len(a[key]) != len(b[key]):
raise ValueError(
'list of dicts are of different lengths at '
'"{0}": old: {1}, new: {2}'.format(
'.'.join(error_path + [str(key)]), a[key], b[key]))
for i, (a_item, b_item) in enumerate(zip(a[key], b[key])):
_single_merge(a_item, b_item,
error_path + [str(key), "iter_{}".format(i)],
overwrite, append, list_of_dicts)
elif a[key] == b[key]:
pass # same leaf value
elif overwrite:
a[key] = b[key]
else:
raise ValueError(
'different data already exists at '
'"{0}": old: {1}, new: {2}'.format(
'.'.join(error_path + [str(key)]), a[key], b[key]))
else:
a[key] = b[key]
return a | [
"def",
"_single_merge",
"(",
"a",
",",
"b",
",",
"error_path",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"append",
"=",
"False",
",",
"list_of_dicts",
"=",
"False",
")",
":",
"if",
"error_path",
"is",
"None",
":",
"error_path",
"=",
"[",
"]",
"if",
"list_of_dicts",
"and",
"is_list_of_dict_like",
"(",
"a",
")",
"and",
"is_list_of_dict_like",
"(",
"b",
")",
":",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
":",
"raise",
"ValueError",
"(",
"'list of dicts are of different lengths at '",
"'\"{0}\": old: {1}, new: {2}'",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"error_path",
")",
",",
"a",
",",
"b",
")",
")",
"return",
"[",
"_single_merge",
"(",
"a_item",
",",
"b_item",
",",
"error_path",
"+",
"[",
"\"iter_{}\"",
".",
"format",
"(",
"i",
")",
"]",
",",
"overwrite",
",",
"append",
",",
"list_of_dicts",
")",
"for",
"i",
",",
"(",
"a_item",
",",
"b_item",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"a",
",",
"b",
")",
")",
"]",
"for",
"key",
"in",
"b",
":",
"if",
"key",
"in",
"a",
":",
"if",
"is_dict_like",
"(",
"a",
"[",
"key",
"]",
")",
"and",
"is_dict_like",
"(",
"b",
"[",
"key",
"]",
")",
":",
"_single_merge",
"(",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
",",
"error_path",
"+",
"[",
"str",
"(",
"key",
")",
"]",
",",
"overwrite",
",",
"append",
",",
"list_of_dicts",
")",
"elif",
"(",
"isinstance",
"(",
"a",
"[",
"key",
"]",
",",
"list",
")",
"and",
"isinstance",
"(",
"b",
"[",
"key",
"]",
",",
"list",
")",
"and",
"append",
")",
":",
"a",
"[",
"key",
"]",
"+=",
"b",
"[",
"key",
"]",
"elif",
"(",
"list_of_dicts",
"and",
"is_list_of_dict_like",
"(",
"a",
"[",
"key",
"]",
")",
"and",
"is_list_of_dict_like",
"(",
"b",
"[",
"key",
"]",
")",
")",
":",
"if",
"len",
"(",
"a",
"[",
"key",
"]",
")",
"!=",
"len",
"(",
"b",
"[",
"key",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'list of dicts are of different lengths at '",
"'\"{0}\": old: {1}, new: {2}'",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"error_path",
"+",
"[",
"str",
"(",
"key",
")",
"]",
")",
",",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
")",
")",
"for",
"i",
",",
"(",
"a_item",
",",
"b_item",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
")",
")",
":",
"_single_merge",
"(",
"a_item",
",",
"b_item",
",",
"error_path",
"+",
"[",
"str",
"(",
"key",
")",
",",
"\"iter_{}\"",
".",
"format",
"(",
"i",
")",
"]",
",",
"overwrite",
",",
"append",
",",
"list_of_dicts",
")",
"elif",
"a",
"[",
"key",
"]",
"==",
"b",
"[",
"key",
"]",
":",
"pass",
"# same leaf value",
"elif",
"overwrite",
":",
"a",
"[",
"key",
"]",
"=",
"b",
"[",
"key",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'different data already exists at '",
"'\"{0}\": old: {1}, new: {2}'",
".",
"format",
"(",
"'.'",
".",
"join",
"(",
"error_path",
"+",
"[",
"str",
"(",
"key",
")",
"]",
")",
",",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
")",
")",
"else",
":",
"a",
"[",
"key",
"]",
"=",
"b",
"[",
"key",
"]",
"return",
"a"
] | merges b into a | [
"merges",
"b",
"into",
"a"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L637-L685 |
chrisjsewell/jsonextended | jsonextended/edict.py | merge | def merge(dicts, overwrite=False, append=False, list_of_dicts=False):
""" merge dicts,
starting with dicts[1] into dicts[0]
Parameters
----------
dicts : list[dict]
list of dictionaries
overwrite : bool
if true allow overwriting of current data
append : bool
if true and items are both lists, then add them
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"A"},2:{"c":"C"}}
>>> pprint(merge([d1,d2]))
{1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}}
>>> d1 = {1:{"a":["A"]}}
>>> d2 = {1:{"a":["D"]}}
>>> pprint(merge([d1,d2],append=True))
{1: {'a': ['A', 'D']}}
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"X"},2:{"c":"C"}}
>>> merge([d1,d2],overwrite=False)
Traceback (most recent call last):
...
ValueError: different data already exists at "1.a": old: A, new: X
>>> merge([{},{}],overwrite=False)
{}
>>> merge([{},{'a':1}],overwrite=False)
{'a': 1}
>>> pprint(merge([{},{'a':1},{'a':1},{'b':2}]))
{'a': 1, 'b': 2}
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}]))
Traceback (most recent call last):
...
ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True))
Traceback (most recent call last):
...
ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True))
{'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]}
""" # noqa: E501
outdict = copy.deepcopy(dicts[0])
def single_merge(a, b):
return _single_merge(a, b, overwrite=overwrite, append=append,
list_of_dicts=list_of_dicts)
reduce(single_merge, [outdict] + dicts[1:])
return outdict | python | def merge(dicts, overwrite=False, append=False, list_of_dicts=False):
""" merge dicts,
starting with dicts[1] into dicts[0]
Parameters
----------
dicts : list[dict]
list of dictionaries
overwrite : bool
if true allow overwriting of current data
append : bool
if true and items are both lists, then add them
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"A"},2:{"c":"C"}}
>>> pprint(merge([d1,d2]))
{1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}}
>>> d1 = {1:{"a":["A"]}}
>>> d2 = {1:{"a":["D"]}}
>>> pprint(merge([d1,d2],append=True))
{1: {'a': ['A', 'D']}}
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"X"},2:{"c":"C"}}
>>> merge([d1,d2],overwrite=False)
Traceback (most recent call last):
...
ValueError: different data already exists at "1.a": old: A, new: X
>>> merge([{},{}],overwrite=False)
{}
>>> merge([{},{'a':1}],overwrite=False)
{'a': 1}
>>> pprint(merge([{},{'a':1},{'a':1},{'b':2}]))
{'a': 1, 'b': 2}
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}]))
Traceback (most recent call last):
...
ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True))
Traceback (most recent call last):
...
ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True))
{'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]}
""" # noqa: E501
outdict = copy.deepcopy(dicts[0])
def single_merge(a, b):
return _single_merge(a, b, overwrite=overwrite, append=append,
list_of_dicts=list_of_dicts)
reduce(single_merge, [outdict] + dicts[1:])
return outdict | [
"def",
"merge",
"(",
"dicts",
",",
"overwrite",
"=",
"False",
",",
"append",
"=",
"False",
",",
"list_of_dicts",
"=",
"False",
")",
":",
"# noqa: E501",
"outdict",
"=",
"copy",
".",
"deepcopy",
"(",
"dicts",
"[",
"0",
"]",
")",
"def",
"single_merge",
"(",
"a",
",",
"b",
")",
":",
"return",
"_single_merge",
"(",
"a",
",",
"b",
",",
"overwrite",
"=",
"overwrite",
",",
"append",
"=",
"append",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"reduce",
"(",
"single_merge",
",",
"[",
"outdict",
"]",
"+",
"dicts",
"[",
"1",
":",
"]",
")",
"return",
"outdict"
] | merge dicts,
starting with dicts[1] into dicts[0]
Parameters
----------
dicts : list[dict]
list of dictionaries
overwrite : bool
if true allow overwriting of current data
append : bool
if true and items are both lists, then add them
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"A"},2:{"c":"C"}}
>>> pprint(merge([d1,d2]))
{1: {'a': 'A'}, 2: {'b': 'B', 'c': 'C'}}
>>> d1 = {1:{"a":["A"]}}
>>> d2 = {1:{"a":["D"]}}
>>> pprint(merge([d1,d2],append=True))
{1: {'a': ['A', 'D']}}
>>> d1 = {1:{"a":"A"},2:{"b":"B"}}
>>> d2 = {1:{"a":"X"},2:{"c":"C"}}
>>> merge([d1,d2],overwrite=False)
Traceback (most recent call last):
...
ValueError: different data already exists at "1.a": old: A, new: X
>>> merge([{},{}],overwrite=False)
{}
>>> merge([{},{'a':1}],overwrite=False)
{'a': 1}
>>> pprint(merge([{},{'a':1},{'a':1},{'b':2}]))
{'a': 1, 'b': 2}
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}]))
Traceback (most recent call last):
...
ValueError: different data already exists at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}]}], list_of_dicts=True))
Traceback (most recent call last):
...
ValueError: list of dicts are of different lengths at "a": old: [{'b': 1}, {'c': 2}], new: [{'d': 3}]
>>> pprint(merge([{'a':[{"b": 1}, {"c": 2}]}, {'a':[{"d": 3}, {"e": 4}]}], list_of_dicts=True))
{'a': [{'b': 1, 'd': 3}, {'c': 2, 'e': 4}]} | [
"merge",
"dicts",
"starting",
"with",
"dicts",
"[",
"1",
"]",
"into",
"dicts",
"[",
"0",
"]"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L688-L751 |
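
A quick sketch of merge's overwrite mode, which the doctests above do not exercise (the import path is inferred from the file path in this row, and the result assumes overwrite simply replaces clashing leaf values, as the parameter description states):

    >>> from jsonextended.edict import merge
    >>> merge([{1: {'a': 'A'}}, {1: {'a': 'X'}}], overwrite=True)
    {1: {'a': 'X'}}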
chrisjsewell/jsonextended | jsonextended/edict.py | flattennd |
def flattennd(d, levels=0, key_as_tuple=True, delim='.',
              list_of_dicts=None):
    """ get nested dict as {key:dict,...},
    where key is a tuple/string of all levels of nested keys
    except the last n

    Parameters
    ----------
    d : dict
    levels : int
        the number of levels to leave unflattened
    key_as_tuple : bool
        whether keys are tuples of nested keys
        or delimited strings of nested keys
    delim : str
        if key_as_tuple=False, delimiter for keys
    list_of_dicts: str or None
        if not None, flatten lists of dicts using this prefix

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
    >>> pprint(flattennd(d,0))
    {(1, 2, 3, 'b'): 'B', (1, 2, 3, 'c'): 'C', (1, 2, 4): 'D'}
    >>> pprint(flattennd(d,1))
    {(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
    >>> pprint(flattennd(d,2))
    {(1,): {2: {4: 'D'}}, (1, 2): {3: {'b': 'B', 'c': 'C'}}}
    >>> pprint(flattennd(d,3))
    {(): {1: {2: {4: 'D'}}}, (1,): {2: {3: {'b': 'B', 'c': 'C'}}}}
    >>> pprint(flattennd(d,4))
    {(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
    >>> pprint(flattennd(d,5))
    {(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
    >>> pprint(flattennd(d,1,key_as_tuple=False,delim='.'))
    {'1.2': {4: 'D'}, '1.2.3': {'b': 'B', 'c': 'C'}}
    >>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
    >>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=2))
    {('a', '__list__0', 'b'): [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}],
     ('a', '__list__1', 'b'): [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}
    >>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=3))
    {('a', '__list__0'): {'b': [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}]},
     ('a', '__list__1'): {'b': [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}}
    """  # noqa: E501
    if levels < 0:
        raise ValueError('unflattened levels must be >= 0')
    new_d = {}
    flattened = flatten(d, True, delim, list_of_dicts=list_of_dicts)
    if levels == 0:
        return flattened
    for key, value in flattened.items():
        if key_as_tuple:
            new_key = key[:-levels]
        else:
            new_key = delim.join([str(k) for k in key[:-levels]])
        new_levels = key[-levels:]
        if new_key not in new_d:
            new_d[new_key] = {new_levels: value}
        else:
            if new_levels in new_d[new_key]:
                raise ValueError(
                    "key clash for: {0}; {1}".format(new_key, new_levels))
            new_d[new_key][new_levels] = value
    for nkey, nvalue in new_d.items():
        new_d[nkey] = unflatten(
            nvalue, list_of_dicts=list_of_dicts, deepcopy=False)
    return new_d
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L754-L838 |
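
A minimal sketch of the shape flattennd produces with levels=1: the leading keys collapse into a tuple while the last level stays a dict (same inferred import path as above):

    >>> from jsonextended.edict import flattennd
    >>> flattennd({'x': {'y': {'z': 1}}}, levels=1)
    {('x', 'y'): {'z': 1}}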
chrisjsewell/jsonextended | jsonextended/edict.py | flatten2d |
def flatten2d(d, key_as_tuple=True, delim='.',
              list_of_dicts=None):
    """ get nested dict as {key:dict,...},
    where key is a tuple/string of all but the last level of nested keys

    NB: is the same as flattennd(d, 1, key_as_tuple, delim)

    Parameters
    ----------
    d : dict
    key_as_tuple : bool
        whether keys are tuples of nested keys
        or delimited strings of nested keys
    delim : str
        if key_as_tuple=False, delimiter for keys
    list_of_dicts: str or None
        if not None, flatten lists of dicts using this prefix

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
    >>> pprint(flatten2d(d))
    {(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
    >>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
    {'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
    """
    return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts)
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L841-L871 |
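
Because flatten2d maps key tuples to flat dicts, its output drops straight into a table; a sketch assuming pandas is installed (pandas is not required by the code above):

    >>> import pandas as pd
    >>> from jsonextended.edict import flatten2d
    >>> d = {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}
    >>> df = pd.DataFrame.from_dict(flatten2d(d), orient='index')  # rows indexed by key tuples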
chrisjsewell/jsonextended | jsonextended/edict.py | remove_keys |
def remove_keys(d, keys=None, use_wildcards=True,
                list_of_dicts=False, deepcopy=True):
    """remove certain keys from nested dict, retaining preceding paths

    Parameters
    ----------
    keys: list
    use_wildcards : bool
        if true, can use * (matches everything)
        and ? (matches any single character)
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{"a":"A"},"a":{"b":"B"}}
    >>> pprint(remove_keys(d,['a']))
    {1: 'A', 'b': 'B'}
    >>> pprint(remove_keys({'abc':1},['a*'],use_wildcards=False))
    {'abc': 1}
    >>> pprint(remove_keys({'abc':1},['a*'],use_wildcards=True))
    {}
    """
    keys = [] if keys is None else keys
    list_of_dicts = '__list__' if list_of_dicts else None

    def is_in(a, bs):
        if use_wildcards:
            for b in bs:
                try:
                    if a == b:
                        return True
                    if fnmatch(a, b):
                        return True
                except Exception:
                    pass
            return False
        else:
            try:
                return a in bs
            except Exception:
                return False

    if not hasattr(d, 'items'):
        return d
    else:
        dic = flatten(d, list_of_dicts=list_of_dicts)
        new_dic = {}
        for key, value in dic.items():
            new_key = tuple([i for i in key if not is_in(i, keys)])
            if not new_key:
                continue
            try:
                if new_key[-1].startswith(list_of_dicts):
                    continue
            except Exception:
                pass
            new_dic[new_key] = value
        return unflatten(
            new_dic, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L874-L938 |
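
A sketch of the ? wildcard, showing that a removed key splices its children onto the parent path, and that a path whose keys are all removed is dropped entirely (same inferred import path as above):

    >>> from jsonextended.edict import remove_keys
    >>> remove_keys({'ab': 1, 'cd': {'ab': 2}}, ['a?'], use_wildcards=True)
    {'cd': 2}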
chrisjsewell/jsonextended | jsonextended/edict.py | remove_keyvals |
def remove_keyvals(d, keyvals=None, list_of_dicts=False, deepcopy=True):
    """remove paths with at least one branch leading
    to certain (key,value) pairs from dict

    Parameters
    ----------
    d : dict
    keyvals : dict or list[tuple]
        (key,value) pairs to remove
    list_of_dicts: bool
        treat list of dicts as additional branches

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{"b":"A"},"a":{"b":"B","c":"D"},"b":{"a":"B"}}
    >>> pprint(remove_keyvals(d,[("b","B")]))
    {1: {'b': 'A'}, 'b': {'a': 'B'}}
    >>> d2 = {'a':[{'b':1,'c':1},{'b':1,'c':2}]}
    >>> pprint(remove_keyvals(d2,[("b",1)]))
    {'a': [{'b': 1, 'c': 1}, {'b': 1, 'c': 2}]}
    >>> pprint(remove_keyvals(d2,[("b",1)],list_of_dicts=True))
    {}
    """
    keyvals = [] if keyvals is None else keyvals
    list_of_dicts = '__list__' if list_of_dicts else None

    if hasattr(keyvals, 'items'):
        keyvals = [(k, v) for k, v in keyvals.items()]

    if not hasattr(d, 'items'):
        return d

    flatd = flatten(d, list_of_dicts=list_of_dicts)

    def is_in(a, b):
        try:
            return a in b
        except Exception:
            return False

    prune = [k[0] for k, v in flatd.items() if is_in((k[-1], v), keyvals)]
    flatd = {k: v for k, v in flatd.items() if not is_in(k[0], prune)}

    return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
"""remove paths with at least one branch leading
to certain (key,value) pairs from dict
Parameters
----------
d : dict
keyvals : dict or list[tuple]
(key,value) pairs to remove
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"b":"A"},"a":{"b":"B","c":"D"},"b":{"a":"B"}}
>>> pprint(remove_keyvals(d,[("b","B")]))
{1: {'b': 'A'}, 'b': {'a': 'B'}}
>>> d2 = {'a':[{'b':1,'c':1},{'b':1,'c':2}]}
>>> pprint(remove_keyvals(d2,[("b",1)]))
{'a': [{'b': 1, 'c': 1}, {'b': 1, 'c': 2}]}
>>> pprint(remove_keyvals(d2,[("b",1)],list_of_dicts=True))
{}
"""
keyvals = [] if keyvals is None else keyvals
list_of_dicts = '__list__' if list_of_dicts else None
if hasattr(keyvals, 'items'):
keyvals = [(k, v) for k, v in keyvals.items()]
if not hasattr(d, 'items'):
return d
flatd = flatten(d, list_of_dicts=list_of_dicts)
def is_in(a, b):
try:
return a in b
except Exception:
return False
prune = [k[0] for k, v in flatd.items() if is_in((k[-1], v), keyvals)]
flatd = {k: v for k, v in flatd.items() if not is_in(k[0], prune)}
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"remove_keyvals",
"(",
"d",
",",
"keyvals",
"=",
"None",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
")",
":",
"keyvals",
"=",
"[",
"]",
"if",
"keyvals",
"is",
"None",
"else",
"keyvals",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"if",
"hasattr",
"(",
"keyvals",
",",
"'items'",
")",
":",
"keyvals",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"keyvals",
".",
"items",
"(",
")",
"]",
"if",
"not",
"hasattr",
"(",
"d",
",",
"'items'",
")",
":",
"return",
"d",
"flatd",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"def",
"is_in",
"(",
"a",
",",
"b",
")",
":",
"try",
":",
"return",
"a",
"in",
"b",
"except",
"Exception",
":",
"return",
"False",
"prune",
"=",
"[",
"k",
"[",
"0",
"]",
"for",
"k",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"if",
"is_in",
"(",
"(",
"k",
"[",
"-",
"1",
"]",
",",
"v",
")",
",",
"keyvals",
")",
"]",
"flatd",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"if",
"not",
"is_in",
"(",
"k",
"[",
"0",
"]",
",",
"prune",
")",
"}",
"return",
"unflatten",
"(",
"flatd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | remove paths with at least one branch leading
to certain (key,value) pairs from dict
Parameters
----------
d : dict
keyvals : dict or list[tuple]
(key,value) pairs to remove
list_of_dicts: bool
treat list of dicts as additional branches
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"b":"A"},"a":{"b":"B","c":"D"},"b":{"a":"B"}}
>>> pprint(remove_keyvals(d,[("b","B")]))
{1: {'b': 'A'}, 'b': {'a': 'B'}}
>>> d2 = {'a':[{'b':1,'c':1},{'b':1,'c':2}]}
>>> pprint(remove_keyvals(d2,[("b",1)]))
{'a': [{'b': 1, 'c': 1}, {'b': 1, 'c': 2}]}
>>> pprint(remove_keyvals(d2,[("b",1)],list_of_dicts=True))
{} | [
"remove",
"paths",
"with",
"at",
"least",
"one",
"branch",
"leading",
"to",
"certain",
"(",
"key",
"value",
")",
"pairs",
"from",
"dict"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L941-L989 |
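
keyvals may also be given as a plain dict; a sketch showing a whole top-level branch pruned because one of its leaves matches (same inferred import path as above):

    >>> from jsonextended.edict import remove_keyvals
    >>> remove_keyvals({'x': {'k': 'bad'}, 'y': {'k': 'ok'}}, {'k': 'bad'})
    {'y': {'k': 'ok'}}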
chrisjsewell/jsonextended | jsonextended/edict.py | remove_paths |
def remove_paths(d, keys, list_of_dicts=False, deepcopy=True):
    """ remove paths containing certain keys from dict

    Parameters
    ----------
    d: dict
    keys : list
        list of keys to find and remove path
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a',7:'b'}}}
    >>> pprint(remove_paths(d,[6,'a']))
    {2: {'b': 'B'}, 4: {5: {7: 'b'}}}
    >>> d = {1: {2: 3, 4: 5}}
    >>> pprint(remove_paths(d,[(1, 2)]))
    {1: {4: 5}}
    >>> d2 = {'a':[{'b':1,'c':{'b':3}},{'b':1,'c':2}]}
    >>> pprint(remove_paths(d2,["b"],list_of_dicts=False))
    {'a': [{'b': 1, 'c': {'b': 3}}, {'b': 1, 'c': 2}]}
    >>> pprint(remove_paths(d2,["b"],list_of_dicts=True))
    {'a': [{'c': 2}]}
    """
    keys = [(key,) if not isinstance(key, tuple) else key for key in keys]
    list_of_dicts = '__list__' if list_of_dicts else None

    def contains(path):
        for k in keys:
            if set(k).issubset(path):
                return True
        return False

    flatd = flatten(d, list_of_dicts=list_of_dicts)
    flatd = {path: v for path, v in flatd.items() if not contains(path)}

    return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
""" remove paths containing certain keys from dict
Parameters
----------
d: dict
keys : list
list of keys to find and remove path
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a',7:'b'}}}
>>> pprint(remove_paths(d,[6,'a']))
{2: {'b': 'B'}, 4: {5: {7: 'b'}}}
>>> d = {1:{2: 3}, 1:{4: 5}}
>>> pprint(remove_paths(d,[(1, 2)]))
{1: {4: 5}}
>>> d2 = {'a':[{'b':1,'c':{'b':3}},{'b':1,'c':2}]}
>>> pprint(remove_paths(d2,["b"],list_of_dicts=False))
{'a': [{'b': 1, 'c': {'b': 3}}, {'b': 1, 'c': 2}]}
>>> pprint(remove_paths(d2,["b"],list_of_dicts=True))
{'a': [{'c': 2}]}
"""
keys = [(key,) if not isinstance(key, tuple) else key for key in keys]
list_of_dicts = '__list__' if list_of_dicts else None
def contains(path):
for k in keys:
if set(k).issubset(path):
return True
return False
flatd = flatten(d, list_of_dicts=list_of_dicts)
flatd = {path: v for path, v in flatd.items() if not contains(path)}
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"remove_paths",
"(",
"d",
",",
"keys",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
")",
":",
"keys",
"=",
"[",
"(",
"key",
",",
")",
"if",
"not",
"isinstance",
"(",
"key",
",",
"tuple",
")",
"else",
"key",
"for",
"key",
"in",
"keys",
"]",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"def",
"contains",
"(",
"path",
")",
":",
"for",
"k",
"in",
"keys",
":",
"if",
"set",
"(",
"k",
")",
".",
"issubset",
"(",
"path",
")",
":",
"return",
"True",
"return",
"False",
"flatd",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"flatd",
"=",
"{",
"path",
":",
"v",
"for",
"path",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"if",
"not",
"contains",
"(",
"path",
")",
"}",
"return",
"unflatten",
"(",
"flatd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | remove paths containing certain keys from dict
Parameters
----------
d: dict
keys : list
list of keys to find and remove path
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a',7:'b'}}}
>>> pprint(remove_paths(d,[6,'a']))
{2: {'b': 'B'}, 4: {5: {7: 'b'}}}
>>> d = {1:{2: 3}, 1:{4: 5}}
>>> pprint(remove_paths(d,[(1, 2)]))
{1: {4: 5}}
>>> d2 = {'a':[{'b':1,'c':{'b':3}},{'b':1,'c':2}]}
>>> pprint(remove_paths(d2,["b"],list_of_dicts=False))
{'a': [{'b': 1, 'c': {'b': 3}}, {'b': 1, 'c': 2}]}
>>> pprint(remove_paths(d2,["b"],list_of_dicts=True))
{'a': [{'c': 2}]} | [
"remove",
"paths",
"containing",
"certain",
"keys",
"from",
"dict"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L992-L1039 |
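
Tuple entries in keys only match when all of their members lie on the same path; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import remove_paths
    >>> remove_paths({'a': {'b': 1}, 'c': {'b': 2}}, [('a', 'b')])
    {'c': {'b': 2}}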
chrisjsewell/jsonextended | jsonextended/edict.py | filter_values |
def filter_values(d, vals=None, list_of_dicts=False, deepcopy=True):
    """ filters leaf nodes of nested dictionary

    Parameters
    ----------
    d : dict
    vals : list
        values to filter by
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}}
    >>> filter_values(d,['a'])
    {4: {5: {6: 'a'}}}
    """
    vals = [] if vals is None else vals
    list_of_dicts = '__list__' if list_of_dicts else None

    flatd = flatten(d, list_of_dicts=list_of_dicts)

    def is_in(a, b):
        try:
            return a in b
        except Exception:
            return False

    flatd = {k: v for k, v in flatd.items() if is_in(v, vals)}

    return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
""" filters leaf nodes of nested dictionary
Parameters
----------
d : dict
vals : list
values to filter by
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}}
>>> filter_values(d,['a'])
{4: {5: {6: 'a'}}}
"""
vals = [] if vals is None else vals
list_of_dicts = '__list__' if list_of_dicts else None
flatd = flatten(d, list_of_dicts=list_of_dicts)
def is_in(a, b):
try:
return a in b
except Exception:
return False
flatd = {k: v for k, v in flatd.items() if is_in(v, vals)}
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"filter_values",
"(",
"d",
",",
"vals",
"=",
"None",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
")",
":",
"vals",
"=",
"[",
"]",
"if",
"vals",
"is",
"None",
"else",
"vals",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"flatd",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"def",
"is_in",
"(",
"a",
",",
"b",
")",
":",
"try",
":",
"return",
"a",
"in",
"b",
"except",
"Exception",
":",
"return",
"False",
"flatd",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"if",
"is_in",
"(",
"v",
",",
"vals",
")",
"}",
"return",
"unflatten",
"(",
"flatd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | filters leaf nodes of nested dictionary
Parameters
----------
d : dict
vals : list
values to filter by
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a'}}}
>>> filter_values(d,['a'])
{4: {5: {6: 'a'}}} | [
"filters",
"leaf",
"nodes",
"of",
"nested",
"dictionary"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1044-L1077 |
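
Matching uses Python's `in`, so numerically equal values of different types also match; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import filter_values
    >>> filter_values({'a': 1, 'b': 2}, [1.0])
    {'a': 1}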
chrisjsewell/jsonextended | jsonextended/edict.py | filter_keyvals |
def filter_keyvals(d, keyvals, logic="OR", keep_siblings=False,
                   list_of_dicts=False, deepcopy=True):
    """ filter leaf-node (key, value) pairs of a nested dictionary

    Parameters
    ----------
    d : dict
    keyvals : dict or list[tuple]
        (key,value) pairs to filter by
    logic : str
        "OR" or "AND" for matching pairs
    keep_siblings : bool
        keep all sibling paths
    list_of_dicts : bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{6:'a'},3:{7:'a'},2:{6:"b"},4:{5:{6:'a'}}}
    >>> pprint(filter_keyvals(d,[(6,'a')]))
    {1: {6: 'a'}, 4: {5: {6: 'a'}}}
    >>> d2 = {'a':{'b':1,'c':2,'d':3}, 'e':4}
    >>> pprint(filter_keyvals(d2, {'b': 1, 'e': 4}, logic="OR", keep_siblings=False))
    {'a': {'b': 1}, 'e': 4}
    >>> pprint(filter_keyvals(d2,[('b',1)], logic="OR", keep_siblings=True))
    {'a': {'b': 1, 'c': 2, 'd': 3}}
    >>> pprint(filter_keyvals(d2, {'b': 1, 'e': 4}, logic="AND", keep_siblings=False))
    {}
    >>> pprint(filter_keyvals(d2, {'b': 1, 'c': 2}, logic="AND", keep_siblings=False))
    {'a': {'b': 1, 'c': 2}}
    >>> pprint(filter_keyvals(d2,[('b',1), ('c',2)], logic="AND", keep_siblings=True))
    {'a': {'b': 1, 'c': 2, 'd': 3}}
    >>> d3 = {"a": {"b": 1, "f": {"d": 3}}, "e": {"b": 1, "c": 2, "f": {"d": 3}}, "g": 5}
    >>> pprint(filter_keyvals(d3,[('b',1), ('c', 2)], logic="OR", keep_siblings=True))
    {'a': {'b': 1, 'f': {'d': 3}}, 'e': {'b': 1, 'c': 2, 'f': {'d': 3}}}
    >>> pprint(filter_keyvals(d3,[('b',1), ('c', 2)], logic="AND", keep_siblings=True))
    {'e': {'b': 1, 'c': 2, 'f': {'d': 3}}}
    """  # noqa: E501
    if len(keyvals) != len(dict(keyvals)):
        raise ValueError("repeating keys in keyvals: {}".format(keyvals))
    keyvals = dict(keyvals)
    list_of_dicts = '__list__' if list_of_dicts else None

    flattened = flatten(d, list_of_dicts=list_of_dicts)

    if logic == "OR":
        if keep_siblings:
            pruned = {
                tuple(k[:-1]) for k, v in flattened.items()
                if any(key == k[-1] and v == keyvals[key] for key in keyvals)}
            filtered = {k: v for k, v in flattened.items()
                        if _in_pruned(k, pruned)}
        else:
            filtered = {
                k: v for k, v in flattened.items()
                if any(key == k[-1] and v == keyvals[key] for key in keyvals)}
    elif logic == "AND":
        pruned = {}
        for k, v in flattened.items():
            if any(key == k[-1] and v == keyvals[key] for key in keyvals):
                pruned[tuple(k[:-1])] = pruned.get(tuple(k[:-1]), []) + [k[-1]]
        all_keys = set(keyvals.keys())
        pruned = [k for k, v in pruned.items() if set(v) == all_keys]
        if keep_siblings:
            filtered = {k: v for k, v in flattened.items()
                        if _in_pruned(k, pruned)}
        else:
            filtered = {k: v for k, v in flattened.items()
                        if k[-1] in all_keys and _in_pruned(k, pruned)}
    else:
        raise ValueError("logic must be AND or OR: {}".format(logic))

    return unflatten(filtered, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1088-L1177 |
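
Since keyvals is normalised with dict() internally, the dict and list-of-tuples forms are interchangeable; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import filter_keyvals
    >>> d = {1: {6: 'a'}, 2: {6: 'b'}}
    >>> filter_keyvals(d, {6: 'a'}) == filter_keyvals(d, [(6, 'a')])
    True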
chrisjsewell/jsonextended | jsonextended/edict.py | filter_keys |
def filter_keys(d, keys, use_wildcards=False,
                list_of_dicts=False, deepcopy=True):
    """ filter dict by certain keys

    Parameters
    ----------
    d : dict
    keys: list
    use_wildcards : bool
        if true, can use * (matches everything)
        and ? (matches any single character)
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {1:{"a":"A"},2:{"b":"B"},4:{5:{6:'a',7:'b'}}}
    >>> pprint(filter_keys(d,['a',6]))
    {1: {'a': 'A'}, 4: {5: {6: 'a'}}}
    >>> d = {1:{"axxxx":"A"},2:{"b":"B"}}
    >>> pprint(filter_keys(d,['a*'],use_wildcards=True))
    {1: {'axxxx': 'A'}}
    """
    list_of_dicts = '__list__' if list_of_dicts else None

    flatd = flatten(d, list_of_dicts=list_of_dicts)

    def is_in(a, bs):
        if use_wildcards:
            for b in bs:
                try:
                    if a == b:
                        return True
                    if fnmatch(b, a):
                        return True
                except Exception:
                    pass
            return False
        else:
            try:
                return a in bs
            except Exception:
                return False

    flatd = {paths: v for paths, v in flatd.items() if any(
        [is_in(k, paths) for k in keys])}

    return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1264-L1316 |
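
Keys are matched anywhere along a path, not only at the leaves, so a matching branch key keeps its whole subtree; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import filter_keys
    >>> filter_keys({'x': {'a': 1}, 'y': 2}, ['x'])
    {'x': {'a': 1}}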
chrisjsewell/jsonextended | jsonextended/edict.py | filter_paths |
def filter_paths(d, paths, list_of_dicts=False, deepcopy=True):
    """ filter dict by certain paths containing key sets

    Parameters
    ----------
    d : dict
    paths : list[str] or list[tuple]
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a':{'b':1,'c':{'d':2}},'e':{'c':3}}
    >>> filter_paths(d,[('c','d')])
    {'a': {'c': {'d': 2}}}
    >>> d2 = {'a':[{'b':1,'c':3},{'b':1,'c':2}]}
    >>> pprint(filter_paths(d2,["b"],list_of_dicts=False))
    {}
    >>> pprint(filter_paths(d2,["c"],list_of_dicts=True))
    {'a': [{'c': 3}, {'c': 2}]}
    """
    list_of_dicts = '__list__' if list_of_dicts else None

    all_keys = [x for y in paths if isinstance(y, tuple) for x in y]
    all_keys += [x for x in paths if not isinstance(x, tuple)]
    # pre-filter by the individual keys first (faster),
    # then prune by the full paths
    new_d = filter_keys(d, all_keys, list_of_dicts=list_of_dicts)

    new_d = flatten(new_d, list_of_dicts=list_of_dicts)
    for key in list(new_d.keys()):
        if not any([
                set(key).issuperset(path if isinstance(path, tuple) else [path])
                for path in paths]):
            new_d.pop(key)

    return unflatten(new_d, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
""" filter dict by certain paths containing key sets
Parameters
----------
d : dict
paths : list[str] or list[tuple]
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'a':{'b':1,'c':{'d':2}},'e':{'c':3}}
>>> filter_paths(d,[('c','d')])
{'a': {'c': {'d': 2}}}
>>> d2 = {'a':[{'b':1,'c':3},{'b':1,'c':2}]}
>>> pprint(filter_paths(d2,["b"],list_of_dicts=False))
{}
>>> pprint(filter_paths(d2,["c"],list_of_dicts=True))
{'a': [{'c': 3}, {'c': 2}]}
"""
list_of_dicts = '__list__' if list_of_dicts else None
all_keys = [x for y in paths if isinstance(y, tuple) for x in y]
all_keys += [x for x in paths if not isinstance(x, tuple)]
# faster to filter first I think
new_d = filter_keys(d, all_keys, list_of_dicts=list_of_dicts)
new_d = flatten(d, list_of_dicts=list_of_dicts)
for key in list(new_d.keys()):
if not any([
set(key).issuperset(path if isinstance(path, tuple) else[path])
for path in paths]):
new_d.pop(key)
return unflatten(new_d, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"filter_paths",
"(",
"d",
",",
"paths",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
")",
":",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"all_keys",
"=",
"[",
"x",
"for",
"y",
"in",
"paths",
"if",
"isinstance",
"(",
"y",
",",
"tuple",
")",
"for",
"x",
"in",
"y",
"]",
"all_keys",
"+=",
"[",
"x",
"for",
"x",
"in",
"paths",
"if",
"not",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"]",
"# faster to filter first I think",
"new_d",
"=",
"filter_keys",
"(",
"d",
",",
"all_keys",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"new_d",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"for",
"key",
"in",
"list",
"(",
"new_d",
".",
"keys",
"(",
")",
")",
":",
"if",
"not",
"any",
"(",
"[",
"set",
"(",
"key",
")",
".",
"issuperset",
"(",
"path",
"if",
"isinstance",
"(",
"path",
",",
"tuple",
")",
"else",
"[",
"path",
"]",
")",
"for",
"path",
"in",
"paths",
"]",
")",
":",
"new_d",
".",
"pop",
"(",
"key",
")",
"return",
"unflatten",
"(",
"new_d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | filter dict by certain paths containing key sets
Parameters
----------
d : dict
paths : list[str] or list[tuple]
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'a':{'b':1,'c':{'d':2}},'e':{'c':3}}
>>> filter_paths(d,[('c','d')])
{'a': {'c': {'d': 2}}}
>>> d2 = {'a':[{'b':1,'c':3},{'b':1,'c':2}]}
>>> pprint(filter_paths(d2,["b"],list_of_dicts=False))
{}
>>> pprint(filter_paths(d2,["c"],list_of_dicts=True))
{'a': [{'c': 3}, {'c': 2}]} | [
"filter",
"dict",
"by",
"certain",
"paths",
"containing",
"key",
"sets"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1319-L1359 |
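
A tuple in paths keeps only branches where all of its keys occur on the same path; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import filter_paths
    >>> filter_paths({'a': {'b': {'c': 1}}, 'd': {'c': 2}}, [('b', 'c')])
    {'a': {'b': {'c': 1}}}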
chrisjsewell/jsonextended | jsonextended/edict.py | rename_keys |
def rename_keys(d, keymap=None, list_of_dicts=False, deepcopy=True):
    """ rename keys in dict

    Parameters
    ----------
    d : dict
    keymap : dict
        dictionary of key name mappings
    list_of_dicts: bool
        treat list of dicts as additional branches
    deepcopy: bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a':{'old_name':1}}
    >>> pprint(rename_keys(d,{'old_name':'new_name'}))
    {'a': {'new_name': 1}}
    """
    list_of_dicts = '__list__' if list_of_dicts else None
    keymap = {} if keymap is None else keymap

    flatd = flatten(d, list_of_dicts=list_of_dicts)
    flatd = {
        tuple([keymap.get(k, k) for k in path]): v for path, v in flatd.items()
    }

    return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
""" rename keys in dict
Parameters
----------
d : dict
keymap : dict
dictionary of key name mappings
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'a':{'old_name':1}}
>>> pprint(rename_keys(d,{'old_name':'new_name'}))
{'a': {'new_name': 1}}
"""
list_of_dicts = '__list__' if list_of_dicts else None
keymap = {} if keymap is None else keymap
flatd = flatten(d, list_of_dicts=list_of_dicts)
flatd = {
tuple([keymap.get(k, k) for k in path]): v for path, v in flatd.items()
}
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"rename_keys",
"(",
"d",
",",
"keymap",
"=",
"None",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
")",
":",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"keymap",
"=",
"{",
"}",
"if",
"keymap",
"is",
"None",
"else",
"keymap",
"flatd",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"flatd",
"=",
"{",
"tuple",
"(",
"[",
"keymap",
".",
"get",
"(",
"k",
",",
"k",
")",
"for",
"k",
"in",
"path",
"]",
")",
":",
"v",
"for",
"path",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"}",
"return",
"unflatten",
"(",
"flatd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | rename keys in dict
Parameters
----------
d : dict
keymap : dict
dictionary of key name mappings
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'a':{'old_name':1}}
>>> pprint(rename_keys(d,{'old_name':'new_name'}))
{'a': {'new_name': 1}} | [
"rename",
"keys",
"in",
"dict"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1362-L1393 |
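
The keymap is applied at every level of nesting, not just the leaves; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import rename_keys
    >>> rename_keys({'a': {'a': 1}}, {'a': 'b'})
    {'b': {'b': 1}}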
chrisjsewell/jsonextended | jsonextended/edict.py | split_key |
def split_key(d, key, new_keys, before=True,
              list_of_dicts=False, deepcopy=True):
    """ split an existing key into multiple levels

    Parameters
    ----------
    d : dict
        or dict like
    key: str
        existing key value
    new_keys: list[str]
        new levels to add
    before: bool
        add level before existing key (else after)
    list_of_dicts: bool
        treat list of dicts as additional branches

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a':1,'b':2}
    >>> pprint(split_key(d,'a',['c','d']))
    {'b': 2, 'c': {'d': {'a': 1}}}
    >>> pprint(split_key(d,'a',['c','d'],before=False))
    {'a': {'c': {'d': 1}}, 'b': 2}
    >>> d2 = [{'a':1},{'a':2},{'a':3}]
    >>> pprint(split_key(d2,'a',['b'],list_of_dicts=True))
    [{'b': {'a': 1}}, {'b': {'a': 2}}, {'b': {'a': 3}}]
    """
    list_of_dicts = '__list__' if list_of_dicts else None

    flatd = flatten(d, list_of_dicts=list_of_dicts)
    newd = {}
    for path, v in flatd.items():
        if key in path:
            newk = []
            for k in path:
                if k == key:
                    if before:
                        newk = newk + new_keys + [k]
                    else:
                        newk = newk + [k] + new_keys
                else:
                    newk.append(k)
            newd[tuple(newk)] = v
        else:
            newd[path] = v

    return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
| python | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1398-L1449 |
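
Splitting also works on keys that sit mid-path, with the new levels inserted around every occurrence; a sketch (same inferred import path as above):

    >>> from jsonextended.edict import split_key
    >>> split_key({'a': {'b': 1}}, 'a', ['x'], before=False)
    {'a': {'x': {'b': 1}}}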
chrisjsewell/jsonextended | jsonextended/edict.py | apply |
def apply(d, leaf_key, func, new_name=None, remove_lkey=True,
          list_of_dicts=False, unflatten_level=0, deepcopy=True, **kwargs):
    """ apply a function to all values with a certain leaf (terminal) key

    Parameters
    ----------
    d : dict
    leaf_key : str
        name of leaf key
    func : callable
        function to apply
    new_name : str
        if not None, rename leaf_key
    remove_lkey: bool
        whether to remove original leaf_key (if new_name is not None)
    list_of_dicts: bool
        treat list of dicts as additional branches
    unflatten_level : int or None
        the number of levels to leave unflattened before combining,
        for instance if you need dicts as inputs
    deepcopy: bool
        deepcopy values
    kwargs : dict
        additional keywords to parse to function

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a':1,'b':1}
    >>> func = lambda x: x+1
    >>> pprint(apply(d,'a',func))
    {'a': 2, 'b': 1}
    >>> pprint(apply(d,'a',func,new_name='c'))
    {'b': 1, 'c': 2}
    >>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
    {'a': 1, 'b': 1, 'c': 2}
    >>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
    >>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
    {'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
    """  # noqa: E501
    list_of_dicts = '__list__' if list_of_dicts else None

    if unflatten_level == 0:
        flatd = flatten(d, list_of_dicts=list_of_dicts)
    else:
        flatd = flattennd(d, unflatten_level, list_of_dicts=list_of_dicts)

    newd = {k: (func(v, **kwargs) if k[-1] == leaf_key else v)
            for k, v in flatd.items()}
    if new_name is not None:
        newd = {(tuple(list(k[:-1]) + [new_name]) if k[-1]
                 == leaf_key else k): v for k, v in newd.items()}
        if not remove_lkey:
            newd.update(flatd)

    return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
list_of_dicts=False, unflatten_level=0, deepcopy=True, **kwargs):
""" apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before applying,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to pass to the function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
""" # noqa: E501
list_of_dicts = '__list__' if list_of_dicts else None
if unflatten_level == 0:
flatd = flatten(d, list_of_dicts=list_of_dicts)
else:
flatd = flattennd(d, unflatten_level, list_of_dicts=list_of_dicts)
newd = {k: (func(v, **kwargs) if k[-1] == leaf_key else v)
for k, v in flatd.items()}
if new_name is not None:
newd = {(tuple(list(k[:-1]) + [new_name]) if k[-1]
== leaf_key else k): v for k, v in newd.items()}
if not remove_lkey:
newd.update(flatd)
return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy) | [
"def",
"apply",
"(",
"d",
",",
"leaf_key",
",",
"func",
",",
"new_name",
"=",
"None",
",",
"remove_lkey",
"=",
"True",
",",
"list_of_dicts",
"=",
"False",
",",
"unflatten_level",
"=",
"0",
",",
"deepcopy",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"if",
"unflatten_level",
"==",
"0",
":",
"flatd",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"else",
":",
"flatd",
"=",
"flattennd",
"(",
"d",
",",
"unflatten_level",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"newd",
"=",
"{",
"k",
":",
"(",
"func",
"(",
"v",
",",
"*",
"*",
"kwargs",
")",
"if",
"k",
"[",
"-",
"1",
"]",
"==",
"leaf_key",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"flatd",
".",
"items",
"(",
")",
"}",
"if",
"new_name",
"is",
"not",
"None",
":",
"newd",
"=",
"{",
"(",
"tuple",
"(",
"list",
"(",
"k",
"[",
":",
"-",
"1",
"]",
")",
"+",
"[",
"new_name",
"]",
")",
"if",
"k",
"[",
"-",
"1",
"]",
"==",
"leaf_key",
"else",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"newd",
".",
"items",
"(",
")",
"}",
"if",
"not",
"remove_lkey",
":",
"newd",
".",
"update",
"(",
"flatd",
")",
"return",
"unflatten",
"(",
"newd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | apply a function to all values with a certain leaf (terminal) key
Parameters
----------
d : dict
leaf_key : str
name of leaf key
func : callable
function to apply
new_name : str
if not None, rename leaf_key
remove_lkey: bool
whether to remove original leaf_key (if new_name is not None)
list_of_dicts: bool
treat list of dicts as additional branches
unflatten_level : int or None
the number of levels to leave unflattened before applying,
for instance if you need dicts as inputs
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to pass to the function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':1}
>>> func = lambda x: x+1
>>> pprint(apply(d,'a',func))
{'a': 2, 'b': 1}
>>> pprint(apply(d,'a',func,new_name='c'))
{'b': 1, 'c': 2}
>>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False))
{'a': 1, 'b': 1, 'c': 2}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2))
{'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]} | [
"apply",
"a",
"function",
"to",
"all",
"values",
"with",
"a",
"certain",
"leaf",
"(",
"terminal",
")",
"key"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1452-L1508 |
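The doctests above do not exercise the kwargs pass-through; a minimal sketch of that path, assuming the same jsonextended.edict import (the shift function and offset keyword are illustrative):

from pprint import pprint
from jsonextended import edict

def shift(x, offset=0):
    # extra keyword arguments given to apply are forwarded to this function
    return x + offset

pprint(edict.apply({'a': 1, 'b': 1}, 'a', shift, offset=10))
# {'a': 11, 'b': 1}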
chrisjsewell/jsonextended | jsonextended/edict.py | combine_apply | def combine_apply(d, leaf_keys, func, new_name,
unflatten_level=1, remove_lkeys=True, overwrite=False,
list_of_dicts=False, deepcopy=True, **kwargs):
""" combine values with certain leaf (terminal) keys by a function
Parameters
----------
d : dict
leaf_keys : list
names of leaf keys
func : callable
function to apply,
must take at least len(leaf_keys) arguments
new_name : str
new key name
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs (None means all)
remove_lkeys: bool
whether to remove original leaf_keys
overwrite: bool
whether to overwrite any existing new_name key
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to pass to the function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':2}
>>> func = lambda x,y: x+y
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{'c': 3}
>>> pprint(combine_apply(d,['a','b'],func,'c',remove_lkeys=False))
{'a': 1, 'b': 2, 'c': 3}
>>> d = {1:{'a':1,'b':2},2:{'a':4,'b':5},3:{'a':1}}
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{1: {'c': 3}, 2: {'c': 9}, 3: {'a': 1}}
>>> func2 = lambda x: sorted(list(x.keys()))
>>> d2 = {'d':{'a':{'b':1,'c':2}}}
>>> pprint(combine_apply(d2,['a'],func2,'a',unflatten_level=2))
{'d': {'a': ['b', 'c']}}
"""
list_of_dicts = '__list__' if list_of_dicts else None
if unflatten_level is not None:
flatd = flattennd(d, levels=unflatten_level,
list_of_dicts=list_of_dicts)
else:
# TODO could do this better?
flatd = unflatten(d, key_as_tuple=False,
delim='*@#$', deepcopy=deepcopy)
for dic in flatd.values():
if not is_dict_like(dic):
continue
if all([k in list(dic.keys()) for k in leaf_keys]):
if remove_lkeys:
vals = [dic.pop(k) for k in leaf_keys]
else:
vals = [dic[k] for k in leaf_keys]
if new_name in dic and not overwrite:
raise ValueError('{} already in sub-dict'.format(new_name))
dic[new_name] = func(*vals, **kwargs)
if unflatten_level is not None:
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
else:
return flatd | python | def combine_apply(d, leaf_keys, func, new_name,
unflatten_level=1, remove_lkeys=True, overwrite=False,
list_of_dicts=False, deepcopy=True, **kwargs):
""" combine values with certain leaf (terminal) keys by a function
Parameters
----------
d : dict
leaf_keys : list
names of leaf keys
func : callable
function to apply,
must take at least len(leaf_keys) arguments
new_name : str
new key name
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs (None means all)
remove_lkeys: bool
whether to remove original leaf_keys
overwrite: bool
whether to overwrite any existing new_name key
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to pass to the function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':2}
>>> func = lambda x,y: x+y
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{'c': 3}
>>> pprint(combine_apply(d,['a','b'],func,'c',remove_lkeys=False))
{'a': 1, 'b': 2, 'c': 3}
>>> d = {1:{'a':1,'b':2},2:{'a':4,'b':5},3:{'a':1}}
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{1: {'c': 3}, 2: {'c': 9}, 3: {'a': 1}}
>>> func2 = lambda x: sorted(list(x.keys()))
>>> d2 = {'d':{'a':{'b':1,'c':2}}}
>>> pprint(combine_apply(d2,['a'],func2,'a',unflatten_level=2))
{'d': {'a': ['b', 'c']}}
"""
list_of_dicts = '__list__' if list_of_dicts else None
if unflatten_level is not None:
flatd = flattennd(d, levels=unflatten_level,
list_of_dicts=list_of_dicts)
else:
# TODO could do this better?
flatd = unflatten(d, key_as_tuple=False,
delim='*@#$', deepcopy=deepcopy)
for dic in flatd.values():
if not is_dict_like(dic):
continue
if all([k in list(dic.keys()) for k in leaf_keys]):
if remove_lkeys:
vals = [dic.pop(k) for k in leaf_keys]
else:
vals = [dic[k] for k in leaf_keys]
if new_name in dic and not overwrite:
raise ValueError('{} already in sub-dict'.format(new_name))
dic[new_name] = func(*vals, **kwargs)
if unflatten_level is not None:
return unflatten(flatd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
else:
return flatd | [
"def",
"combine_apply",
"(",
"d",
",",
"leaf_keys",
",",
"func",
",",
"new_name",
",",
"unflatten_level",
"=",
"1",
",",
"remove_lkeys",
"=",
"True",
",",
"overwrite",
"=",
"False",
",",
"list_of_dicts",
"=",
"False",
",",
"deepcopy",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"list_of_dicts",
"=",
"'__list__'",
"if",
"list_of_dicts",
"else",
"None",
"if",
"unflatten_level",
"is",
"not",
"None",
":",
"flatd",
"=",
"flattennd",
"(",
"d",
",",
"levels",
"=",
"unflatten_level",
",",
"list_of_dicts",
"=",
"list_of_dicts",
")",
"else",
":",
"# TODO could do this better?",
"flatd",
"=",
"unflatten",
"(",
"d",
",",
"key_as_tuple",
"=",
"False",
",",
"delim",
"=",
"'*@#$'",
",",
"deepcopy",
"=",
"deepcopy",
")",
"for",
"dic",
"in",
"flatd",
".",
"values",
"(",
")",
":",
"if",
"not",
"is_dict_like",
"(",
"dic",
")",
":",
"continue",
"if",
"all",
"(",
"[",
"k",
"in",
"list",
"(",
"dic",
".",
"keys",
"(",
")",
")",
"for",
"k",
"in",
"leaf_keys",
"]",
")",
":",
"if",
"remove_lkeys",
":",
"vals",
"=",
"[",
"dic",
".",
"pop",
"(",
"k",
")",
"for",
"k",
"in",
"leaf_keys",
"]",
"else",
":",
"vals",
"=",
"[",
"dic",
"[",
"k",
"]",
"for",
"k",
"in",
"leaf_keys",
"]",
"if",
"new_name",
"in",
"dic",
"and",
"not",
"overwrite",
":",
"raise",
"ValueError",
"(",
"'{} already in sub-dict'",
".",
"format",
"(",
"new_name",
")",
")",
"dic",
"[",
"new_name",
"]",
"=",
"func",
"(",
"*",
"vals",
",",
"*",
"*",
"kwargs",
")",
"if",
"unflatten_level",
"is",
"not",
"None",
":",
"return",
"unflatten",
"(",
"flatd",
",",
"list_of_dicts",
"=",
"list_of_dicts",
",",
"deepcopy",
"=",
"deepcopy",
")",
"else",
":",
"return",
"flatd"
] | combine values with certain leaf (terminal) keys by a function
Parameters
----------
d : dict
leaf_keys : list
names of leaf keys
func : callable
function to apply,
must take at least len(leaf_keys) arguments
new_name : str
new key name
unflatten_level : int or None
the number of levels to leave unflattened before combining,
for instance if you need dicts as inputs (None means all)
remove_lkeys: bool
whether to remove original leaf_keys
overwrite: bool
whether to overwrite any existing new_name key
list_of_dicts: bool
treat list of dicts as additional branches
deepcopy: bool
deepcopy values
kwargs : dict
additional keywords to pass to the function
Examples
--------
>>> from pprint import pprint
>>> d = {'a':1,'b':2}
>>> func = lambda x,y: x+y
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{'c': 3}
>>> pprint(combine_apply(d,['a','b'],func,'c',remove_lkeys=False))
{'a': 1, 'b': 2, 'c': 3}
>>> d = {1:{'a':1,'b':2},2:{'a':4,'b':5},3:{'a':1}}
>>> pprint(combine_apply(d,['a','b'],func,'c'))
{1: {'c': 3}, 2: {'c': 9}, 3: {'a': 1}}
>>> func2 = lambda x: sorted(list(x.keys()))
>>> d2 = {'d':{'a':{'b':1,'c':2}}}
>>> pprint(combine_apply(d2,['a'],func2,'a',unflatten_level=2))
{'d': {'a': ['b', 'c']}} | [
"combine",
"values",
"with",
"certain",
"leaf",
"(",
"terminal",
")",
"keys",
"by",
"a",
"function"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1511-L1585 |
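A minimal sketch of the overwrite flag, which the doctests above leave untested, assuming the same jsonextended.edict import (the values and the hypotenuse lambda are illustrative):

from pprint import pprint
from jsonextended import edict

d = {'a': 3, 'b': 4, 'c': 0}
# 'c' already exists, so overwrite=True is required; otherwise ValueError is raised
pprint(edict.combine_apply(d, ['a', 'b'], lambda x, y: (x**2 + y**2) ** 0.5,
                           'c', overwrite=True))
# {'c': 5.0}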
chrisjsewell/jsonextended | jsonextended/edict.py | split_lists | def split_lists(d, split_keys, new_name='split',
check_length=True, deepcopy=True):
"""split_lists key:list pairs into dicts for each item in the lists
NB: will only split if all split_keys are present
Parameters
----------
d : dict
split_keys : list
keys to split
new_name : str
top level key for split items
check_length : bool
if true, raise error if any lists are of a different length
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'path_key':{'x':[1,2],'y':[3,4],'a':1}}
>>> new_d = split_lists(d,['x','y'])
>>> pprint(new_d)
{'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> split_lists(d,['x','a'])
Traceback (most recent call last):
...
ValueError: "a" data at the following path is not a list ('path_key',)
>>> d2 = {'path_key':{'x':[1,7],'y':[3,4,5]}}
>>> split_lists(d2,['x','y'])
Traceback (most recent call last):
...
ValueError: lists at the following path do not have the same size ('path_key',)
""" # noqa: E501
flattened = flatten2d(d)
new_d = {}
for key, value in flattened.items():
if set(split_keys).issubset(value.keys()):
# combine_d = {}
combine_d = []
sub_d = {}
length = None
for subkey, subvalue in value.items():
if subkey in split_keys:
if not isinstance(subvalue, list):
raise ValueError(
'"{0}" data at the following path is not a list '
'{1}'.format(subkey, key))
if check_length and length is not None:
if len(subvalue) != length:
raise ValueError(
'lists at the following path '
'do not have the same size {0}'.format(key))
if length is None:
combine_d = [{subkey: v} for v in subvalue]
else:
for item, val in zip(combine_d, subvalue):
item[subkey] = val
length = len(subvalue)
# new_combine = {k:{subkey:v}
# for k,v in enumerate(subvalue)}
# combine_d = merge([combine_d,new_combine])
else:
sub_d[subkey] = subvalue
try:
new_d[key] = merge([sub_d, {new_name: combine_d}])
except ValueError:
raise ValueError(
'split data key: {0}, already exists at '
'this level for {1}'.format(new_name, key))
else:
new_d[key] = value
return unflatten(new_d, deepcopy=deepcopy) | python | def split_lists(d, split_keys, new_name='split',
check_length=True, deepcopy=True):
"""split_lists key:list pairs into dicts for each item in the lists
NB: will only split if all split_keys are present
Parameters
----------
d : dict
split_keys : list
keys to split
new_name : str
top level key for split items
check_length : bool
if true, raise error if any lists are of a different length
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'path_key':{'x':[1,2],'y':[3,4],'a':1}}
>>> new_d = split_lists(d,['x','y'])
>>> pprint(new_d)
{'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> split_lists(d,['x','a'])
Traceback (most recent call last):
...
ValueError: "a" data at the following path is not a list ('path_key',)
>>> d2 = {'path_key':{'x':[1,7],'y':[3,4,5]}}
>>> split_lists(d2,['x','y'])
Traceback (most recent call last):
...
ValueError: lists at the following path do not have the same size ('path_key',)
""" # noqa: E501
flattened = flatten2d(d)
new_d = {}
for key, value in flattened.items():
if set(split_keys).issubset(value.keys()):
# combine_d = {}
combine_d = []
sub_d = {}
length = None
for subkey, subvalue in value.items():
if subkey in split_keys:
if not isinstance(subvalue, list):
raise ValueError(
'"{0}" data at the following path is not a list '
'{1}'.format(subkey, key))
if check_length and length is not None:
if len(subvalue) != length:
raise ValueError(
'lists at the following path '
'do not have the same size {0}'.format(key))
if length is None:
combine_d = [{subkey: v} for v in subvalue]
else:
for item, val in zip(combine_d, subvalue):
item[subkey] = val
length = len(subvalue)
# new_combine = {k:{subkey:v}
# for k,v in enumerate(subvalue)}
# combine_d = merge([combine_d,new_combine])
else:
sub_d[subkey] = subvalue
try:
new_d[key] = merge([sub_d, {new_name: combine_d}])
except ValueError:
raise ValueError(
'split data key: {0}, already exists at '
'this level for {1}'.format(new_name, key))
else:
new_d[key] = value
return unflatten(new_d, deepcopy=deepcopy) | [
"def",
"split_lists",
"(",
"d",
",",
"split_keys",
",",
"new_name",
"=",
"'split'",
",",
"check_length",
"=",
"True",
",",
"deepcopy",
"=",
"True",
")",
":",
"# noqa: E501",
"flattened",
"=",
"flatten2d",
"(",
"d",
")",
"new_d",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"flattened",
".",
"items",
"(",
")",
":",
"if",
"set",
"(",
"split_keys",
")",
".",
"issubset",
"(",
"value",
".",
"keys",
"(",
")",
")",
":",
"# combine_d = {}",
"combine_d",
"=",
"[",
"]",
"sub_d",
"=",
"{",
"}",
"length",
"=",
"None",
"for",
"subkey",
",",
"subvalue",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"subkey",
"in",
"split_keys",
":",
"if",
"not",
"isinstance",
"(",
"subvalue",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"'\"{0}\" data at the following path is not a list '",
"'{1}'",
".",
"format",
"(",
"subkey",
",",
"key",
")",
")",
"if",
"check_length",
"and",
"length",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"subvalue",
")",
"!=",
"length",
":",
"raise",
"ValueError",
"(",
"'lists at the following path '",
"'do not have the same size {0}'",
".",
"format",
"(",
"key",
")",
")",
"if",
"length",
"is",
"None",
":",
"combine_d",
"=",
"[",
"{",
"subkey",
":",
"v",
"}",
"for",
"v",
"in",
"subvalue",
"]",
"else",
":",
"for",
"item",
",",
"val",
"in",
"zip",
"(",
"combine_d",
",",
"subvalue",
")",
":",
"item",
"[",
"subkey",
"]",
"=",
"val",
"length",
"=",
"len",
"(",
"subvalue",
")",
"# new_combine = {k:{subkey:v}",
"# for k,v in enumerate(subvalue)}",
"# combine_d = merge([combine_d,new_combine])",
"else",
":",
"sub_d",
"[",
"subkey",
"]",
"=",
"subvalue",
"try",
":",
"new_d",
"[",
"key",
"]",
"=",
"merge",
"(",
"[",
"sub_d",
",",
"{",
"new_name",
":",
"combine_d",
"}",
"]",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'split data key: {0}, already exists at '",
"'this level for {1}'",
".",
"format",
"(",
"new_name",
",",
"key",
")",
")",
"else",
":",
"new_d",
"[",
"key",
"]",
"=",
"value",
"return",
"unflatten",
"(",
"new_d",
",",
"deepcopy",
"=",
"deepcopy",
")"
] | split_lists key:list pairs into dicts for each item in the lists
NB: will only split if all split_keys are present
Parameters
----------
d : dict
split_keys : list
keys to split
new_name : str
top level key for split items
check_length : bool
if true, raise error if any lists are of a different length
deepcopy: bool
deepcopy values
Examples
--------
>>> from pprint import pprint
>>> d = {'path_key':{'x':[1,2],'y':[3,4],'a':1}}
>>> new_d = split_lists(d,['x','y'])
>>> pprint(new_d)
{'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> split_lists(d,['x','a'])
Traceback (most recent call last):
...
ValueError: "a" data at the following path is not a list ('path_key',)
>>> d2 = {'path_key':{'x':[1,7],'y':[3,4,5]}}
>>> split_lists(d2,['x','y'])
Traceback (most recent call last):
...
ValueError: lists at the following path do not have the same size ('path_key',) | [
"split_lists",
"key",
":",
"list",
"pairs",
"into",
"dicts",
"for",
"each",
"item",
"in",
"the",
"lists",
"NB",
":",
"will",
"only",
"split",
"if",
"all",
"split_keys",
"are",
"present"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1588-L1670 |
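A minimal sketch of split_lists with a custom new_name, assuming the jsonextended.edict import (the 'run'/'points' names are illustrative); non-split keys such as 'meta' are kept alongside the new branch:

from pprint import pprint
from jsonextended import edict

d = {'run': {'x': [1, 2], 'y': [3, 4], 'meta': 'ok'}}
pprint(edict.split_lists(d, ['x', 'y'], new_name='points'))
# {'run': {'meta': 'ok', 'points': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}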
chrisjsewell/jsonextended | jsonextended/edict.py | combine_lists | def combine_lists(d, keys=None, deepcopy=True):
"""combine lists of dicts
Parameters
----------
d : dict or list[dict]
keys : list
keys to combine (all if None)
deepcopy: bool
deepcopy values
Example
-------
>>> from pprint import pprint
>>> d = {'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> pprint(combine_lists(d,['split']))
{'path_key': {'a': 1, 'split': {'x': [1, 2], 'y': [3, 4]}}}
>>> combine_lists([{"a":2}, {"a":1}])
{'a': [2, 1]}
""" # noqa: E501
if isinstance(d, list):
init_list = True
d = {'dummy_key843': d}
else:
init_list = False
flattened = flatten(d, list_of_dicts=None)
for key, value in list(flattened.items()):
if keys is not None:
try:
if not key[-1] in keys:
continue
except Exception:
continue
if not isinstance(value, list):
continue
if not all([is_dict_like(d) for d in value]):
continue
newd = {}
for subdic in value:
for subk, subv in subdic.items():
if subk not in newd:
newd[subk] = []
newd[subk].append(subv)
flattened[key] = newd
final = unflatten(flattened, list_of_dicts=None, deepcopy=deepcopy)
if init_list:
return list(final.values())[0]
else:
return final | python | def combine_lists(d, keys=None, deepcopy=True):
"""combine lists of dicts
Parameters
----------
d : dict or list[dict]
keys : list
keys to combine (all if None)
deepcopy: bool
deepcopy values
Example
-------
>>> from pprint import pprint
>>> d = {'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> pprint(combine_lists(d,['split']))
{'path_key': {'a': 1, 'split': {'x': [1, 2], 'y': [3, 4]}}}
>>> combine_lists([{"a":2}, {"a":1}])
{'a': [2, 1]}
""" # noqa: E501
if isinstance(d, list):
init_list = True
d = {'dummy_key843': d}
else:
init_list = False
flattened = flatten(d, list_of_dicts=None)
for key, value in list(flattened.items()):
if keys is not None:
try:
if not key[-1] in keys:
continue
except Exception:
continue
if not isinstance(value, list):
continue
if not all([is_dict_like(d) for d in value]):
continue
newd = {}
for subdic in value:
for subk, subv in subdic.items():
if subk not in newd:
newd[subk] = []
newd[subk].append(subv)
flattened[key] = newd
final = unflatten(flattened, list_of_dicts=None, deepcopy=deepcopy)
if init_list:
return list(final.values())[0]
else:
return final | [
"def",
"combine_lists",
"(",
"d",
",",
"keys",
"=",
"None",
",",
"deepcopy",
"=",
"True",
")",
":",
"# noqa: E501",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"init_list",
"=",
"True",
"d",
"=",
"{",
"'dummy_key843'",
":",
"d",
"}",
"else",
":",
"init_list",
"=",
"False",
"flattened",
"=",
"flatten",
"(",
"d",
",",
"list_of_dicts",
"=",
"None",
")",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"flattened",
".",
"items",
"(",
")",
")",
":",
"if",
"keys",
"is",
"not",
"None",
":",
"try",
":",
"if",
"not",
"key",
"[",
"-",
"1",
"]",
"in",
"keys",
":",
"continue",
"except",
"Exception",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"continue",
"if",
"not",
"all",
"(",
"[",
"is_dict_like",
"(",
"d",
")",
"for",
"d",
"in",
"value",
"]",
")",
":",
"continue",
"newd",
"=",
"{",
"}",
"for",
"subdic",
"in",
"value",
":",
"for",
"subk",
",",
"subv",
"in",
"subdic",
".",
"items",
"(",
")",
":",
"if",
"subk",
"not",
"in",
"newd",
":",
"newd",
"[",
"subk",
"]",
"=",
"[",
"]",
"newd",
"[",
"subk",
"]",
".",
"append",
"(",
"subv",
")",
"flattened",
"[",
"key",
"]",
"=",
"newd",
"final",
"=",
"unflatten",
"(",
"flattened",
",",
"list_of_dicts",
"=",
"None",
",",
"deepcopy",
"=",
"deepcopy",
")",
"if",
"init_list",
":",
"return",
"list",
"(",
"final",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"else",
":",
"return",
"final"
] | combine lists of dicts
Parameters
----------
d : dict or list[dict]
keys : list
keys to combine (all if None)
deepcopy: bool
deepcopy values
Example
-------
>>> from pprint import pprint
>>> d = {'path_key': {'a': 1, 'split': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
>>> pprint(combine_lists(d,['split']))
{'path_key': {'a': 1, 'split': {'x': [1, 2], 'y': [3, 4]}}}
>>> combine_lists([{"a":2}, {"a":1}])
{'a': [2, 1]} | [
"combine",
"lists",
"of",
"dicts"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1673-L1727 |
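combine_lists is roughly the inverse of split_lists above; a minimal round-trip sketch under the same import assumption:

from pprint import pprint
from jsonextended import edict

split = {'run': {'points': [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]}}
pprint(edict.combine_lists(split, ['points']))
# {'run': {'points': {'x': [1, 2], 'y': [3, 4]}}}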
chrisjsewell/jsonextended | jsonextended/edict.py | list_to_dict | def list_to_dict(lst, key=None, remove_key=True):
""" convert a list of dicts to a dict with root keys
Parameters
----------
lst : list[dict]
key : str or None
a key contained by all of the dicts
if None use index number string
remove_key : bool
remove key from dicts in list
Examples
--------
>>> from pprint import pprint
>>> lst = [{'name':'f','b':1},{'name':'g','c':2}]
>>> pprint(list_to_dict(lst))
{'0': {'b': 1, 'name': 'f'}, '1': {'c': 2, 'name': 'g'}}
>>> pprint(list_to_dict(lst,'name'))
{'f': {'b': 1}, 'g': {'c': 2}}
"""
assert all([is_dict_like(d) for d in lst])
if key is not None:
assert all([key in d for d in lst])
new_dict = {}
for i, d in enumerate(lst):
d = unflatten(flatten(d))
if key is None:
new_dict[str(i)] = d
else:
if remove_key:
k = d.pop(key)
else:
k = d[key]
new_dict[k] = d
return new_dict | python | def list_to_dict(lst, key=None, remove_key=True):
""" convert a list of dicts to a dict with root keys
Parameters
----------
lst : list[dict]
key : str or None
a key contained by all of the dicts
if None use index number string
remove_key : bool
remove key from dicts in list
Examples
--------
>>> from pprint import pprint
>>> lst = [{'name':'f','b':1},{'name':'g','c':2}]
>>> pprint(list_to_dict(lst))
{'0': {'b': 1, 'name': 'f'}, '1': {'c': 2, 'name': 'g'}}
>>> pprint(list_to_dict(lst,'name'))
{'f': {'b': 1}, 'g': {'c': 2}}
"""
assert all([is_dict_like(d) for d in lst])
if key is not None:
assert all([key in d for d in lst])
new_dict = {}
for i, d in enumerate(lst):
d = unflatten(flatten(d))
if key is None:
new_dict[str(i)] = d
else:
if remove_key:
k = d.pop(key)
else:
k = d[key]
new_dict[k] = d
return new_dict | [
"def",
"list_to_dict",
"(",
"lst",
",",
"key",
"=",
"None",
",",
"remove_key",
"=",
"True",
")",
":",
"assert",
"all",
"(",
"[",
"is_dict_like",
"(",
"d",
")",
"for",
"d",
"in",
"lst",
"]",
")",
"if",
"key",
"is",
"not",
"None",
":",
"assert",
"all",
"(",
"[",
"key",
"in",
"d",
"for",
"d",
"in",
"lst",
"]",
")",
"new_dict",
"=",
"{",
"}",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"lst",
")",
":",
"d",
"=",
"unflatten",
"(",
"flatten",
"(",
"d",
")",
")",
"if",
"key",
"is",
"None",
":",
"new_dict",
"[",
"str",
"(",
"i",
")",
"]",
"=",
"d",
"else",
":",
"if",
"remove_key",
":",
"k",
"=",
"d",
".",
"pop",
"(",
"key",
")",
"else",
":",
"k",
"=",
"d",
"[",
"key",
"]",
"new_dict",
"[",
"k",
"]",
"=",
"d",
"return",
"new_dict"
] | convert a list of dicts to a dict with root keys
Parameters
----------
lst : list[dict]
key : str or None
a key contained by all of the dicts
if None use index number string
remove_key : bool
remove key from dicts in list
Examples
--------
>>> from pprint import pprint
>>> lst = [{'name':'f','b':1},{'name':'g','c':2}]
>>> pprint(list_to_dict(lst))
{'0': {'b': 1, 'name': 'f'}, '1': {'c': 2, 'name': 'g'}}
>>> pprint(list_to_dict(lst,'name'))
{'f': {'b': 1}, 'g': {'c': 2}} | [
"convert",
"a",
"list",
"of",
"dicts",
"to",
"a",
"dict",
"with",
"root",
"keys"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1730-L1769 |
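The doctests above always drop the key; a minimal sketch of remove_key=False, assuming the same import (the record contents are illustrative):

from pprint import pprint
from jsonextended import edict

records = [{'name': 'alpha', 'value': 1}, {'name': 'beta', 'value': 2}]
# use 'name' as the root key while also keeping it inside each record
pprint(edict.list_to_dict(records, key='name', remove_key=False))
# {'alpha': {'name': 'alpha', 'value': 1}, 'beta': {'name': 'beta', 'value': 2}}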
chrisjsewell/jsonextended | jsonextended/edict.py | diff | def diff(new_dict, old_dict, iter_prefix='__iter__',
np_allclose=False, **kwargs):
""" return the difference between two dict_like objects
Parameters
----------
new_dict: dict
old_dict: dict
iter_prefix: str
prefix to use for list and tuple indexes
np_allclose: bool
if True, try using numpy.allclose to assess differences
**kwargs:
keyword arguments to pass to numpy.allclose
Returns
-------
outcome: dict
Containing zero or more of:
- "insertions" : list of (path, val)
- "deletions" : list of (path, val)
- "changes" : list of (path, (val1, val2))
- "uncomparable" : list of (path, (val1, val2))
Examples
--------
>>> from pprint import pprint
>>> diff({'a':1},{'a':1})
{}
>>> pprint(diff({'a': 1, 'b': 2, 'c': 5},{'b': 3, 'c': 4, 'd': 6}))
{'changes': [(('b',), (2, 3)), (('c',), (5, 4))],
'deletions': [(('d',), 6)],
'insertions': [(('a',), 1)]}
>>> pprint(diff({'a': [{"b":1}, {"c":2}, 1]},{'a': [{"b":1}, {"d":2}, 2]}))
{'changes': [(('a', '__iter__2'), (1, 2))],
'deletions': [(('a', '__iter__1', 'd'), 2)],
'insertions': [(('a', '__iter__1', 'c'), 2)]}
>>> diff({'a':1}, {'a':1+1e-10})
{'changes': [(('a',), (1, 1.0000000001))]}
>>> diff({'a':1}, {'a':1+1e-10}, np_allclose=True)
{}
"""
if np_allclose:
try:
import numpy
except ImportError:
raise ValueError("to use np_allclose, numpy must be installed")
dct1_flat = flatten(new_dict, all_iters=iter_prefix)
dct2_flat = flatten(old_dict, all_iters=iter_prefix)
outcome = {'insertions': [], 'deletions': [],
'changes': [], 'uncomparable': []}
for path, val in dct1_flat.items():
if path not in dct2_flat:
outcome['insertions'].append((path, val))
continue
other_val = dct2_flat.pop(path)
if np_allclose:
try:
if numpy.allclose(val, other_val, **kwargs):
continue
except Exception:
pass
try:
if val != other_val:
outcome['changes'].append((path, (val, other_val)))
except Exception:
outcome['uncomparable'].append((path, (val, other_val)))
for path2, val2 in dct2_flat.items():
outcome['deletions'].append((path2, val2))
# remove any empty lists and sort
for key in list(outcome.keys()):
if not outcome[key]:
outcome.pop(key)
try:
outcome[key] = sorted(outcome[key])
except Exception:
pass
return outcome | python | def diff(new_dict, old_dict, iter_prefix='__iter__',
np_allclose=False, **kwargs):
""" return the difference between two dict_like objects
Parameters
----------
new_dict: dict
old_dict: dict
iter_prefix: str
prefix to use for list and tuple indexes
np_allclose: bool
if True, try using numpy.allclose to assess differences
**kwargs:
keyword arguments to pass to numpy.allclose
Returns
-------
outcome: dict
Containing zero or more of:
- "insertions" : list of (path, val)
- "deletions" : list of (path, val)
- "changes" : list of (path, (val1, val2))
- "uncomparable" : list of (path, (val1, val2))
Examples
--------
>>> from pprint import pprint
>>> diff({'a':1},{'a':1})
{}
>>> pprint(diff({'a': 1, 'b': 2, 'c': 5},{'b': 3, 'c': 4, 'd': 6}))
{'changes': [(('b',), (2, 3)), (('c',), (5, 4))],
'deletions': [(('d',), 6)],
'insertions': [(('a',), 1)]}
>>> pprint(diff({'a': [{"b":1}, {"c":2}, 1]},{'a': [{"b":1}, {"d":2}, 2]}))
{'changes': [(('a', '__iter__2'), (1, 2))],
'deletions': [(('a', '__iter__1', 'd'), 2)],
'insertions': [(('a', '__iter__1', 'c'), 2)]}
>>> diff({'a':1}, {'a':1+1e-10})
{'changes': [(('a',), (1, 1.0000000001))]}
>>> diff({'a':1}, {'a':1+1e-10}, np_allclose=True)
{}
"""
if np_allclose:
try:
import numpy
except ImportError:
raise ValueError("to use np_allclose, numpy must be installed")
dct1_flat = flatten(new_dict, all_iters=iter_prefix)
dct2_flat = flatten(old_dict, all_iters=iter_prefix)
outcome = {'insertions': [], 'deletions': [],
'changes': [], 'uncomparable': []}
for path, val in dct1_flat.items():
if path not in dct2_flat:
outcome['insertions'].append((path, val))
continue
other_val = dct2_flat.pop(path)
if np_allclose:
try:
if numpy.allclose(val, other_val, **kwargs):
continue
except Exception:
pass
try:
if val != other_val:
outcome['changes'].append((path, (val, other_val)))
except Exception:
outcome['uncomparable'].append((path, (val, other_val)))
for path2, val2 in dct2_flat.items():
outcome['deletions'].append((path2, val2))
# remove any empty lists and sort
for key in list(outcome.keys()):
if not outcome[key]:
outcome.pop(key)
try:
outcome[key] = sorted(outcome[key])
except Exception:
pass
return outcome | [
"def",
"diff",
"(",
"new_dict",
",",
"old_dict",
",",
"iter_prefix",
"=",
"'__iter__'",
",",
"np_allclose",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"np_allclose",
":",
"try",
":",
"import",
"numpy",
"except",
"ImportError",
":",
"raise",
"ValueError",
"(",
"\"to use np_allclose, numpy must be installed\"",
")",
"dct1_flat",
"=",
"flatten",
"(",
"new_dict",
",",
"all_iters",
"=",
"iter_prefix",
")",
"dct2_flat",
"=",
"flatten",
"(",
"old_dict",
",",
"all_iters",
"=",
"iter_prefix",
")",
"outcome",
"=",
"{",
"'insertions'",
":",
"[",
"]",
",",
"'deletions'",
":",
"[",
"]",
",",
"'changes'",
":",
"[",
"]",
",",
"'uncomparable'",
":",
"[",
"]",
"}",
"for",
"path",
",",
"val",
"in",
"dct1_flat",
".",
"items",
"(",
")",
":",
"if",
"path",
"not",
"in",
"dct2_flat",
":",
"outcome",
"[",
"'insertions'",
"]",
".",
"append",
"(",
"(",
"path",
",",
"val",
")",
")",
"continue",
"other_val",
"=",
"dct2_flat",
".",
"pop",
"(",
"path",
")",
"if",
"np_allclose",
":",
"try",
":",
"if",
"numpy",
".",
"allclose",
"(",
"val",
",",
"other_val",
",",
"*",
"*",
"kwargs",
")",
":",
"continue",
"except",
"Exception",
":",
"pass",
"try",
":",
"if",
"val",
"!=",
"other_val",
":",
"outcome",
"[",
"'changes'",
"]",
".",
"append",
"(",
"(",
"path",
",",
"(",
"val",
",",
"other_val",
")",
")",
")",
"except",
"Exception",
":",
"outcome",
"[",
"'uncomparable'",
"]",
".",
"append",
"(",
"(",
"path",
",",
"(",
"val",
",",
"other_val",
")",
")",
")",
"for",
"path2",
",",
"val2",
"in",
"dct2_flat",
".",
"items",
"(",
")",
":",
"outcome",
"[",
"'deletions'",
"]",
".",
"append",
"(",
"(",
"path2",
",",
"val2",
")",
")",
"# remove any empty lists and sort",
"for",
"key",
"in",
"list",
"(",
"outcome",
".",
"keys",
"(",
")",
")",
":",
"if",
"not",
"outcome",
"[",
"key",
"]",
":",
"outcome",
".",
"pop",
"(",
"key",
")",
"try",
":",
"outcome",
"[",
"key",
"]",
"=",
"sorted",
"(",
"outcome",
"[",
"key",
"]",
")",
"except",
"Exception",
":",
"pass",
"return",
"outcome"
] | return the difference between two dict_like objects
Parameters
----------
new_dict: dict
old_dict: dict
iter_prefix: str
prefix to use for list and tuple indexes
np_allclose: bool
if True, try using numpy.allclose to assess differences
**kwargs:
keyword arguments to pass to numpy.allclose
Returns
-------
outcome: dict
Containing zero or more of:
- "insertions" : list of (path, val)
- "deletions" : list of (path, val)
- "changes" : list of (path, (val1, val2))
- "uncomparable" : list of (path, (val1, val2))
Examples
--------
>>> from pprint import pprint
>>> diff({'a':1},{'a':1})
{}
>>> pprint(diff({'a': 1, 'b': 2, 'c': 5},{'b': 3, 'c': 4, 'd': 6}))
{'changes': [(('b',), (2, 3)), (('c',), (5, 4))],
'deletions': [(('d',), 6)],
'insertions': [(('a',), 1)]}
>>> pprint(diff({'a': [{"b":1}, {"c":2}, 1]},{'a': [{"b":1}, {"d":2}, 2]}))
{'changes': [(('a', '__iter__2'), (1, 2))],
'deletions': [(('a', '__iter__1', 'd'), 2)],
'insertions': [(('a', '__iter__1', 'c'), 2)]}
>>> diff({'a':1}, {'a':1+1e-10})
{'changes': [(('a',), (1, 1.0000000001))]}
>>> diff({'a':1}, {'a':1+1e-10}, np_allclose=True)
{} | [
"return",
"the",
"difference",
"between",
"two",
"dict_like",
"objects"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1772-L1862 |
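A minimal sketch of forwarding tolerance keywords through **kwargs to numpy.allclose, assuming numpy is installed alongside jsonextended (the dicts are illustrative):

from pprint import pprint
from jsonextended import edict

old = {'energy': 1.0, 'steps': 10}
new = {'energy': 1.0 + 1e-12, 'steps': 11}
# atol is passed straight through to numpy.allclose, so the tiny float drift is ignored
pprint(edict.diff(new, old, np_allclose=True, atol=1e-9))
# {'changes': [(('steps',), (11, 10))]}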
chrisjsewell/jsonextended | jsonextended/edict.py | to_json | def to_json(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True, indent=2,
default_name='root.json', **kwargs):
""" output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,exists=False)
>>> dct = {'a':{'b':1}}
>>> to_json(dct, file_obj)
>>> print(file_obj.to_string())
File("test.json") Contents:
{
"a": {
"b": 1
}
}
>>> from jsonextended.utils import MockPath
>>> folder_obj = MockPath()
>>> dct = {'x':{'a':{'b':1},'c':{'d':3}}}
>>> to_json(dct, folder_obj, dirlevel=0,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
File("x.json") Contents:
{"a": {"b": 1}, "c": {"d": 3}}
>>> folder_obj = MockPath()
>>> to_json(dct, folder_obj, dirlevel=1,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
Folder("x")
File("a.json") Contents:
{"b": 1}
File("c.json") Contents:
{"d": 3}
"""
if hasattr(jfile, 'write'):
json.dump(dct, jfile, sort_keys=sort_keys,
indent=indent, default=encode)
return
if isinstance(jfile, basestring):
path = pathlib.Path(jfile)
else:
path = jfile
file_attrs = ['exists', 'is_dir', 'is_file', 'touch', 'open']
if not all([hasattr(path, attr) for attr in file_attrs]):
raise ValueError(
'jfile should be a str or file_like object: {}'.format(jfile))
if path.is_file() and path.exists() and not overwrite:
raise IOError('jfile already exists and '
'overwrite is set to false: {}'.format(jfile))
if not path.is_dir() and dirlevel <= 0:
path.touch() # try to create file if doesn't already exist
with path.open('w') as outfile:
outfile.write(unicode(json.dumps(
dct, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
return
if not path.is_dir():
path.mkdir()
dirlevel -= 1
# if one or more values is not a nested dict
if not all([hasattr(v, 'items') for v in dct.values()]):
newpath = path.joinpath(default_name)
newpath.touch()
with newpath.open('w') as outfile:
outfile.write(unicode(json.dumps(
dct, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
return
for key, val in dct.items():
if dirlevel <= 0:
newpath = path.joinpath('{}.json'.format(key))
newpath.touch()
with newpath.open('w') as outfile:
outfile.write(unicode(json.dumps(
val, ensure_ascii=False, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
else:
newpath = path.joinpath('{}'.format(key))
if not newpath.exists():
newpath.mkdir()
to_json(val, newpath, overwrite=overwrite, dirlevel=dirlevel - 1,
sort_keys=sort_keys, indent=indent,
default_name='{}.json'.format(key), **kwargs) | python | def to_json(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True, indent=2,
default_name='root.json', **kwargs):
""" output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,exists=False)
>>> dct = {'a':{'b':1}}
>>> to_json(dct, file_obj)
>>> print(file_obj.to_string())
File("test.json") Contents:
{
"a": {
"b": 1
}
}
>>> from jsonextended.utils import MockPath
>>> folder_obj = MockPath()
>>> dct = {'x':{'a':{'b':1},'c':{'d':3}}}
>>> to_json(dct, folder_obj, dirlevel=0,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
File("x.json") Contents:
{"a": {"b": 1}, "c": {"d": 3}}
>>> folder_obj = MockPath()
>>> to_json(dct, folder_obj, dirlevel=1,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
Folder("x")
File("a.json") Contents:
{"b": 1}
File("c.json") Contents:
{"d": 3}
"""
if hasattr(jfile, 'write'):
json.dump(dct, jfile, sort_keys=sort_keys,
indent=indent, default=encode)
return
if isinstance(jfile, basestring):
path = pathlib.Path(jfile)
else:
path = jfile
file_attrs = ['exists', 'is_dir', 'is_file', 'touch', 'open']
if not all([hasattr(path, attr) for attr in file_attrs]):
raise ValueError(
'jfile should be a str or file_like object: {}'.format(jfile))
if path.is_file() and path.exists() and not overwrite:
raise IOError('jfile already exists and '
'overwrite is set to false: {}'.format(jfile))
if not path.is_dir() and dirlevel <= 0:
path.touch() # try to create file if doesn't already exist
with path.open('w') as outfile:
outfile.write(unicode(json.dumps(
dct, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
return
if not path.is_dir():
path.mkdir()
dirlevel -= 1
# if one or more values is not a nested dict
if not all([hasattr(v, 'items') for v in dct.values()]):
newpath = path.joinpath(default_name)
newpath.touch()
with newpath.open('w') as outfile:
outfile.write(unicode(json.dumps(
dct, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
return
for key, val in dct.items():
if dirlevel <= 0:
newpath = path.joinpath('{}.json'.format(key))
newpath.touch()
with newpath.open('w') as outfile:
outfile.write(unicode(json.dumps(
val, ensure_ascii=False, sort_keys=sort_keys,
indent=indent, default=encode, **kwargs)))
else:
newpath = path.joinpath('{}'.format(key))
if not newpath.exists():
newpath.mkdir()
to_json(val, newpath, overwrite=overwrite, dirlevel=dirlevel - 1,
sort_keys=sort_keys, indent=indent,
default_name='{}.json'.format(key), **kwargs) | [
"def",
"to_json",
"(",
"dct",
",",
"jfile",
",",
"overwrite",
"=",
"False",
",",
"dirlevel",
"=",
"0",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
",",
"default_name",
"=",
"'root.json'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"jfile",
",",
"'write'",
")",
":",
"json",
".",
"dump",
"(",
"dct",
",",
"jfile",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default",
"=",
"encode",
")",
"return",
"if",
"isinstance",
"(",
"jfile",
",",
"basestring",
")",
":",
"path",
"=",
"pathlib",
".",
"Path",
"(",
"jfile",
")",
"else",
":",
"path",
"=",
"jfile",
"file_attrs",
"=",
"[",
"'exists'",
",",
"'is_dir'",
",",
"'is_file'",
",",
"'touch'",
",",
"'open'",
"]",
"if",
"not",
"all",
"(",
"[",
"hasattr",
"(",
"path",
",",
"attr",
")",
"for",
"attr",
"in",
"file_attrs",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'jfile should be a str or file_like object: {}'",
".",
"format",
"(",
"jfile",
")",
")",
"if",
"path",
".",
"is_file",
"(",
")",
"and",
"path",
".",
"exists",
"(",
")",
"and",
"not",
"overwrite",
":",
"raise",
"IOError",
"(",
"'jfile already exists and '",
"'overwrite is set to false: {}'",
".",
"format",
"(",
"jfile",
")",
")",
"if",
"not",
"path",
".",
"is_dir",
"(",
")",
"and",
"dirlevel",
"<=",
"0",
":",
"path",
".",
"touch",
"(",
")",
"# try to create file if doesn't already exist",
"with",
"path",
".",
"open",
"(",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"unicode",
"(",
"json",
".",
"dumps",
"(",
"dct",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default",
"=",
"encode",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"return",
"if",
"not",
"path",
".",
"is_dir",
"(",
")",
":",
"path",
".",
"mkdir",
"(",
")",
"dirlevel",
"-=",
"1",
"# if one or more values if not a nested dict",
"if",
"not",
"all",
"(",
"[",
"hasattr",
"(",
"v",
",",
"'items'",
")",
"for",
"v",
"in",
"dct",
".",
"values",
"(",
")",
"]",
")",
":",
"newpath",
"=",
"path",
".",
"joinpath",
"(",
"default_name",
")",
"newpath",
".",
"touch",
"(",
")",
"with",
"newpath",
".",
"open",
"(",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"unicode",
"(",
"json",
".",
"dumps",
"(",
"dct",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default",
"=",
"encode",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"return",
"for",
"key",
",",
"val",
"in",
"dct",
".",
"items",
"(",
")",
":",
"if",
"dirlevel",
"<=",
"0",
":",
"newpath",
"=",
"path",
".",
"joinpath",
"(",
"'{}.json'",
".",
"format",
"(",
"key",
")",
")",
"newpath",
".",
"touch",
"(",
")",
"with",
"newpath",
".",
"open",
"(",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"unicode",
"(",
"json",
".",
"dumps",
"(",
"val",
",",
"ensure_ascii",
"=",
"False",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default",
"=",
"encode",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"else",
":",
"newpath",
"=",
"path",
".",
"joinpath",
"(",
"'{}'",
".",
"format",
"(",
"key",
")",
")",
"if",
"not",
"newpath",
".",
"exists",
"(",
")",
":",
"newpath",
".",
"mkdir",
"(",
")",
"to_json",
"(",
"val",
",",
"newpath",
",",
"overwrite",
"=",
"overwrite",
",",
"dirlevel",
"=",
"dirlevel",
"-",
"1",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default_name",
"=",
"'{}.json'",
".",
"format",
"(",
"key",
")",
",",
"*",
"*",
"kwargs",
")"
] | output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump
Examples
--------
>>> from jsonextended.utils import MockPath
>>> file_obj = MockPath('test.json',is_file=True,exists=False)
>>> dct = {'a':{'b':1}}
>>> to_json(dct, file_obj)
>>> print(file_obj.to_string())
File("test.json") Contents:
{
"a": {
"b": 1
}
}
>>> from jsonextended.utils import MockPath
>>> folder_obj = MockPath()
>>> dct = {'x':{'a':{'b':1},'c':{'d':3}}}
>>> to_json(dct, folder_obj, dirlevel=0,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
File("x.json") Contents:
{"a": {"b": 1}, "c": {"d": 3}}
>>> folder_obj = MockPath()
>>> to_json(dct, folder_obj, dirlevel=1,indent=None)
>>> print(folder_obj.to_string(file_content=True))
Folder("root")
Folder("x")
File("a.json") Contents:
{"b": 1}
File("c.json") Contents:
{"d": 3} | [
"output",
"dict",
"to",
"json"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1865-L1978 |
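A minimal sketch of writing to a real directory rather than a MockPath, assuming jsonextended is installed (the 'output' path is illustrative); per the code above, a dirlevel=0 target is only treated as a folder if it already exists as one:

import pathlib
from jsonextended import edict

out = pathlib.Path('output')
out.mkdir(exist_ok=True)  # must exist first, or dirlevel=0 writes a single file named 'output'
edict.to_json({'x': {'a': {'b': 1}, 'c': {'d': 3}}}, out, dirlevel=0, indent=None)
# writes output/x.json containing {"a": {"b": 1}, "c": {"d": 3}}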
chrisjsewell/jsonextended | jsonextended/edict.py | dump | def dump(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True,
indent=2, default_name='root.json', **kwargs):
""" output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump
"""
to_json(dct, jfile, overwrite=overwrite, dirlevel=dirlevel,
sort_keys=sort_keys, indent=indent,
default_name=default_name, **kwargs) | python | def dump(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True,
indent=2, default_name='root.json', **kwargs):
""" output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump
"""
to_json(dct, jfile, overwrite=overwrite, dirlevel=dirlevel,
sort_keys=sort_keys, indent=indent,
default_name=default_name, **kwargs) | [
"def",
"dump",
"(",
"dct",
",",
"jfile",
",",
"overwrite",
"=",
"False",
",",
"dirlevel",
"=",
"0",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
",",
"default_name",
"=",
"'root.json'",
",",
"*",
"*",
"kwargs",
")",
":",
"to_json",
"(",
"dct",
",",
"jfile",
",",
"overwrite",
"=",
"overwrite",
",",
"dirlevel",
"=",
"dirlevel",
",",
"sort_keys",
"=",
"sort_keys",
",",
"indent",
"=",
"indent",
",",
"default_name",
"=",
"default_name",
",",
"*",
"*",
"kwargs",
")"
] | output dict to json
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump | [
"output",
"dict",
"to",
"json"
] | train | https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1981-L2005 |
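dump has no doctests; since it simply forwards every argument to to_json, a one-line sketch suffices (the filename is illustrative):

from jsonextended import edict

# equivalent to calling to_json with the same arguments
edict.dump({'a': {'b': 1}}, 'settings.json', overwrite=True)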
bjodah/pycompilation | pycompilation/util.py | expand_collection_in_dict | def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items | python | def expand_collection_in_dict(d, key, new_items, no_duplicates=True):
"""
Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True)
"""
if key in d:
if no_duplicates:
new_items = filter(lambda x: x not in d[key], new_items)
if isinstance(d[key], set):
map(d[key].add, new_items)
elif isinstance(d[key], list):
map(d[key].append, new_items)
else:
d[key] = d[key] + new_items
else:
d[key] = new_items | [
"def",
"expand_collection_in_dict",
"(",
"d",
",",
"key",
",",
"new_items",
",",
"no_duplicates",
"=",
"True",
")",
":",
"if",
"key",
"in",
"d",
":",
"if",
"no_duplicates",
":",
"new_items",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
"not",
"in",
"d",
"[",
"key",
"]",
",",
"new_items",
")",
"if",
"isinstance",
"(",
"d",
"[",
"key",
"]",
",",
"set",
")",
":",
"map",
"(",
"d",
"[",
"key",
"]",
".",
"add",
",",
"new_items",
")",
"elif",
"isinstance",
"(",
"d",
"[",
"key",
"]",
",",
"list",
")",
":",
"map",
"(",
"d",
"[",
"key",
"]",
".",
"append",
",",
"new_items",
")",
"else",
":",
"d",
"[",
"key",
"]",
"=",
"d",
"[",
"key",
"]",
"+",
"new_items",
"else",
":",
"d",
"[",
"key",
"]",
"=",
"new_items"
] | Parameters
d: dict
dict in which a key will be inserted/expanded
key: hashable
key in d
new_items: iterable
d[key] will be extended with items in new_items
no_duplicates: bool
avoid inserting duplicates in d[key] (default: True) | [
"Parameters",
"d",
":",
"dict",
"dict",
"in",
"which",
"a",
"key",
"will",
"be",
"inserted",
"/",
"expanded",
"key",
":",
"hashable",
"key",
"in",
"d",
"new_items",
":",
"iterable",
"d",
"[",
"key",
"]",
"will",
"be",
"extended",
"with",
"items",
"in",
"new_items",
"no_duplicates",
":",
"bool",
"avoid",
"inserting",
"duplicates",
"in",
"d",
"[",
"key",
"]",
"(",
"default",
":",
"True",
")"
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L22-L44 |
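One caveat worth flagging: the map/filter calls above are Python-2 idioms; under Python 3 map is lazy (nothing is appended) and list + filter(...) raises TypeError. A sketch of the same logic written for Python 3 (this rewrite is not part of the source):

def expand_collection_in_dict_py3(d, key, new_items, no_duplicates=True):
    # same semantics as the record above, with the lazy iterators forced eagerly
    if key in d:
        if no_duplicates:
            new_items = [x for x in new_items if x not in d[key]]
        if isinstance(d[key], set):
            d[key].update(new_items)
        elif isinstance(d[key], list):
            d[key].extend(new_items)
        else:
            d[key] = d[key] + list(new_items)
    else:
        d[key] = new_items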
bjodah/pycompilation | pycompilation/util.py | copy | def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destination
only_update: bool
only copy if source is newer than destination
(returns None if the copy was skipped), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Logger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file exists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possibly non-existent) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlink pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst | python | def copy(src, dst, only_update=False, copystat=True, cwd=None,
dest_is_dir=False, create_dest_dirs=False, logger=None):
"""
Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destination
only_update: bool
only copy if source is newer than destination
(returns None if the copy was skipped), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Logger
debug level info emitted. Passed onto make_dirs.
Returns
=======
Path to the copied file.
"""
# Handle virtual working directory
if cwd:
if not os.path.isabs(src):
src = os.path.join(cwd, src)
if not os.path.isabs(dst):
dst = os.path.join(cwd, dst)
# Make sure source file exists
if not os.path.exists(src):
# Source needs to exist
msg = "Source: `{}` does not exist".format(src)
raise FileNotFoundError(msg)
# We accept both (re)naming destination file _or_
# passing a (possibly non-existent) destination directory
if dest_is_dir:
if not dst[-1] == '/':
dst = dst+'/'
else:
if os.path.exists(dst) and os.path.isdir(dst):
dest_is_dir = True
if dest_is_dir:
dest_dir = dst
dest_fname = os.path.basename(src)
dst = os.path.join(dest_dir, dest_fname)
else:
dest_dir = os.path.dirname(dst)
dest_fname = os.path.basename(dst)
if not os.path.exists(dest_dir):
if create_dest_dirs:
make_dirs(dest_dir, logger=logger)
else:
msg = "You must create directory first."
raise FileNotFoundError(msg)
if only_update:
if not missing_or_other_newer(dst, src):
if logger:
logger.debug(
"Did not copy {} to {} (source not newer)".format(
src, dst))
return
if os.path.islink(dst):
if os.path.abspath(os.path.realpath(dst)) == \
os.path.abspath(dst):
pass # destination is a symlink pointing to src
else:
if logger:
logger.debug("Copying {} to {}".format(src, dst))
shutil.copy(src, dst)
if copystat:
shutil.copystat(src, dst)
return dst | [
"def",
"copy",
"(",
"src",
",",
"dst",
",",
"only_update",
"=",
"False",
",",
"copystat",
"=",
"True",
",",
"cwd",
"=",
"None",
",",
"dest_is_dir",
"=",
"False",
",",
"create_dest_dirs",
"=",
"False",
",",
"logger",
"=",
"None",
")",
":",
"# Handle virtual working directory",
"if",
"cwd",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"src",
")",
":",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"src",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"dst",
")",
":",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cwd",
",",
"dst",
")",
"# Make sure source file extists",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"src",
")",
":",
"# Source needs to exist",
"msg",
"=",
"\"Source: `{}` does not exist\"",
".",
"format",
"(",
"src",
")",
"raise",
"FileNotFoundError",
"(",
"msg",
")",
"# We accept both (re)naming destination file _or_",
"# passing a (possible non-existant) destination directory",
"if",
"dest_is_dir",
":",
"if",
"not",
"dst",
"[",
"-",
"1",
"]",
"==",
"'/'",
":",
"dst",
"=",
"dst",
"+",
"'/'",
"else",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
":",
"dest_is_dir",
"=",
"True",
"if",
"dest_is_dir",
":",
"dest_dir",
"=",
"dst",
"dest_fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
"dest_fname",
")",
"else",
":",
"dest_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dst",
")",
"dest_fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dst",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest_dir",
")",
":",
"if",
"create_dest_dirs",
":",
"make_dirs",
"(",
"dest_dir",
",",
"logger",
"=",
"logger",
")",
"else",
":",
"msg",
"=",
"\"You must create directory first.\"",
"raise",
"FileNotFoundError",
"(",
"msg",
")",
"if",
"only_update",
":",
"if",
"not",
"missing_or_other_newer",
"(",
"dst",
",",
"src",
")",
":",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"\"Did not copy {} to {} (source not newer)\"",
".",
"format",
"(",
"src",
",",
"dst",
")",
")",
"return",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"dst",
")",
":",
"if",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"dst",
")",
")",
"==",
"os",
".",
"path",
".",
"abspath",
"(",
"dst",
")",
":",
"pass",
"# destination is a symlic pointing to src",
"else",
":",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"\"Copying {} to {}\"",
".",
"format",
"(",
"src",
",",
"dst",
")",
")",
"shutil",
".",
"copy",
"(",
"src",
",",
"dst",
")",
"if",
"copystat",
":",
"shutil",
".",
"copystat",
"(",
"src",
",",
"dst",
")",
"return",
"dst"
] | Augmented shutil.copy with extra options and slightly
modified behaviour
Parameters
==========
src: string
path to source file
dst: string
path to destination
only_update: bool
only copy if source is newer than destination
(returns None if it was newer), default: False
copystat: bool
See shutil.copystat. default: True
cwd: string
Path to working directory (root of relative paths)
dest_is_dir: bool
ensures that dst is treated as a directory. default: False
create_dest_dirs: bool
creates directories if needed.
logger: logging.Logger
debug level info emitted. Passed on to make_dirs.
Returns
=======
Path to the copied file. | [
"Augmented",
"shutil",
".",
"copy",
"with",
"extra",
"options",
"and",
"slightly",
"modified",
"behaviour"
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L92-L178 |
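A minimal usage sketch for the copy helper above; the import path follows the repository layout, and the file names are illustrative assumptions:

    from pycompilation.util import copy

    # Copy a source file into a build directory, creating it on demand;
    # skip the copy when the destination is already up to date.
    dst = copy('src/main.c', 'build/', only_update=True,
               dest_is_dir=True, create_dest_dirs=True)
    # dst is the path of the copied file, or None when nothing was copied.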
bjodah/pycompilation | pycompilation/util.py | md5_of_file | def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md | python | def md5_of_file(path, nblocks=128):
"""
Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string.
"""
md = md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(nblocks*md.block_size), b''):
md.update(chunk)
return md | [
"def",
"md5_of_file",
"(",
"path",
",",
"nblocks",
"=",
"128",
")",
":",
"md",
"=",
"md5",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"nblocks",
"*",
"md",
".",
"block_size",
")",
",",
"b''",
")",
":",
"md",
".",
"update",
"(",
"chunk",
")",
"return",
"md"
] | Computes the md5 hash of a file.
Parameters
==========
path: string
path to file to compute hash of
Returns
=======
hashlib md5 hash object. Use .digest() or .hexdigest()
on returned object to get binary or hex encoded string. | [
"Computes",
"the",
"md5",
"hash",
"of",
"a",
"file",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L181-L199 |
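A brief sketch of using md5_of_file (the file name is a hypothetical example); the returned hashlib object exposes .digest() and .hexdigest():

    from pycompilation.util import md5_of_file

    # Stream the file in chunks of 128 md5 blocks and print the hex digest.
    print(md5_of_file('setup.py').hexdigest())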
bjodah/pycompilation | pycompilation/util.py | missing_or_other_newer | def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existent or older than provided reference
path.
Parameters
==========
path: string
path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed because http://stackoverflow.com/questions/17086426/
return True
return False | python | def missing_or_other_newer(path, other_path, cwd=None):
"""
Investigate if path is non-existent or older than provided reference
path.
Parameters
==========
path: string
path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing.
"""
cwd = cwd or '.'
path = get_abspath(path, cwd=cwd)
other_path = get_abspath(other_path, cwd=cwd)
if not os.path.exists(path):
return True
if os.path.getmtime(other_path) - 1e-6 >= os.path.getmtime(path):
# 1e-6 is needed because http://stackoverflow.com/questions/17086426/
return True
return False | [
"def",
"missing_or_other_newer",
"(",
"path",
",",
"other_path",
",",
"cwd",
"=",
"None",
")",
":",
"cwd",
"=",
"cwd",
"or",
"'.'",
"path",
"=",
"get_abspath",
"(",
"path",
",",
"cwd",
"=",
"cwd",
")",
"other_path",
"=",
"get_abspath",
"(",
"other_path",
",",
"cwd",
"=",
"cwd",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"True",
"if",
"os",
".",
"path",
".",
"getmtime",
"(",
"other_path",
")",
"-",
"1e-6",
">=",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")",
":",
"# 1e-6 is needed beacuse http://stackoverflow.com/questions/17086426/",
"return",
"True",
"return",
"False"
] | Investigate if path is non-existent or older than provided reference
path.
Parameters
==========
path: string
path which might be missing or too old
other_path: string
reference path
cwd: string
working directory (root of relative paths)
Returns
=======
True if path is older or missing. | [
"Investigate",
"if",
"path",
"is",
"non",
"-",
"existant",
"or",
"older",
"than",
"provided",
"reference",
"path",
"."
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L208-L234 |
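A sketch of the typical staleness check built on missing_or_other_newer; the paths are illustrative assumptions:

    from pycompilation.util import missing_or_other_newer

    # True when build/main.o is missing or older than src/main.c.
    if missing_or_other_newer('build/main.o', 'src/main.c'):
        print('recompilation needed')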
bjodah/pycompilation | pycompilation/util.py | import_module_from_file | def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. The import raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to the author)
is horrible for reimporting the same path of an .so file. It will
not detect the new time stamp nor the new checksum but will use the old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod | python | def import_module_from_file(filename, only_if_newer_than=None):
"""
Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. The import raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to the author)
is horrible for reimporting the same path of an .so file. It will
not detect the new time stamp nor the new checksum but will use the old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename.
"""
import imp
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
name = name.split('.')[0]
fobj, filename, data = imp.find_module(name, [path])
if only_if_newer_than:
for dep in only_if_newer_than:
if os.path.getmtime(filename) < os.path.getmtime(dep):
raise ImportError("{} is newer than {}".format(dep, filename))
mod = imp.load_module(name, fobj, filename, data)
return mod | [
"def",
"import_module_from_file",
"(",
"filename",
",",
"only_if_newer_than",
"=",
"None",
")",
":",
"import",
"imp",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"name",
")",
"name",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"fobj",
",",
"filename",
",",
"data",
"=",
"imp",
".",
"find_module",
"(",
"name",
",",
"[",
"path",
"]",
")",
"if",
"only_if_newer_than",
":",
"for",
"dep",
"in",
"only_if_newer_than",
":",
"if",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"<",
"os",
".",
"path",
".",
"getmtime",
"(",
"dep",
")",
":",
"raise",
"ImportError",
"(",
"\"{} is newer than {}\"",
".",
"format",
"(",
"dep",
",",
"filename",
")",
")",
"mod",
"=",
"imp",
".",
"load_module",
"(",
"name",
",",
"fobj",
",",
"filename",
",",
"data",
")",
"return",
"mod"
] | Imports (cython generated) shared object file (.so)
Provide a list of paths in `only_if_newer_than` to check
timestamps of dependencies. The import raises an ImportError
if any is newer.
Word of warning: Python's caching or the OS caching (unclear to the author)
is horrible for reimporting the same path of an .so file. It will
not detect the new time stamp nor the new checksum but will use the old
module.
Use unique names for this reason.
Parameters
==========
filename: string
path to shared object
only_if_newer_than: iterable of strings
paths to dependencies of the shared object
Raises
======
ImportError if any of the files specified in only_if_newer_than are newer
than the file given by filename. | [
"Imports",
"(",
"cython",
"generated",
")",
"shared",
"object",
"file",
"(",
".",
"so",
")"
] | train | https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/util.py#L281-L318 |
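A usage sketch for import_module_from_file; the .so path and the dependency list are assumptions for illustration:

    from pycompilation.util import import_module_from_file

    # Load a freshly built extension module; an ImportError signals that a
    # listed dependency is newer than the shared object itself.
    mod = import_module_from_file('build/_solver.so',
                                  only_if_newer_than=['solver.pyx'])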