repository_name (string, len 7-55) | func_path_in_repository (string, len 4-223) | func_name (string, len 1-134) | whole_func_string (string, len 75-104k) | language (1 class) | func_code_string (string, len 75-104k) | func_code_tokens (sequence, len 19-28.4k) | func_documentation_string (string, len 1-46.9k) | func_documentation_tokens (sequence, len 1-1.97k) | split_name (1 class) | func_code_url (string, len 87-315) |
---|---|---|---|---|---|---|---|---|---|---|
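The schema above matches a CodeSearchNet-style dump. As a minimal sketch of how such a split can be inspected, the snippet below uses the `datasets` library; the dataset id `code_search_net` and config `python` are assumptions about where this dump came from, not a confirmed source.

```python
# Minimal sketch for inspecting a CodeSearchNet-style split; the dataset id
# and config are assumptions, substitute the actual source of this dump.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")
row = ds[0]
print(row["repository_name"], row["func_path_in_repository"], row["func_name"])
print(row["func_documentation_string"][:80])  # first 80 chars of the docstring
```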
pmacosta/pcsv | pcsv/csv_file.py | CsvFile._validate_rfilter | def _validate_rfilter(self, rfilter, letter="d"):
"""Validate that all columns in filter are in header."""
if letter == "d":
pexdoc.exh.addai(
"dfilter",
(
(not self._has_header)
and any([not isinstance(item, int) for item in rfilter.keys()])
),
)
else:
pexdoc.exh.addai(
"rfilter",
(
(not self._has_header)
and any([not isinstance(item, int) for item in rfilter.keys()])
),
)
for key in rfilter:
self._in_header(key)
rfilter[key] = (
[rfilter[key]] if isinstance(rfilter[key], str) else rfilter[key]
) | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | Validate that all columns in filter are in header. | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L495-L517 |
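A minimal sketch of the invariant `_validate_rfilter` enforces; the names below are illustrative stand-ins, not pcsv API. When the file was loaded without a header, every filter key must be an integer column index, and bare string filter values are normalized to one-element lists.

```python
# Illustrative stand-ins, not pcsv API: reproduces the two checks above.
def normalize_rfilter(rfilter, has_header):
    # Without a header, only integer column indexes are valid keys.
    if (not has_header) and any(not isinstance(key, int) for key in rfilter):
        raise RuntimeError("Argument `rfilter` is not valid")
    # A bare string value becomes a one-element list of accepted values.
    return {k: [v] if isinstance(v, str) else v for k, v in rfilter.items()}

print(normalize_rfilter({0: "John"}, has_header=False))  # {0: ['John']}
```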
pmacosta/pcsv | pcsv/csv_file.py | CsvFile.data | def data(self, filtered=False, no_empty=False):
r"""
Return (filtered) file data.
    The returned object is a list; each item is a sub-list corresponding
to a row of data; each item in the sub-lists contains data
corresponding to a particular column
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param no_empty: Flag that indicates whether rows with empty columns
should be filtered out (True) or not (False)
:type no_empty: bool
:rtype: list
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.data
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
.. [[[end]]]
"""
    return self._apply_filter(filtered, no_empty) | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | r"""
Return (filtered) file data.
The returned object is a list; each item is a sub-list corresponding
to a row of data; each item in the sub-lists contains data
corresponding to a particular column
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param no_empty: Flag that indicates whether rows with empty columns
should be filtered out (True) or not (False)
:type no_empty: bool
:rtype: list
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.data
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
.. [[[end]]] | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L565-L593 |
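A hypothetical usage of `CsvFile.data`; the file name and filter values are placeholders, and the constructor form shown is an assumption.

```python
# Hypothetical usage; fname and dfilter values are placeholders.
from pcsv.csv_file import CsvFile

obj = CsvFile(fname="data.csv", dfilter={"CTRL": 2})
raw = obj.data()                # every row in the file
kept = obj.data(filtered=True)  # only rows where column CTRL equals 2
```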
pmacosta/pcsv | pcsv/csv_file.py | CsvFile.dsort | def dsort(self, order):
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# Make order conforming to a list of dictionaries
order = order if isinstance(order, list) else [order]
norder = [{item: "A"} if not isinstance(item, dict) else item for item in order]
# Verify that all columns exist in file
self._in_header([list(item.keys())[0] for item in norder])
# Get column indexes
clist = []
for nitem in norder:
for key, value in nitem.items():
clist.append(
(
key
if isinstance(key, int)
else self._header_upper.index(key.upper()),
value.upper() == "D",
)
)
# From the Python documentation:
# "Starting with Python 2.3, the sort() method is guaranteed to be
# stable. A sort is stable if it guarantees not to change the
# relative order of elements that compare equal - this is helpful
# for sorting in multiple passes (for example, sort by department,
# then by salary grade)."
# This means that the sorts have to be done from "minor" column to
# "major" column
for (cindex, rvalue) in reversed(clist):
fpointer = operator.itemgetter(cindex)
        self._data.sort(key=fpointer, reverse=rvalue) | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L596-L643 |
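The minor-to-major trick the comments in `dsort` describe can be shown standalone: because Python's sort is stable, sorting by the least-significant key first and the most-significant key last yields a multi-column sort. The data below is illustrative only.

```python
# Sort by column 0 ascending (major key), then column 1 descending (minor key),
# by running the stable sorts in reverse order of significance.
import operator

rows = [["eng", 3], ["hr", 1], ["eng", 1]]
for index, descending in reversed([(0, False), (1, True)]):  # major key first
    rows.sort(key=operator.itemgetter(index), reverse=descending)
print(rows)  # [['eng', 3], ['eng', 1], ['hr', 1]]
```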
pmacosta/pcsv | pcsv/csv_file.py | CsvFile.header | def header(self, filtered=False):
r"""
Return data header.
When the raw (input) data is used the data header is a list of the
comma-separated values file header if the file is loaded with header
(each list item is a column header) or a list of column numbers if the
file is loaded without header (column zero is the leftmost column).
When filtered data is used the data header is the active column filter,
if any, otherwise it is the same as the raw (input) data header
:param filtered: Flag that indicates whether the raw (input) data
should be used (False) or whether filtered data
should be used (True)
:type filtered: boolean
:rtype: list of strings or integers
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.header
:raises: RuntimeError (Argument \`filtered\` is not valid)
.. [[[end]]]
"""
return (
self._header
if (not filtered) or (filtered and self._cfilter is None)
else self._cfilter
    ) | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | r"""
Return data header.
When the raw (input) data is used the data header is a list of the
comma-separated values file header if the file is loaded with header
(each list item is a column header) or a list of column numbers if the
file is loaded without header (column zero is the leftmost column).
When filtered data is used the data header is the active column filter,
if any, otherwise it is the same as the raw (input) data header
:param filtered: Flag that indicates whether the raw (input) data
should be used (False) or whether filtered data
should be used (True)
:type filtered: boolean
:rtype: list of strings or integers
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.header
:raises: RuntimeError (Argument \`filtered\` is not valid)
.. [[[end]]] | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L646-L676 |
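A hypothetical illustration of the raw versus filtered header; the column names and the column-filter form passed to the constructor are assumptions.

```python
# Hypothetical: with a column filter active, header(filtered=True) returns it.
from pcsv.csv_file import CsvFile

obj = CsvFile(fname="data.csv", dfilter="RESULT")  # column filter, assumed form
obj.header()               # e.g. ['CTRL', 'REF', 'RESULT'] -- the file header
obj.header(filtered=True)  # ['RESULT'] -- the active column filter
```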
pmacosta/pcsv | pcsv/csv_file.py | CsvFile.replace | def replace(self, rdata, filtered=False):
r"""
Replace data.
:param rdata: Replacement data
:type rdata: list of lists
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
.. [[[cog cog.out(exobj.get_sphinx_autodoc(width=63)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.replace
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`rdata\` is not valid)
* ValueError (Number of columns mismatch between input and
replacement data)
* ValueError (Number of rows mismatch between input and
replacement data)
.. [[[end]]]
"""
# pylint: disable=R0914
rdata_ex = pexdoc.exh.addai("rdata")
rows_ex = pexdoc.exh.addex(
ValueError, "Number of rows mismatch between input and replacement data"
)
cols_ex = pexdoc.exh.addex(
ValueError, "Number of columns mismatch between input and replacement data"
)
rdata_ex(any([len(item) != len(rdata[0]) for item in rdata]))
# Use all columns if no specification has been given
cfilter = (
self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
)
# Verify column names, has to be done before getting data
col_num = len(self._data[0]) - 1
odata = self._apply_filter(filtered)
cfilter = (
self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
)
col_index = [
self._header_upper.index(col_id.upper())
if isinstance(col_id, str)
else col_id
for col_id in cfilter
]
rows_ex(len(odata) != len(rdata))
cols_ex(len(odata[0]) != len(rdata[0]))
df_tuples = self._format_rfilter(self._rfilter)
rnum = 0
for row in self._data:
if (not filtered) or (
filtered
and all([row[col_num] in col_value for col_num, col_value in df_tuples])
):
for col_num, new_data in zip(col_index, rdata[rnum]):
row[col_num] = new_data
                rnum = rnum + 1 | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | r"""
Replace data.
:param rdata: Replacement data
:type rdata: list of lists
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
.. [[[cog cog.out(exobj.get_sphinx_autodoc(width=63)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.replace
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`rdata\` is not valid)
* ValueError (Number of columns mismatch between input and
replacement data)
* ValueError (Number of rows mismatch between input and
replacement data)
.. [[[end]]] | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L679-L742 |
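A hypothetical call to `replace`; the filter form and the data shapes are assumptions. The replacement data must match the filtered selection row-for-row and column-for-column, or the row/column mismatch exceptions above are raised.

```python
# Hypothetical: two rows match the row filter and one column is selected,
# so rdata must be a 2x1 list of lists.
from pcsv.csv_file import CsvFile

obj = CsvFile(fname="data.csv", dfilter=({"CTRL": 2}, ["RESULT"]))
obj.replace(rdata=[[10], [20]], filtered=True)
```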
pmacosta/pcsv | pcsv/csv_file.py | CsvFile.write | def write(self, fname=None, filtered=False, header=True, append=False):
r"""
Write (processed) data to a specified comma-separated values (CSV) file.
:param fname: Name of the comma-separated values file to be
written. If None the file from which the data originated
is overwritten
:type fname: FileName_
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param header: If a list, column headers to use in the file. If
boolean, flag that indicates whether the input column
headers should be written (True) or not (False)
:type header: string, list of strings or boolean
:param append: Flag that indicates whether data is added to an
existing file (or a new file is created if it does not
exist) (True), or whether data overwrites the file
contents (if the file exists) or creates a new file if
                       the file does not exist (False)
:type append: boolean
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.write
:raises:
* OSError (File *[fname]* could not be created: *[reason]*)
* RuntimeError (Argument \`append\` is not valid)
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`header\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
* ValueError (There is no data to save to file)
.. [[[end]]]
"""
# pylint: disable=R0913
write_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
fname = self._fname if fname is None else fname
data = self.data(filtered=filtered)
write_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
if header:
header = [header] if isinstance(header, str) else header
cfilter = self._gen_col_index(filtered=filtered)
filtered_header = (
[self._header[item] for item in cfilter]
if self._has_header
else cfilter
)
file_header = filtered_header if isinstance(header, bool) else header
# Convert None's to ''
data = [["''" if item is None else item for item in row] for row in data]
        _write_int(fname, [file_header] + data if header else data, append=append) | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | r"""
Write (processed) data to a specified comma-separated values (CSV) file.
:param fname: Name of the comma-separated values file to be
written. If None the file from which the data originated
is overwritten
:type fname: FileName_
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param header: If a list, column headers to use in the file. If
boolean, flag that indicates whether the input column
headers should be written (True) or not (False)
:type header: string, list of strings or boolean
:param append: Flag that indicates whether data is added to an
existing file (or a new file is created if it does not
exist) (True), or whether data overwrites the file
contents (if the file exists) or creates a new file if
the file does not exists (False)
:type append: boolean
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.write
:raises:
* OSError (File *[fname]* could not be created: *[reason]*)
* RuntimeError (Argument \`append\` is not valid)
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`header\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
* ValueError (There is no data to save to file)
.. [[[end]]] | (func_documentation_tokens omitted) | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L788-L849 |
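A hypothetical round-trip with `write`; the file names and filter are placeholders.

```python
# Hypothetical: save only the filtered view to a new file, keeping the header.
from pcsv.csv_file import CsvFile

obj = CsvFile(fname="data.csv", dfilter={"CTRL": 2})
obj.write(fname="subset.csv", filtered=True, header=True, append=False)
```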
MSchnei/pyprf_feature | pyprf_feature/analysis/model_creation_main.py | model_creation | def model_creation(dicCnfg, varRat=None, strPathHrf=None):
"""
Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
    strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
'aryPrfTc[x-position, y-position, SD, volume]'.
lgcMdlInc : np.array, boolean
Logical to only include models with pRF center on stimulated area.
"""
# *************************************************************************
# *** Load parameters from config file
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# *************************************************************************
if cfg.lgcCrteMdl:
# *********************************************************************
# *** Load spatial condition information
print('------Load spatial condition information')
arySptExpInf = np.load(cfg.strSptExpInf)
# Here we assume scientific convention and orientation of images where
# the origin should fall in the lower left corner, the x-axis occupies
# the width and the y-axis occupies the height dimension of the screen.
# We also assume that the first dimension that the user provides
# indexes x and the second indexes the y-axis. Since python is column
# major (i.e. first indexes columns, only then rows), we need to rotate
        # arySptExpInf by 90 degrees rightward. This will ensure that with the
# 0th axis we index the scientific x-axis and higher values move us to
# the right on that x-axis. It will also ensure that the 1st
# python axis indexes the scientific y-axis and higher values will
# move us up.
arySptExpInf = np.rot90(arySptExpInf, k=3)
# Calculate the areas that were stimulated during the experiment
aryStimArea = np.sum(arySptExpInf, axis=-1).astype(np.bool)
# *********************************************************************
# *********************************************************************
# *** Load temporal condition information
print('------Load temporal condition information')
aryTmpExpInf = np.load(cfg.strTmpExpInf)
# add fourth column to make it appropriate for pyprf_feature
if aryTmpExpInf.shape[-1] == 3:
print('---------Added fourth column')
vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
aryTmpExpInf = np.concatenate(
(aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
# *********************************************************************
# *********************************************************************
# *** Create model parameter combination, for now in pixel.
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax,
cfg.varNum2, cfg.varExtYmin,
cfg.varExtYmax, cfg.varNumPrfSizes,
cfg.varPrfStdMin, cfg.varPrfStdMax,
kwUnt='pix', kwCrd=cfg.strKwCrd)
# If desired by user, also create model parameters for supp surround
if varRat is not None:
aryMdlParamsSur = np.copy(aryMdlParams)
aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
# Exclude model parameters whose prf center would lie outside the
# stimulated area
print('------Exclude model params with prf center outside stim area')
varNumMdlBfr = aryMdlParams.shape[0]
# Get logical for model inclusion
lgcMdlInc = aryStimArea[aryMdlParams[:, 0].astype(np.int32),
aryMdlParams[:, 1].astype(np.int32)]
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Also apply the logical to the surround parameters, if they exist
if varRat is not None:
aryMdlParamsSur = aryMdlParamsSur[lgcMdlInc, :]
print('---------Number of models excluded: ' +
str(varNumMdlBfr-aryMdlParams.shape[0]))
# *********************************************************************
# *********************************************************************
# *** Create 2D Gauss model responses to spatial conditions.
print('------Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParams, cfg.varPar)
# If desired by user, also create model responses for supp surround
if varRat is not None:
aryMdlRspSur = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParamsSur, cfg.varPar)
# Delete array to save memory
del(arySptExpInf)
# *********************************************************************
# *********************************************************************
# *** Create prf time course models
print('------Create prf time course models')
# Check whether path to npy file with hrf parameters was provided
if strPathHrf is not None:
print('---------Load custom hrf parameters')
aryCstPrm = np.load(strPathHrf)
dctPrm = {}
dctPrm['peak_delay'] = aryCstPrm[0]
dctPrm['under_delay'] = aryCstPrm[1]
dctPrm['peak_disp'] = aryCstPrm[2]
dctPrm['under_disp'] = aryCstPrm[3]
dctPrm['p_u_ratio'] = aryCstPrm[4]
# If not, set dctPrm to None, which will result in default hrf params
else:
print('---------Use default hrf parameters')
dctPrm = None
aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
cfg.varTr, cfg.varTmpOvsmpl,
cfg.switchHrfSet, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm)
# If desired by user, create prf time course models for supp surround
if varRat is not None:
print('---------Add suppressive surround')
aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
cfg.varNumVol, cfg.varTr,
cfg.varTmpOvsmpl, cfg.switchHrfSet,
(int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm)
# Concatenate aryPrfTc and aryPrfTcSur
aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
# *********************************************************************
# *********************************************************************
# *** Save pRF time course models, corresponding params and responses
print('------Save pRF time course models to disk')
# Prepare file name extensions
strNmeExtMdl = ''
strNmeExtPrm = '_params'
strNmeExtRsp = '_mdlRsp'
strNmeExtMdlInc = '_lgcMdlInc'
# Check whether extensions need to be modified with ratio name
if varRat is not None:
strNmeExtMdl = strNmeExtMdl + '_' + str(varRat)
strNmeExtPrm = strNmeExtPrm + '_' + str(varRat)
strNmeExtRsp = strNmeExtRsp + '_' + str(varRat)
            # Also include the model parameters and responses of the surround
# For the pRF time course models, the surround is included above
aryMdlParams = np.stack((aryMdlParams, aryMdlParamsSur),
axis=1)
aryMdlRsp = np.stack((aryMdlRsp, aryMdlRspSur),
axis=1)
            # Append the npy file name for model exclusion in unstimulated area
            # with the general _supsur suffix since it does not depend on the
            # specific surround
strNmeExtMdlInc = '_supsur' + strNmeExtMdlInc
# Save pRF time course models
np.save(cfg.strPathMdl + strNmeExtMdl, aryPrfTc)
# Save the corresponding model parameters
np.save(cfg.strPathMdl + strNmeExtPrm, aryMdlParams)
# Save the corresponding model responses
np.save(cfg.strPathMdl + strNmeExtRsp, aryMdlRsp)
# Save logical for parameter exclusion in unstimulated area
np.save(cfg.strPathMdl + strNmeExtMdlInc, lgcMdlInc)
del(aryMdlParams)
del(aryMdlRsp)
# *********************************************************************
else:
# *********************************************************************
# %% Load existing pRF time course models
print('------Load pRF time course models from disk')
# Load the file:
aryPrfTc = np.load((cfg.strPathMdl + '.npy'))
# Check whether pRF time course model matrix has the expected
# dimensions:
vecPrfTcShp = aryPrfTc.shape
# Logical test for correct dimensions:
strErrMsg = ('---Error: Dimensions of specified pRF time course ' +
'models do not agree with specified model parameters')
assert vecPrfTcShp[0] == cfg.varNum1 * \
cfg.varNum2 * cfg.varNumPrfSizes, strErrMsg
assert vecPrfTcShp[-1] == cfg.varNumVol, strErrMsg
        # Check number of features. If fitting is performed with sup surround,
# number of features will be twice as many as simple fitting
if varRat is None:
assert vecPrfTcShp[1] == cfg.switchHrfSet, strErrMsg
else:
assert vecPrfTcShp[1] == cfg.switchHrfSet*2, strErrMsg
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# *************************************************************************
    return aryPrfTc, lgcMdlInc | python | (func_code_string: verbatim duplicate of whole_func_string) | (func_code_tokens omitted) | Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
Returns
-------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
'aryPrfTc[x-position, y-position, SD, volume]'.
lgcMdlInc : np.array, boolean
Logical to only include models with pRF center on stimulated area. | (func_documentation_tokens omitted) | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/model_creation_main.py#L28-L267 |
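The orientation convention handled by `np.rot90(..., k=3)` in `model_creation` can be checked on a toy array; the shape below is illustrative only.

```python
# np.rot90 with k=3 rotates 90 degrees clockwise, so axis 0 of the result
# walks the scientific x-axis (rightward) and axis 1 the y-axis (upward),
# exactly as the comments in model_creation describe.
import numpy as np

a = np.arange(6).reshape(2, 3)  # user-provided array: axis 0 = x, axis 1 = y
print(np.rot90(a, k=3))         # shape (3, 2)
```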
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.registIssue | def registIssue(self, CorpNum, cashbill, Memo, UserID=None):
""" 현금영수증 즉시발행
args
CorpNum : 팝빌회원 사업자번호
cashbill : 등록할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = ""
if Memo != None and Memo != '':
cashbill.memo = Memo
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill', postData, CorpNum, UserID, "ISSUE") | python | def registIssue(self, CorpNum, cashbill, Memo, UserID=None):
""" 현금영수증 즉시발행
args
CorpNum : 팝빌회원 사업자번호
cashbill : 등록할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = ""
if Memo != None and Memo != '':
cashbill.memo = Memo
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill', postData, CorpNum, UserID, "ISSUE") | [
"def",
"registIssue",
"(",
"self",
",",
"CorpNum",
",",
"cashbill",
",",
"Memo",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"cashbill",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"현금영수증 정보가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"\"\"",
"if",
"Memo",
"!=",
"None",
"or",
"Memo",
"!=",
"''",
":",
"cashbill",
".",
"memo",
"=",
"Memo",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"cashbill",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | Immediate issuance of a cash receipt
args
CorpNum : Popbill member's business registration number
cashbill : cash receipt object to register. made with Cashbill(...)
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"현금영수증",
"즉시발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"cashbill",
":",
"등록할",
"현금영수증",
"object",
".",
"made",
"with",
"Cashbill",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L95-L116 |
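
A hedged usage sketch for registIssue. The service constructor and the Cashbill fields follow the pattern of the Popbill SDK's published examples, but every credential and field value here is a placeholder, and the exact field set is an assumption rather than a guaranteed API.

from popbill import Cashbill, CashbillService, PopbillException

cashbillService = CashbillService('LinkID', 'SecretKey')   # placeholder credentials

cashbill = Cashbill()                      # field names assumed from SDK examples
cashbill.mgtKey = '20230101-001'           # caller-chosen document management key
cashbill.tradeUsage = '소득공제용'          # income-deduction receipt
cashbill.identityNum = '01012341234'
cashbill.supplyCost = '10000'
cashbill.tax = '1000'
cashbill.totalAmount = '11000'

try:
    result = cashbillService.registIssue('1234567890', cashbill, 'issue memo')
    print(result.code, result.message)     # response shape assumed from the docstring
except PopbillException as pe:
    print(pe.code, pe.message)
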
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.register | def register(self, CorpNum, cashbill, UserID=None):
""" 현금영수증 등록
args
CorpNum : 팝빌회원 사업자번호
cashbill : 등록할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill', postData, CorpNum, UserID) | python | def register(self, CorpNum, cashbill, UserID=None):
""" 현금영수증 등록
args
CorpNum : 팝빌회원 사업자번호
cashbill : 등록할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill', postData, CorpNum, UserID) | [
"def",
"register",
"(",
"self",
",",
"CorpNum",
",",
"cashbill",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"cashbill",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"현금영수증 정보가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"cashbill",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | Register a cash receipt
args
CorpNum : Popbill member's business registration number
cashbill : cash receipt object to register. made with Cashbill(...)
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"현금영수증",
"등록",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"cashbill",
":",
"등록할",
"현금영수증",
"object",
".",
"made",
"with",
"Cashbill",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L118-L134 |
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.revokeRegistIssue | def revokeRegistIssue(self, CorpNum, mgtKey, orgConfirmNum, orgTradeDate, smssendYN=False, memo=None, UserID=None,
isPartCancel=False, cancelType=None, supplyCost=None, tax=None, serviceFee=None,
totalAmount=None):
""" 취소현금영수증 즉시발행
args
CorpNum : 팝빌회원 사업자번호
mgtKey : 현금영수증 문서관리번호
orgConfirmNum : 원본현금영수증 승인번호
orgTradeDate : 원본현금영수증 거래일자
smssendYN : 발행안내문자 전송여부
memo : 메모
UserID : 팝빌회원 아이디
isPartCancel : 부분취소여부
cancelType : 취소사유
supplyCost : [취소] 공급가액
tax : [취소] 세액
serviceFee : [취소] 봉사료
totalAmount : [취소] 합계금액
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify({
"mgtKey": mgtKey,
"orgConfirmNum": orgConfirmNum,
"orgTradeDate": orgTradeDate,
"smssendYN": smssendYN,
"memo": memo,
"isPartCancel": isPartCancel,
"cancelType": cancelType,
"supplyCost": supplyCost,
"tax": tax,
"serviceFee": serviceFee,
"totalAmount": totalAmount,
})
return self._httppost('/Cashbill', postData, CorpNum, UserID, "REVOKEISSUE") | python | def revokeRegistIssue(self, CorpNum, mgtKey, orgConfirmNum, orgTradeDate, smssendYN=False, memo=None, UserID=None,
isPartCancel=False, cancelType=None, supplyCost=None, tax=None, serviceFee=None,
totalAmount=None):
""" 취소현금영수증 즉시발행
args
CorpNum : 팝빌회원 사업자번호
mgtKey : 현금영수증 문서관리번호
orgConfirmNum : 원본현금영수증 승인번호
orgTradeDate : 원본현금영수증 거래일자
smssendYN : 발행안내문자 전송여부
memo : 메모
UserID : 팝빌회원 아이디
isPartCancel : 부분취소여부
cancelType : 취소사유
supplyCost : [취소] 공급가액
tax : [취소] 세액
serviceFee : [취소] 봉사료
totalAmount : [취소] 합계금액
return
처리결과. consist of code and message
raise
PopbillException
"""
postData = self._stringtify({
"mgtKey": mgtKey,
"orgConfirmNum": orgConfirmNum,
"orgTradeDate": orgTradeDate,
"smssendYN": smssendYN,
"memo": memo,
"isPartCancel": isPartCancel,
"cancelType": cancelType,
"supplyCost": supplyCost,
"tax": tax,
"serviceFee": serviceFee,
"totalAmount": totalAmount,
})
return self._httppost('/Cashbill', postData, CorpNum, UserID, "REVOKEISSUE") | [
"def",
"revokeRegistIssue",
"(",
"self",
",",
"CorpNum",
",",
"mgtKey",
",",
"orgConfirmNum",
",",
"orgTradeDate",
",",
"smssendYN",
"=",
"False",
",",
"memo",
"=",
"None",
",",
"UserID",
"=",
"None",
",",
"isPartCancel",
"=",
"False",
",",
"cancelType",
"=",
"None",
",",
"supplyCost",
"=",
"None",
",",
"tax",
"=",
"None",
",",
"serviceFee",
"=",
"None",
",",
"totalAmount",
"=",
"None",
")",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"mgtKey\"",
":",
"mgtKey",
",",
"\"orgConfirmNum\"",
":",
"orgConfirmNum",
",",
"\"orgTradeDate\"",
":",
"orgTradeDate",
",",
"\"smssendYN\"",
":",
"smssendYN",
",",
"\"memo\"",
":",
"memo",
",",
"\"isPartCancel\"",
":",
"isPartCancel",
",",
"\"cancelType\"",
":",
"cancelType",
",",
"\"supplyCost\"",
":",
"supplyCost",
",",
"\"tax\"",
":",
"tax",
",",
"\"serviceFee\"",
":",
"serviceFee",
",",
"\"totalAmount\"",
":",
"totalAmount",
",",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"REVOKEISSUE\"",
")"
] | Immediate issuance of a cancellation cash receipt
args
CorpNum : Popbill member's business registration number
mgtKey : document management key for the cash receipt
orgConfirmNum : approval number of the original cash receipt
orgTradeDate : trade date of the original cash receipt
smssendYN : whether to send the issuance notification SMS
memo : memo
UserID : Popbill member ID
isPartCancel : whether this is a partial cancellation
cancelType : cancellation reason
supplyCost : [cancellation] supply value
tax : [cancellation] tax amount
serviceFee : [cancellation] service fee
totalAmount : [cancellation] total amount
return
Processing result. consists of code and message
raise
PopbillException | [
"취소현금영수증",
"즉시발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"mgtKey",
":",
"현금영수증",
"문서관리번호",
"orgConfirmNum",
":",
"원본현금영수증",
"승인번호",
"orgTradeDate",
":",
"원본현금영수증",
"거래일자",
"smssendYN",
":",
"발행안내문자",
"전송여부",
"memo",
":",
"메모",
"UserID",
":",
"팝빌회원",
"아이디",
"isPartCancel",
":",
"부분취소여부",
"cancelType",
":",
"취소사유",
"supplyCost",
":",
"[",
"취소",
"]",
"공급가액",
"tax",
":",
"[",
"취소",
"]",
"세액",
"serviceFee",
":",
"[",
"취소",
"]",
"봉사료",
"totalAmount",
":",
"[",
"취소",
"]",
"합계금액",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L136-L174 |
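
Continuing the sketch above, a partial cancellation against an already-issued receipt. The approval number, dates and amounts are invented placeholders, and the cancelType code is assumed to be numeric.

try:
    cashbillService.revokeRegistIssue(
        '1234567890',          # CorpNum
        '20230102-001',        # new mgtKey for the cancellation document
        '820116333',           # approval number of the original receipt (placeholder)
        '20230101',            # trade date of the original receipt (yyyyMMdd)
        smssendYN=False,
        memo='partial cancel',
        isPartCancel=True,     # only the amounts below are being cancelled
        cancelType=1,
        supplyCost='5000', tax='500', serviceFee='0', totalAmount='5500')
except PopbillException as pe:
    print(pe.code, pe.message)
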
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.update | def update(self, CorpNum, MgtKey, cashbill, UserID=None):
""" 수정
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
cashbill : 수정할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "PATCH") | python | def update(self, CorpNum, MgtKey, cashbill, UserID=None):
""" 수정
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
cashbill : 수정할 현금영수증 object. made with Cashbill(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if cashbill == None:
raise PopbillException(-99999999, "현금영수증 정보가 입력되지 않았습니다.")
postData = self._stringtify(cashbill)
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "PATCH") | [
"def",
"update",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
",",
"cashbill",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"cashbill",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"현금영수증 정보가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"cashbill",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"PATCH\"",
")"
] | Update
args
CorpNum : Popbill member's business registration number
MgtKey : document management key of the original cash receipt
cashbill : cash receipt object with the updated values. made with Cashbill(...)
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"수정",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"원본",
"현금영수증",
"문서관리번호",
"cashbill",
":",
"수정할",
"현금영수증",
"object",
".",
"made",
"with",
"Cashbill",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L214-L233 |
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.issue | def issue(self, CorpNum, MgtKey, Memo=None, UserID=None):
""" 발행
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
Memo : 발행 메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = ""
req = {}
if Memo != None and Memo != '':
req["memo"] = Memo
postData = self._stringtify(req)
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "ISSUE") | python | def issue(self, CorpNum, MgtKey, Memo=None, UserID=None):
""" 발행
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
Memo : 발행 메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = ""
req = {}
if Memo != None and Memo != '':
req["memo"] = Memo
postData = self._stringtify(req)
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "ISSUE") | [
"def",
"issue",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"\"\"",
"req",
"=",
"{",
"}",
"if",
"Memo",
"!=",
"None",
"or",
"Memo",
"!=",
"''",
":",
"req",
"[",
"\"memo\"",
"]",
"=",
"Memo",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | Issue
args
CorpNum : Popbill member's business registration number
MgtKey : document management key of the original cash receipt
Memo : issuance memo
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"원본",
"현금영수증",
"문서관리번호",
"Memo",
":",
"발행",
"메모",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L235-L259 |
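
register, update and issue (the non-cancellation entries above) compose the delayed-issue alternative to registIssue's single call. Continuing the earlier sketch:

# 1) register stores the document on Popbill without issuing it.
cashbillService.register('1234567890', cashbill)

# 2) update may patch the stored document while it is still unissued.
cashbill.totalAmount = '12000'
cashbillService.update('1234567890', cashbill.mgtKey, cashbill)

# 3) issue finally issues the stored document, optionally with a memo.
cashbillService.issue('1234567890', cashbill.mgtKey, Memo='issued later')
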
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.delete | def delete(self, CorpNum, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httppost('/Cashbill/' + MgtKey, '', CorpNum, UserID, "DELETE") | python | def delete(self, CorpNum, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 원본 현금영수증 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httppost('/Cashbill/' + MgtKey, '', CorpNum, UserID, "DELETE") | [
"def",
"delete",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
",",
"''",
",",
"CorpNum",
",",
"UserID",
",",
"\"DELETE\"",
")"
] | Delete
args
CorpNum : Popbill member's business registration number
MgtKey : document management key of the original cash receipt
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"삭제",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"원본",
"현금영수증",
"문서관리번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L287-L302 |
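
And the matching removal, again continuing the sketch; delete presumably only succeeds while the document is still in a deletable (unissued) state:

cashbillService.delete('1234567890', cashbill.mgtKey)
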
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.search | def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order,
UserID=None, QString=None, TradeOpt=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
DType : 일자유형, R-등록일자, T-거래일자, I-발행일자 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드 배열, 2,3번째 자리에 와일드카드(*) 사용가능
TradeType : 문서형태 배열, N-일반현금영수증, C-취소현금영수증
TradeUsage : 거래구분 배열, P-소득공제용, C-지출증빙용
TaxationType : 과세형태 배열, T-과세, N-비과세
Page : 페이지번호
PerPage : 페이지당 검색개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 현금영수증 식별번호, 미기재시 전체조회
TradeOpt : 거래유형, N-일반, B-도서공연, T-대중교통
"""
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Cashbill/Search'
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&TradeUsage=' + ','.join(TradeUsage)
uri += '&TradeType=' + ','.join(TradeType)
uri += '&TaxationType=' + ','.join(TaxationType)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
if TradeOpt is not None:
uri += '&TradeOpt=' + ','.join(TradeOpt)
return self._httpget(uri, CorpNum, UserID) | python | def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order,
UserID=None, QString=None, TradeOpt=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
DType : 일자유형, R-등록일자, T-거래일자, I-발행일자 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드 배열, 2,3번째 자리에 와일드카드(*) 사용가능
TradeType : 문서형태 배열, N-일반현금영수증, C-취소현금영수증
TradeUsage : 거래구분 배열, P-소득공제용, C-지출증빙용
TaxationType : 과세형태 배열, T-과세, N-비과세
Page : 페이지번호
PerPage : 페이지당 검색개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 현금영수증 식별번호, 미기재시 전체조회
TradeOpt : 거래유형, N-일반, B-도서공연, T-대중교통
"""
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Cashbill/Search'
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&TradeUsage=' + ','.join(TradeUsage)
uri += '&TradeType=' + ','.join(TradeType)
uri += '&TaxationType=' + ','.join(TaxationType)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
if TradeOpt is not None:
uri += '&TradeOpt=' + ','.join(TradeOpt)
return self._httpget(uri, CorpNum, UserID) | [
"def",
"search",
"(",
"self",
",",
"CorpNum",
",",
"DType",
",",
"SDate",
",",
"EDate",
",",
"State",
",",
"TradeType",
",",
"TradeUsage",
",",
"TaxationType",
",",
"Page",
",",
"PerPage",
",",
"Order",
",",
"UserID",
"=",
"None",
",",
"QString",
"=",
"None",
",",
"TradeOpt",
"=",
"None",
")",
":",
"if",
"DType",
"==",
"None",
"or",
"DType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"일자유형이 입력되지 않았습니다.\")\r",
"",
"if",
"SDate",
"==",
"None",
"or",
"SDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"시작일자가 입력되지 않았습니다.\")\r",
"",
"if",
"EDate",
"==",
"None",
"or",
"EDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"종료일자가 입력되지 않았습니다.\")\r",
"",
"uri",
"=",
"'/Cashbill/Search'",
"uri",
"+=",
"'?DType='",
"+",
"DType",
"uri",
"+=",
"'&SDate='",
"+",
"SDate",
"uri",
"+=",
"'&EDate='",
"+",
"EDate",
"uri",
"+=",
"'&State='",
"+",
"','",
".",
"join",
"(",
"State",
")",
"uri",
"+=",
"'&TradeUsage='",
"+",
"','",
".",
"join",
"(",
"TradeUsage",
")",
"uri",
"+=",
"'&TradeType='",
"+",
"','",
".",
"join",
"(",
"TradeType",
")",
"uri",
"+=",
"'&TaxationType='",
"+",
"','",
".",
"join",
"(",
"TaxationType",
")",
"uri",
"+=",
"'&Page='",
"+",
"str",
"(",
"Page",
")",
"uri",
"+=",
"'&PerPage='",
"+",
"str",
"(",
"PerPage",
")",
"uri",
"+=",
"'&Order='",
"+",
"Order",
"if",
"QString",
"is",
"not",
"None",
":",
"uri",
"+=",
"'&QString='",
"+",
"QString",
"if",
"TradeOpt",
"is",
"not",
"None",
":",
"uri",
"+=",
"'&TradeOpt='",
"+",
"','",
".",
"join",
"(",
"TradeOpt",
")",
"return",
"self",
".",
"_httpget",
"(",
"uri",
",",
"CorpNum",
",",
"UserID",
")"
] | Search the list
args
CorpNum : Popbill member's business registration number
DType : date type, choose one of R-registration date, T-trade date, I-issue date
SDate : start date, format (yyyyMMdd)
EDate : end date, format (yyyyMMdd)
State : array of state codes, wildcard (*) allowed in the 2nd and 3rd positions
TradeType : array of document types, N-normal cash receipt, C-cancellation cash receipt
TradeUsage : array of trade usages, P-income deduction, C-expense evidence
TaxationType : array of taxation types, T-taxable, N-tax-free
Page : page number
PerPage : number of results per page
Order : sort direction, D-descending, A-ascending
UserID : Popbill member ID
QString : cash receipt identification number, all receipts are searched when omitted
TradeOpt : trade option, N-general, B-books/performances, T-public transport | [
"목록",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"DType",
":",
"일자유형",
"R",
"-",
"등록일자",
"T",
"-",
"거래일자",
"I",
"-",
"발행일자",
"중",
"택",
"1",
"SDate",
":",
"시작일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"EDate",
":",
"종료일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"State",
":",
"상태코드",
"배열",
"2",
"3번째",
"자리에",
"와일드카드",
"(",
"*",
")",
"사용가능",
"TradeType",
":",
"문서형태",
"배열",
"N",
"-",
"일반현금영수증",
"C",
"-",
"취소현금영수증",
"TradeUsage",
":",
"거래구분",
"배열",
"P",
"-",
"소득공제용",
"C",
"-",
"지출증빙용",
"TaxationType",
":",
"과세형태",
"배열",
"T",
"-",
"과세",
"N",
"-",
"비과세",
"Page",
":",
"페이지번호",
"PerPage",
":",
"페이지당",
"검색개수",
"Order",
":",
"정렬방향",
"D",
"-",
"내림차순",
"A",
"-",
"오름차순",
"UserID",
":",
"팝빌",
"회원아이디",
"QString",
":",
"현금영수증",
"식별번호",
"미기재시",
"전체조회",
"TradeOpt",
":",
"거래유형",
"N",
"-",
"일반",
"B",
"-",
"도서공연",
"T",
"-",
"대중교통"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L304-L351 |
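
search assembles its query string from three required scalars plus comma-joined arrays, so the array arguments must be lists of code strings. A hedged call using the codes listed in the docstring:

response = cashbillService.search(
    '1234567890',
    'T',                      # DType: trade date
    '20230101', '20230131',   # SDate, EDate (yyyyMMdd)
    ['100', '3**'],           # State codes; '*' wildcards in 2nd/3rd position
    ['N', 'C'],               # TradeType: normal and cancellation receipts
    ['P', 'C'],               # TradeUsage
    ['T', 'N'],               # TaxationType
    1, 50, 'D')               # Page, PerPage, Order (descending)
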
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.getInfo | def getInfo(self, CorpNum, MgtKey):
""" 상태/요약 정보 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 상태/요약 정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey, CorpNum) | python | def getInfo(self, CorpNum, MgtKey):
""" 상태/요약 정보 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 상태/요약 정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey, CorpNum) | [
"def",
"getInfo",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
",",
"CorpNum",
")"
] | Get state/summary information
args
CorpNum : Popbill member's business registration number
MgtKey : document management key
return
document state/summary information object
raise
PopbillException | [
"상태",
"/",
"요약",
"정보",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"문서관리번호",
"return",
"문서",
"상태",
"/",
"요약",
"정보",
"object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L353-L367 |
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.getInfos | def getInfos(self, CorpNum, MgtKeyList):
""" 상태정보 다량 확인, 최대 1000건
args
CorpNum : 회원 사업자 번호
MgtKeyList : 문서관리번호 목록
return
상태정보 목록 as List
raise
PopbillException
"""
if MgtKeyList == None or len(MgtKeyList) < 1:
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
return self._httppost('/Cashbill/States', postData, CorpNum) | python | def getInfos(self, CorpNum, MgtKeyList):
""" 상태정보 다량 확인, 최대 1000건
args
CorpNum : 회원 사업자 번호
MgtKeyList : 문서관리번호 목록
return
상태정보 목록 as List
raise
PopbillException
"""
if MgtKeyList == None or len(MgtKeyList) < 1:
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
return self._httppost('/Cashbill/States', postData, CorpNum) | [
"def",
"getInfos",
"(",
"self",
",",
"CorpNum",
",",
"MgtKeyList",
")",
":",
"if",
"MgtKeyList",
"==",
"None",
"or",
"len",
"(",
"MgtKeyList",
")",
"<",
"1",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"MgtKeyList",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill/States'",
",",
"postData",
",",
"CorpNum",
")"
] | Bulk check of state information, up to 1000 items
args
CorpNum : member's business registration number
MgtKeyList : list of document management keys
return
list of state information as List
raise
PopbillException | [
"상태정보",
"다량",
"확인",
"최대",
"1000건",
"args",
"CorpNum",
":",
"회원",
"사업자",
"번호",
"MgtKeyList",
":",
"문서관리번호",
"목록",
"return",
"상태정보",
"목록",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L369-L384 |
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.getDetailInfo | def getDetailInfo(self, CorpNum, MgtKey):
""" 상세정보 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 상세정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey + '?Detail', CorpNum) | python | def getDetailInfo(self, CorpNum, MgtKey):
""" 상세정보 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 상세정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey + '?Detail', CorpNum) | [
"def",
"getDetailInfo",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
"+",
"'?Detail'",
",",
"CorpNum",
")"
] | Get detailed information
args
CorpNum : Popbill member's business registration number
MgtKey : document management key
return
document detail information object
raise
PopbillException | [
"상세정보",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"문서관리번호",
"return",
"문서",
"상세정보",
"object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L386-L400 |
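
The three lookup entries above (getInfo, getInfos, getDetailInfo) differ only in cardinality and depth; a combined sketch with placeholder keys, continuing the earlier setup:

info = cashbillService.getInfo('1234567890', '20230101-001')
infos = cashbillService.getInfos('1234567890', ['20230101-001', '20230101-002'])
detail = cashbillService.getDetailInfo('1234567890', '20230101-001')
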
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.sendEmail | def sendEmail(self, CorpNum, MgtKey, ReceiverEmail, UserID=None):
""" 알림메일 재전송
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
ReceiverEmail : 수신자 이메일 주소
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ReceiverEmail == None or ReceiverEmail == "":
raise PopbillException(-99999999, "수신자 메일주소가 입력되지 않았습니다.")
postData = self._stringtify({"receiver": ReceiverEmail})
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "EMAIL") | python | def sendEmail(self, CorpNum, MgtKey, ReceiverEmail, UserID=None):
""" 알림메일 재전송
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
ReceiverEmail : 수신자 이메일 주소
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ReceiverEmail == None or ReceiverEmail == "":
raise PopbillException(-99999999, "수신자 메일주소가 입력되지 않았습니다.")
postData = self._stringtify({"receiver": ReceiverEmail})
return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "EMAIL") | [
"def",
"sendEmail",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
",",
"ReceiverEmail",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ReceiverEmail",
"==",
"None",
"or",
"ReceiverEmail",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"수신자 메일주소가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"receiver\"",
":",
"ReceiverEmail",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"EMAIL\"",
")"
] | Resend the notification email
args
CorpNum : Popbill member's business registration number
MgtKey : document management key
ReceiverEmail : recipient's email address
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"알림메일",
"재전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"문서관리번호",
"ReceiverEmail",
":",
"수신자",
"이메일",
"주소",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L402-L422 |
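
Resending the notification email is a one-liner; the address below is a placeholder:

cashbillService.sendEmail('1234567890', '20230101-001', 'customer@example.com')
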
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.getLogs | def getLogs(self, CorpNum, MgtKey):
""" 문서이력 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 이력 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey + '/Logs', CorpNum) | python | def getLogs(self, CorpNum, MgtKey):
""" 문서이력 조회
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
return
문서 이력 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Cashbill/' + MgtKey + '/Logs', CorpNum) | [
"def",
"getLogs",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
"+",
"'/Logs'",
",",
"CorpNum",
")"
] | Get the document history
args
CorpNum : Popbill member's business registration number
MgtKey : document management key
return
list of document history entries as List
raise
PopbillException | [
"문서이력",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"문서관리번호",
"return",
"문서",
"이력",
"목록",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L477-L490 |
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.getEPrintURL | def getEPrintURL(self, CorpNum, MgtKey, UserID=None):
""" 공급받는자용 인쇄 URL 확인
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
UserID : 팝빌회원 아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
result = self._httpget('/Cashbill/' + MgtKey + '?TG=EPRINT', CorpNum, UserID)
return result.url | python | def getEPrintURL(self, CorpNum, MgtKey, UserID=None):
""" 공급받는자용 인쇄 URL 확인
args
CorpNum : 팝빌회원 사업자번호
MgtKey : 문서관리번호
UserID : 팝빌회원 아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
result = self._httpget('/Cashbill/' + MgtKey + '?TG=EPRINT', CorpNum, UserID)
return result.url | [
"def",
"getEPrintURL",
"(",
"self",
",",
"CorpNum",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/Cashbill/'",
"+",
"MgtKey",
"+",
"'?TG=EPRINT'",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | Get the print URL for the recipient
args
CorpNum : Popbill member's business registration number
MgtKey : document management key
UserID : Popbill member ID
return
Popbill URL as str
raise
PopbillException | [
"공급받는자용",
"인쇄",
"URL",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"MgtKey",
":",
"문서관리번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"팝빌",
"URL",
"as",
"str",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L528-L544 |
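
getLogs and getEPrintURL (the two entries above) are read-only helpers. Note that getEPrintURL unwraps the response and returns result.url, so the caller gets a plain string:

for entry in cashbillService.getLogs('1234567890', '20230101-001'):
    print(entry)                       # history entry shape assumed

print(cashbillService.getEPrintURL('1234567890', '20230101-001'))
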
linkhub-sdk/popbill.py | popbill/cashbillService.py | CashbillService.updateEmailConfig | def updateEmailConfig(self, Corpnum, EmailType, SendYN, UserID=None):
""" 알림메일 전송설정 수정
args
CorpNum : 팝빌회원 사업자번호
EmailType: 메일전송유형
SendYN: 전송여부 (True-전송, False-미전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if EmailType == None or EmailType == '':
raise PopbillException(-99999999, "메일전송 타입이 입력되지 않았습니다.")
if SendYN == None or SendYN == '':
raise PopbillException(-99999999, "메일전송 여부 항목이 입력되지 않았습니다.")
uri = "/Cashbill/EmailSendConfig?EmailType=" + EmailType + "&SendYN=" + str(SendYN)
return self._httppost(uri, "", Corpnum, UserID) | python | def updateEmailConfig(self, Corpnum, EmailType, SendYN, UserID=None):
""" 알림메일 전송설정 수정
args
CorpNum : 팝빌회원 사업자번호
EmailType: 메일전송유형
SendYN: 전송여부 (True-전송, False-미전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if EmailType == None or EmailType == '':
raise PopbillException(-99999999, "메일전송 타입이 입력되지 않았습니다.")
if SendYN == None or SendYN == '':
raise PopbillException(-99999999, "메일전송 여부 항목이 입력되지 않았습니다.")
uri = "/Cashbill/EmailSendConfig?EmailType=" + EmailType + "&SendYN=" + str(SendYN)
return self._httppost(uri, "", Corpnum, UserID) | [
"def",
"updateEmailConfig",
"(",
"self",
",",
"Corpnum",
",",
"EmailType",
",",
"SendYN",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"EmailType",
"==",
"None",
"or",
"EmailType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"메일전송 타입이 입력되지 않았습니다.\")\r",
"",
"if",
"SendYN",
"==",
"None",
"or",
"SendYN",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"메일전송 여부 항목이 입력되지 않았습니다.\")\r",
"",
"uri",
"=",
"\"/Cashbill/EmailSendConfig?EmailType=\"",
"+",
"EmailType",
"+",
"\"&SendYN=\"",
"+",
"str",
"(",
"SendYN",
")",
"return",
"self",
".",
"_httppost",
"(",
"uri",
",",
"\"\"",
",",
"Corpnum",
",",
"UserID",
")"
] | Update the notification email settings
args
CorpNum : Popbill member's business registration number
EmailType: email type to configure
SendYN: whether to send (True-send, False-do not send)
UserID : Popbill member ID
return
Processing result. consists of code and message
raise
PopbillException | [
"알림메일",
"전송설정",
"수정",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"EmailType",
":",
"메일전송유형",
"SendYN",
":",
"전송여부",
"(",
"True",
"-",
"전송",
"False",
"-",
"미전송",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/cashbillService.py#L596-L615 |
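
Because updateEmailConfig interpolates SendYN with str(), Python booleans arrive in the query string as 'True'/'False'. The email-type code below is an invented placeholder, not a value confirmed by the source:

cashbillService.updateEmailConfig('1234567890', 'CSH_ISSUE', False)
# -> POST /Cashbill/EmailSendConfig?EmailType=CSH_ISSUE&SendYN=False
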
lvieirajr/mongorest | mongorest/resource.py | ListResourceMixin.list | def list(self, request):
"""
Returns the list of documents found on the collection
"""
pipeline = [{'$match': request.args.pop('match', {})}]
sort = request.args.pop('sort', {})
if sort:
pipeline.append({'$sort': sort})
project = request.args.pop('project', {})
if project:
pipeline.append({'$project': project})
return Response(serialize(self.collection.aggregate(pipeline))) | python | def list(self, request):
"""
Returns the list of documents found on the collection
"""
pipeline = [{'$match': request.args.pop('match', {})}]
sort = request.args.pop('sort', {})
if sort:
pipeline.append({'$sort': sort})
project = request.args.pop('project', {})
if project:
pipeline.append({'$project': project})
return Response(serialize(self.collection.aggregate(pipeline))) | [
"def",
"list",
"(",
"self",
",",
"request",
")",
":",
"pipeline",
"=",
"[",
"{",
"'$match'",
":",
"request",
".",
"args",
".",
"pop",
"(",
"'match'",
",",
"{",
"}",
")",
"}",
"]",
"sort",
"=",
"request",
".",
"args",
".",
"pop",
"(",
"'sort'",
",",
"{",
"}",
")",
"if",
"sort",
":",
"pipeline",
".",
"append",
"(",
"{",
"'$sort'",
":",
"sort",
"}",
")",
"project",
"=",
"request",
".",
"args",
".",
"pop",
"(",
"'project'",
",",
"{",
"}",
")",
"if",
"project",
":",
"pipeline",
".",
"append",
"(",
"{",
"'$project'",
":",
"project",
"}",
")",
"return",
"Response",
"(",
"serialize",
"(",
"self",
".",
"collection",
".",
"aggregate",
"(",
"pipeline",
")",
")",
")"
] | Returns the list of documents found on the collection | [
"Returns",
"the",
"list",
"of",
"documents",
"found",
"on",
"the",
"collection"
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L73-L87 |
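
The list mixin turns three optional request arguments into a MongoDB aggregation pipeline. A standalone, dependency-free sketch of just that translation:

def build_pipeline(args):
    # Mirrors ListResourceMixin.list: $match always, $sort/$project only if given.
    pipeline = [{'$match': args.pop('match', {})}]
    sort = args.pop('sort', {})
    if sort:
        pipeline.append({'$sort': sort})
    project = args.pop('project', {})
    if project:
        pipeline.append({'$project': project})
    return pipeline

print(build_pipeline({'match': {'age': {'$gte': 18}}, 'sort': {'age': 1}}))
# [{'$match': {'age': {'$gte': 18}}}, {'$sort': {'age': 1}}]
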
lvieirajr/mongorest | mongorest/resource.py | CreateResourceMixin.create | def create(self, request):
"""
Creates a new document based on the given data
"""
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
) | python | def create(self, request):
"""
Creates a new document based on the given data
"""
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
) | [
"def",
"create",
"(",
"self",
",",
"request",
")",
":",
"document",
"=",
"self",
".",
"collection",
"(",
"request",
".",
"json",
")",
"document",
".",
"created_at",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"document",
".",
"updated_at",
"=",
"document",
".",
"created_at",
"created",
"=",
"document",
".",
"insert",
"(",
")",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"created",
")",
",",
"status",
"=",
"(",
"201",
"if",
"not",
"all",
"(",
"key",
"in",
"created",
"for",
"key",
"in",
"[",
"'error_code'",
",",
"'error_type'",
",",
"'error_message'",
"]",
")",
"else",
"400",
")",
")"
] | Creates a new document based on the given data | [
"Creates",
"a",
"new",
"document",
"based",
"on",
"the",
"given",
"data"
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L96-L114 |
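
From a client's point of view, create is a plain JSON POST; created_at/updated_at are stamped server-side. A hedged client sketch (the endpoint URL and resource route are invented):

import requests  # hypothetical client; the mixin itself runs framework-side

resp = requests.post('http://localhost:5000/users/', json={'name': 'Ada'})
# 201 with the created document on success,
# 400 with error_code/error_type/error_message otherwise.
print(resp.status_code, resp.json())
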
lvieirajr/mongorest | mongorest/resource.py | RetrieveResourceMixin.retrieve | def retrieve(self, request, _id):
"""
Returns the document containing the given _id or 404
"""
_id = deserialize(_id)
retrieved = self.collection.find_one({'_id': _id})
if retrieved:
return Response(serialize(retrieved))
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | python | def retrieve(self, request, _id):
"""
Returns the document containing the given _id or 404
"""
_id = deserialize(_id)
retrieved = self.collection.find_one({'_id': _id})
if retrieved:
return Response(serialize(retrieved))
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | [
"def",
"retrieve",
"(",
"self",
",",
"request",
",",
"_id",
")",
":",
"_id",
"=",
"deserialize",
"(",
"_id",
")",
"retrieved",
"=",
"self",
".",
"collection",
".",
"find_one",
"(",
"{",
"'_id'",
":",
"_id",
"}",
")",
"if",
"retrieved",
":",
"return",
"Response",
"(",
"serialize",
"(",
"retrieved",
")",
")",
"else",
":",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"DocumentNotFoundError",
"(",
"self",
".",
"collection",
".",
"__name__",
",",
"_id",
")",
")",
",",
"status",
"=",
"400",
")"
] | Returns the document containing the given _id or 404 | [
"Returns",
"the",
"document",
"containing",
"the",
"given",
"_id",
"or",
"404"
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L123-L138 |
lvieirajr/mongorest | mongorest/resource.py | UpdateResourceMixin.update | def update(self, request, _id):
"""
Updates the document with the given _id using the given data
"""
_id = deserialize(_id)
to_update = self.collection.find_one({'_id': _id})
if to_update:
document = self.collection(dict(to_update, **request.json))
document.updated_at = datetime.utcnow()
updated = document.update()
return Response(
response=serialize(updated),
status=(
200 if not all(
key in updated for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | python | def update(self, request, _id):
"""
Updates the document with the given _id using the given data
"""
_id = deserialize(_id)
to_update = self.collection.find_one({'_id': _id})
if to_update:
document = self.collection(dict(to_update, **request.json))
document.updated_at = datetime.utcnow()
updated = document.update()
return Response(
response=serialize(updated),
status=(
200 if not all(
key in updated for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
) | [
"def",
"update",
"(",
"self",
",",
"request",
",",
"_id",
")",
":",
"_id",
"=",
"deserialize",
"(",
"_id",
")",
"to_update",
"=",
"self",
".",
"collection",
".",
"find_one",
"(",
"{",
"'_id'",
":",
"_id",
"}",
")",
"if",
"to_update",
":",
"document",
"=",
"self",
".",
"collection",
"(",
"dict",
"(",
"to_update",
",",
"*",
"*",
"request",
".",
"json",
")",
")",
"document",
".",
"updated_at",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"updated",
"=",
"document",
".",
"update",
"(",
")",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"updated",
")",
",",
"status",
"=",
"(",
"200",
"if",
"not",
"all",
"(",
"key",
"in",
"updated",
"for",
"key",
"in",
"[",
"'error_code'",
",",
"'error_type'",
",",
"'error_message'",
"]",
")",
"else",
"400",
")",
")",
"else",
":",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"DocumentNotFoundError",
"(",
"self",
".",
"collection",
".",
"__name__",
",",
"_id",
")",
")",
",",
"status",
"=",
"400",
")"
] | Updates the document with the given _id using the given data | [
"Updates",
"the",
"document",
"with",
"the",
"given",
"_id",
"using",
"the",
"given",
"data"
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L147-L175 |
lvieirajr/mongorest | mongorest/resource.py | DeleteResourceMixin.delete | def delete(self, request, _id):
"""
Deletes the document with the given _id if it exists
"""
_id = deserialize(_id)
to_delete = self.collection.get({'_id': _id})
if to_delete:
deleted = to_delete.delete()
return Response(
response=serialize(deleted),
status=(
200 if not all(
key in deleted for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=404
) | python | def delete(self, request, _id):
"""
Deletes the document with the given _id if it exists
"""
_id = deserialize(_id)
to_delete = self.collection.get({'_id': _id})
if to_delete:
deleted = to_delete.delete()
return Response(
response=serialize(deleted),
status=(
200 if not all(
key in deleted for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=404
) | [
"def",
"delete",
"(",
"self",
",",
"request",
",",
"_id",
")",
":",
"_id",
"=",
"deserialize",
"(",
"_id",
")",
"to_delete",
"=",
"self",
".",
"collection",
".",
"get",
"(",
"{",
"'_id'",
":",
"_id",
"}",
")",
"if",
"to_delete",
":",
"deleted",
"=",
"to_delete",
".",
"delete",
"(",
")",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"deleted",
")",
",",
"status",
"=",
"(",
"200",
"if",
"not",
"all",
"(",
"key",
"in",
"deleted",
"for",
"key",
"in",
"[",
"'error_code'",
",",
"'error_type'",
",",
"'error_message'",
"]",
")",
"else",
"400",
")",
")",
"else",
":",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"DocumentNotFoundError",
"(",
"self",
".",
"collection",
".",
"__name__",
",",
"_id",
")",
")",
",",
"status",
"=",
"404",
")"
] | Deletes the document with the given _id if it exists | [
"Deletes",
"the",
"document",
"with",
"the",
"given",
"_id",
"if",
"it",
"exists"
] | train | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L184-L210 |
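
retrieve, update and delete above all share one decision rule: a result dict that carries all of error_code, error_type and error_message is treated as an error payload. (Note the not-found status differs in the source: retrieve and update answer 400 while delete answers 404, despite retrieve's docstring saying 404.) The check, distilled:

def status_for(result, success=200):
    # Same test as the mixins: only the full error triple signals failure.
    error_keys = ('error_code', 'error_type', 'error_message')
    return 400 if all(k in result for k in error_keys) else success

print(status_for({'_id': 1}))                                                  # 200
print(status_for({'error_code': 1, 'error_type': 'E', 'error_message': 'x'}))  # 400
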
pydron/utwist | utwist/_utwist.py | with_reactor | def with_reactor(*dec_args, **dec_kwargs):
"""
Decorator for test functions that require a running reactor.
Can be used like this::
@with_reactor
def test_connect_to_server(self):
...
Or like this::
@with_reactor(timeout=10)
def test_connect_to_server(self):
...
If the test function returns a deferred then the test will
be successful if the deferred resolves to a value or unsuccessful
if the deferred errbacks.
The test must not leave any connections or the like open. This will
otherwise result in a reactor-unclean failure of the test.
If there is a function called `twisted_setup()` in the same class
as the test function is defined, then this function will be invoked
before the test, but already in the context of the reactor. Note that
the regular setup function provided by the testing framework will
be executed too, but not in the reactor context.
Accordingly, if there is a `twisted_teardown()` it executes after the
test function, even if the test failed.
If the test, including `twisted_setup` and `twisted_teardown`, has
not completed within the timeout, the test fails. The timeout defaults
to two minutes. A timeout duration of zero disables the timeout.
"""
# This method takes care of the decorator protocol, it
# distinguishes between using the decorator with brackets
# and without brackets. It then calls `_twisted_test_sync()`.
if len(dec_args) == 1 and callable(dec_args[0]) and not dec_kwargs:
# decorator used without brackets:
# @twisted_test
# def test_xxx():
# ....
callee = dec_args[0]
dec_args = ()
dec_kwargs = {}
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs)
return wrapper
else:
# decorator used with brackets:
# @twisted_test(*dec_args, **dec_args)
# def test_xxx():
# ....
def decorator(callee):
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs, *dec_args, **dec_kwargs)
return wrapper
return decorator | python | def with_reactor(*dec_args, **dec_kwargs):
"""
Decorator for test functions that require a running reactor.
Can be used like this::
@with_reactor
def test_connect_to_server(self):
...
Or like this::
@with_reactor(timeout=10)
def test_connect_to_server(self):
...
If the test function returns a deferred then the test will
be successful if the deferred resolves to a value or unsuccessful
if the deferred errbacks.
The test must not leave any connections or the like open. This will
otherwise result in a reactor-unclean failure of the test.
If there is a function called `twisted_setup()` in the same class
as the test function is defined, then this function will be invoked
before the test, but already in the context of the reactor. Note that
the regular setup function provided by the testing framework will
be executed too, but not in the reactor context.
Accordingly, if there is a `twisted_teardown()` it executes after the
test function, even if the test failed.
If the test, including `twisted_setup` and `twisted_teardown`, has
not completed within the timeout, the test fails. The timeout defaults
to two minutes. A timeout duration of zero disables the timeout.
"""
# This method takes care of the decorator protocol, it
# distinguishes between using the decorator with brackets
# and without brackets. It then calls `_twisted_test_sync()`.
if len(dec_args) == 1 and callable(dec_args[0]) and not dec_kwargs:
# decorator used without brackets:
# @twisted_test
# def test_xxx():
# ....
callee = dec_args[0]
dec_args = ()
dec_kwargs = {}
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs)
return wrapper
else:
# decorator used with brackets:
# @twisted_test(*dec_args, **dec_args)
# def test_xxx():
# ....
def decorator(callee):
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs, *dec_args, **dec_kwargs)
return wrapper
return decorator | [
"def",
"with_reactor",
"(",
"*",
"dec_args",
",",
"*",
"*",
"dec_kwargs",
")",
":",
"# This method takes care of the decorator protocol, it",
"# distinguishes between using the decorator with brackets",
"# and without brackets. It then calls `_twisted_test_sync()`.",
"if",
"len",
"(",
"dec_args",
")",
"==",
"1",
"and",
"callable",
"(",
"dec_args",
"[",
"0",
"]",
")",
"and",
"not",
"dec_kwargs",
":",
"# decorator used without brackets:",
"# @twisted_test",
"# def test_xxx():",
"# ....",
"callee",
"=",
"dec_args",
"[",
"0",
"]",
"dec_args",
"=",
"(",
")",
"dec_kwargs",
"=",
"{",
"}",
"@",
"functools",
".",
"wraps",
"(",
"callee",
")",
"def",
"wrapper",
"(",
"*",
"call_args",
",",
"*",
"*",
"call_kwargs",
")",
":",
"return",
"_twisted_test_sync",
"(",
"callee",
",",
"call_args",
",",
"call_kwargs",
")",
"return",
"wrapper",
"else",
":",
"# decorator used with brackets:",
"# @twisted_test(*dec_args, **dec_args)",
"# def test_xxx():",
"# ....",
"def",
"decorator",
"(",
"callee",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"callee",
")",
"def",
"wrapper",
"(",
"*",
"call_args",
",",
"*",
"*",
"call_kwargs",
")",
":",
"return",
"_twisted_test_sync",
"(",
"callee",
",",
"call_args",
",",
"call_kwargs",
",",
"*",
"dec_args",
",",
"*",
"*",
"dec_kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] | Decorator for test functions that require a running reactor.
Can be used like this::
@with_reactor
def test_connect_to_server(self):
...
Or like this::
@with_reactor(timeout=10)
def test_connect_to_server(self):
...
If the test function returns a deferred then the test will
be successful if the deferred resolves to a value or unsuccessful
if the deferred errbacks.
The test must not leave any connections or the like open. This will
otherwise result in a reactor-unclean failure of the test.
If there is a function called `twisted_setup()` in the same class
as the test function is defined, then this function will be invoked
before the test, but already in the context of the reactor. Note that
the regular setup function provided by the testing framework will
be executed too, but not in the reactor context.
Accordingly, if there is a `twisted_teardown()` it executes after the
test function, even if the test failed.
If the test, including `twisted_setup` and `twisted_teardown`, has
not completed within the timout, the test fails. The timeout defaults
to two minutes. A timeout duration of zero disables the timeout. | [
"Decorator",
"for",
"test",
"functions",
"that",
"require",
"a",
"running",
"reactor",
".",
"Can",
"be",
"used",
"like",
"this",
"::"
] | train | https://github.com/pydron/utwist/blob/31670bdd7630874e2d24e663dbfce8b863b1f02e/utwist/_utwist.py#L33-L98 |
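
A hedged end-to-end sketch of the decorator in a test class, exercising the twisted_setup/twisted_teardown hooks and a deferred-returning test. The top-level import path (utwist re-exporting with_reactor) is an assumption:

from twisted.internet import reactor, task
from utwist import with_reactor

class TestServer(object):

    def twisted_setup(self):            # runs inside the reactor context
        self.value = 42

    @with_reactor(timeout=10)
    def test_deferred_result(self):
        # Passes when the deferred fires, fails if it errbacks or times out.
        return task.deferLater(reactor, 0.01, lambda: self.value)

    def twisted_teardown(self):         # runs after the test, even on failure
        self.value = None
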
pydron/utwist | utwist/_utwist.py | _ensure_reactor_running | def _ensure_reactor_running():
"""
Starts the twisted reactor if it is not running already.
The reactor is started in a new daemon-thread.
Has to perform dirty hacks so that twisted can register
signals even if it is not running in the main-thread.
"""
if not reactor.running:
# Some of the `signal` API can only be called
# from the main-thread. So we do a dirty workaround.
#
# `signal.signal()` and `signal.set_wakeup_fd()`
# are temporarily monkey-patched while the reactor is
# starting.
#
# The patched functions record the invocations in
# `signal_registrations`.
#
# Once the reactor is started, the main-thread
# is used to playback the recorded invocations.
signal_registrations = []
# do the monkey patching
def signal_capture(*args, **kwargs):
signal_registrations.append((orig_signal, args, kwargs))
def set_wakeup_fd_capture(*args, **kwargs):
signal_registrations.append((orig_set_wakeup_fd, args, kwargs))
orig_signal = signal.signal
signal.signal = signal_capture
orig_set_wakeup_fd = signal.set_wakeup_fd
signal.set_wakeup_fd = set_wakeup_fd_capture
# start the reactor in a daemon-thread
reactor_thread = threading.Thread(target=reactor.run, name="reactor")
reactor_thread.daemon = True
reactor_thread.start()
while not reactor.running:
time.sleep(0.01)
# Give the reactor a moment to register the signals.
# Apparently the 'running' flag is set before that.
time.sleep(0.01)
# Undo the monkey-patching
signal.signal = orig_signal
signal.set_wakeup_fd = orig_set_wakeup_fd
# Playback the recorded calls
for func, args, kwargs in signal_registrations:
func(*args, **kwargs) | python | def _ensure_reactor_running():
"""
Starts the twisted reactor if it is not running already.
The reactor is started in a new daemon-thread.
Has to perform dirty hacks so that twisted can register
signals even if it is not running in the main-thread.
"""
if not reactor.running:
# Some of the `signal` API can only be called
# from the main-thread. So we do a dirty workaround.
#
# `signal.signal()` and `signal.set_wakeup_fd()`
# are temporarily monkey-patched while the reactor is
# starting.
#
# The patched functions record the invocations in
# `signal_registrations`.
#
# Once the reactor is started, the main-thread
# is used to playback the recorded invocations.
signal_registrations = []
# do the monkey patching
def signal_capture(*args, **kwargs):
signal_registrations.append((orig_signal, args, kwargs))
def set_wakeup_fd_capture(*args, **kwargs):
signal_registrations.append((orig_set_wakeup_fd, args, kwargs))
orig_signal = signal.signal
signal.signal = signal_capture
orig_set_wakeup_fd = signal.set_wakeup_fd
signal.set_wakeup_fd = set_wakeup_fd_capture
# start the reactor in a daemon-thread
reactor_thread = threading.Thread(target=reactor.run, name="reactor")
reactor_thread.daemon = True
reactor_thread.start()
while not reactor.running:
time.sleep(0.01)
# Give the reactor a moment to register the signals.
# Apparently the 'running' flag is set before that.
time.sleep(0.01)
# Undo the monkey-patching
signal.signal = orig_signal
signal.set_wakeup_fd = orig_set_wakeup_fd
# Playback the recorded calls
for func, args, kwargs in signal_registrations:
func(*args, **kwargs) | [
"def",
"_ensure_reactor_running",
"(",
")",
":",
"if",
"not",
"reactor",
".",
"running",
":",
"# Some of the `signal` API can only be called",
"# from the main-thread. So we do a dirty workaround.",
"#",
"# `signal.signal()` and `signal.wakeup_fd_capture()`",
"# are temporarily monkey-patched while the reactor is",
"# starting.",
"#",
"# The patched functions record the invocations in",
"# `signal_registrations`. ",
"#",
"# Once the reactor is started, the main-thread",
"# is used to playback the recorded invocations.",
"signal_registrations",
"=",
"[",
"]",
"# do the monkey patching",
"def",
"signal_capture",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"signal_registrations",
".",
"append",
"(",
"(",
"orig_signal",
",",
"args",
",",
"kwargs",
")",
")",
"def",
"set_wakeup_fd_capture",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"signal_registrations",
".",
"append",
"(",
"(",
"orig_set_wakeup_fd",
",",
"args",
",",
"kwargs",
")",
")",
"orig_signal",
"=",
"signal",
".",
"signal",
"signal",
".",
"signal",
"=",
"signal_capture",
"orig_set_wakeup_fd",
"=",
"signal",
".",
"set_wakeup_fd",
"signal",
".",
"set_wakeup_fd",
"=",
"set_wakeup_fd_capture",
"# start the reactor in a daemon-thread",
"reactor_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"reactor",
".",
"run",
",",
"name",
"=",
"\"reactor\"",
")",
"reactor_thread",
".",
"daemon",
"=",
"True",
"reactor_thread",
".",
"start",
"(",
")",
"while",
"not",
"reactor",
".",
"running",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"# Give the reactor a moment to register the signals. ",
"# Apparently the 'running' flag is set before that.",
"time",
".",
"sleep",
"(",
"0.01",
")",
"# Undo the monkey-paching",
"signal",
".",
"signal",
"=",
"orig_signal",
"signal",
".",
"set_wakeup_fd",
"=",
"orig_set_wakeup_fd",
"# Playback the recorded calls",
"for",
"func",
",",
"args",
",",
"kwargs",
"in",
"signal_registrations",
":",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Starts the twisted reactor if it is not running already.
The reactor is started in a new daemon-thread.
Has to perform dirty hacks so that twisted can register
signals even if it is not running in the main-thread. | [
"Starts",
"the",
"twisted",
"reactor",
"if",
"it",
"is",
"not",
"running",
"already",
".",
"The",
"reactor",
"is",
"started",
"in",
"a",
"new",
"daemon",
"-",
"thread",
".",
"Has",
"to",
"perform",
"dirty",
"hacks",
"so",
"that",
"twisted",
"can",
"register",
"signals",
"even",
"if",
"it",
"is",
"not",
"running",
"in",
"the",
"main",
"-",
"thread",
"."
] | train | https://github.com/pydron/utwist/blob/31670bdd7630874e2d24e663dbfce8b863b1f02e/utwist/_utwist.py#L254-L308 |
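
Since the reactor ends up in a daemon thread, callers must hop into it to do reactor work; blockingCallFromThread is the usual bridge. A sketch against the private helper (underscore name, so this leans on internals that may change):

from twisted.internet import reactor
from twisted.internet.threads import blockingCallFromThread
from utwist._utwist import _ensure_reactor_running

_ensure_reactor_running()      # no-op if the reactor is already running
# Run a callable inside the reactor thread and block here for its result:
assert blockingCallFromThread(reactor, lambda: 2 + 2) == 4
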
pydron/utwist | utwist/_utwist.py | _timeoutDeferred | def _timeoutDeferred(deferred, timeout):
"""
Cancels the given deferred after the given time, if it has not yet callbacked/errbacked it.
"""
delayedCall = reactor.callLater(timeout, deferred.cancel)
def gotResult(result):
if delayedCall.active():
delayedCall.cancel()
return result
deferred.addBoth(gotResult) | python | def _timeoutDeferred(deferred, timeout):
"""
Cancels the given deferred after the given time, if it has not yet callbacked/errbacked it.
"""
delayedCall = reactor.callLater(timeout, deferred.cancel)
def gotResult(result):
if delayedCall.active():
delayedCall.cancel()
return result
deferred.addBoth(gotResult) | [
"def",
"_timeoutDeferred",
"(",
"deferred",
",",
"timeout",
")",
":",
"delayedCall",
"=",
"reactor",
".",
"callLater",
"(",
"timeout",
",",
"deferred",
".",
"cancel",
")",
"def",
"gotResult",
"(",
"result",
")",
":",
"if",
"delayedCall",
".",
"active",
"(",
")",
":",
"delayedCall",
".",
"cancel",
"(",
")",
"return",
"result",
"deferred",
".",
"addBoth",
"(",
"gotResult",
")"
] | Cancels the given deferred after the given time, if it has not yet callbacked/errbacked it. | [
"Cancels",
"the",
"given",
"deferred",
"after",
"the",
"given",
"time",
"if",
"it",
"has",
"not",
"yet",
"callbacked",
"/",
"errbacked",
"it",
"."
] | train | https://github.com/pydron/utwist/blob/31670bdd7630874e2d24e663dbfce8b863b1f02e/utwist/_utwist.py#L311-L320 |
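
A sketch of the cancellation path (assumes a running reactor, e.g. inside a with_reactor test): a deferred that nobody fires is cancelled after the timeout, and the caller sees a CancelledError failure. The private import is again an internals assumption:

from twisted.internet import defer
from utwist._utwist import _timeoutDeferred

d = defer.Deferred()               # never fired by anyone
_timeoutDeferred(d, 0.5)           # schedules d.cancel() in 0.5 s
d.addErrback(lambda f: f.trap(defer.CancelledError))
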
MSchnei/pyprf_feature | pyprf_feature/analysis/find_prf_cpu.py | find_prf_cpu | def find_prf_cpu(idxPrc, aryFuncChnk, aryPrfTc, aryMdlParams, strVersion,
lgcXval, varNumXval, queOut, lgcRstr=None, lgcPrint=True):
"""
Find best fitting pRF model for voxel time course, using the CPU.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryFuncChnk : np.array
2D array with functional MRI data, with shape aryFuncChnk[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
aryPrfTc[x-pos*y-pos*SD, number of features, number of volumes]
aryMdlParams : np.array
2D array with all pRF model parameter combinations.
strVersion : str
Which version to use for pRF finding; 'numpy' or 'cython'.
lgcXval: boolean
Logical to determine whether we cross-validate.
varNumXval: int
Number of folds for k-fold cross-validation.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcRstr : boolean numpy array or None, default None
Logical to restrict certain models to particular voxels.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
aryBstBts : np.array
2D array with beta parameter estimates of 'winning' pRF model for
each voxel, with shape aryBstBts[voxel, features].
Notes
-----
The list with results is not returned directly, but placed on a
multiprocessing queue. This version performs the model finding on the CPU,
using numpy or cython (depending on the value of `strVersion`).
"""
# Number of models in the visual space:
varNumMdls = aryPrfTc.shape[0]
# Number of features
varNumFtr = aryPrfTc.shape[1]
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Vectors for pRF finding results [number-of-voxels times one]:
# make sure they have the same precision as aryMdlParams, since this
# is important for later comparison
vecBstXpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstYpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstSd = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
# Vector for best residual value. For each model fit, the residual is
# compared to this, and updated if it is lower than the best-fitting
# solution so far. We initialise with an arbitrarily high value
vecBstRes = np.add(np.zeros(varNumVoxChnk), np.inf).astype(np.float32)
# array for best beta values. If we update the residual value above because
# it is lower, we also update the beta values of these voxels
aryBstBts = np.zeros((varNumVoxChnk, varNumFtr)).astype(np.float32)
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds = np.zeros((varNumVoxChnk, varNumXval), dtype=np.float32)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# if lgc for Xval is true we already prepare indices for xvalidation
if lgcXval:
# obtain iterator for cross-validation
itXval = KFold(n_splits=varNumXval)
vecSplts = np.arange(aryPrfTc.shape[-1], dtype=np.int32)
# prepare lists that will hold indices for xvalidation
lstIdxTrn = []
lstIdxtst = []
# Loop over the cross-validations to put indices in array
for idxTrn, idxTst in itXval.split(vecSplts):
lstIdxTrn.append(idxTrn)
lstIdxtst.append(idxTst)
# turn lists into array
aryIdxTrn = np.stack(lstIdxTrn, axis=-1).astype(np.int32)
aryIdxTst = np.stack(lstIdxtst, axis=-1).astype(np.int32)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# If user does not restrict model space for particular voxels, select
# all voxels
if lgcRstr is None:
lgcVxl = np.arange(varNumVoxChnk, dtype=np.int32)
# There can be pRF model time courses with a variance of zero (i.e. pRF
# models that are not actually responsive to the stimuli). For time
# efficiency, and in order to avoid division by zero, we ignore these
# model time courses.
aryPrfTcVar = np.var(aryPrfTc, axis=-1)
# Zero with float32 precision for comparison:
varZero32 = np.array(([0.0])).astype(np.float32)[0]
# Loop through pRF models:
for idxMdl in range(0, varNumMdls):
# If desired by user, restrict the model fitting such that certain
# models are restricted to particular voxels
if lgcRstr is not None:
# Apply flatnonzero, so we can use cascaded integer indexing later
lgcVxl = np.flatnonzero(lgcRstr[:, idxMdl])
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('------------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
if lgcPrint:
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Only fit pRF model if:
# 1) all feature predictors have a variance greater than zero AND
# 2) at least one voxel is being tested
if np.logical_and(np.all(np.greater(aryPrfTcVar[idxMdl], varZero32),
axis=0),
np.greater(lgcVxl.size, 0)):
# Get predictor time courses for this specific model
vecMdl = aryPrfTc[idxMdl, :, :].T
# Check whether we need to crossvalidate
if lgcXval:
# We do crossvalidate. In this case, we loop through
# the different folds of the crossvalidation and
# calculate the cross-validation error for the current
# model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
# For time course with one predictor
aryResXval = cy_lst_sq_xval_one(np.squeeze(vecMdl),
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
elif varNumFtr == 2:
# For time course with two predictors
aryResXval = cy_lst_sq_xval_two(vecMdl,
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
else:
if lgcPrint:
print('Cython currently not implemented for ' +
'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
aryResXval = np_lst_sq_xval(vecMdl, aryFuncChnk[:, lgcVxl],
aryIdxTrn, aryIdxTst)
# calculate the average cross validation error across
# all folds
vecTmpRes = np.mean(aryResXval, axis=1)
else:
# We do not crossvalidate. In this case, we calculate
# the ratio of the explained variance (R squared)
# for the current model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
# For time course with one predictor
aryTmpBts, vecTmpRes = cy_lst_sq_one(
np.squeeze(vecMdl), aryFuncChnk[:, lgcVxl])
elif varNumFtr == 2:
# For time course with two predictors
aryTmpBts, vecTmpRes = \
cy_lst_sq_two(vecMdl, aryFuncChnk[:, lgcVxl])
else:
if lgcPrint:
print('Cython currently not implemented for ' +
'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
# Numpy linalg.lstsq is used to calculate the
# beta values and residuals of the current model:
aryTmpBts, vecTmpRes = np_lst_sq(vecMdl,
aryFuncChnk[:, lgcVxl])
# Check whether current crossvalidation error (xval=True)
# or residuals (xval=False) are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes[lgcVxl])
# Apply np.flatnonzero for cascaded integer-indexing
vecLgcTmpRes = np.flatnonzero(vecLgcTmpRes)
# Replace best x and y position values, and SD values:
vecBstXpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 0]
vecBstYpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 1]
vecBstSd[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 2]
# Replace best mean residual values:
vecBstRes[lgcVxl[vecLgcTmpRes]] = vecTmpRes[vecLgcTmpRes]
if not lgcXval:
# Replace best beta values:
aryBstBts[lgcVxl[vecLgcTmpRes], :] = \
aryTmpBts[:, vecLgcTmpRes].T
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds[lgcVxl[vecLgcTmpRes], :] = \
aryResXval[vecLgcTmpRes, :]
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the average correlation coefficient between predicted and
# measured time course (xval=True) or the coefficient of determination
# (xval=False) for each voxel.
if lgcXval:
# create a vector that allows us to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryFuncChnk.shape[1])
# Since we did not do this during finding the best model, we still need
# to calculate deviation from a mean model for every voxel and fold
# arySsTotXval as well as calculate the best betas for the full model
# concatenate vectors with best x, y, sigma params
aryBstPrm = np.stack((vecBstXpos, vecBstYpos, vecBstSd), axis=1)
# Find unique rows
aryUnqRows = fnd_unq_rws(aryBstPrm, return_index=False,
return_inverse=False)
# Calculate deviation from a mean model for every voxel and fold
arySsTotXval = np.zeros((aryBstResFlds.shape),
dtype=aryBstResFlds.dtype)
# Loop over all best-fitting model parameter combinations found
for vecPrm in aryUnqRows:
# Get logical for voxels for which this prm combi was the best
lgcPrm = np.isclose(aryBstPrm, vecPrm, atol=1e-04).all(axis=1)
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcIndMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=1e-04).all(axis=1))[0][0]
if np.all(np.invert(lgcPrm)):
if lgcPrint:
print('------------No voxel found, process ' + str(idxPrc))
# Mark those voxels that were visited
vecVxlTst[lgcPrm] += 1
# Get voxel time course
aryVxlTc = aryFuncChnk[:, lgcPrm]
# Get model time courses
aryMdlTc = aryPrfTc[lgcIndMdl, :, :].T
# Calculate beta parameter estimates for entire model
aryBstBts[lgcPrm, :] = np.linalg.lstsq(aryMdlTc,
aryVxlTc,
rcond=-1)[0].T
# loop over cross-validation folds
for idxXval in range(varNumXval):
# Get functional data for tst:
aryFuncChnkTst = aryVxlTc[
aryIdxTst[:, idxXval], :]
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnkTst,
np.mean(aryFuncChnkTst,
axis=0)[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
arySsTotXval[lgcPrm, idxXval] = vecSsTot
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for SStot calc'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# Calculate coefficient of determination by comparing:
# aryBstResFlds vs. arySsTotXval
# get logical to check that arySsTotXval is greater than zero in all
# voxels and folds
lgcExclZeros = np.all(np.greater(arySsTotXval, np.array([0.0])),
axis=1)
if lgcPrint:
print('------------Nr of voxels: ' + str(len(lgcExclZeros)))
print('------------Nr of voxels above 0: ' +
str(np.sum(lgcExclZeros)))
# Calculate R2 for every crossvalidation fold separately
aryBstR2fld = np.subtract(
1.0, np.divide(aryBstResFlds,
arySsTotXval))
# Calculate mean R2 across folds here
vecBstR2 = np.subtract(
1.0, np.mean(np.divide(aryBstResFlds,
arySsTotXval),
axis=1))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts,
aryBstR2fld]
queOut.put(lstOut)
else:
# To calculate the coefficient of determination, we start with the
# total sum of squares (i.e. the deviation of the data from the mean).
# The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts]
queOut.put(lstOut) | python | def find_prf_cpu(idxPrc, aryFuncChnk, aryPrfTc, aryMdlParams, strVersion,
lgcXval, varNumXval, queOut, lgcRstr=None, lgcPrint=True):
"""
Find best fitting pRF model for voxel time course, using the CPU.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryFuncChnk : np.array
2D array with functional MRI data, with shape aryFuncChnk[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
aryPrfTc[x-pos*y-pos*SD, number of features, number of volumes]
aryMdlParams : np.array
2D array with all pRF model parameter combinations.
strVersion : str
Which version to use for pRF finding; 'numpy' or 'cython'.
lgcXval: boolean
Logical to determine whether we cross-validate.
varNumXval: int
Number of folds for k-fold cross-validation.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcRstr : boolean numpy array or None, default None
Logical to restrict certain models to particular voxels.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
aryBstBts : np.array
2D array with beta parameter estimates of 'winning' pRF model for
each voxel, with shape aryBstBts[voxel, features].
Notes
-----
The list with results is not returned directly, but placed on a
multiprocessing queue. This version performs the model finding on the CPU,
using numpy or cython (depending on the value of `strVersion`).
"""
# Number of models in the visual space:
varNumMdls = aryPrfTc.shape[0]
# Number of features
varNumFtr = aryPrfTc.shape[1]
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Vectors for pRF finding results [number-of-voxels times one]:
# make sure they have the same precision as aryMdlParams, since this
# is important for later comparison
vecBstXpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstYpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstSd = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
# Vector for best residual value. For each model fit, the residual is
# compared to this, and updated if it is lower than the best-fitting
# solution so far. We initialise with an arbitrarily high value
vecBstRes = np.add(np.zeros(varNumVoxChnk), np.inf).astype(np.float32)
# array for best beta values. If we update the residual value above because
# it is lower, we also update the beta values of these voxels
aryBstBts = np.zeros((varNumVoxChnk, varNumFtr)).astype(np.float32)
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds = np.zeros((varNumVoxChnk, varNumXval), dtype=np.float32)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# if lgc for Xval is true we already prepare indices for xvalidation
if lgcXval:
# obtain iterator for cross-validation
itXval = KFold(n_splits=varNumXval)
vecSplts = np.arange(aryPrfTc.shape[-1], dtype=np.int32)
# prepare lists that will hold indices for xvalidation
lstIdxTrn = []
lstIdxtst = []
# Loop over the cross-validations to put indices in array
for idxTrn, idxTst in itXval.split(vecSplts):
lstIdxTrn.append(idxTrn)
lstIdxtst.append(idxTst)
# turn lists into array
aryIdxTrn = np.stack(lstIdxTrn, axis=-1).astype(np.int32)
aryIdxTst = np.stack(lstIdxtst, axis=-1).astype(np.int32)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# If user does not restrict model space for particular voxels, select
# all voxels
if lgcRstr is None:
lgcVxl = np.arange(varNumVoxChnk, dtype=np.int32)
# There can be pRF model time courses with a variance of zero (i.e. pRF
# models that are not actually responsive to the stimuli). For time
# efficiency, and in order to avoid division by zero, we ignore these
# model time courses.
aryPrfTcVar = np.var(aryPrfTc, axis=-1)
# Zero with float32 precision for comparison:
varZero32 = np.array(([0.0])).astype(np.float32)[0]
# Loop through pRF models:
for idxMdl in range(0, varNumMdls):
# If desired by user, restrict the model fitting such that certain
# models are restricted to particular voxels
if lgcRstr is not None:
# Apply flatnonzero, so we can use cascaded integer indexing later
lgcVxl = np.flatnonzero(lgcRstr[:, idxMdl])
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('------------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
if lgcPrint:
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Only fit pRF model if:
# 1) all feature predictors have a variance greater than zero AND
# 2) at least one voxel is being tested
if np.logical_and(np.all(np.greater(aryPrfTcVar[idxMdl], varZero32),
axis=0),
np.greater(lgcVxl.size, 0)):
# Get predictor time courses for this specific model
vecMdl = aryPrfTc[idxMdl, :, :].T
# Check whether we need to crossvalidate
if lgcXval:
# We do crossvalidate. In this case, we loop through
# the different folds of the crossvalidation and
# calculate the cross-validation error for the current
# model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
# For time course with one predictor
aryResXval = cy_lst_sq_xval_one(np.squeeze(vecMdl),
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
elif varNumFtr == 2:
# For time course with two predictors
aryResXval = cy_lst_sq_xval_two(vecMdl,
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
else:
if lgcPrint:
print('Cython currently not implemented for ' +
'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
aryResXval = np_lst_sq_xval(vecMdl, aryFuncChnk[:, lgcVxl],
aryIdxTrn, aryIdxTst)
# calculate the average cross validation error across
# all folds
vecTmpRes = np.mean(aryResXval, axis=1)
else:
# We do not crossvalidate. In this case, we calculate
# the ratio of the explained variance (R squared)
# for the current model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
# For time course with one predictor
aryTmpBts, vecTmpRes = cy_lst_sq_one(
np.squeeze(vecMdl), aryFuncChnk[:, lgcVxl])
elif varNumFtr == 2:
# For time course with two predictors
aryTmpBts, vecTmpRes = \
cy_lst_sq_two(vecMdl, aryFuncChnk[:, lgcVxl])
else:
if lgcPrint:
print('Cython currently not implemented for ' +
'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
# Numpy linalg.lstsq is used to calculate the
# beta values and residuals of the current model:
aryTmpBts, vecTmpRes = np_lst_sq(vecMdl,
aryFuncChnk[:, lgcVxl])
# Check whether current crossvalidation error (xval=True)
# or residuals (xval=False) are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes[lgcVxl])
# Apply np.flatnonzero for cascaded integer-indexing
vecLgcTmpRes = np.flatnonzero(vecLgcTmpRes)
# Replace best x and y position values, and SD values:
vecBstXpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 0]
vecBstYpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 1]
vecBstSd[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 2]
# Replace best mean residual values:
vecBstRes[lgcVxl[vecLgcTmpRes]] = vecTmpRes[vecLgcTmpRes]
if not lgcXval:
# Replace best beta values:
aryBstBts[lgcVxl[vecLgcTmpRes], :] = \
aryTmpBts[:, vecLgcTmpRes].T
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds[lgcVxl[vecLgcTmpRes], :] = \
aryResXval[vecLgcTmpRes, :]
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the average correlation coefficient between predicted and
# measured time course (xval=True) or the coefficient of determination
# (xval=False) for each voxel.
if lgcXval:
# create a vector that allows us to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryFuncChnk.shape[1])
# Since we did not do this during finding the best model, we still need
# to calculate deviation from a mean model for every voxel and fold
# arySsTotXval as well as calculate the best betas for the full model
# concatenate vectors with best x, y, sigma params
aryBstPrm = np.stack((vecBstXpos, vecBstYpos, vecBstSd), axis=1)
# Find unique rows
aryUnqRows = fnd_unq_rws(aryBstPrm, return_index=False,
return_inverse=False)
# Calculate deviation from a mean model for every voxel and fold
arySsTotXval = np.zeros((aryBstResFlds.shape),
dtype=aryBstResFlds.dtype)
# Loop over all best-fitting model parameter combinations found
for vecPrm in aryUnqRows:
# Get logical for voxels for which this prm combi was the best
lgcPrm = np.isclose(aryBstPrm, vecPrm, atol=1e-04).all(axis=1)
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcIndMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=1e-04).all(axis=1))[0][0]
if np.all(np.invert(lgcPrm)):
if lgcPrint:
print('------------No voxel found, process ' + str(idxPrc))
# Mark those voxels that were visited
vecVxlTst[lgcPrm] += 1
# Get voxel time course
aryVxlTc = aryFuncChnk[:, lgcPrm]
# Get model time courses
aryMdlTc = aryPrfTc[lgcIndMdl, :, :].T
# Calculate beta parameter estimates for entire model
aryBstBts[lgcPrm, :] = np.linalg.lstsq(aryMdlTc,
aryVxlTc,
rcond=-1)[0].T
# loop over cross-validation folds
for idxXval in range(varNumXval):
# Get functional data for tst:
aryFuncChnkTst = aryVxlTc[
aryIdxTst[:, idxXval], :]
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnkTst,
np.mean(aryFuncChnkTst,
axis=0)[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
arySsTotXval[lgcPrm, idxXval] = vecSsTot
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for SStot calc'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# Calculate coefficient of determination by comparing:
# aryBstResFlds vs. arySsTotXval
# get logical to check that arySsTotXval is greater than zero in all
# voxels and folds
lgcExclZeros = np.all(np.greater(arySsTotXval, np.array([0.0])),
axis=1)
if lgcPrint:
print('------------Nr of voxels: ' + str(len(lgcExclZeros)))
print('------------Nr of voxels above 0: ' +
str(np.sum(lgcExclZeros)))
# Calculate R2 for every crossvalidation fold separately
aryBstR2fld = np.subtract(
1.0, np.divide(aryBstResFlds,
arySsTotXval))
# Calculate mean R2 across folds here
vecBstR2 = np.subtract(
1.0, np.mean(np.divide(aryBstResFlds,
arySsTotXval),
axis=1))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts,
aryBstR2fld]
queOut.put(lstOut)
else:
# To calculate the coefficient of determination, we start with the
# total sum of squares (i.e. the deviation of the data from the mean).
# The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts]
queOut.put(lstOut) | [
"def",
"find_prf_cpu",
"(",
"idxPrc",
",",
"aryFuncChnk",
",",
"aryPrfTc",
",",
"aryMdlParams",
",",
"strVersion",
",",
"lgcXval",
",",
"varNumXval",
",",
"queOut",
",",
"lgcRstr",
"=",
"None",
",",
"lgcPrint",
"=",
"True",
")",
":",
"# Number of models in the visual space:",
"varNumMdls",
"=",
"aryPrfTc",
".",
"shape",
"[",
"0",
"]",
"# Number of feautures",
"varNumFtr",
"=",
"aryPrfTc",
".",
"shape",
"[",
"1",
"]",
"# Number of voxels to be fitted in this chunk:",
"varNumVoxChnk",
"=",
"aryFuncChnk",
".",
"shape",
"[",
"0",
"]",
"# Vectors for pRF finding results [number-of-voxels times one]:",
"# make sure they have the same precision as aryMdlParams, since this",
"# is important for later comparison",
"vecBstXpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"aryMdlParams",
".",
"dtype",
")",
"vecBstYpos",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"aryMdlParams",
".",
"dtype",
")",
"vecBstSd",
"=",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"aryMdlParams",
".",
"dtype",
")",
"# Vector for best R-square value. For each model fit, the R-square value is",
"# compared to this, and updated if it is lower than the best-fitting",
"# solution so far. We initialise with an arbitrary, high value",
"vecBstRes",
"=",
"np",
".",
"add",
"(",
"np",
".",
"zeros",
"(",
"varNumVoxChnk",
")",
",",
"np",
".",
"inf",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# array for best beta values. If we update the residual value above because",
"# it is lower, we also update the beta values of these voxels",
"aryBstBts",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumFtr",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# In case we cross-validate we also save and replace the best",
"# residual values for every fold (not only mean across folds):",
"if",
"lgcXval",
":",
"aryBstResFlds",
"=",
"np",
".",
"zeros",
"(",
"(",
"varNumVoxChnk",
",",
"varNumXval",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# We reshape the voxel time courses, so that time goes down the column,",
"# i.e. from top to bottom.",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"T",
"# Change type to float 32:",
"aryFuncChnk",
"=",
"aryFuncChnk",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"aryPrfTc",
"=",
"aryPrfTc",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# if lgc for Xval is true we already prepare indices for xvalidation",
"if",
"lgcXval",
":",
"# obtain iterator for cross-validation",
"itXval",
"=",
"KFold",
"(",
"n_splits",
"=",
"varNumXval",
")",
"vecSplts",
"=",
"np",
".",
"arange",
"(",
"aryPrfTc",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"# prepare lists that will hold indices for xvalidation",
"lstIdxTrn",
"=",
"[",
"]",
"lstIdxtst",
"=",
"[",
"]",
"# Loop over the cross-validations to put indcies in array",
"for",
"idxTrn",
",",
"idxTst",
"in",
"itXval",
".",
"split",
"(",
"vecSplts",
")",
":",
"lstIdxTrn",
".",
"append",
"(",
"idxTrn",
")",
"lstIdxtst",
".",
"append",
"(",
"idxTst",
")",
"# trun lists into array",
"aryIdxTrn",
"=",
"np",
".",
"stack",
"(",
"lstIdxTrn",
",",
"axis",
"=",
"-",
"1",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"aryIdxTst",
"=",
"np",
".",
"stack",
"(",
"lstIdxtst",
",",
"axis",
"=",
"-",
"1",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Prepare status indicator if this is the first of the parallel processes:",
"if",
"idxPrc",
"==",
"0",
":",
"# We create a status indicator for the time consuming pRF model finding",
"# algorithm. Number of steps of the status indicator:",
"varStsStpSze",
"=",
"20",
"# Vector with pRF values at which to give status feedback:",
"vecStatPrf",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"varNumMdls",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrf",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrf",
")",
"vecStatPrf",
"=",
"vecStatPrf",
".",
"astype",
"(",
"int",
")",
"# Vector with corresponding percentage values at which to give status",
"# feedback:",
"vecStatPrc",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"100",
",",
"num",
"=",
"(",
"varStsStpSze",
"+",
"1",
")",
",",
"endpoint",
"=",
"True",
")",
"vecStatPrc",
"=",
"np",
".",
"ceil",
"(",
"vecStatPrc",
")",
"vecStatPrc",
"=",
"vecStatPrc",
".",
"astype",
"(",
"int",
")",
"# Counter for status indicator:",
"varCntSts01",
"=",
"0",
"varCntSts02",
"=",
"0",
"# If user does not restrict model space for particular voxels, select",
"# all voxels",
"if",
"lgcRstr",
"is",
"None",
":",
"lgcVxl",
"=",
"np",
".",
"arange",
"(",
"varNumVoxChnk",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"# There can be pRF model time courses with a variance of zero (i.e. pRF",
"# models that are not actually responsive to the stimuli). For time",
"# efficiency, and in order to avoid division by zero, we ignore these",
"# model time courses.",
"aryPrfTcVar",
"=",
"np",
".",
"var",
"(",
"aryPrfTc",
",",
"axis",
"=",
"-",
"1",
")",
"# Zero with float32 precision for comparison:",
"varZero32",
"=",
"np",
".",
"array",
"(",
"(",
"[",
"0.0",
"]",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"[",
"0",
"]",
"# Loop through pRF models:",
"for",
"idxMdl",
"in",
"range",
"(",
"0",
",",
"varNumMdls",
")",
":",
"# If desired by user, restrict the model fitting such that certain",
"# models are restricted to particular voxels",
"if",
"lgcRstr",
"is",
"not",
"None",
":",
"# Apply flatnonzero, so we can use cascaded integer indexing later",
"lgcVxl",
"=",
"np",
".",
"flatnonzero",
"(",
"lgcRstr",
"[",
":",
",",
"idxMdl",
"]",
")",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Status indicator:",
"if",
"varCntSts02",
"==",
"vecStatPrf",
"[",
"varCntSts01",
"]",
":",
"# Prepare status message:",
"strStsMsg",
"=",
"(",
"'------------Progress: '",
"+",
"str",
"(",
"vecStatPrc",
"[",
"varCntSts01",
"]",
")",
"+",
"' % --- '",
"+",
"str",
"(",
"vecStatPrf",
"[",
"varCntSts01",
"]",
")",
"+",
"' pRF models out of '",
"+",
"str",
"(",
"varNumMdls",
")",
")",
"if",
"lgcPrint",
":",
"print",
"(",
"strStsMsg",
")",
"# Only increment counter if the last value has not been",
"# reached yet:",
"if",
"varCntSts01",
"<",
"varStsStpSze",
":",
"varCntSts01",
"=",
"varCntSts01",
"+",
"int",
"(",
"1",
")",
"# Only fit pRF model if:",
"# 1) all feature predictors have a variance greater than zero AND",
"# 2) at least one voxel is being tested",
"if",
"np",
".",
"logical_and",
"(",
"np",
".",
"all",
"(",
"np",
".",
"greater",
"(",
"aryPrfTcVar",
"[",
"idxMdl",
"]",
",",
"varZero32",
")",
",",
"axis",
"=",
"0",
")",
",",
"np",
".",
"greater",
"(",
"lgcVxl",
".",
"size",
",",
"0",
")",
")",
":",
"# Get predictor time courses for this specific model",
"vecMdl",
"=",
"aryPrfTc",
"[",
"idxMdl",
",",
":",
",",
":",
"]",
".",
"T",
"# Check whether we need to crossvalidate",
"if",
"lgcXval",
":",
"# We do crossvalidate. In this case, we loop through",
"# the different folds of the crossvalidation and",
"# calculate the cross-validation error for the current",
"# model for all voxel time courses.",
"# Cython version:",
"if",
"strVersion",
"==",
"'cython'",
":",
"# A cython function is used to calculate the residuals and",
"# beta parameter estimates of the current model:",
"if",
"varNumFtr",
"==",
"1",
":",
"# For time course with one predictors",
"aryResXval",
"=",
"cy_lst_sq_xval_one",
"(",
"np",
".",
"squeeze",
"(",
"vecMdl",
")",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
",",
"aryIdxTrn",
",",
"aryIdxTst",
")",
"elif",
"varNumFtr",
"==",
"2",
":",
"# For time course with two predictors",
"aryResXval",
"=",
"cy_lst_sq_xval_two",
"(",
"vecMdl",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
",",
"aryIdxTrn",
",",
"aryIdxTst",
")",
"else",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'Cython currently not implemented for '",
"+",
"'more than two predictors.'",
")",
"# Numpy version:",
"elif",
"strVersion",
"==",
"'numpy'",
":",
"aryResXval",
"=",
"np_lst_sq_xval",
"(",
"vecMdl",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
",",
"aryIdxTrn",
",",
"aryIdxTst",
")",
"# calculate the average cross validation error across",
"# all folds",
"vecTmpRes",
"=",
"np",
".",
"mean",
"(",
"aryResXval",
",",
"axis",
"=",
"1",
")",
"else",
":",
"# We do not crossvalidate. In this case, we calculate",
"# the ratio of the explained variance (R squared)",
"# for the current model for all voxel time courses.",
"# Cython version:",
"if",
"strVersion",
"==",
"'cython'",
":",
"# A cython function is used to calculate the residuals and",
"# beta parameter estimates of the current model:",
"if",
"varNumFtr",
"==",
"1",
":",
"# For time course with one predictor",
"aryTmpBts",
",",
"vecTmpRes",
"=",
"cy_lst_sq_one",
"(",
"np",
".",
"squeeze",
"(",
"vecMdl",
")",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
")",
"elif",
"varNumFtr",
"==",
"2",
":",
"# For time course with two predictors",
"aryTmpBts",
",",
"vecTmpRes",
"=",
"cy_lst_sq_two",
"(",
"vecMdl",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
")",
"else",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'Cython currently not implemented for '",
"+",
"'more than two two predictors.'",
")",
"# Numpy version:",
"elif",
"strVersion",
"==",
"'numpy'",
":",
"# Numpy linalg.lstsq is used to calculate the",
"# beta values and residuals of the current model:",
"aryTmpBts",
",",
"vecTmpRes",
"=",
"np_lst_sq",
"(",
"vecMdl",
",",
"aryFuncChnk",
"[",
":",
",",
"lgcVxl",
"]",
")",
"# Check whether current crossvalidation error (xval=True)",
"# or residuals (xval=False) are lower than previously",
"# calculated ones:",
"vecLgcTmpRes",
"=",
"np",
".",
"less",
"(",
"vecTmpRes",
",",
"vecBstRes",
"[",
"lgcVxl",
"]",
")",
"# Apply np.flatnonzero for cascaded integer-indexing",
"vecLgcTmpRes",
"=",
"np",
".",
"flatnonzero",
"(",
"vecLgcTmpRes",
")",
"# Replace best x and y position values, and SD values:",
"vecBstXpos",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
"]",
"=",
"aryMdlParams",
"[",
"idxMdl",
",",
"0",
"]",
"vecBstYpos",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
"]",
"=",
"aryMdlParams",
"[",
"idxMdl",
",",
"1",
"]",
"vecBstSd",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
"]",
"=",
"aryMdlParams",
"[",
"idxMdl",
",",
"2",
"]",
"# Replace best mean residual values:",
"vecBstRes",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
"]",
"=",
"vecTmpRes",
"[",
"vecLgcTmpRes",
"]",
"if",
"not",
"lgcXval",
":",
"# Replace best beta values:",
"aryBstBts",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
",",
":",
"]",
"=",
"aryTmpBts",
"[",
":",
",",
"vecLgcTmpRes",
"]",
".",
"T",
"# In case we cross-validate we also save and replace the best",
"# residual values for every fold (not only mean across folds):",
"if",
"lgcXval",
":",
"aryBstResFlds",
"[",
"lgcVxl",
"[",
"vecLgcTmpRes",
"]",
",",
":",
"]",
"=",
"aryResXval",
"[",
"vecLgcTmpRes",
",",
":",
"]",
"# Status indicator (only used in the first of the parallel",
"# processes):",
"if",
"idxPrc",
"==",
"0",
":",
"# Increment status indicator counter:",
"varCntSts02",
"=",
"varCntSts02",
"+",
"1",
"# After finding the best fitting model for each voxel, we still have to",
"# calculate the average correlation coefficient between predicted and",
"# measured time course (xval=True) or the coefficient of determination",
"# (xval=False) for each voxel.",
"if",
"lgcXval",
":",
"# create vector that allows to check whether every voxel is visited",
"# exactly once",
"vecVxlTst",
"=",
"np",
".",
"zeros",
"(",
"aryFuncChnk",
".",
"shape",
"[",
"1",
"]",
")",
"# Since we did not do this during finding the best model, we still need",
"# to calculate deviation from a mean model for every voxel and fold",
"# arySsTotXval as well as calculate the best betas for the full model",
"# concatenate vectors with best x, y, sigma params",
"aryBstPrm",
"=",
"np",
".",
"stack",
"(",
"(",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
")",
",",
"axis",
"=",
"1",
")",
"# Find unique rows",
"aryUnqRows",
"=",
"fnd_unq_rws",
"(",
"aryBstPrm",
",",
"return_index",
"=",
"False",
",",
"return_inverse",
"=",
"False",
")",
"# Calculate deviation from a mean model for every voxel and fold",
"arySsTotXval",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryBstResFlds",
".",
"shape",
")",
",",
"dtype",
"=",
"aryBstResFlds",
".",
"dtype",
")",
"# Loop over all best-fitting model parameter combinations found",
"for",
"vecPrm",
"in",
"aryUnqRows",
":",
"# Get logical for voxels for which this prm combi was the best",
"lgcPrm",
"=",
"np",
".",
"isclose",
"(",
"aryBstPrm",
",",
"vecPrm",
",",
"atol",
"=",
"1e-04",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"# Get logical index for the model number",
"# This can only be 1 index, so we directly get 1st entry of array",
"lgcIndMdl",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isclose",
"(",
"aryMdlParams",
",",
"vecPrm",
",",
"atol",
"=",
"1e-04",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"np",
".",
"all",
"(",
"np",
".",
"invert",
"(",
"lgcPrm",
")",
")",
":",
"if",
"lgcPrint",
":",
"print",
"(",
"'------------No voxel found, process '",
"+",
"str",
"(",
"idxPrc",
")",
")",
"# Mark those voxels that were visited",
"vecVxlTst",
"[",
"lgcPrm",
"]",
"+=",
"1",
"# Get voxel time course",
"aryVxlTc",
"=",
"aryFuncChnk",
"[",
":",
",",
"lgcPrm",
"]",
"# Get model time courses",
"aryMdlTc",
"=",
"aryPrfTc",
"[",
"lgcIndMdl",
",",
":",
",",
":",
"]",
".",
"T",
"# Calculate beta parameter estimates for entire model",
"aryBstBts",
"[",
"lgcPrm",
",",
":",
"]",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"aryMdlTc",
",",
"aryVxlTc",
",",
"rcond",
"=",
"-",
"1",
")",
"[",
"0",
"]",
".",
"T",
"# loop over cross-validation folds",
"for",
"idxXval",
"in",
"range",
"(",
"varNumXval",
")",
":",
"# Get functional data for tst:",
"aryFuncChnkTst",
"=",
"aryVxlTc",
"[",
"aryIdxTst",
"[",
":",
",",
"idxXval",
"]",
",",
":",
"]",
"# Deviation from the mean for each datapoint:",
"aryFuncDev",
"=",
"np",
".",
"subtract",
"(",
"aryFuncChnkTst",
",",
"np",
".",
"mean",
"(",
"aryFuncChnkTst",
",",
"axis",
"=",
"0",
")",
"[",
"None",
",",
":",
"]",
")",
"# Sum of squares:",
"vecSsTot",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"power",
"(",
"aryFuncDev",
",",
"2.0",
")",
",",
"axis",
"=",
"0",
")",
"arySsTotXval",
"[",
"lgcPrm",
",",
"idxXval",
"]",
"=",
"vecSsTot",
"# check that every voxel was visited exactly once",
"errMsg",
"=",
"'At least one voxel visited more than once for SStot calc'",
"assert",
"len",
"(",
"vecVxlTst",
")",
"==",
"np",
".",
"sum",
"(",
"vecVxlTst",
")",
",",
"errMsg",
"# Calculate coefficient of determination by comparing:",
"# aryBstResFlds vs. arySsTotXval",
"# get logical to check that arySsTotXval is greater than zero in all",
"# voxels and folds",
"lgcExclZeros",
"=",
"np",
".",
"all",
"(",
"np",
".",
"greater",
"(",
"arySsTotXval",
",",
"np",
".",
"array",
"(",
"[",
"0.0",
"]",
")",
")",
",",
"axis",
"=",
"1",
")",
"if",
"lgcPrint",
":",
"print",
"(",
"'------------Nr of voxels: '",
"+",
"str",
"(",
"len",
"(",
"lgcExclZeros",
")",
")",
")",
"print",
"(",
"'------------Nr of voxels avove 0: '",
"+",
"str",
"(",
"np",
".",
"sum",
"(",
"lgcExclZeros",
")",
")",
")",
"# Calculate R2 for every crossvalidation fold seperately",
"aryBstR2fld",
"=",
"np",
".",
"subtract",
"(",
"1.0",
",",
"np",
".",
"divide",
"(",
"aryBstResFlds",
",",
"arySsTotXval",
")",
")",
"# Calculate mean R2 across folds here",
"vecBstR2",
"=",
"np",
".",
"subtract",
"(",
"1.0",
",",
"np",
".",
"mean",
"(",
"np",
".",
"divide",
"(",
"aryBstResFlds",
",",
"arySsTotXval",
")",
",",
"axis",
"=",
"1",
")",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
",",
"vecBstR2",
",",
"aryBstBts",
",",
"aryBstR2fld",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")",
"else",
":",
"# To calculate the coefficient of determination, we start with the",
"# total sum of squares (i.e. the deviation of the data from the mean).",
"# The mean of each time course:",
"vecFuncMean",
"=",
"np",
".",
"mean",
"(",
"aryFuncChnk",
",",
"axis",
"=",
"0",
")",
"# Deviation from the mean for each datapoint:",
"aryFuncDev",
"=",
"np",
".",
"subtract",
"(",
"aryFuncChnk",
",",
"vecFuncMean",
"[",
"None",
",",
":",
"]",
")",
"# Sum of squares:",
"vecSsTot",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"power",
"(",
"aryFuncDev",
",",
"2.0",
")",
",",
"axis",
"=",
"0",
")",
"# Coefficient of determination:",
"vecBstR2",
"=",
"np",
".",
"subtract",
"(",
"1.0",
",",
"np",
".",
"divide",
"(",
"vecBstRes",
",",
"vecSsTot",
")",
")",
"# Output list:",
"lstOut",
"=",
"[",
"idxPrc",
",",
"vecBstXpos",
",",
"vecBstYpos",
",",
"vecBstSd",
",",
"vecBstR2",
",",
"aryBstBts",
"]",
"queOut",
".",
"put",
"(",
"lstOut",
")"
] | Find best fitting pRF model for voxel time course, using the CPU.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryFuncChnk : np.array
2D array with functional MRI data, with shape aryFuncChnk[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
aryPrfTc[x-pos*y-pos*SD, number of features, number of volumes]
aryMdlParams : np.array
2D array with all pRF model parameter combinations.
strVersion : str
Which version to use for pRF finding; 'numpy' or 'cython'.
lgcXval: boolean
Logical to determine whether we cross-validate.
varNumXval: int
Number of folds for k-fold cross-validation.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcRstr : boolean numpy array or None, default None
Logical to restrict certain models to particular voxels.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
aryBstBts : np.array
2D array with beta parameter estimates of 'winning' pRF model for
each voxel, with shape aryBstBts[voxel, features].
Notes
-----
The list with results is not returned directly, but placed on a
multiprocessing queue. This version performs the model finding on the CPU,
using numpy or cython (depending on the value of `strVersion`). | [
"Find",
"best",
"fitting",
"pRF",
"model",
"for",
"voxel",
"time",
"course",
"using",
"the",
"CPU",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/find_prf_cpu.py#L30-L465 |
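A minimal single-process invocation sketch for `find_prf_cpu`, using toy array shapes and random data; the surrounding package normally drives this through parallel worker processes, so the direct call here is an illustrative assumption.

import multiprocessing as mp

import numpy as np

# Toy dimensions: 3 candidate models, 1 feature, 50 volumes, 10 voxels.
aryPrfTc = np.random.randn(3, 1, 50).astype(np.float32)
aryFuncChnk = np.random.randn(10, 50).astype(np.float32)
aryMdlParams = np.array([[0.0, 0.0, 1.0],
                         [1.0, 1.0, 2.0],
                         [2.0, 0.0, 1.5]], dtype=np.float32)

queOut = mp.Queue()
find_prf_cpu(0, aryFuncChnk, aryPrfTc, aryMdlParams, 'numpy',
             lgcXval=False, varNumXval=1, queOut=queOut)
# Without cross-validation the result list holds six items.
idxPrc, vecX, vecY, vecSd, vecR2, aryBts = queOut.get()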
mediawiki-utilities/python-mwreverts | mwreverts/db.py | check | def check(schema, rev_id, page_id=None, radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of a revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
page_id : int
the ID of the page the revision occupies (slower if not provided)
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
:Example:
>>> import mwdb
>>> import mwreverts.db
>>>
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
"?read_default_file=~/replica.my.cnf")
>>>
>>> def print_revert(revert):
... if revert is None:
... print(None)
... else:
... print(revert.reverting['rev_id'],
... [r['rev_id'] for r in revert.reverteds],
... revert.reverted_to['rev_id'])
...
>>> reverting, reverted, reverted_to = \\
... mwreverts.db.check(schema, 679778587)
>>> print_revert(reverting)
None
>>> print_revert(reverted)
679778743 [679778587] 679742862
>>> print_revert(reverted_to)
None
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
page_id = int(page_id) if page_id is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the page_id, we're going to need to look them up
if page_id is None:
page_id = get_page_id(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_edits_before(
schema, rev_id + 1, page_id, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
if window is not None and before is None:
before = Timestamp(current_rev.rev_timestamp) + window
# Load future revisions
future_revs = list(n_edits_after(
schema, rev_id, page_id, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | python | def check(schema, rev_id, page_id=None, radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of a revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
page_id : int
the ID of the page the revision occupies (slower if not provided)
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
:Example:
>>> import mwdb
>>> import mwreverts.db
>>>
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
"?read_default_file=~/replica.my.cnf")
>>>
>>> def print_revert(revert):
... if revert is None:
... print(None)
... else:
... print(revert.reverting['rev_id'],
... [r['rev_id'] for r in revert.reverteds],
... revert.reverted_to['rev_id'])
...
>>> reverting, reverted, reverted_to = \\
... mwreverts.db.check(schema, 679778587)
>>> print_revert(reverting)
None
>>> print_revert(reverted)
679778743 [679778587] 679742862
>>> print_revert(reverted_to)
None
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
page_id = int(page_id) if page_id is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the page_id, we're going to need to look them up
if page_id is None:
page_id = get_page_id(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_edits_before(
schema, rev_id + 1, page_id, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
if window is not None and before is None:
before = Timestamp(current_rev.rev_timestamp) + window
# Load future revisions
future_revs = list(n_edits_after(
schema, rev_id, page_id, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | [
"def",
"check",
"(",
"schema",
",",
"rev_id",
",",
"page_id",
"=",
"None",
",",
"radius",
"=",
"defaults",
".",
"RADIUS",
",",
"before",
"=",
"None",
",",
"window",
"=",
"None",
")",
":",
"rev_id",
"=",
"int",
"(",
"rev_id",
")",
"radius",
"=",
"int",
"(",
"radius",
")",
"if",
"radius",
"<",
"1",
":",
"raise",
"TypeError",
"(",
"\"invalid radius. Expected a positive integer.\"",
")",
"page_id",
"=",
"int",
"(",
"page_id",
")",
"if",
"page_id",
"is",
"not",
"None",
"else",
"None",
"before",
"=",
"Timestamp",
"(",
"before",
")",
"if",
"before",
"is",
"not",
"None",
"else",
"None",
"# If we don't have the page_id, we're going to need to look them up",
"if",
"page_id",
"is",
"None",
":",
"page_id",
"=",
"get_page_id",
"(",
"schema",
",",
"rev_id",
")",
"# Load history and current rev",
"current_and_past_revs",
"=",
"list",
"(",
"n_edits_before",
"(",
"schema",
",",
"rev_id",
"+",
"1",
",",
"page_id",
",",
"n",
"=",
"radius",
"+",
"1",
")",
")",
"if",
"len",
"(",
"current_and_past_revs",
")",
"<",
"1",
":",
"raise",
"KeyError",
"(",
"\"Revision {0} not found in page {1}.\"",
".",
"format",
"(",
"rev_id",
",",
"page_id",
")",
")",
"current_rev",
",",
"past_revs",
"=",
"(",
"current_and_past_revs",
"[",
"-",
"1",
"]",
",",
"# Current rev is the last one returned",
"current_and_past_revs",
"[",
":",
"-",
"1",
"]",
"# The rest are past revs",
")",
"if",
"current_rev",
".",
"rev_id",
"!=",
"rev_id",
":",
"raise",
"KeyError",
"(",
"\"Revision {0} not found in page {1}.\"",
".",
"format",
"(",
"rev_id",
",",
"page_id",
")",
")",
"if",
"window",
"is",
"not",
"None",
"and",
"before",
"is",
"None",
":",
"before",
"=",
"Timestamp",
"(",
"current_rev",
".",
"rev_timestamp",
")",
"+",
"window",
"# Load future revisions",
"future_revs",
"=",
"list",
"(",
"n_edits_after",
"(",
"schema",
",",
"rev_id",
",",
"page_id",
",",
"n",
"=",
"radius",
",",
"before",
"=",
"before",
")",
")",
"return",
"build_revert_tuple",
"(",
"rev_id",
",",
"past_revs",
",",
"current_rev",
",",
"future_revs",
",",
"radius",
")"
] | Checks the revert status of a revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
page_id : int
the ID of the page the revision occupies (slower if not provided)
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
:Example:
>>> import mwdb
>>> import mwreverts.db
>>>
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
"?read_default_file=~/replica.my.cnf")
>>>
>>> def print_revert(revert):
... if revert is None:
... print(None)
... else:
... print(revert.reverting['rev_id'],
... [r['rev_id'] for r in revert.reverteds],
... revert.reverted_to['rev_id'])
...
>>> reverting, reverted, reverted_to = \\
... mwreverts.db.check(schema, 679778587)
>>> print_revert(reverting)
None
>>> print_revert(reverted)
679778743 [679778587] 679742862
>>> print_revert(reverted_to)
None | [
"Checks",
"the",
"revert",
"status",
"of",
"a",
"revision",
".",
"With",
"this",
"method",
"you",
"can",
"determine",
"whether",
"an",
"edit",
"is",
"a",
"reverting",
"edit",
"was",
"reverted",
"by",
"another",
"edit",
"and",
"/",
"or",
"was",
"reverted_to",
"by",
"another",
"edit",
"."
] | train | https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/db.py#L57-L153 |
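A small, hypothetical helper built on the triple returned by `check`; the attribute names follow the docstring example above, and `schema` is assumed to be an open `mwdb.Schema`.

def describe_edit(schema, rev_id):
    # Classify an edit from the (reverting, reverted, reverted_to) triple.
    reverting, reverted, reverted_to = check(schema, rev_id)
    labels = []
    if reverting is not None:
        labels.append("reverts {0} edit(s)".format(len(reverting.reverteds)))
    if reverted is not None:
        labels.append("was reverted by {0}".format(
            reverted.reverting['rev_id']))
    if reverted_to is not None:
        labels.append("was restored by a later edit")
    return "; ".join(labels) or "ordinary edit"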
mediawiki-utilities/python-mwreverts | mwreverts/db.py | check_archive | def check_archive(schema, rev_id, namespace=None, title=None, timestamp=None,
radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of an archived revision (from a deleted page).
With this method, you can determine whether an edit is a 'reverting'
edit, was 'reverted' by another edit and/or was 'reverted_to' by
another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
namespace : int
the namespace ID of the page the revision exists in
title : str
the title of the page the revision exists in
timestamp : :class:`mwtypes.Timestamp`
the timestamp that the revision for `rev_id` was saved
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
namespace = int(namespace) if namespace is not None else None
title = str(title) if title is not None else None
timestamp = Timestamp(timestamp) if timestamp is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the namespace, title or timestamp, we're going to need to look them up
if namespace is None or title is None or timestamp is None:
namespace, title, timestamp = \
get_archived_namespace_title_and_timestamp(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_archived_edits_before(
schema, rev_id + 1, namespace, title, timestamp + 1, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.ar_rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
if window is not None and before is None:
before = Timestamp(current_rev.ar_timestamp) + window
# Load future revisions
future_revs = list(n_archived_edits_after(
schema, rev_id, namespace, title, timestamp, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | python | def check_archive(schema, rev_id, namespace=None, title=None, timestamp=None,
radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of an archived revision (from a deleted page).
With this method, you can determine whether an edit is a 'reverting'
edit, was 'reverted' by another edit and/or was 'reverted_to' by
another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
namespace : int
the namespace ID of the page the revision exists in
title : str
the title of the page the revision exists in
timestamp : :class:`mwtypes.Timestamp`
the timestamp that the revision for `rev_id` was saved
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
namespace = int(namespace) if namespace is not None else None
title = str(title) if title is not None else None
timestamp = Timestamp(timestamp) if timestamp is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the namespace, title or timestamp, we're going to need to look them up
if namespace is None or title is None or timestamp is None:
namespace, title, timestamp = \
get_archived_namespace_title_and_timestamp(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_archived_edits_before(
schema, rev_id + 1, namespace, title, timestamp + 1, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.ar_rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
if window is not None and before is None:
before = Timestamp(current_rev.ar_timestamp) + window
# Load future revisions
future_revs = list(n_archived_edits_after(
schema, rev_id, namespace, title, timestamp, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | [
"def",
"check_archive",
"(",
"schema",
",",
"rev_id",
",",
"namespace",
"=",
"None",
",",
"title",
"=",
"None",
",",
"timestamp",
"=",
"None",
",",
"radius",
"=",
"defaults",
".",
"RADIUS",
",",
"before",
"=",
"None",
",",
"window",
"=",
"None",
")",
":",
"rev_id",
"=",
"int",
"(",
"rev_id",
")",
"radius",
"=",
"int",
"(",
"radius",
")",
"if",
"radius",
"<",
"1",
":",
"raise",
"TypeError",
"(",
"\"invalid radius. Expected a positive integer.\"",
")",
"namespace",
"=",
"int",
"(",
"namespace",
")",
"if",
"namespace",
"is",
"not",
"None",
"else",
"None",
"title",
"=",
"str",
"(",
"title",
")",
"if",
"title",
"is",
"not",
"None",
"else",
"None",
"timestamp",
"=",
"Timestamp",
"(",
"timestamp",
")",
"if",
"timestamp",
"is",
"not",
"None",
"else",
"None",
"before",
"=",
"Timestamp",
"(",
"before",
")",
"if",
"before",
"is",
"not",
"None",
"else",
"None",
"# If we don't have the page_id, we're going to need to look them up",
"if",
"namespace",
"is",
"None",
"or",
"title",
"is",
"None",
"or",
"timestamp",
"is",
"None",
":",
"namespace",
",",
"title",
",",
"timestamp",
"=",
"get_archived_namespace_title_and_timestamp",
"(",
"schema",
",",
"rev_id",
")",
"# Load history and current rev",
"current_and_past_revs",
"=",
"list",
"(",
"n_archived_edits_before",
"(",
"schema",
",",
"rev_id",
"+",
"1",
",",
"namespace",
",",
"title",
",",
"timestamp",
"+",
"1",
",",
"n",
"=",
"radius",
"+",
"1",
")",
")",
"if",
"len",
"(",
"current_and_past_revs",
")",
"<",
"1",
":",
"raise",
"KeyError",
"(",
"\"Revision {0} not found in page {1}(ns={2}) @ {3}.\"",
".",
"format",
"(",
"rev_id",
",",
"title",
",",
"namespace",
",",
"timestamp",
")",
")",
"current_rev",
",",
"past_revs",
"=",
"(",
"current_and_past_revs",
"[",
"-",
"1",
"]",
",",
"# Current rev is the last one returned",
"current_and_past_revs",
"[",
":",
"-",
"1",
"]",
"# The rest are past revs",
")",
"if",
"current_rev",
".",
"ar_rev_id",
"!=",
"rev_id",
":",
"raise",
"KeyError",
"(",
"\"Revision {0} not found in page {1}(ns={2}) @ {3}.\"",
".",
"format",
"(",
"rev_id",
",",
"title",
",",
"namespace",
",",
"timestamp",
")",
")",
"if",
"window",
"is",
"not",
"None",
"and",
"before",
"is",
"None",
":",
"before",
"=",
"Timestamp",
"(",
"current_rev",
".",
"ar_timestamp",
")",
"+",
"window",
"# Load future revisions",
"future_revs",
"=",
"list",
"(",
"n_archived_edits_after",
"(",
"schema",
",",
"rev_id",
",",
"namespace",
",",
"title",
",",
"timestamp",
",",
"n",
"=",
"radius",
",",
"before",
"=",
"before",
")",
")",
"return",
"build_revert_tuple",
"(",
"rev_id",
",",
"past_revs",
",",
"current_rev",
",",
"future_revs",
",",
"radius",
")"
] | Checks the revert status of an archived revision (from a deleted page).
With this method, you can determine whether an edit is a 'reverting'
edit, was 'reverted' by another edit and/or was 'reverted_to' by
another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to make use of
rev_id : int
the ID of the revision to check
namespace : int
the namespace ID of the page the revision exists in
title : str
the title of the page the revision exists in
timestamp : :class:`mwtypes.Timestamp`
the timestamp that the revision for `rev_id` was saved
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit | [
"Checks",
"the",
"revert",
"status",
"of",
"an",
"archived",
"revision",
"(",
"from",
"a",
"deleted",
"page",
")",
".",
"With",
"this",
"method",
"you",
"can",
"determine",
"whether",
"an",
"edit",
"is",
"a",
"reverting",
"edit",
"was",
"reverted",
"by",
"another",
"edit",
"and",
"/",
"or",
"was",
"reverted_to",
"by",
"another",
"edit",
"."
] | train | https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/db.py#L202-L282 |
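A minimal usage sketch for the function above, not taken from the upstream docs: the connection string mirrors the check() example earlier, and the rev_id is a placeholder for a revision of a deleted page.
>>> import mwdb
>>> import mwreverts.db
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
...                      "?read_default_file=~/replica.my.cnf")
>>> reverting, reverted, reverted_to = \
...     mwreverts.db.check_archive(schema, 123456789)  # placeholder rev_id
>>> print(reverting is None, reverted is None, reverted_to is None)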
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | _get_json | def _get_json(value):
"""Convert the given value to a JSON object."""
if hasattr(value, 'replace'):
value = value.replace('\n', ' ')
try:
return json.loads(value)
except json.JSONDecodeError:
# Escape double quotes.
if hasattr(value, 'replace'):
value = value.replace('"', '\\"')
# try putting the value into a string
return json.loads('"{}"'.format(value)) | python | def _get_json(value):
"""Convert the given value to a JSON object."""
if hasattr(value, 'replace'):
value = value.replace('\n', ' ')
try:
return json.loads(value)
except json.JSONDecodeError:
# Escape double quotes.
if hasattr(value, 'replace'):
value = value.replace('"', '\\"')
# try putting the value into a string
return json.loads('"{}"'.format(value)) | [
"def",
"_get_json",
"(",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'replace'",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"value",
")",
"except",
"json",
".",
"JSONDecodeError",
":",
"# Escape double quotes.",
"if",
"hasattr",
"(",
"value",
",",
"'replace'",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"# try putting the value into a string",
"return",
"json",
".",
"loads",
"(",
"'\"{}\"'",
".",
"format",
"(",
"value",
")",
")"
] | Convert the given value to a JSON object. | [
"Convert",
"the",
"given",
"value",
"to",
"a",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L37-L48 |
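A quick sketch of _get_json's coercion rules; it is a private helper, but all of the save_* functions below route their values through it:
>>> from resolwe_runtime_utils import _get_json
>>> _get_json('42')
42
>>> _get_json('{"a": 1}')
{'a': 1}
>>> _get_json('plain text')
'plain text'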
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_list | def save_list(key, *values):
"""Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
"""
return json.dumps({key: [_get_json(value) for value in values]}) | python | def save_list(key, *values):
"""Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
"""
return json.dumps({key: [_get_json(value) for value in values]}) | [
"def",
"save_list",
"(",
"key",
",",
"*",
"values",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"{",
"key",
":",
"[",
"_get_json",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]",
"}",
")"
] | Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters. | [
"Convert",
"the",
"given",
"list",
"of",
"parameters",
"to",
"a",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L62-L70 |
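For example (numeric strings are coerced to numbers by _get_json):
>>> from resolwe_runtime_utils import save_list
>>> save_list('values', '1', '2.5', 'three')
'{"values": [1, 2.5, "three"]}'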
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_file | def save_file(key, file_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isfile(file_path):
return error("Output '{}' set to a missing file: '{}'.".format(key, file_path))
result = {key: {"file": file_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]['refs'] = refs
return json.dumps(result) | python | def save_file(key, file_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isfile(file_path):
return error("Output '{}' set to a missing file: '{}'.".format(key, file_path))
result = {key: {"file": file_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]['refs'] = refs
return json.dumps(result) | [
"def",
"save_file",
"(",
"key",
",",
"file_path",
",",
"*",
"refs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"return",
"error",
"(",
"\"Output '{}' set to a missing file: '{}'.\"",
".",
"format",
"(",
"key",
",",
"file_path",
")",
")",
"result",
"=",
"{",
"key",
":",
"{",
"\"file\"",
":",
"file_path",
"}",
"}",
"if",
"refs",
":",
"missing_refs",
"=",
"[",
"ref",
"for",
"ref",
"in",
"refs",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"ref",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"ref",
")",
")",
"]",
"if",
"len",
"(",
"missing_refs",
")",
">",
"0",
":",
"return",
"error",
"(",
"\"Output '{}' set to missing references: '{}'.\"",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"missing_refs",
")",
")",
")",
"result",
"[",
"key",
"]",
"[",
"'refs'",
"]",
"=",
"refs",
"return",
"json",
".",
"dumps",
"(",
"result",
")"
] | Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} | [
"Convert",
"the",
"given",
"parameters",
"to",
"a",
"special",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L73-L98 |
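A sketch, assuming a file named reads.fastq.gz exists in the working directory (the path is verified on disk):
>>> from resolwe_runtime_utils import save_file
>>> save_file('reads', 'reads.fastq.gz')
'{"reads": {"file": "reads.fastq.gz"}}'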
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_file_list | def save_file_list(key, *files_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a file-refs specification of the form:
<file-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
file_list = []
for file_refs in files_refs:
if ':' in file_refs:
try:
file_name, refs = file_refs.split(':')
except ValueError as e:
return error("Only one colon ':' allowed in file-refs specification.")
else:
file_name, refs = file_refs, None
if not os.path.isfile(file_name):
return error(
"Output '{}' set to a missing file: '{}'.".format(key, file_name)
)
file_obj = {'file': file_name}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
file_obj['refs'] = refs
file_list.append(file_obj)
return json.dumps({key: file_list}) | python | def save_file_list(key, *files_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a file-refs specification of the form:
<file-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
file_list = []
for file_refs in files_refs:
if ':' in file_refs:
try:
file_name, refs = file_refs.split(':')
except ValueError as e:
return error("Only one colon ':' allowed in file-refs specification.")
else:
file_name, refs = file_refs, None
if not os.path.isfile(file_name):
return error(
"Output '{}' set to a missing file: '{}'.".format(key, file_name)
)
file_obj = {'file': file_name}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
file_obj['refs'] = refs
file_list.append(file_obj)
return json.dumps({key: file_list}) | [
"def",
"save_file_list",
"(",
"key",
",",
"*",
"files_refs",
")",
":",
"file_list",
"=",
"[",
"]",
"for",
"file_refs",
"in",
"files_refs",
":",
"if",
"':'",
"in",
"file_refs",
":",
"try",
":",
"file_name",
",",
"refs",
"=",
"file_refs",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
"as",
"e",
":",
"return",
"error",
"(",
"\"Only one colon ':' allowed in file-refs specification.\"",
")",
"else",
":",
"file_name",
",",
"refs",
"=",
"file_refs",
",",
"None",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"return",
"error",
"(",
"\"Output '{}' set to a missing file: '{}'.\"",
".",
"format",
"(",
"key",
",",
"file_name",
")",
")",
"file_obj",
"=",
"{",
"'file'",
":",
"file_name",
"}",
"if",
"refs",
":",
"refs",
"=",
"[",
"ref_path",
".",
"strip",
"(",
")",
"for",
"ref_path",
"in",
"refs",
".",
"split",
"(",
"','",
")",
"]",
"missing_refs",
"=",
"[",
"ref",
"for",
"ref",
"in",
"refs",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"ref",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"ref",
")",
")",
"]",
"if",
"len",
"(",
"missing_refs",
")",
">",
"0",
":",
"return",
"error",
"(",
"\"Output '{}' set to missing references: '{}'.\"",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"missing_refs",
")",
")",
")",
"file_obj",
"[",
"'refs'",
"]",
"=",
"refs",
"file_list",
".",
"append",
"(",
"file_obj",
")",
"return",
"json",
".",
"dumps",
"(",
"{",
"key",
":",
"file_list",
"}",
")"
] | Convert the given parameters to a special JSON object.
Each parameter is a file-refs specification of the form:
<file-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} | [
"Convert",
"the",
"given",
"parameters",
"to",
"a",
"special",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L101-L143 |
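A sketch of the file-refs syntax, assuming r1.fastq.gz, r2.fastq.gz and the reference report.txt all exist:
>>> from resolwe_runtime_utils import save_file_list
>>> save_file_list('reads', 'r1.fastq.gz:report.txt', 'r2.fastq.gz')
'{"reads": [{"file": "r1.fastq.gz", "refs": ["report.txt"]}, {"file": "r2.fastq.gz"}]}'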
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_dir | def save_dir(key, dir_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
result = {key: {"dir": dir_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]["refs"] = refs
return json.dumps(result) | python | def save_dir(key, dir_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
result = {key: {"dir": dir_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]["refs"] = refs
return json.dumps(result) | [
"def",
"save_dir",
"(",
"key",
",",
"dir_path",
",",
"*",
"refs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_path",
")",
":",
"return",
"error",
"(",
"\"Output '{}' set to a missing directory: '{}'.\"",
".",
"format",
"(",
"key",
",",
"dir_path",
")",
")",
"result",
"=",
"{",
"key",
":",
"{",
"\"dir\"",
":",
"dir_path",
"}",
"}",
"if",
"refs",
":",
"missing_refs",
"=",
"[",
"ref",
"for",
"ref",
"in",
"refs",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"ref",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"ref",
")",
")",
"]",
"if",
"len",
"(",
"missing_refs",
")",
">",
"0",
":",
"return",
"error",
"(",
"\"Output '{}' set to missing references: '{}'.\"",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"missing_refs",
")",
")",
")",
"result",
"[",
"key",
"]",
"[",
"\"refs\"",
"]",
"=",
"refs",
"return",
"json",
".",
"dumps",
"(",
"result",
")"
] | Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} | [
"Convert",
"the",
"given",
"parameters",
"to",
"a",
"special",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L146-L173 |
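The directory analogue of save_file; the sketch assumes a results/ directory exists:
>>> from resolwe_runtime_utils import save_dir
>>> save_dir('results', 'results')
'{"results": {"dir": "results"}}'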
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_dir_list | def save_dir_list(key, *dirs_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a dir-refs specification of the form:
<dir-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
dir_list = []
for dir_refs in dirs_refs:
if ':' in dir_refs:
try:
dir_path, refs = dir_refs.split(':')
except ValueError as e:
return error("Only one colon ':' allowed in dir-refs specification.")
else:
dir_path, refs = dir_refs, None
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
dir_obj = {'dir': dir_path}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
dir_obj['refs'] = refs
dir_list.append(dir_obj)
return json.dumps({key: dir_list}) | python | def save_dir_list(key, *dirs_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a dir-refs specification of the form:
<dir-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
dir_list = []
for dir_refs in dirs_refs:
if ':' in dir_refs:
try:
dir_path, refs = dir_refs.split(':')
except ValueError as e:
return error("Only one colon ':' allowed in dir-refs specification.")
else:
dir_path, refs = dir_refs, None
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
dir_obj = {'dir': dir_path}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
dir_obj['refs'] = refs
dir_list.append(dir_obj)
return json.dumps({key: dir_list}) | [
"def",
"save_dir_list",
"(",
"key",
",",
"*",
"dirs_refs",
")",
":",
"dir_list",
"=",
"[",
"]",
"for",
"dir_refs",
"in",
"dirs_refs",
":",
"if",
"':'",
"in",
"dir_refs",
":",
"try",
":",
"dir_path",
",",
"refs",
"=",
"dir_refs",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
"as",
"e",
":",
"return",
"error",
"(",
"\"Only one colon ':' allowed in dir-refs specification.\"",
")",
"else",
":",
"dir_path",
",",
"refs",
"=",
"dir_refs",
",",
"None",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_path",
")",
":",
"return",
"error",
"(",
"\"Output '{}' set to a missing directory: '{}'.\"",
".",
"format",
"(",
"key",
",",
"dir_path",
")",
")",
"dir_obj",
"=",
"{",
"'dir'",
":",
"dir_path",
"}",
"if",
"refs",
":",
"refs",
"=",
"[",
"ref_path",
".",
"strip",
"(",
")",
"for",
"ref_path",
"in",
"refs",
".",
"split",
"(",
"','",
")",
"]",
"missing_refs",
"=",
"[",
"ref",
"for",
"ref",
"in",
"refs",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"ref",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"ref",
")",
")",
"]",
"if",
"len",
"(",
"missing_refs",
")",
">",
"0",
":",
"return",
"error",
"(",
"\"Output '{}' set to missing references: '{}'.\"",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"missing_refs",
")",
")",
")",
"dir_obj",
"[",
"'refs'",
"]",
"=",
"refs",
"dir_list",
".",
"append",
"(",
"dir_obj",
")",
"return",
"json",
".",
"dumps",
"(",
"{",
"key",
":",
"dir_list",
"}",
")"
] | Convert the given parameters to a special JSON object.
Each parameter is a dir-refs specification of the form:
<dir-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} | [
"Convert",
"the",
"given",
"parameters",
"to",
"a",
"special",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L176-L218 |
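A sketch of the dir-refs syntax, assuming directories run1/ and run2/ and the reference log.txt exist:
>>> from resolwe_runtime_utils import save_dir_list
>>> save_dir_list('outputs', 'run1:log.txt', 'run2')
'{"outputs": [{"dir": "run1", "refs": ["log.txt"]}, {"dir": "run2"}]}'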
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | progress | def progress(progress):
"""Convert given progress to a JSON object.
Check that progress can be represented as a float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress}
"""
if isinstance(progress, int) or isinstance(progress, float):
progress = float(progress)
else:
try:
progress = float(json.loads(progress))
except (TypeError, ValueError):
return warning("Progress must be a float.")
if not 0 <= progress <= 1:
return warning("Progress must be a float between 0 and 1.")
return json.dumps({'proc.progress': progress}) | python | def progress(progress):
"""Convert given progress to a JSON object.
Check that progress can be represented as a float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress}
"""
if isinstance(progress, int) or isinstance(progress, float):
progress = float(progress)
else:
try:
progress = float(json.loads(progress))
except (TypeError, ValueError):
return warning("Progress must be a float.")
if not 0 <= progress <= 1:
return warning("Progress must be a float between 0 and 1.")
return json.dumps({'proc.progress': progress}) | [
"def",
"progress",
"(",
"progress",
")",
":",
"if",
"isinstance",
"(",
"progress",
",",
"int",
")",
"or",
"isinstance",
"(",
"progress",
",",
"float",
")",
":",
"progress",
"=",
"float",
"(",
"progress",
")",
"else",
":",
"try",
":",
"progress",
"=",
"float",
"(",
"json",
".",
"loads",
"(",
"progress",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"warning",
"(",
"\"Progress must be a float.\"",
")",
"if",
"not",
"0",
"<=",
"progress",
"<=",
"1",
":",
"return",
"warning",
"(",
"\"Progress must be a float between 0 and 1.\"",
")",
"return",
"json",
".",
"dumps",
"(",
"{",
"'proc.progress'",
":",
"progress",
"}",
")"
] | Convert given progress to a JSON object.
Check that progress can be represented as float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress} | [
"Convert",
"given",
"progress",
"to",
"a",
"JSON",
"object",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L236-L256 |
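For example (both numeric and string inputs are accepted):
>>> from resolwe_runtime_utils import progress
>>> progress(0.75)
'{"proc.progress": 0.75}'
>>> progress('0.3')
'{"proc.progress": 0.3}'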
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | checkrc | def checkrc(rc, *args):
"""Check if ``rc`` (return code) meets requirements.
Check if ``rc`` is 0 or is in ``args`` list that contains
acceptable return codes.
The last argument of ``args`` can optionally be an error message that
is printed if ``rc`` doesn't meet requirements.
Output is JSON of the form:
{"proc.rc": <rc>,
"proc.error": "<error_msg>"},
where "proc.error" entry is omitted if empty.
"""
try:
rc = int(rc)
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(rc))
acceptable_rcs = []
error_msg = ""
if len(args):
for code in args[:-1]:
try:
acceptable_rcs.append(int(code))
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(code))
try:
acceptable_rcs.append(int(args[-1]))
except (TypeError, ValueError):
error_msg = args[-1]
if rc in acceptable_rcs:
rc = 0
ret = {'proc.rc': rc}
if rc and error_msg:
ret['proc.error'] = error_msg
return json.dumps(ret) | python | def checkrc(rc, *args):
"""Check if ``rc`` (return code) meets requirements.
Check if ``rc`` is 0 or is in ``args`` list that contains
acceptable return codes.
The last argument of ``args`` can optionally be an error message that
is printed if ``rc`` doesn't meet requirements.
Output is JSON of the form:
{"proc.rc": <rc>,
"proc.error": "<error_msg>"},
where "proc.error" entry is omitted if empty.
"""
try:
rc = int(rc)
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(rc))
acceptable_rcs = []
error_msg = ""
if len(args):
for code in args[:-1]:
try:
acceptable_rcs.append(int(code))
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(code))
try:
acceptable_rcs.append(int(args[-1]))
except (TypeError, ValueError):
error_msg = args[-1]
if rc in acceptable_rcs:
rc = 0
ret = {'proc.rc': rc}
if rc and error_msg:
ret['proc.error'] = error_msg
return json.dumps(ret) | [
"def",
"checkrc",
"(",
"rc",
",",
"*",
"args",
")",
":",
"try",
":",
"rc",
"=",
"int",
"(",
"rc",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"error",
"(",
"\"Invalid return code: '{}'.\"",
".",
"format",
"(",
"rc",
")",
")",
"acceptable_rcs",
"=",
"[",
"]",
"error_msg",
"=",
"\"\"",
"if",
"len",
"(",
"args",
")",
":",
"for",
"code",
"in",
"args",
"[",
":",
"-",
"1",
"]",
":",
"try",
":",
"acceptable_rcs",
".",
"append",
"(",
"int",
"(",
"code",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"error",
"(",
"\"Invalid return code: '{}'.\"",
".",
"format",
"(",
"code",
")",
")",
"try",
":",
"acceptable_rcs",
".",
"append",
"(",
"int",
"(",
"args",
"[",
"-",
"1",
"]",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"error_msg",
"=",
"args",
"[",
"-",
"1",
"]",
"if",
"rc",
"in",
"acceptable_rcs",
":",
"rc",
"=",
"0",
"ret",
"=",
"{",
"'proc.rc'",
":",
"rc",
"}",
"if",
"rc",
"and",
"error_msg",
":",
"ret",
"[",
"'proc.error'",
"]",
"=",
"error_msg",
"return",
"json",
".",
"dumps",
"(",
"ret",
")"
] | Check if ``rc`` (return code) meets requirements.
Check if ``rc`` is 0 or is in ``args`` list that contains
acceptable return codes.
The last argument of ``args`` can optionally be an error message that
is printed if ``rc`` doesn't meet requirements.
Output is JSON of the form:
{"proc.rc": <rc>,
"proc.error": "<error_msg>"},
where "proc.error" entry is omitted if empty. | [
"Check",
"if",
"rc",
"(",
"return",
"code",
")",
"meets",
"requirements",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L259-L302 |
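For example: 2 is declared acceptable, so it maps to 0, while an unacceptable code attaches the error message:
>>> from resolwe_runtime_utils import checkrc
>>> checkrc(0)
'{"proc.rc": 0}'
>>> checkrc(2, 2, 'Tool failed.')
'{"proc.rc": 0}'
>>> checkrc(1, 2, 'Tool failed.')
'{"proc.rc": 1, "proc.error": "Tool failed."}'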
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | export_file | def export_file(file_path):
"""Prepend the given parameter with ``export``"""
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path) | python | def export_file(file_path):
"""Prepend the given parameter with ``export``"""
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path) | [
"def",
"export_file",
"(",
"file_path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"return",
"error",
"(",
"\"Referenced file does not exist: '{}'.\"",
".",
"format",
"(",
"file_path",
")",
")",
"return",
"\"export {}\"",
".",
"format",
"(",
"file_path",
")"
] | Prepend the given parameter with ``export`` | [
"Prepend",
"the",
"given",
"parameter",
"with",
"export"
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L305-L311 |
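A sketch, assuming reads.fastq.gz exists:
>>> from resolwe_runtime_utils import export_file
>>> export_file('reads.fastq.gz')
'export reads.fastq.gz'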
genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | import_file | def import_file(
src,
file_name,
imported_format=ImportedFormat.BOTH,
progress_from=0.0,
progress_to=None,
):
"""Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given)
"""
if progress_to is not None:
if not isinstance(progress_from, float) or not isinstance(progress_to, float):
raise ValueError("Progress_from and progress_to must be float")
if progress_from < 0 or progress_from > 1:
raise ValueError("Progress_from must be between 0 and 1")
if progress_to < 0 or progress_to > 1:
raise ValueError("Progress_to must be between 0 and 1")
if progress_from >= progress_to:
raise ValueError("Progress_to must be higher than progress_from")
print("Importing and compressing {}...".format(file_name))
def importGz():
"""Import gzipped file.
The file_name must have .gz extension.
"""
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in:
try:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
else: # Extracted file not-required
# Verify the compressed file.
with gzip.open(src, 'rb') as f:
try:
while f.read(CHUNK_SIZE) != b'':
pass
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
if imported_format == ImportedFormat.COMPRESSED:
return file_name
else:
return file_name[:-3]
def import7z():
"""Import compressed file in various formats.
Supported extensions: .bz2, .zip, .rar, .7z, .tgz, .tar.gz, and .tar.bz2.
"""
extracted_name, _ = os.path.splitext(file_name)
destination_name = extracted_name
temp_dir = 'temp_{}'.format(extracted_name)
cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as err:
if err.returncode == 2:
raise ValueError("Failed to extract file: {}".format(file_name))
else:
raise
paths = os.listdir(temp_dir)
if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
# Single file in archive.
temp_file = os.path.join(temp_dir, paths[0])
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(temp_file, 'rb') as f_in, gzip.open(
extracted_name + '.gz', 'wb'
) as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
shutil.move(temp_file, './{}'.format(extracted_name))
if extracted_name.endswith('.tar'):
with tarfile.open(extracted_name) as tar:
tar.extractall()
os.remove(extracted_name)
destination_name, _ = os.path.splitext(extracted_name)
else:
destination_name = extracted_name + '.gz'
else:
# Directory or several files in archive.
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar:
for fname in glob.glob(os.path.join(temp_dir, '*')):
tar.add(fname, os.path.basename(fname))
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
for path in os.listdir(temp_dir):
shutil.move(os.path.join(temp_dir, path), './{}'.format(path))
else:
destination_name = extracted_name + '.tar.gz'
shutil.rmtree(temp_dir)
return destination_name
def importUncompressed():
"""Import uncompressed file."""
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
return (
file_name + '.gz'
if imported_format == ImportedFormat.COMPRESSED
else file_name
)
# Large file download from Google Drive requires cookie and token.
try:
response = None
if re.match(
r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
session = requests.Session()
response = session.get(src, stream=True)
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
break
if token is not None:
params = {'confirm': token}
response = session.get(src, params=params, stream=True)
elif re.match(
r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
response = requests.get(src, stream=True)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to {}".format(src))
if response:
with open(file_name, 'wb') as f:
total = response.headers.get('content-length')
total = float(total) if total else None
downloaded = 0
current_progress = 0
for content in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(content)
if total is not None and progress_to is not None:
downloaded += len(content)
progress_span = progress_to - progress_from
next_progress = progress_from + progress_span * downloaded / total
next_progress = round(next_progress, 2)
if next_progress > current_progress:
print(progress(next_progress))
current_progress = next_progress
# Check if a temporary file exists.
if not os.path.isfile(file_name):
raise ValueError("Downloaded file not found {}".format(file_name))
src = file_name
else:
if not os.path.isfile(src):
raise ValueError("Source file not found {}".format(src))
# Decide which import should be used.
if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name):
destination_file_name = import7z()
elif file_name.endswith('.gz'):
destination_file_name = importGz()
else:
destination_file_name = importUncompressed()
if progress_to is not None:
print(progress(progress_to))
return destination_file_name | python | def import_file(
src,
file_name,
imported_format=ImportedFormat.BOTH,
progress_from=0.0,
progress_to=None,
):
"""Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given)
"""
if progress_to is not None:
if not isinstance(progress_from, float) or not isinstance(progress_to, float):
raise ValueError("Progress_from and progress_to must be float")
if progress_from < 0 or progress_from > 1:
raise ValueError("Progress_from must be between 0 and 1")
if progress_to < 0 or progress_to > 1:
raise ValueError("Progress_to must be between 0 and 1")
if progress_from >= progress_to:
raise ValueError("Progress_to must be higher than progress_from")
print("Importing and compressing {}...".format(file_name))
def importGz():
"""Import gzipped file.
The file_name must have .gz extension.
"""
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in:
try:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
else: # Extracted file not-required
# Verify the compressed file.
with gzip.open(src, 'rb') as f:
try:
while f.read(CHUNK_SIZE) != b'':
pass
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
if imported_format == ImportedFormat.COMPRESSED:
return file_name
else:
return file_name[:-3]
def import7z():
"""Import compressed file in various formats.
Supported extensions: .bz2, .zip, .rar, .7z, .tgz, .tar.gz, and .tar.bz2.
"""
extracted_name, _ = os.path.splitext(file_name)
destination_name = extracted_name
temp_dir = 'temp_{}'.format(extracted_name)
cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as err:
if err.returncode == 2:
raise ValueError("Failed to extract file: {}".format(file_name))
else:
raise
paths = os.listdir(temp_dir)
if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
# Single file in archive.
temp_file = os.path.join(temp_dir, paths[0])
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(temp_file, 'rb') as f_in, gzip.open(
extracted_name + '.gz', 'wb'
) as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
shutil.move(temp_file, './{}'.format(extracted_name))
if extracted_name.endswith('.tar'):
with tarfile.open(extracted_name) as tar:
tar.extractall()
os.remove(extracted_name)
destination_name, _ = os.path.splitext(extracted_name)
else:
destination_name = extracted_name + '.gz'
else:
# Directory or several files in archive.
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar:
for fname in glob.glob(os.path.join(temp_dir, '*')):
tar.add(fname, os.path.basename(fname))
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
for path in os.listdir(temp_dir):
shutil.move(os.path.join(temp_dir, path), './{}'.format(path))
else:
destination_name = extracted_name + '.tar.gz'
shutil.rmtree(temp_dir)
return destination_name
def importUncompressed():
"""Import uncompressed file."""
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
return (
file_name + '.gz'
if imported_format == ImportedFormat.COMPRESSED
else file_name
)
# Large file download from Google Drive requires cookie and token.
try:
response = None
if re.match(
r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
session = requests.Session()
response = session.get(src, stream=True)
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
break
if token is not None:
params = {'confirm': token}
response = session.get(src, params=params, stream=True)
elif re.match(
r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
response = requests.get(src, stream=True)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to {}".format(src))
if response:
with open(file_name, 'wb') as f:
total = response.headers.get('content-length')
total = float(total) if total else None
downloaded = 0
current_progress = 0
for content in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(content)
if total is not None and progress_to is not None:
downloaded += len(content)
progress_span = progress_to - progress_from
next_progress = progress_from + progress_span * downloaded / total
next_progress = round(next_progress, 2)
if next_progress > current_progress:
print(progress(next_progress))
current_progress = next_progress
# Check if a temporary file exists.
if not os.path.isfile(file_name):
raise ValueError("Downloaded file not found {}".format(file_name))
src = file_name
else:
if not os.path.isfile(src):
raise ValueError("Source file not found {}".format(src))
# Decide which import should be used.
if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name):
destination_file_name = import7z()
elif file_name.endswith('.gz'):
destination_file_name = importGz()
else:
destination_file_name = importUncompressed()
if progress_to is not None:
print(progress(progress_to))
return destination_file_name | [
"def",
"import_file",
"(",
"src",
",",
"file_name",
",",
"imported_format",
"=",
"ImportedFormat",
".",
"BOTH",
",",
"progress_from",
"=",
"0.0",
",",
"progress_to",
"=",
"None",
",",
")",
":",
"if",
"progress_to",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"progress_from",
",",
"float",
")",
"or",
"not",
"isinstance",
"(",
"progress_to",
",",
"float",
")",
":",
"raise",
"ValueError",
"(",
"\"Progress_from and progress_to must be float\"",
")",
"if",
"progress_from",
"<",
"0",
"or",
"progress_from",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Progress_from must be between 0 and 1\"",
")",
"if",
"progress_to",
"<",
"0",
"or",
"progress_to",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Progress_to must be between 0 and 1\"",
")",
"if",
"progress_from",
">=",
"progress_to",
":",
"raise",
"ValueError",
"(",
"\"Progress_to must be higher than progress_from\"",
")",
"print",
"(",
"\"Importing and compressing {}...\"",
".",
"format",
"(",
"file_name",
")",
")",
"def",
"importGz",
"(",
")",
":",
"\"\"\"Import gzipped file.\n\n The file_name must have .gz extension.\n \"\"\"",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"COMPRESSED",
":",
"# Extracted file required",
"with",
"open",
"(",
"file_name",
"[",
":",
"-",
"3",
"]",
",",
"'wb'",
")",
"as",
"f_out",
",",
"gzip",
".",
"open",
"(",
"src",
",",
"'rb'",
")",
"as",
"f_in",
":",
"try",
":",
"shutil",
".",
"copyfileobj",
"(",
"f_in",
",",
"f_out",
",",
"CHUNK_SIZE",
")",
"except",
"zlib",
".",
"error",
":",
"raise",
"ValueError",
"(",
"\"Invalid gzip file format: {}\"",
".",
"format",
"(",
"file_name",
")",
")",
"else",
":",
"# Extracted file not-required",
"# Verify the compressed file.",
"with",
"gzip",
".",
"open",
"(",
"src",
",",
"'rb'",
")",
"as",
"f",
":",
"try",
":",
"while",
"f",
".",
"read",
"(",
"CHUNK_SIZE",
")",
"!=",
"b''",
":",
"pass",
"except",
"zlib",
".",
"error",
":",
"raise",
"ValueError",
"(",
"\"Invalid gzip file format: {}\"",
".",
"format",
"(",
"file_name",
")",
")",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"EXTRACTED",
":",
"# Compressed file required",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"src",
",",
"file_name",
")",
"except",
"shutil",
".",
"SameFileError",
":",
"pass",
"# Skip copy of downloaded files",
"if",
"imported_format",
"==",
"ImportedFormat",
".",
"COMPRESSED",
":",
"return",
"file_name",
"else",
":",
"return",
"file_name",
"[",
":",
"-",
"3",
"]",
"def",
"import7z",
"(",
")",
":",
"\"\"\"Import compressed file in various formats.\n\n Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2.\n \"\"\"",
"extracted_name",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"destination_name",
"=",
"extracted_name",
"temp_dir",
"=",
"'temp_{}'",
".",
"format",
"(",
"extracted_name",
")",
"cmd",
"=",
"'7z x -y -o{} {}'",
".",
"format",
"(",
"shlex",
".",
"quote",
"(",
"temp_dir",
")",
",",
"shlex",
".",
"quote",
"(",
"src",
")",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"if",
"err",
".",
"returncode",
"==",
"2",
":",
"raise",
"ValueError",
"(",
"\"Failed to extract file: {}\"",
".",
"format",
"(",
"file_name",
")",
")",
"else",
":",
"raise",
"paths",
"=",
"os",
".",
"listdir",
"(",
"temp_dir",
")",
"if",
"len",
"(",
"paths",
")",
"==",
"1",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"paths",
"[",
"0",
"]",
")",
")",
":",
"# Single file in archive.",
"temp_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"paths",
"[",
"0",
"]",
")",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"EXTRACTED",
":",
"# Compressed file required",
"with",
"open",
"(",
"temp_file",
",",
"'rb'",
")",
"as",
"f_in",
",",
"gzip",
".",
"open",
"(",
"extracted_name",
"+",
"'.gz'",
",",
"'wb'",
")",
"as",
"f_out",
":",
"shutil",
".",
"copyfileobj",
"(",
"f_in",
",",
"f_out",
",",
"CHUNK_SIZE",
")",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"COMPRESSED",
":",
"# Extracted file required",
"shutil",
".",
"move",
"(",
"temp_file",
",",
"'./{}'",
".",
"format",
"(",
"extracted_name",
")",
")",
"if",
"extracted_name",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"with",
"tarfile",
".",
"open",
"(",
"extracted_name",
")",
"as",
"tar",
":",
"tar",
".",
"extractall",
"(",
")",
"os",
".",
"remove",
"(",
"extracted_name",
")",
"destination_name",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"extracted_name",
")",
"else",
":",
"destination_name",
"=",
"extracted_name",
"+",
"'.gz'",
"else",
":",
"# Directory or several files in archive.",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"EXTRACTED",
":",
"# Compressed file required",
"with",
"tarfile",
".",
"open",
"(",
"extracted_name",
"+",
"'.tar.gz'",
",",
"'w:gz'",
")",
"as",
"tar",
":",
"for",
"fname",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"'*'",
")",
")",
":",
"tar",
".",
"add",
"(",
"fname",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
")",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"COMPRESSED",
":",
"# Extracted file required",
"for",
"path",
"in",
"os",
".",
"listdir",
"(",
"temp_dir",
")",
":",
"shutil",
".",
"move",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"path",
")",
",",
"'./{}'",
".",
"format",
"(",
"path",
")",
")",
"else",
":",
"destination_name",
"=",
"extracted_name",
"+",
"'.tar.gz'",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
")",
"return",
"destination_name",
"def",
"importUncompressed",
"(",
")",
":",
"\"\"\"Import uncompressed file.\"\"\"",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"EXTRACTED",
":",
"# Compressed file required",
"with",
"open",
"(",
"src",
",",
"'rb'",
")",
"as",
"f_in",
",",
"gzip",
".",
"open",
"(",
"file_name",
"+",
"'.gz'",
",",
"'wb'",
")",
"as",
"f_out",
":",
"shutil",
".",
"copyfileobj",
"(",
"f_in",
",",
"f_out",
",",
"CHUNK_SIZE",
")",
"if",
"imported_format",
"!=",
"ImportedFormat",
".",
"COMPRESSED",
":",
"# Extracted file required",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"src",
",",
"file_name",
")",
"except",
"shutil",
".",
"SameFileError",
":",
"pass",
"# Skip copy of downloaded files",
"return",
"(",
"file_name",
"+",
"'.gz'",
"if",
"imported_format",
"==",
"ImportedFormat",
".",
"COMPRESSED",
"else",
"file_name",
")",
"# Large file download from Google Drive requires cookie and token.",
"try",
":",
"response",
"=",
"None",
"if",
"re",
".",
"match",
"(",
"r'^https://drive.google.com/[-A-Za-z0-9\\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\\+&@#/%=~_|]$'",
",",
"src",
",",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"response",
"=",
"session",
".",
"get",
"(",
"src",
",",
"stream",
"=",
"True",
")",
"token",
"=",
"None",
"for",
"key",
",",
"value",
"in",
"response",
".",
"cookies",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'download_warning'",
")",
":",
"token",
"=",
"value",
"break",
"if",
"token",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"'confirm'",
":",
"token",
"}",
"response",
"=",
"session",
".",
"get",
"(",
"src",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
")",
"elif",
"re",
".",
"match",
"(",
"r'^(https?|ftp)://[-A-Za-z0-9\\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\\+&@#/%=~_|]$'",
",",
"src",
",",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"src",
",",
"stream",
"=",
"True",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
":",
"raise",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Could not connect to {}\"",
".",
"format",
"(",
"src",
")",
")",
"if",
"response",
":",
"with",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"as",
"f",
":",
"total",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'content-length'",
")",
"total",
"=",
"float",
"(",
"total",
")",
"if",
"total",
"else",
"None",
"downloaded",
"=",
"0",
"current_progress",
"=",
"0",
"for",
"content",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"CHUNK_SIZE",
")",
":",
"f",
".",
"write",
"(",
"content",
")",
"if",
"total",
"is",
"not",
"None",
"and",
"progress_to",
"is",
"not",
"None",
":",
"downloaded",
"+=",
"len",
"(",
"content",
")",
"progress_span",
"=",
"progress_to",
"-",
"progress_from",
"next_progress",
"=",
"progress_from",
"+",
"progress_span",
"*",
"downloaded",
"/",
"total",
"next_progress",
"=",
"round",
"(",
"next_progress",
",",
"2",
")",
"if",
"next_progress",
">",
"current_progress",
":",
"print",
"(",
"progress",
"(",
"next_progress",
")",
")",
"current_progress",
"=",
"next_progress",
"# Check if a temporary file exists.",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"raise",
"ValueError",
"(",
"\"Downloaded file not found {}\"",
".",
"format",
"(",
"file_name",
")",
")",
"src",
"=",
"file_name",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"src",
")",
":",
"raise",
"ValueError",
"(",
"\"Source file not found {}\"",
".",
"format",
"(",
"src",
")",
")",
"# Decide which import should be used.",
"if",
"re",
".",
"search",
"(",
"r'\\.(bz2|zip|rar|7z|tgz|tar\\.gz|tar\\.bz2)$'",
",",
"file_name",
")",
":",
"destination_file_name",
"=",
"import7z",
"(",
")",
"elif",
"file_name",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"destination_file_name",
"=",
"importGz",
"(",
")",
"else",
":",
"destination_file_name",
"=",
"importUncompressed",
"(",
")",
"if",
"progress_to",
"is",
"not",
"None",
":",
"print",
"(",
"progress",
"(",
"progress_to",
")",
")",
"return",
"destination_file_name"
] | Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given) | [
"Import",
"file",
"to",
"working",
"directory",
"."
] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L325-L530 |
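A sketch of a typical call; the URL is a placeholder, and with ImportedFormat.BOTH the extracted path is returned:
>>> from resolwe_runtime_utils import import_file, ImportedFormat
>>> dst = import_file('https://example.com/reads.fastq.gz',  # placeholder URL
...                   'reads.fastq.gz',
...                   imported_format=ImportedFormat.BOTH,
...                   progress_from=0.0, progress_to=1.0)
>>> dst
'reads.fastq'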
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_utils.py | loadNiiData | def loadNiiData(lstNiiFls,
strPathNiiMask=None,
strPathNiiFunc=None):
"""load nii data.
Parameters
----------
lstNiiFls : list, list of str with nii file names
strPathNiiMask : str, path to nii file with mask (optional)
strPathNiiFunc : str, parent path to nii files (optional)
Returns
-------
aryFunc : np.array
Nii data
"""
print('---------Loading nii data')
# check whether a mask is available
if strPathNiiMask is not None:
aryMask = nb.load(strPathNiiMask).get_data().astype('bool')
# check whether a parent path is available that needs to be prepended to nii files
if strPathNiiFunc is not None:
lstNiiFls = [os.path.join(strPathNiiFunc, i) for i in lstNiiFls]
aryFunc = []
for idx, path in enumerate(lstNiiFls):
print('------------Loading run: ' + str(idx+1))
# Load 4D nii data:
niiFunc = nb.load(path).get_data()
# append to list
if strPathNiiMask is not None:
aryFunc.append(niiFunc[aryMask, :])
else:
aryFunc.append(niiFunc)
# concatenate arrays in list along time dimension
aryFunc = np.concatenate(aryFunc, axis=-1)
# set to type float32
aryFunc = aryFunc.astype('float32')
return aryFunc | python | def loadNiiData(lstNiiFls,
strPathNiiMask=None,
strPathNiiFunc=None):
"""load nii data.
Parameters
----------
lstNiiFls : list, list of str with nii file names
strPathNiiMask : str, path to nii file with mask (optional)
strPathNiiFunc : str, parent path to nii files (optional)
Returns
-------
aryFunc : np.array
Nii data
"""
print('---------Loading nii data')
# check whether a mask is available
if strPathNiiMask is not None:
aryMask = nb.load(strPathNiiMask).get_data().astype('bool')
# check whether a parent path is available that needs to be prepended to nii files
if strPathNiiFunc is not None:
lstNiiFls = [os.path.join(strPathNiiFunc, i) for i in lstNiiFls]
aryFunc = []
for idx, path in enumerate(lstNiiFls):
print('------------Loading run: ' + str(idx+1))
# Load 4D nii data:
niiFunc = nb.load(path).get_data()
# append to list
if strPathNiiMask is not None:
aryFunc.append(niiFunc[aryMask, :])
else:
aryFunc.append(niiFunc)
    # concatenate arrays in list along time dimension
aryFunc = np.concatenate(aryFunc, axis=-1)
# set to type float32
aryFunc = aryFunc.astype('float32')
return aryFunc | [
"def",
"loadNiiData",
"(",
"lstNiiFls",
",",
"strPathNiiMask",
"=",
"None",
",",
"strPathNiiFunc",
"=",
"None",
")",
":",
"print",
"(",
"'---------Loading nii data'",
")",
"# check whether a mask is available",
"if",
"strPathNiiMask",
"is",
"not",
"None",
":",
"aryMask",
"=",
"nb",
".",
"load",
"(",
"strPathNiiMask",
")",
".",
"get_data",
"(",
")",
".",
"astype",
"(",
"'bool'",
")",
"# check a parent path is available that needs to be preprended to nii files",
"if",
"strPathNiiFunc",
"is",
"not",
"None",
":",
"lstNiiFls",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"strPathNiiFunc",
",",
"i",
")",
"for",
"i",
"in",
"lstNiiFls",
"]",
"aryFunc",
"=",
"[",
"]",
"for",
"idx",
",",
"path",
"in",
"enumerate",
"(",
"lstNiiFls",
")",
":",
"print",
"(",
"'------------Loading run: '",
"+",
"str",
"(",
"idx",
"+",
"1",
")",
")",
"# Load 4D nii data:",
"niiFunc",
"=",
"nb",
".",
"load",
"(",
"path",
")",
".",
"get_data",
"(",
")",
"# append to list",
"if",
"strPathNiiMask",
"is",
"not",
"None",
":",
"aryFunc",
".",
"append",
"(",
"niiFunc",
"[",
"aryMask",
",",
":",
"]",
")",
"else",
":",
"aryFunc",
".",
"append",
"(",
"niiFunc",
")",
"# concatenate arrys in list along time dimension",
"aryFunc",
"=",
"np",
".",
"concatenate",
"(",
"aryFunc",
",",
"axis",
"=",
"-",
"1",
")",
"# set to type float32",
"aryFunc",
"=",
"aryFunc",
".",
"astype",
"(",
"'float32'",
")",
"return",
"aryFunc"
] | load nii data.
Parameters
----------
lstNiiFls : list, list of str with nii file names
strPathNiiMask : str, path to nii file with mask (optional)
strPathNiiFunc : str, parent path to nii files (optional)
Returns
-------
aryFunc : np.array
Nii data | [
"load",
"nii",
"data",
"."
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_utils.py#L29-L67 |
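A hedged usage sketch for loadNiiData; the file names, mask path, and printed shape below are illustrative assumptions, not values from this record:

lstNiiFls = ['run_01.nii.gz', 'run_02.nii.gz']
aryFunc = loadNiiData(lstNiiFls,
                      strPathNiiMask='brain_mask.nii.gz',
                      strPathNiiFunc='/data/subject01')
# With a mask the result is 2D: (voxels inside mask, time points
# concatenated across runs), cast to float32 as in the function body.
print(aryFunc.shape, aryFunc.dtype)  # e.g. (52000, 400) float32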
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_utils.py | calcR2 | def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss | python | def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss | [
"def",
"calcR2",
"(",
"predTst",
",",
"yTest",
",",
"axis",
"=",
"0",
")",
":",
"rss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"predTst",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"tss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"yTest",
".",
"mean",
"(",
")",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"return",
"1",
"-",
"rss",
"/",
"tss"
] | calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2 | [
"calculate",
"coefficient",
"of",
"determination",
".",
"Assumes",
"that",
"axis",
"=",
"0",
"is",
"time"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_utils.py#L90-L105 |
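A tiny worked example for calcR2; the numbers are illustrative only:

import numpy as np

yTest = np.array([1.0, 2.0, 3.0, 4.0])
predTst = np.array([1.1, 1.9, 3.2, 3.8])
# rss = 0.01 + 0.01 + 0.04 + 0.04 = 0.10
# tss = 2.25 + 0.25 + 0.25 + 2.25 = 5.0
print(calcR2(predTst, yTest))  # 1 - 0.10 / 5.0 = 0.98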
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_utils.py | calcFstats | def calcFstats(predTst, yTest, p, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
p: float, number of predictors
Returns
-------
    vecFvals, vecPvals : np.array
        F values and p values
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
# derive number of measurements
n = yTest.shape[0]
# calculate Fvalues
vecFvals = ((tss - rss)/p)/(rss/(n-p-1))
    # calculate corresponding p values
df1 = p - 1
df2 = n-1
vecPvals = stats.f.cdf(vecFvals, df1, df2)
return vecFvals, vecPvals | python | def calcFstats(predTst, yTest, p, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
p: float, number of predictors
Returns
-------
    vecFvals, vecPvals : np.array
        F values and p values
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
# derive number of measurements
n = yTest.shape[0]
# calculate Fvalues
vecFvals = ((tss - rss)/p)/(rss/(n-p-1))
    # calculate corresponding p values
df1 = p - 1
df2 = n-1
vecPvals = stats.f.cdf(vecFvals, df1, df2)
return vecFvals, vecPvals | [
"def",
"calcFstats",
"(",
"predTst",
",",
"yTest",
",",
"p",
",",
"axis",
"=",
"0",
")",
":",
"rss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"predTst",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"tss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"yTest",
".",
"mean",
"(",
")",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"# derive number of measurements",
"n",
"=",
"yTest",
".",
"shape",
"[",
"0",
"]",
"# calculate Fvalues",
"vecFvals",
"=",
"(",
"(",
"tss",
"-",
"rss",
")",
"/",
"p",
")",
"/",
"(",
"rss",
"/",
"(",
"n",
"-",
"p",
"-",
"1",
")",
")",
"# calculate corresponding po values",
"df1",
"=",
"p",
"-",
"1",
"df2",
"=",
"n",
"-",
"1",
"vecPvals",
"=",
"stats",
".",
"f",
".",
"cdf",
"(",
"vecFvals",
",",
"df1",
",",
"df2",
")",
"return",
"vecFvals",
",",
"vecPvals"
] | calculate F statistics and p values. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
p: float, number of predictors
Returns
-------
vecFvals, vecPvals : np.array
F values and p values | [
"calculate",
"coefficient",
"of",
"determination",
".",
"Assumes",
"that",
"axis",
"=",
"0",
"is",
"time"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_utils.py#L108-L132 |
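A hedged usage sketch for calcFstats. One caveat worth noting: stats.f.cdf returns the cumulative distribution, so the second return value as computed above is 1 minus the conventional right-tail p-value, which scipy exposes as the survival function stats.f.sf:

import numpy as np
from scipy import stats

yTest = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
predTst = np.array([1.1, 1.9, 3.2, 3.8, 5.1])
vecFvals, vecCdf = calcFstats(predTst, yTest, p=2)
# Conventional right-tail p-value under the same degrees of freedom
# (df1 = p - 1, df2 = n - 1) used inside the function:
pval = stats.f.sf(vecFvals, 2 - 1, len(yTest) - 1)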
MSchnei/pyprf_feature | pyprf_feature/analysis/old/pRF_utils.py | calcMse | def calcMse(predTst, yTest, axis=0):
"""calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE
"""
return np.mean((yTest - predTst) ** 2, axis=axis) | python | def calcMse(predTst, yTest, axis=0):
"""calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE
"""
return np.mean((yTest - predTst) ** 2, axis=axis) | [
"def",
"calcMse",
"(",
"predTst",
",",
"yTest",
",",
"axis",
"=",
"0",
")",
":",
"return",
"np",
".",
"mean",
"(",
"(",
"yTest",
"-",
"predTst",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")"
] | calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE | [
"calculate",
"mean",
"squared",
"error",
".",
"Assumes",
"that",
"axis",
"=",
"0",
"is",
"time"
] | train | https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_utils.py#L135-L147 |
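calcMse is the mean of squared residuals; a two-line check with made-up numbers:

import numpy as np

yTest = np.array([1.0, 2.0, 3.0])
predTst = np.array([0.5, 2.5, 3.0])
print(calcMse(predTst, yTest))  # (0.25 + 0.25 + 0.0) / 3 = 0.1666...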
askedrelic/journal | journal/parse.py | Parse.n_day | def n_day(date_string):
"""
date_string string in format "(number|a) day(s) ago"
"""
today = datetime.date.today()
match = re.match(r'(\d{1,3}|a) days? ago', date_string)
groups = match.groups()
if groups:
decrement = groups[0]
if decrement == 'a':
decrement = 1
return today - datetime.timedelta(days=int(decrement))
return None | python | def n_day(date_string):
"""
date_string string in format "(number|a) day(s) ago"
"""
today = datetime.date.today()
match = re.match(r'(\d{1,3}|a) days? ago', date_string)
groups = match.groups()
if groups:
decrement = groups[0]
if decrement == 'a':
decrement = 1
return today - datetime.timedelta(days=int(decrement))
return None | [
"def",
"n_day",
"(",
"date_string",
")",
":",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"match",
"=",
"re",
".",
"match",
"(",
"r'(\\d{1,3}|a) days? ago'",
",",
"date_string",
")",
"groups",
"=",
"match",
".",
"groups",
"(",
")",
"if",
"groups",
":",
"decrement",
"=",
"groups",
"[",
"0",
"]",
"if",
"decrement",
"==",
"'a'",
":",
"decrement",
"=",
"1",
"return",
"today",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"int",
"(",
"decrement",
")",
")",
"return",
"None"
] | date_string string in format "(number|a) day(s) ago" | [
"date_string",
"string",
"in",
"format",
"(",
"number|a",
")",
"day",
"(",
"s",
")",
"ago"
] | train | https://github.com/askedrelic/journal/blob/848b8ec67ed124ec112926211ebeccbc8d11f2b0/journal/parse.py#L27-L39 |
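A standalone check of the pattern n_day relies on; note that "a day ago" maps to a decrement of 1:

import datetime
import re

for phrase in ('a day ago', '3 days ago', '120 days ago'):
    match = re.match(r'(\d{1,3}|a) days? ago', phrase)
    decrement = match.groups()[0]
    if decrement == 'a':
        decrement = 1
    print(datetime.date.today() - datetime.timedelta(days=int(decrement)))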
mediawiki-utilities/python-mwreverts | mwreverts/functions.py | detect | def detect(checksum_revisions, radius=defaults.RADIUS):
"""
    Detects reverts that occur in a sequence of revisions. Note that
    `revision` meta data will simply be returned in the case of a revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
        an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})]
"""
revert_detector = Detector(radius)
for checksum, revision in checksum_revisions:
revert = revert_detector.process(checksum, revision)
if revert is not None:
yield revert | python | def detect(checksum_revisions, radius=defaults.RADIUS):
"""
    Detects reverts that occur in a sequence of revisions. Note that
    `revision` meta data will simply be returned in the case of a revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
        an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})]
"""
revert_detector = Detector(radius)
for checksum, revision in checksum_revisions:
revert = revert_detector.process(checksum, revision)
if revert is not None:
yield revert | [
"def",
"detect",
"(",
"checksum_revisions",
",",
"radius",
"=",
"defaults",
".",
"RADIUS",
")",
":",
"revert_detector",
"=",
"Detector",
"(",
"radius",
")",
"for",
"checksum",
",",
"revision",
"in",
"checksum_revisions",
":",
"revert",
"=",
"revert_detector",
".",
"process",
"(",
"checksum",
",",
"revision",
")",
"if",
"revert",
"is",
"not",
"None",
":",
"yield",
"revert"
] | Detects reverts that occur in a sequence of revisions. Note that
`revision` meta data will simply be returned in the case of a revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})] | [
"Detects",
"reverts",
"that",
"occur",
"in",
"a",
"sequence",
"of",
"revisions",
".",
"Note",
"that",
"revision",
"data",
"meta",
"will",
"simply",
"be",
"returned",
"in",
"the",
"case",
"of",
"a",
"revert",
"."
] | train | https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/functions.py#L5-L46 |
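The record above already ships a doctest; as a complement, a minimal sketch of the rolling-window idea behind checksum-based revert detection. This is not the mwreverts implementation, only the concept:

from collections import deque

def naive_detect(checksums, radius=15):
    # Keep the last radius+1 checksums; a repeated checksum means every
    # revision between the two occurrences was reverted.
    window = deque(maxlen=radius + 1)
    for i, checksum in enumerate(checksums):
        if checksum in window:
            items = list(window)
            j = len(items) - 1 - items[::-1].index(checksum)
            reverted = items[j + 1:]
            if reverted:
                yield (i, reverted)
        window.append(checksum)

print(list(naive_detect(['aaa', 'bbb', 'aaa', 'ccc'])))  # [(2, ['bbb'])]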
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getChargeInfo | def getChargeInfo(self, CorpNum, ItemCode, UserID=None):
""" 과금정보 확인
args
CorpNum : 회원 사업자번호
ItemCode : 전자명세서 종류코드
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException
"""
if ItemCode == None or ItemCode == '':
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/ChargeInfo/' + ItemCode, CorpNum, UserID) | python | def getChargeInfo(self, CorpNum, ItemCode, UserID=None):
""" 과금정보 확인
args
CorpNum : 회원 사업자번호
ItemCode : 전자명세서 종류코드
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException
"""
if ItemCode == None or ItemCode == '':
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/ChargeInfo/' + ItemCode, CorpNum, UserID) | [
"def",
"getChargeInfo",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Statement/ChargeInfo/'",
"+",
"ItemCode",
",",
"CorpNum",
",",
"UserID",
")"
] | 과금정보 확인
args
CorpNum : 회원 사업자번호
ItemCode : 전자명세서 종류코드
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException | [
"과금정보",
"확인",
"args",
"CorpNum",
":",
"회원",
"사업자번호",
"ItemCode",
":",
"전자명세서",
"종류코드",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"과금정보",
"객체",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L31-L45 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getUnitCost | def getUnitCost(self, CorpNum, ItemCode):
""" 전자명세서 발행단가 확인.
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
return
발행단가 by float
raise
PopbillException
"""
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
result = self._httpget('/Statement/' + str(ItemCode) + '?cfg=UNITCOST', CorpNum)
return float(result.unitCost) | python | def getUnitCost(self, CorpNum, ItemCode):
""" 전자명세서 발행단가 확인.
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
return
발행단가 by float
raise
PopbillException
"""
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
result = self._httpget('/Statement/' + str(ItemCode) + '?cfg=UNITCOST', CorpNum)
return float(result.unitCost) | [
"def",
"getUnitCost",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
")",
":",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"result",
"=",
"self",
".",
"_httpget",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'?cfg=UNITCOST'",
",",
"CorpNum",
")",
"return",
"float",
"(",
"result",
".",
"unitCost",
")"
] | 전자명세서 발행단가 확인.
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
return
발행단가 by float
raise
PopbillException | [
"전자명세서",
"발행단가",
"확인",
".",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"return",
"발행단가",
"by",
"float",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L64-L80 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.FAXSend | def FAXSend(self, CorpNum, statement, SendNum, ReceiveNum, UserID=None):
""" 선팩스 전송
args
CorpNum : 팝빌회원 사업자번호
statement : 전자명세서 객체
SendNum : 팩스 발신번호
ReceiveNum : 팩스 수신번호
UserID : 팝빌회원 아이디
return
팩스전송 접수번호(receiptNum)
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "전송할 전자명세서 정보가 입력되지 않았습니다.")
if SendNum == None or SendNum == '':
raise PopbillException(-99999999, "팩스전송 발신번호가 올바르지 않았습니다.")
if ReceiveNum == None or ReceiveNum == '':
raise PopbillException(-99999999, "팩스전송 수신번호가 올바르지 않습니다.")
statement.sendNum = SendNum
statement.receiveNum = ReceiveNum
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "FAX").receiptNum | python | def FAXSend(self, CorpNum, statement, SendNum, ReceiveNum, UserID=None):
""" 선팩스 전송
args
CorpNum : 팝빌회원 사업자번호
statement : 전자명세서 객체
SendNum : 팩스 발신번호
ReceiveNum : 팩스 수신번호
UserID : 팝빌회원 아이디
return
팩스전송 접수번호(receiptNum)
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "전송할 전자명세서 정보가 입력되지 않았습니다.")
if SendNum == None or SendNum == '':
raise PopbillException(-99999999, "팩스전송 발신번호가 올바르지 않았습니다.")
if ReceiveNum == None or ReceiveNum == '':
raise PopbillException(-99999999, "팩스전송 수신번호가 올바르지 않습니다.")
statement.sendNum = SendNum
statement.receiveNum = ReceiveNum
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "FAX").receiptNum | [
"def",
"FAXSend",
"(",
"self",
",",
"CorpNum",
",",
"statement",
",",
"SendNum",
",",
"ReceiveNum",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"statement",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"전송할 전자명세서 정보가 입력되지 않았습니다.\")\r",
"",
"if",
"SendNum",
"==",
"None",
"or",
"SendNum",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"팩스전송 발신번호가 올바르지 않았습니다.\")\r",
"",
"if",
"ReceiveNum",
"==",
"None",
"or",
"ReceiveNum",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"팩스전송 수신번호가 올바르지 않습니다.\")\r",
"",
"statement",
".",
"sendNum",
"=",
"SendNum",
"statement",
".",
"receiveNum",
"=",
"ReceiveNum",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"statement",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"FAX\"",
")",
".",
"receiptNum"
] | 선팩스 전송
args
CorpNum : 팝빌회원 사업자번호
statement : 전자명세서 객체
SendNum : 팩스 발신번호
ReceiveNum : 팩스 수신번호
UserID : 팝빌회원 아이디
return
팩스전송 접수번호(receiptNum)
raise
PopbillException | [
"선팩스",
"전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"statement",
":",
"전자명세서",
"객체",
"SendNum",
":",
"팩스",
"발신번호",
"ReceiveNum",
":",
"팩스",
"수신번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"팩스전송",
"접수번호",
"(",
"receiptNum",
")",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L109-L134 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.registIssue | def registIssue(self, CorpNum, statement, Memo=None, UserID=None):
""" 즉시발행
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
Memo : 즉시발행메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
        if Memo != None and Memo != '':
statement.memo = Memo
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "ISSUE") | python | def registIssue(self, CorpNum, statement, Memo=None, UserID=None):
""" 즉시발행
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
Memo : 즉시발행메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
        if Memo != None and Memo != '':
statement.memo = Memo
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "ISSUE") | [
"def",
"registIssue",
"(",
"self",
",",
"CorpNum",
",",
"statement",
",",
"Memo",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"statement",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"등록할 전자명세서 정보가 입력되지 않았습니다.\")\r",
"",
"if",
"Memo",
"!=",
"None",
"or",
"Memo",
"!=",
"''",
":",
"statement",
".",
"memo",
"=",
"Memo",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"statement",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | 즉시발행
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
Memo : 즉시발행메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"즉시발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"statement",
":",
"등록할",
"전자명세서",
"object",
".",
"made",
"with",
"Statement",
"(",
"...",
")",
"Memo",
":",
"즉시발행메모",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L136-L157 |
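A hedged usage sketch for registIssue; the service instance statementService, the corporate number, and the memo are assumptions, and the code/message attributes follow the PopbillException(code, message) constructor seen in the record:

try:
    result = statementService.registIssue(
        CorpNum='1234567890',
        statement=statement,        # a Statement(...) object built elsewhere
        Memo='Issued immediately',
        UserID='testkorea',
    )
    print(result.code, result.message)
except PopbillException as PE:
    print(PE.code, PE.message)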
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.register | def register(self, CorpNum, statement, UserID=None):
""" 임시저장
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID) | python | def register(self, CorpNum, statement, UserID=None):
""" 임시저장
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID) | [
"def",
"register",
"(",
"self",
",",
"CorpNum",
",",
"statement",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"statement",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"등록할 전자명세서 정보가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"statement",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | 임시저장
args
CorpNum : 팝빌회원 사업자번호
statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"임시저장",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"statement",
":",
"등록할",
"전자명세서",
"object",
".",
"made",
"with",
"Statement",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L159-L175 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.update | def update(self, CorpNum, ItemCode, MgtKey, Statement, UserID=None):
""" 임시저장
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if Statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify(Statement)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, 'PATCH') | python | def update(self, CorpNum, ItemCode, MgtKey, Statement, UserID=None):
""" 임시저장
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if Statement == None:
raise PopbillException(-99999999, "등록할 전자명세서 정보가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify(Statement)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, 'PATCH') | [
"def",
"update",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"Statement",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"Statement",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"등록할 전자명세서 정보가 입력되지 않았습니다.\")\r",
"",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"Statement",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"'PATCH'",
")"
] | 임시저장
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Statement : 등록할 전자명세서 object. made with Statement(...)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"임시저장",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"Statement",
":",
"등록할",
"전자명세서",
"object",
".",
"made",
"with",
"Statement",
"(",
"...",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L177-L202 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.issue | def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None):
""" 발행
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
req = {}
postData = ""
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE") | python | def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None):
""" 발행
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
req = {}
postData = ""
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE") | [
"def",
"issue",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"EmailSubject",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"req",
"=",
"{",
"}",
"postData",
"=",
"\"\"",
"if",
"Memo",
"!=",
"None",
"and",
"Memo",
"!=",
"''",
":",
"req",
"[",
"\"memo\"",
"]",
"=",
"Memo",
"if",
"EmailSubject",
"!=",
"None",
"and",
"EmailSubject",
"!=",
"''",
":",
"req",
"[",
"\"emailSubject\"",
"]",
"=",
"EmailSubject",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"req",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"ISSUE\"",
")"
] | 발행
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
EmailSubject : 발행메일 제목(미기재시 기본양식으로 전송)
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"발행",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"Memo",
":",
"처리메모",
"EmailSubject",
":",
"발행메일",
"제목",
"(",
"미기재시",
"기본양식으로",
"전송",
")",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L204-L235 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.cancel | def cancel(self, CorpNum, ItemCode, MgtKey, Memo=None, UserID=None):
""" 발행취소
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = ''
if Memo != None and Memo != '':
postData = self._stringtify({"memo": Memo})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "CANCEL") | python | def cancel(self, CorpNum, ItemCode, MgtKey, Memo=None, UserID=None):
""" 발행취소
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = ''
if Memo != None and Memo != '':
postData = self._stringtify({"memo": Memo})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "CANCEL") | [
"def",
"cancel",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"Memo",
"=",
"None",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"''",
"if",
"Memo",
"!=",
"None",
"and",
"Memo",
"!=",
"''",
":",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"memo\"",
":",
"Memo",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"CANCEL\"",
")"
] | 발행취소
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Memo : 처리메모
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"발행취소",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"Memo",
":",
"처리메모",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L237-L263 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.delete | def delete(self, CorpNum, ItemCode, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, '', CorpNum, UserID, "DELETE") | python | def delete(self, CorpNum, ItemCode, MgtKey, UserID=None):
""" 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, '', CorpNum, UserID, "DELETE") | [
"def",
"delete",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"''",
",",
"CorpNum",
",",
"UserID",
",",
"\"DELETE\"",
")"
] | 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"삭제",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"UserID",
":",
"팝빌회원",
"아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L265-L284 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.search | def search(self, CorpNum, DType, SDate, EDate, State, ItemCode, Page, PerPage, Order, UserID=None, QString=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
ItemCode : 명세서 종류코드 배열, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
UserID : 팝빌 회원아이디
"""
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Statement/Search'
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&ItemCode=' + ','.join(ItemCode)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
return self._httpget(uri, CorpNum, UserID) | python | def search(self, CorpNum, DType, SDate, EDate, State, ItemCode, Page, PerPage, Order, UserID=None, QString=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
ItemCode : 명세서 종류코드 배열, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
UserID : 팝빌 회원아이디
"""
if DType == None or DType == '':
raise PopbillException(-99999999, "일자유형이 입력되지 않았습니다.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/Statement/Search'
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&ItemCode=' + ','.join(ItemCode)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
return self._httpget(uri, CorpNum, UserID) | [
"def",
"search",
"(",
"self",
",",
"CorpNum",
",",
"DType",
",",
"SDate",
",",
"EDate",
",",
"State",
",",
"ItemCode",
",",
"Page",
",",
"PerPage",
",",
"Order",
",",
"UserID",
"=",
"None",
",",
"QString",
"=",
"None",
")",
":",
"if",
"DType",
"==",
"None",
"or",
"DType",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"일자유형이 입력되지 않았습니다.\")\r",
"",
"if",
"SDate",
"==",
"None",
"or",
"SDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"시작일자가 입력되지 않았습니다.\")\r",
"",
"if",
"EDate",
"==",
"None",
"or",
"EDate",
"==",
"''",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"종료일자가 입력되지 않았습니다.\")\r",
"",
"uri",
"=",
"'/Statement/Search'",
"uri",
"+=",
"'?DType='",
"+",
"DType",
"uri",
"+=",
"'&SDate='",
"+",
"SDate",
"uri",
"+=",
"'&EDate='",
"+",
"EDate",
"uri",
"+=",
"'&State='",
"+",
"','",
".",
"join",
"(",
"State",
")",
"uri",
"+=",
"'&ItemCode='",
"+",
"','",
".",
"join",
"(",
"ItemCode",
")",
"uri",
"+=",
"'&Page='",
"+",
"str",
"(",
"Page",
")",
"uri",
"+=",
"'&PerPage='",
"+",
"str",
"(",
"PerPage",
")",
"uri",
"+=",
"'&Order='",
"+",
"Order",
"if",
"QString",
"is",
"not",
"None",
":",
"uri",
"+=",
"'&QString='",
"+",
"QString",
"return",
"self",
".",
"_httpget",
"(",
"uri",
",",
"CorpNum",
",",
"UserID",
")"
] | 목록 조회
args
CorpNum : 팝빌회원 사업자번호
DType : 일자유형, R-등록일시, W-작성일자, I-발행일시 중 택 1
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 상태코드, 2,3번째 자리에 와일드카드(*) 사용가능
ItemCode : 명세서 종류코드 배열, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
QString : 거래처 정보, 거래처 상호 또는 사업자등록번호 기재, 미기재시 전체조회
UserID : 팝빌 회원아이디 | [
"목록",
"조회",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"DType",
":",
"일자유형",
"R",
"-",
"등록일시",
"W",
"-",
"작성일자",
"I",
"-",
"발행일시",
"중",
"택",
"1",
"SDate",
":",
"시작일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"EDate",
":",
"종료일자",
"표시형식",
"(",
"yyyyMMdd",
")",
"State",
":",
"상태코드",
"2",
"3번째",
"자리에",
"와일드카드",
"(",
"*",
")",
"사용가능",
"ItemCode",
":",
"명세서",
"종류코드",
"배열",
"121",
"-",
"명세서",
"122",
"-",
"청구서",
"123",
"-",
"견적서",
"124",
"-",
"발주서",
"125",
"-",
"입금표",
"126",
"-",
"영수증",
"Page",
":",
"페이지번호",
"PerPage",
":",
"페이지당",
"목록개수",
"Order",
":",
"정렬방향",
"D",
"-",
"내림차순",
"A",
"-",
"오름차순",
"QString",
":",
"거래처",
"정보",
"거래처",
"상호",
"또는",
"사업자등록번호",
"기재",
"미기재시",
"전체조회",
"UserID",
":",
"팝빌",
"회원아이디"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L286-L324 |
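A hedged usage sketch for search(); every value below is illustrative, with the parameter meanings taken from the docstring above:

response = statementService.search(
    CorpNum='1234567890',
    DType='W',                   # filter by document date
    SDate='20240101',
    EDate='20240331',
    State=['2**', '3**'],        # wildcards allowed on the 2nd/3rd digits
    ItemCode=['121', '122'],     # statement and bill type codes
    Page=1,
    PerPage=50,
    Order='D',                   # descending
)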
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getInfo | def getInfo(self, CorpNum, ItemCode, MgtKey):
""" 상태/요약 정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상태/요약정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey, CorpNum) | python | def getInfo(self, CorpNum, ItemCode, MgtKey):
""" 상태/요약 정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상태/요약정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey, CorpNum) | [
"def",
"getInfo",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"CorpNum",
")"
] | 상태/요약 정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상태/요약정보 object
raise
PopbillException | [
"상태",
"/",
"요약",
"정보",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"return",
"문서",
"상태",
"/",
"요약정보",
"object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L326-L344 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getDetailInfo | def getDetailInfo(self, CorpNum, ItemCode, MgtKey):
""" 전자명세서 상세정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상세정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '?Detail', CorpNum) | python | def getDetailInfo(self, CorpNum, ItemCode, MgtKey):
""" 전자명세서 상세정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상세정보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '?Detail', CorpNum) | [
"def",
"getDetailInfo",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'?Detail'",
",",
"CorpNum",
")"
] | 전자명세서 상세정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상세정보 object
raise
PopbillException | [
"전자명세서",
"상세정보",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"return",
"문서",
"상세정보",
"object",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L366-L384 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.sendSMS | def sendSMS(self, CorpNum, ItemCode, MgtKey, Sender, Receiver, Contents, UserID=None):
""" 알림문자 전송
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Sender : 발신번호
Receiver : 수신번호
Contents : 문자메시지 내용(최대 90Byte), 최대길이를 초과한경우 길이가 조정되어 전송됨
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify({
"sender": Sender,
"receiver": Receiver,
"contents": Contents
})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "SMS") | python | def sendSMS(self, CorpNum, ItemCode, MgtKey, Sender, Receiver, Contents, UserID=None):
""" 알림문자 전송
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Sender : 발신번호
Receiver : 수신번호
Contents : 문자메시지 내용(최대 90Byte), 최대길이를 초과한경우 길이가 조정되어 전송됨
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify({
"sender": Sender,
"receiver": Receiver,
"contents": Contents
})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "SMS") | [
"def",
"sendSMS",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"Sender",
",",
"Receiver",
",",
"Contents",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"sender\"",
":",
"Sender",
",",
"\"receiver\"",
":",
"Receiver",
",",
"\"contents\"",
":",
"Contents",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"\"SMS\"",
")"
] | 알림문자 전송
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
Sender : 발신번호
Receiver : 수신번호
Contents : 문자메시지 내용(최대 90Byte), 최대길이를 초과한경우 길이가 조정되어 전송됨
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException | [
"알림문자",
"전송",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"Sender",
":",
"발신번호",
"Receiver",
":",
"수신번호",
"Contents",
":",
"문자메시지",
"내용",
"(",
"최대",
"90Byte",
")",
"최대길이를",
"초과한경우",
"길이가",
"조정되어",
"전송됨",
"UserID",
":",
"팝빌",
"회원아이디",
"return",
"처리결과",
".",
"consist",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L410-L439 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getLogs | def getLogs(self, CorpNum, ItemCode, MgtKey):
""" 전자명세서 문서이력 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Logs', CorpNum) | python | def getLogs(self, CorpNum, ItemCode, MgtKey):
""" 전자명세서 문서이력 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Logs', CorpNum) | [
"def",
"getLogs",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/Logs'",
",",
"CorpNum",
")"
] | 전자명세서 문서이력 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서이력 정보 목록 as List
raise
PopbillException | [
"전자명세서",
"문서이력",
"목록",
"확인",
"args",
"CorpNum",
":",
"팝빌회원",
"사업자번호",
"ItemCode",
":",
"명세서",
"종류",
"코드",
"[",
"121",
"-",
"거래명세서",
"]",
"[",
"122",
"-",
"청구서",
"]",
"[",
"123",
"-",
"견적서",
"]",
"[",
"124",
"-",
"발주서",
"]",
"[",
"125",
"-",
"입금표",
"]",
"[",
"126",
"-",
"영수증",
"]",
"MgtKey",
":",
"파트너",
"문서관리번호",
"return",
"문서이력",
"정보",
"목록",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L469-L487 |
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.attachFile | def attachFile(self, CorpNum, ItemCode, MgtKey, FilePath, UserID=None):
""" 파일 첨부
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FilePath : 첨부파일의 경로
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if FilePath == None or FilePath == "":
raise PopbillException(-99999999, "파일경로가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='Filedata',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
return self._httppost_files('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', None, files, CorpNum,
UserID) | python | def attachFile(self, CorpNum, ItemCode, MgtKey, FilePath, UserID=None):
""" 파일 첨부
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FilePath : 첨부파일의 경로
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if FilePath == None or FilePath == "":
raise PopbillException(-99999999, "파일경로가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='Filedata',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.")
return self._httppost_files('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', None, files, CorpNum,
UserID) | [
"def",
"attachFile",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"FilePath",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"FilePath",
"==",
"None",
"or",
"FilePath",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"파일경로가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"files",
"=",
"[",
"]",
"try",
":",
"with",
"open",
"(",
"FilePath",
",",
"\"rb\"",
")",
"as",
"F",
":",
"files",
"=",
"[",
"File",
"(",
"fieldName",
"=",
"'Filedata'",
",",
"fileName",
"=",
"F",
".",
"name",
",",
"fileData",
"=",
"F",
".",
"read",
"(",
")",
")",
"]",
"except",
"IOError",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"해당경로에 파일이 없거나 읽을 수 없습니다.\")\r",
"",
"return",
"self",
".",
"_httppost_files",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/Files'",
",",
"None",
",",
"files",
",",
"CorpNum",
",",
"UserID",
")"
] | Attach a file
args
CorpNum : Popbill member business registration number
ItemCode : statement type code
[121 - transaction statement], [122 - bill], [123 - estimate],
[124 - purchase order], [125 - deposit slip], [126 - receipt]
MgtKey : partner document management key
FilePath : path of the file to attach
UserID : Popbill member ID
return
processing result. consists of code and message
raise
PopbillException | [
"Attach",
"a",
"file",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"ItemCode",
":",
"statement",
"type",
"code",
"[",
"121",
"-",
"transaction",
"statement",
"]",
"[",
"122",
"-",
"bill",
"]",
"[",
"123",
"-",
"estimate",
"]",
"[",
"124",
"-",
"purchase",
"order",
"]",
"[",
"125",
"-",
"deposit",
"slip",
"]",
"[",
"126",
"-",
"receipt",
"]",
"MgtKey",
":",
"partner",
"document",
"management",
"key",
"FilePath",
":",
"path",
"of",
"the",
"file",
"to",
"attach",
"UserID",
":",
"Popbill",
"member",
"ID",
"return",
"processing",
"result",
".",
"consists",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L489-L521 |
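
A minimal, hypothetical usage sketch for the attachFile record above. The constructor arguments, corporate number, management key, and file path are placeholders, and the code/message fields on the response follow wider popbill SDK conventions rather than anything stated in this record.

from popbill import PopbillException, StatementService

statementService = StatementService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # placeholder credentials

try:
    # Attach a local image to transaction statement (ItemCode 121) "20230101-001".
    result = statementService.attachFile("1234567890", 121, "20230101-001",
                                         "/tmp/evidence.jpg")
    print(result.code, result.message)
except PopbillException as pe:
    print(pe.code, pe.message)
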
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getFiles | def getFiles(self, CorpNum, ItemCode, MgtKey):
""" 첨부파일 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
첨부파일 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', CorpNum) | python | def getFiles(self, CorpNum, ItemCode, MgtKey):
""" 첨부파일 목록 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
첨부파일 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', CorpNum) | [
"def",
"getFiles",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"return",
"self",
".",
"_httpget",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/Files'",
",",
"CorpNum",
")"
] | Get the list of attached files
args
CorpNum : Popbill member business registration number
ItemCode : statement type code
[121 - transaction statement], [122 - bill], [123 - estimate],
[124 - purchase order], [125 - deposit slip], [126 - receipt]
MgtKey : partner document management key
return
list of attached files as List
raise
PopbillException | [
"Get",
"the",
"list",
"of",
"attached",
"files",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"ItemCode",
":",
"statement",
"type",
"code",
"[",
"121",
"-",
"transaction",
"statement",
"]",
"[",
"122",
"-",
"bill",
"]",
"[",
"123",
"-",
"estimate",
"]",
"[",
"124",
"-",
"purchase",
"order",
"]",
"[",
"125",
"-",
"deposit",
"slip",
"]",
"[",
"126",
"-",
"receipt",
"]",
"MgtKey",
":",
"partner",
"document",
"management",
"key",
"return",
"list",
"of",
"attached",
"files",
"as",
"List",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L523-L541 |
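
A companion sketch for getFiles above, under the same hypothetical setup; the AttachedFile field on each returned item is an assumption taken from the FileID note in the deleteFile record that follows.

from popbill import PopbillException, StatementService

statementService = StatementService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # placeholder credentials

try:
    files = statementService.getFiles("1234567890", 121, "20230101-001")
    for f in files:
        print(f.AttachedFile)  # file ID usable with deleteFile (assumed field name)
except PopbillException as pe:
    print(pe.code, pe.message)
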
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.deleteFile | def deleteFile(self, CorpNum, ItemCode, MgtKey, FileID, UserID=None):
""" 첨부파일 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FileID : 파일아이디, 첨부파일 목록확인(getFiles) API 응답전문의 AttachedFile 변수값
UserID : 팝빌회원 아이디
return
첨부파일 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files/' + FileID, postData, CorpNum,
UserID, 'DELETE') | python | def deleteFile(self, CorpNum, ItemCode, MgtKey, FileID, UserID=None):
""" 첨부파일 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FileID : 파일아이디, 첨부파일 목록확인(getFiles) API 응답전문의 AttachedFile 변수값
UserID : 팝빌회원 아이디
return
첨부파일 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files/' + FileID, postData, CorpNum,
UserID, 'DELETE') | [
"def",
"deleteFile",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"FileID",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"if",
"FileID",
"==",
"None",
"or",
"FileID",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"파일아이디가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"''",
"return",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/Files/'",
"+",
"FileID",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
",",
"'DELETE'",
")"
] | Delete an attached file
args
CorpNum : Popbill member business registration number
ItemCode : statement type code
[121 - transaction statement], [122 - bill], [123 - estimate],
[124 - purchase order], [125 - deposit slip], [126 - receipt]
MgtKey : partner document management key
FileID : file ID, the AttachedFile value from the attached file list (getFiles) API response
UserID : Popbill member ID
return
processing result. consists of code and message
raise
PopbillException | [
"Delete",
"an",
"attached",
"file",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"ItemCode",
":",
"statement",
"type",
"code",
"[",
"121",
"-",
"transaction",
"statement",
"]",
"[",
"122",
"-",
"bill",
"]",
"[",
"123",
"-",
"estimate",
"]",
"[",
"124",
"-",
"purchase",
"order",
"]",
"[",
"125",
"-",
"deposit",
"slip",
"]",
"[",
"126",
"-",
"receipt",
"]",
"MgtKey",
":",
"partner",
"document",
"management",
"key",
"FileID",
":",
"file",
"ID",
"the",
"AttachedFile",
"value",
"from",
"the",
"attached",
"file",
"list",
"(",
"getFiles",
")",
"API",
"response",
"UserID",
":",
"Popbill",
"member",
"ID",
"return",
"processing",
"result",
".",
"consists",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L543-L568 |
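
A sketch chaining getFiles and deleteFile from the records above; every literal value and the AttachedFile field name are assumptions for illustration.

from popbill import PopbillException, StatementService

statementService = StatementService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # placeholder credentials

try:
    files = statementService.getFiles("1234567890", 121, "20230101-001")
    if files:
        # Remove the first attachment via its file ID.
        result = statementService.deleteFile("1234567890", 121, "20230101-001",
                                             files[0].AttachedFile)
        print(result.code, result.message)
except PopbillException as pe:
    print(pe.code, pe.message)
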
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.getMassPrintURL | def getMassPrintURL(self, CorpNum, ItemCode, MgtKeyList, UserID=None):
""" 다량 인쇄 URL 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKeyList : 파트너 문서관리번호 목록
UserID : 팝빌회원 아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKeyList == None:
raise PopbillException(-99999999, "관리번호 배열이 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
result = self._httppost('/Statement/' + str(ItemCode) + '?Print', postData, CorpNum, UserID)
return result.url | python | def getMassPrintURL(self, CorpNum, ItemCode, MgtKeyList, UserID=None):
""" 다량 인쇄 URL 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKeyList : 파트너 문서관리번호 목록
UserID : 팝빌회원 아이디
return
팝빌 URL as str
raise
PopbillException
"""
if MgtKeyList == None:
raise PopbillException(-99999999, "관리번호 배열이 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
postData = self._stringtify(MgtKeyList)
result = self._httppost('/Statement/' + str(ItemCode) + '?Print', postData, CorpNum, UserID)
return result.url | [
"def",
"getMassPrintURL",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKeyList",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKeyList",
"==",
"None",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호 배열이 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"MgtKeyList",
")",
"result",
"=",
"self",
".",
"_httppost",
"(",
"'/Statement/'",
"+",
"str",
"(",
"ItemCode",
")",
"+",
"'?Print'",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")",
"return",
"result",
".",
"url"
] | Get the bulk print URL
args
CorpNum : Popbill member business registration number
ItemCode : statement type code
[121 - transaction statement], [122 - bill], [123 - estimate],
[124 - purchase order], [125 - deposit slip], [126 - receipt]
MgtKeyList : list of partner document management keys
UserID : Popbill member ID
return
Popbill URL as str
raise
PopbillException | [
"Get",
"the",
"bulk",
"print",
"URL",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"ItemCode",
":",
"statement",
"type",
"code",
"[",
"121",
"-",
"transaction",
"statement",
"]",
"[",
"122",
"-",
"bill",
"]",
"[",
"123",
"-",
"estimate",
"]",
"[",
"124",
"-",
"purchase",
"order",
"]",
"[",
"125",
"-",
"deposit",
"slip",
"]",
"[",
"126",
"-",
"receipt",
"]",
"MgtKeyList",
":",
"list",
"of",
"partner",
"document",
"management",
"keys",
"UserID",
":",
"Popbill",
"member",
"ID",
"return",
"Popbill",
"URL",
"as",
"str",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L662-L685 |
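
A sketch of requesting a bulk print URL per the record above; all literal values are placeholders.

from popbill import PopbillException, StatementService

statementService = StatementService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # placeholder credentials

try:
    url = statementService.getMassPrintURL("1234567890", 121,
                                           ["20230101-001", "20230101-002"])
    print(url)  # Popbill print page covering both statements
except PopbillException as pe:
    print(pe.code, pe.message)
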
linkhub-sdk/popbill.py | popbill/statementService.py | StatementService.attachStatement | def attachStatement(self, CorpNum, ItemCode, MgtKey, SubItemCode, SubMgtKey, UserID=None):
""" 다른 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 전자명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
MgtKey : 전자명세서 문서관리번호
SubItemCode : 첨부할 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
SubMgtKey : 첨부할 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
uri = '/Statement/' + str(ItemCode) + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({"ItemCode": SubItemCode, "MgtKey": SubMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) | python | def attachStatement(self, CorpNum, ItemCode, MgtKey, SubItemCode, SubMgtKey, UserID=None):
""" 다른 전자명세서 첨부
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 전자명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
MgtKey : 전자명세서 문서관리번호
SubItemCode : 첨부할 명세서 종류코드, 121-명세서, 122-청구서, 123-견적서, 124-발주서 125-입금표, 126-영수증
SubMgtKey : 첨부할 전자명세서 문서관리번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
uri = '/Statement/' + str(ItemCode) + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({"ItemCode": SubItemCode, "MgtKey": SubMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) | [
"def",
"attachStatement",
"(",
"self",
",",
"CorpNum",
",",
"ItemCode",
",",
"MgtKey",
",",
"SubItemCode",
",",
"SubMgtKey",
",",
"UserID",
"=",
"None",
")",
":",
"if",
"MgtKey",
"==",
"None",
"or",
"MgtKey",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"관리번호가 입력되지 않았습니다.\")\r",
"",
"if",
"ItemCode",
"==",
"None",
"or",
"ItemCode",
"==",
"\"\"",
":",
"raise",
"PopbillException",
"(",
"-",
"99999999",
",",
"\"명세서 종류 코드가 입력되지 않았습니다.\")\r",
"",
"uri",
"=",
"'/Statement/'",
"+",
"ItemCode",
"+",
"'/'",
"+",
"MgtKey",
"+",
"'/AttachStmt'",
"postData",
"=",
"self",
".",
"_stringtify",
"(",
"{",
"\"ItemCode\"",
":",
"ItemCode",
",",
"\"MgtKey\"",
":",
"SubMgtKey",
"}",
")",
"return",
"self",
".",
"_httppost",
"(",
"uri",
",",
"postData",
",",
"CorpNum",
",",
"UserID",
")"
] | Attach another electronic statement
args
CorpNum : Popbill member business registration number
ItemCode : electronic statement type code, 121-statement, 122-bill, 123-estimate, 124-purchase order, 125-deposit slip, 126-receipt
MgtKey : electronic statement document management key
SubItemCode : type code of the statement to attach, 121-statement, 122-bill, 123-estimate, 124-purchase order, 125-deposit slip, 126-receipt
SubMgtKey : document management key of the electronic statement to attach
UserID : Popbill member ID
return
processing result. consists of code and message
raise
PopbillException | [
"Attach",
"another",
"electronic",
"statement",
"args",
"CorpNum",
":",
"Popbill",
"member",
"business",
"registration",
"number",
"ItemCode",
":",
"electronic",
"statement",
"type",
"code",
"121",
"-",
"statement",
"122",
"-",
"bill",
"123",
"-",
"estimate",
"124",
"-",
"purchase",
"order",
"125",
"-",
"deposit",
"slip",
"126",
"-",
"receipt",
"MgtKey",
":",
"electronic",
"statement",
"document",
"management",
"key",
"SubItemCode",
":",
"type",
"code",
"of",
"the",
"statement",
"to",
"attach",
"121",
"-",
"statement",
"122",
"-",
"bill",
"123",
"-",
"estimate",
"124",
"-",
"purchase",
"order",
"125",
"-",
"deposit",
"slip",
"126",
"-",
"receipt",
"SubMgtKey",
":",
"document",
"management",
"key",
"of",
"the",
"electronic",
"statement",
"to",
"attach",
"UserID",
":",
"Popbill",
"member",
"ID",
"return",
"processing",
"result",
".",
"consists",
"of",
"code",
"and",
"message",
"raise",
"PopbillException"
] | train | https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L687-L710 |
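
A sketch of attaching one statement to another per the record above; note that the request body carries SubItemCode and SubMgtKey. All literal values are placeholders.

from popbill import PopbillException, StatementService

statementService = StatementService("YOUR_LINK_ID", "YOUR_SECRET_KEY")  # placeholder credentials

try:
    # Attach bill 122/"B-2023-001" to transaction statement 121/"20230101-001".
    result = statementService.attachStatement("1234567890", 121, "20230101-001",
                                              122, "B-2023-001")
    print(result.code, result.message)
except PopbillException as pe:
    print(pe.code, pe.message)
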
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/package/__init__.py | Package.process | def process(self, article):
"""
Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Package only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Package cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
#Analyze the article to add entries to the spine
dash_doi = self.article_doi.replace('.', '-')
#Entry for the main content document
main_idref = 'main-{0}-xhtml'.format(dash_doi)
self.spine_list.append(spine_item(main_idref, True))
#Entry for the biblio content document
biblio_idref = 'biblio-{0}-xhtml'.format(dash_doi)
if self.article.root.xpath('./back/ref-list/ref'):
self.spine_list.append(spine_item(biblio_idref, True))
#Entry for the tables content document
tables_idref = 'tables-{0}-xhtml'.format(dash_doi)
if self.article.publisher.has_out_of_flow_tables():
self.spine_list.append(spine_item(tables_idref, False))
self.acquire_metadata() | python | def process(self, article):
"""
Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Package only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Package cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
#Analyze the article to add entries to the spine
dash_doi = self.article_doi.replace('.', '-')
#Entry for the main content document
main_idref = 'main-{0}-xhtml'.format(dash_doi)
self.spine_list.append(spine_item(main_idref, True))
#Entry for the biblio content document
biblio_idref = 'biblio-{0}-xhtml'.format(dash_doi)
if self.article.root.xpath('./back/ref-list/ref'):
self.spine_list.append(spine_item(biblio_idref, True))
#Entry for the tables content document
tables_idref = 'tables-{0}-xhtml'.format(dash_doi)
if self.article.publisher.has_out_of_flow_tables():
self.spine_list.append(spine_item(tables_idref, False))
self.acquire_metadata() | [
"def",
"process",
"(",
"self",
",",
"article",
")",
":",
"if",
"self",
".",
"article",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"collection",
":",
"log",
".",
"warning",
"(",
"'Could not process additional article. Package only \\\nhandles one article unless collection mode is set.'",
")",
"return",
"False",
"if",
"article",
".",
"publisher",
"is",
"None",
":",
"log",
".",
"error",
"(",
"'''Package cannot be generated for an Article \\\nwithout a publisher!'''",
")",
"return",
"self",
".",
"article",
"=",
"article",
"self",
".",
"article_doi",
"=",
"self",
".",
"article",
".",
"doi",
".",
"split",
"(",
"'/'",
")",
"[",
"1",
"]",
"self",
".",
"all_dois",
".",
"append",
"(",
"self",
".",
"article",
".",
"doi",
")",
"#Analyze the article to add entries to the spine",
"dash_doi",
"=",
"self",
".",
"article_doi",
".",
"replace",
"(",
"'.'",
",",
"'-'",
")",
"#Entry for the main content document",
"main_idref",
"=",
"'main-{0}-xhtml'",
".",
"format",
"(",
"dash_doi",
")",
"self",
".",
"spine_list",
".",
"append",
"(",
"spine_item",
"(",
"main_idref",
",",
"True",
")",
")",
"#Entry for the biblio content document",
"biblio_idref",
"=",
"'biblio-{0}-xhtml'",
".",
"format",
"(",
"dash_doi",
")",
"if",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./back/ref-list/ref'",
")",
":",
"self",
".",
"spine_list",
".",
"append",
"(",
"spine_item",
"(",
"biblio_idref",
",",
"True",
")",
")",
"#Entry for the tables content document",
"tables_idref",
"=",
"'tables-{0}-xhtml'",
".",
"format",
"(",
"dash_doi",
")",
"if",
"self",
".",
"article",
".",
"publisher",
".",
"has_out_of_flow_tables",
"(",
")",
":",
"self",
".",
"spine_list",
".",
"append",
"(",
"spine_item",
"(",
"tables_idref",
",",
"False",
")",
")",
"self",
".",
"acquire_metadata",
"(",
")"
] | Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references. | [
"Ingests",
"an",
"article",
"and",
"processes",
"it",
"for",
"metadata",
"and",
"elements",
"to",
"provide",
"proper",
"references",
"in",
"the",
"EPUB",
"spine",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/package/__init__.py#L70-L120 |
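
The spine bookkeeping in the record above reduces to a small pattern; this stand-alone sketch uses a namedtuple as a stand-in for the module's spine_item helper and a made-up DOI.

from collections import namedtuple

spine_item = namedtuple('spine_item', ['idref', 'linear'])  # stand-in helper

doi = '10.1371/journal.pone.0012345'  # hypothetical article DOI
dash_doi = doi.split('/')[1].replace('.', '-')
spine_list = [spine_item('main-{0}-xhtml'.format(dash_doi), True)]
spine_list.append(spine_item('biblio-{0}-xhtml'.format(dash_doi), True))
print([item.idref for item in spine_list])
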
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/package/__init__.py | Package.acquire_metadata | def acquire_metadata(self):
"""
Handles the acquisition of metadata for both collection mode and single
mode, uses the metadata methods belonging to the article's publisher
attribute.
"""
#For space economy
publisher = self.article.publisher
if self.collection: # collection mode metadata gathering
pass
else: # single mode metadata gathering
self.pub_id = publisher.package_identifier()
self.title = publisher.package_title()
for date in publisher.package_date():
self.dates.add(date)
#Common metadata gathering
for lang in publisher.package_language():
self.languages.add(lang) # languages
for contributor in publisher.package_contributors(): # contributors
self.contributors.add(contributor)
self.publishers.add(publisher.package_publisher()) # publisher names
desc = publisher.package_description()
if desc is not None:
self.descriptions.add(desc)
for subj in publisher.package_subject():
self.subjects.add(subj) # subjects
#Rights
art_rights = publisher.package_rights()
self.rights.add(art_rights)
if art_rights not in self.rights_associations:
self.rights_associations[art_rights] = [self.article.doi]
else:
self.rights_associations[art_rights].append(self.article.doi) | python | def acquire_metadata(self):
"""
Handles the acquisition of metadata for both collection mode and single
mode, uses the metadata methods belonging to the article's publisher
attribute.
"""
#For space economy
publisher = self.article.publisher
if self.collection: # collection mode metadata gathering
pass
else: # single mode metadata gathering
self.pub_id = publisher.package_identifier()
self.title = publisher.package_title()
for date in publisher.package_date():
self.dates.add(date)
#Common metadata gathering
for lang in publisher.package_language():
self.languages.add(lang) # languages
for contributor in publisher.package_contributors(): # contributors
self.contributors.add(contributor)
self.publishers.add(publisher.package_publisher()) # publisher names
desc = publisher.package_description()
if desc is not None:
self.descriptions.add(desc)
for subj in publisher.package_subject():
self.subjects.add(subj) # subjects
#Rights
art_rights = publisher.package_rights()
self.rights.add(art_rights)
if art_rights not in self.rights_associations:
self.rights_associations[art_rights] = [self.article.doi]
else:
self.rights_associations[art_rights].append(self.article.doi) | [
"def",
"acquire_metadata",
"(",
"self",
")",
":",
"#For space economy",
"publisher",
"=",
"self",
".",
"article",
".",
"publisher",
"if",
"self",
".",
"collection",
":",
"# collection mode metadata gathering",
"pass",
"else",
":",
"# single mode metadata gathering",
"self",
".",
"pub_id",
"=",
"publisher",
".",
"package_identifier",
"(",
")",
"self",
".",
"title",
"=",
"publisher",
".",
"package_title",
"(",
")",
"for",
"date",
"in",
"publisher",
".",
"package_date",
"(",
")",
":",
"self",
".",
"dates",
".",
"add",
"(",
"date",
")",
"#Common metadata gathering",
"for",
"lang",
"in",
"publisher",
".",
"package_language",
"(",
")",
":",
"self",
".",
"languages",
".",
"add",
"(",
"lang",
")",
"# languages",
"for",
"contributor",
"in",
"publisher",
".",
"package_contributors",
"(",
")",
":",
"# contributors",
"self",
".",
"contributors",
".",
"add",
"(",
"contributor",
")",
"self",
".",
"publishers",
".",
"add",
"(",
"publisher",
".",
"package_publisher",
"(",
")",
")",
"# publisher names",
"desc",
"=",
"publisher",
".",
"package_description",
"(",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"self",
".",
"descriptions",
".",
"add",
"(",
"desc",
")",
"for",
"subj",
"in",
"publisher",
".",
"package_subject",
"(",
")",
":",
"self",
".",
"subjects",
".",
"add",
"(",
"subj",
")",
"# subjects",
"#Rights",
"art_rights",
"=",
"publisher",
".",
"package_rights",
"(",
")",
"self",
".",
"rights",
".",
"add",
"(",
"art_rights",
")",
"if",
"art_rights",
"not",
"in",
"self",
".",
"rights_associations",
":",
"self",
".",
"rights_associations",
"[",
"art_rights",
"]",
"=",
"[",
"self",
".",
"article",
".",
"doi",
"]",
"else",
":",
"self",
".",
"rights_associations",
"[",
"art_rights",
"]",
".",
"append",
"(",
"self",
".",
"article",
".",
"doi",
")"
] | Handles the acquisition of metadata for both collection mode and single
mode, using the metadata methods belonging to the article's publisher
attribute. | [
"Handles",
"the",
"acquisition",
"of",
"metadata",
"for",
"both",
"collection",
"mode",
"and",
"single",
"mode",
"uses",
"the",
"metadata",
"methods",
"belonging",
"to",
"the",
"article",
"s",
"publisher",
"attribute",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/package/__init__.py#L122-L156 |
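
The rights_associations bookkeeping above amounts to grouping DOIs by their rights statement; a toy stand-alone version with invented DOIs:

rights_associations = {}
for doi, rights in [('10.1371/a', 'CC-BY'), ('10.1371/b', 'CC-BY'),
                    ('10.1371/c', 'CC0')]:
    if rights not in rights_associations:
        rights_associations[rights] = [doi]
    else:
        rights_associations[rights].append(doi)
print(rights_associations)  # {'CC-BY': ['10.1371/a', '10.1371/b'], 'CC0': ['10.1371/c']}
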
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/package/__init__.py | Package.file_manifest | def file_manifest(self, location):
"""
An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest.
"""
#Maps file extensions to mimetypes
mimetypes = {'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.xml': 'application/xhtml+xml',
'.png': 'image/png',
'.css': 'text/css',
'.ncx': 'application/x-dtbncx+xml',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.pdf': 'application/pdf',
'.xhtml': 'application/xhtml+xml',
'.ttf': 'application/vnd.ms-opentype',
'.otf': 'application/vnd.ms-opentype'}
current_dir = os.getcwd()
os.chdir(location)
for dirpath, _dirnames, filenames in os.walk('.'):
dirpath = dirpath[2:] # A means to avoid dirpath prefix of './'
for fn in filenames:
fn_ext = os.path.splitext(fn)[-1]
item = etree.Element('item')
#Here we set three attributes: href, media-type, and id
if not dirpath:
item.attrib['href'] = fn
else:
item.attrib['href'] = '/'.join([dirpath, fn])
item.attrib['media-type'] = mimetypes[fn_ext]
#Special handling for common image types
if fn_ext in ['.jpg', '.png', '.tif', '.jpeg']:
#the following lines assume we are using the convention
#where the article doi is prefixed by 'images-'
item.attrib['id'] = '-'.join([dirpath[7:],
fn.replace('.', '-')])
else:
item.attrib['id'] = fn.replace('.', '-')
yield item
os.chdir(current_dir) | python | def file_manifest(self, location):
"""
An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest.
"""
#Maps file extensions to mimetypes
mimetypes = {'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.xml': 'application/xhtml+xml',
'.png': 'image/png',
'.css': 'text/css',
'.ncx': 'application/x-dtbncx+xml',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.pdf': 'application/pdf',
'.xhtml': 'application/xhtml+xml',
'.ttf': 'application/vnd.ms-opentype',
'.otf': 'application/vnd.ms-opentype'}
current_dir = os.getcwd()
os.chdir(location)
for dirpath, _dirnames, filenames in os.walk('.'):
dirpath = dirpath[2:] # A means to avoid dirpath prefix of './'
for fn in filenames:
fn_ext = os.path.splitext(fn)[-1]
item = etree.Element('item')
#Here we set three attributes: href, media-type, and id
if not dirpath:
item.attrib['href'] = fn
else:
item.attrib['href'] = '/'.join([dirpath, fn])
item.attrib['media-type'] = mimetypes[fn_ext]
#Special handling for common image types
if fn_ext in ['.jpg', '.png', '.tif', '.jpeg']:
#the following lines assume we are using the convention
#where the article doi is prefixed by 'images-'
item.attrib['id'] = '-'.join([dirpath[7:],
fn.replace('.', '-')])
else:
item.attrib['id'] = fn.replace('.', '-')
yield item
os.chdir(current_dir) | [
"def",
"file_manifest",
"(",
"self",
",",
"location",
")",
":",
"#Maps file extensions to mimetypes",
"mimetypes",
"=",
"{",
"'.jpg'",
":",
"'image/jpeg'",
",",
"'.jpeg'",
":",
"'image/jpeg'",
",",
"'.xml'",
":",
"'application/xhtml+xml'",
",",
"'.png'",
":",
"'image/png'",
",",
"'.css'",
":",
"'text/css'",
",",
"'.ncx'",
":",
"'application/x-dtbncx+xml'",
",",
"'.gif'",
":",
"'image/gif'",
",",
"'.tif'",
":",
"'image/tif'",
",",
"'.pdf'",
":",
"'application/pdf'",
",",
"'.xhtml'",
":",
"'application/xhtml+xml'",
",",
"'.ttf'",
":",
"'application/vnd.ms-opentype'",
",",
"'.otf'",
":",
"'application/vnd.ms-opentype'",
"}",
"current_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"location",
")",
"for",
"dirpath",
",",
"_dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"'.'",
")",
":",
"dirpath",
"=",
"dirpath",
"[",
"2",
":",
"]",
"# A means to avoid dirpath prefix of './'",
"for",
"fn",
"in",
"filenames",
":",
"fn_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"[",
"-",
"1",
"]",
"item",
"=",
"etree",
".",
"Element",
"(",
"'item'",
")",
"#Here we set three attributes: href, media-type, and id",
"if",
"not",
"dirpath",
":",
"item",
".",
"attrib",
"[",
"'href'",
"]",
"=",
"fn",
"else",
":",
"item",
".",
"attrib",
"[",
"'href'",
"]",
"=",
"'/'",
".",
"join",
"(",
"[",
"dirpath",
",",
"fn",
"]",
")",
"item",
".",
"attrib",
"[",
"'media-type'",
"]",
"=",
"mimetypes",
"[",
"fn_ext",
"]",
"#Special handling for common image types",
"if",
"fn_ext",
"in",
"[",
"'.jpg'",
",",
"'.png'",
",",
"'.tif'",
",",
"'.jpeg'",
"]",
":",
"#the following lines assume we are using the convention",
"#where the article doi is prefixed by 'images-'",
"item",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'-'",
".",
"join",
"(",
"[",
"dirpath",
"[",
"7",
":",
"]",
",",
"fn",
".",
"replace",
"(",
"'.'",
",",
"'-'",
")",
"]",
")",
"else",
":",
"item",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"fn",
".",
"replace",
"(",
"'.'",
",",
"'-'",
")",
"yield",
"item",
"os",
".",
"chdir",
"(",
"current_dir",
")"
] | An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest. | [
"An",
"iterator",
"through",
"the",
"files",
"in",
"a",
"location",
"which",
"yields",
"item",
"elements",
"suitable",
"for",
"insertion",
"into",
"the",
"package",
"manifest",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/package/__init__.py#L158-L199 |
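
A reduced, stand-alone sketch of the manifest walk above over a hypothetical 'EPUB' directory; unlike the record's dict lookup, unknown extensions fall back to a generic media type here instead of raising KeyError.

import os

from lxml import etree

mimetypes = {'.css': 'text/css', '.png': 'image/png',
             '.xhtml': 'application/xhtml+xml'}
for dirpath, _dirnames, filenames in os.walk('EPUB'):  # hypothetical directory
    for fn in filenames:
        fn_ext = os.path.splitext(fn)[-1]
        item = etree.Element('item')
        item.attrib['href'] = os.path.relpath(os.path.join(dirpath, fn), 'EPUB')
        item.attrib['media-type'] = mimetypes.get(fn_ext, 'application/octet-stream')
        item.attrib['id'] = fn.replace('.', '-')
        print(etree.tostring(item))
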
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.get_contrib_names | def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
"""
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name | python | def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
"""
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name | [
"def",
"get_contrib_names",
"(",
"self",
",",
"contrib",
")",
":",
"collab",
"=",
"contrib",
".",
"find",
"(",
"'collab'",
")",
"anon",
"=",
"contrib",
".",
"find",
"(",
"'anonymous'",
")",
"if",
"collab",
"is",
"not",
"None",
":",
"proper_name",
"=",
"serialize",
"(",
"collab",
",",
"strip",
"=",
"True",
")",
"file_as_name",
"=",
"proper_name",
"elif",
"anon",
"is",
"not",
"None",
":",
"proper_name",
"=",
"'Anonymous'",
"file_as_name",
"=",
"proper_name",
"else",
":",
"name",
"=",
"contrib",
".",
"find",
"(",
"'name'",
")",
"surname",
"=",
"name",
".",
"find",
"(",
"'surname'",
")",
".",
"text",
"given",
"=",
"name",
".",
"find",
"(",
"'given-names'",
")",
"if",
"given",
"is",
"not",
"None",
":",
"if",
"given",
".",
"text",
":",
"# Sometimes these tags are empty",
"proper_name",
"=",
"' '",
".",
"join",
"(",
"[",
"surname",
",",
"given",
".",
"text",
"]",
")",
"#File-as name is <surname>, <given-initial-char>",
"file_as_name",
"=",
"', '",
".",
"join",
"(",
"[",
"surname",
",",
"given",
".",
"text",
"[",
"0",
"]",
"]",
")",
"else",
":",
"proper_name",
"=",
"surname",
"file_as_name",
"=",
"proper_name",
"else",
":",
"proper_name",
"=",
"surname",
"file_as_name",
"=",
"proper_name",
"return",
"proper_name",
",",
"file_as_name"
] | Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well. | [
"Returns",
"an",
"appropriate",
"Name",
"and",
"File",
"-",
"As",
"-",
"Name",
"for",
"a",
"contrib",
"element",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L31-L63 |
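
Feeding a minimal JATS-style contrib element through the name logic described above, with plain lxml text access in place of the module's serialize helper:

from lxml import etree

contrib = etree.fromstring(
    '<contrib><name><surname>Doe</surname>'
    '<given-names>Jane</given-names></name></contrib>')
surname = contrib.find('name/surname').text
given = contrib.find('name/given-names').text
proper_name = ' '.join([surname, given])       # 'Doe Jane'
file_as_name = ', '.join([surname, given[0]])  # 'Doe, J'
print(proper_name, '|', file_as_name)
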
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.package_description | def package_description(self):
"""
Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article.
"""
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None | python | def package_description(self):
"""
Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article.
"""
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None | [
"def",
"package_description",
"(",
"self",
")",
":",
"abstract",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/abstract'",
")",
"return",
"serialize",
"(",
"abstract",
"[",
"0",
"]",
",",
"strip",
"=",
"True",
")",
"if",
"abstract",
"else",
"None"
] | Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article. | [
"Given",
"an",
"Article",
"class",
"instance",
"this",
"is",
"responsible",
"for",
"returning",
"an",
"article",
"description",
".",
"For",
"this",
"method",
"I",
"have",
"taken",
"the",
"approach",
"of",
"serializing",
"the",
"article",
"s",
"first",
"abstract",
"if",
"it",
"has",
"one",
".",
"This",
"results",
"in",
"0",
"or",
"1",
"descriptions",
"per",
"article",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L112-L120 |
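
A minimal stand-in for the abstract lookup above; lxml's string() extraction substitutes for the serialize helper used in the record:

from lxml import etree

root = etree.fromstring(
    '<article><front><article-meta>'
    '<abstract><p>A short summary.</p></abstract>'
    '</article-meta></front></article>')
abstract = root.xpath('./front/article-meta/abstract')
print(abstract[0].xpath('string()').strip() if abstract else None)
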
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.heading_title | def heading_title(self):
"""
Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter
"""
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title | python | def heading_title(self):
"""
Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter
"""
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title | [
"def",
"heading_title",
"(",
"self",
")",
":",
"art_title",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/title-group/article-title'",
")",
"[",
"0",
"]",
"article_title",
"=",
"deepcopy",
"(",
"art_title",
")",
"article_title",
".",
"tag",
"=",
"'h1'",
"article_title",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'title'",
"article_title",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'article-title'",
"return",
"article_title"
] | Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter | [
"Makes",
"the",
"Article",
"Title",
"for",
"the",
"Heading",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L182-L193 |
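
The retagging trick above, shown on a toy article-title element:

from copy import deepcopy

from lxml import etree

art_title = etree.fromstring('<article-title>My <i>Great</i> Study</article-title>')
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
print(etree.tostring(article_title))  # b'<h1 id="title" class="article-title">My <i>Great</i> Study</h1>'
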
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_heading_authors | def make_heading_authors(self, authors):
"""
Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter
"""
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
if first:
first = False
else:
append_new_text(author_element, ',', join_str='')
collab = author.find('collab')
anon = author.find('anon')
if collab is not None:
append_all_below(author_element, collab)
elif anon is not None: # If anonymous, just add "Anonymous"
append_new_text(author_element, 'Anonymous')
else: # Author is neither Anonymous nor a Collaboration
author_name, _ = self.get_contrib_names(author)
append_new_text(author_element, author_name)
#TODO: Handle author footnote references, also put footnotes in the ArticleInfo
#Example: journal.pbio.0040370.xml
first = True
for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
_sup = xref.find('sup')
sup_text = all_text(_sup) if _sup is not None else ''
auth_sup = etree.SubElement(author_element, 'sup')
sup_link = etree.SubElement(auth_sup,
'a',
{'href': self.main_fragment.format(xref.attrib['rid'])})
sup_link.text = sup_text
if first:
first = False
else:
append_new_text(auth_sup, ', ', join_str='')
#for xref in author.findall('xref'):
#if xref.attrs['ref-type'] in ['corresp', 'aff']:
#try:
#sup_element = xref.sup[0].node
#except IndexError:
#sup_text = ''
#else:
#sup_text = all_text(sup_element)
#new_sup = etree.SubElement(author_element, 'sup')
#sup_link = etree.SubElement(new_sup, 'a')
#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
#sup_link.text = sup_text
#if first:
#first = False
#else:
#new_sup.text = ','
return author_element | python | def make_heading_authors(self, authors):
"""
Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter
"""
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
if first:
first = False
else:
append_new_text(author_element, ',', join_str='')
collab = author.find('collab')
anon = author.find('anon')
if collab is not None:
append_all_below(author_element, collab)
elif anon is not None: # If anonymous, just add "Anonymous"
append_new_text(author_element, 'Anonymous')
else: # Author is neither Anonymous nor a Collaboration
author_name, _ = self.get_contrib_names(author)
append_new_text(author_element, author_name)
#TODO: Handle author footnote references, also put footnotes in the ArticleInfo
#Example: journal.pbio.0040370.xml
first = True
for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
_sup = xref.find('sup')
sup_text = all_text(_sup) if _sup is not None else ''
auth_sup = etree.SubElement(author_element, 'sup')
sup_link = etree.SubElement(auth_sup,
'a',
{'href': self.main_fragment.format(xref.attrib['rid'])})
sup_link.text = sup_text
if first:
first = False
else:
append_new_text(auth_sup, ', ', join_str='')
#for xref in author.findall('xref'):
#if xref.attrs['ref-type'] in ['corresp', 'aff']:
#try:
#sup_element = xref.sup[0].node
#except IndexError:
#sup_text = ''
#else:
#sup_text = all_text(sup_element)
#new_sup = etree.SubElement(author_element, 'sup')
#sup_link = etree.SubElement(new_sup, 'a')
#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
#sup_link.text = sup_text
#if first:
#first = False
#else:
#new_sup.text = ','
return author_element | [
"def",
"make_heading_authors",
"(",
"self",
",",
"authors",
")",
":",
"author_element",
"=",
"etree",
".",
"Element",
"(",
"'h3'",
",",
"{",
"'class'",
":",
"'authors'",
"}",
")",
"#Construct content for the author element",
"first",
"=",
"True",
"for",
"author",
"in",
"authors",
":",
"if",
"first",
":",
"first",
"=",
"False",
"else",
":",
"append_new_text",
"(",
"author_element",
",",
"','",
",",
"join_str",
"=",
"''",
")",
"collab",
"=",
"author",
".",
"find",
"(",
"'collab'",
")",
"anon",
"=",
"author",
".",
"find",
"(",
"'anon'",
")",
"if",
"collab",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"author_element",
",",
"collab",
")",
"elif",
"anon",
"is",
"not",
"None",
":",
"# If anonymous, just add \"Anonymous\"",
"append_new_text",
"(",
"author_element",
",",
"'Anonymous'",
")",
"else",
":",
"# Author is neither Anonymous or a Collaboration",
"author_name",
",",
"_",
"=",
"self",
".",
"get_contrib_names",
"(",
"author",
")",
"append_new_text",
"(",
"author_element",
",",
"author_name",
")",
"#TODO: Handle author footnote references, also put footnotes in the ArticleInfo",
"#Example: journal.pbio.0040370.xml",
"first",
"=",
"True",
"for",
"xref",
"in",
"author",
".",
"xpath",
"(",
"\"./xref[@ref-type='corresp' or @ref-type='aff']\"",
")",
":",
"_sup",
"=",
"xref",
".",
"find",
"(",
"'sup'",
")",
"sup_text",
"=",
"all_text",
"(",
"_sup",
")",
"if",
"_sup",
"is",
"not",
"None",
"else",
"''",
"auth_sup",
"=",
"etree",
".",
"SubElement",
"(",
"author_element",
",",
"'sup'",
")",
"sup_link",
"=",
"etree",
".",
"SubElement",
"(",
"auth_sup",
",",
"'a'",
",",
"{",
"'href'",
":",
"self",
".",
"main_fragment",
".",
"format",
"(",
"xref",
".",
"attrib",
"[",
"'rid'",
"]",
")",
"}",
")",
"sup_link",
".",
"text",
"=",
"sup_text",
"if",
"first",
":",
"first",
"=",
"False",
"else",
":",
"append_new_text",
"(",
"auth_sup",
",",
"', '",
",",
"join_str",
"=",
"''",
")",
"#for xref in author.findall('xref'):",
"#if xref.attrs['ref-type'] in ['corresp', 'aff']:",
"#try:",
"#sup_element = xref.sup[0].node",
"#except IndexError:",
"#sup_text = ''",
"#else:",
"#sup_text = all_text(sup_element)",
"#new_sup = etree.SubElement(author_element, 'sup')",
"#sup_link = etree.SubElement(new_sup, 'a')",
"#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])",
"#sup_link.text = sup_text",
"#if first:",
"#first = False",
"#else:",
"#new_sup.text = ','",
"return",
"author_element"
] | Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter | [
"Constructs",
"the",
"Authors",
"content",
"for",
"the",
"Heading",
".",
"This",
"should",
"display",
"directly",
"after",
"the",
"Article",
"Title",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L195-L251 |
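
The first-flag separator pattern used above, in isolation: a delimiter is emitted before every author except the first.

authors = ['Doe J', 'Roe R', 'Poe E']
parts = []
first = True
for author in authors:
    if first:
        first = False
    else:
        parts.append(', ')
    parts.append(author)
print(''.join(parts))  # Doe J, Roe R, Poe E
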
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_heading_affiliations | def make_heading_affiliations(self, heading_div):
"""
Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter
"""
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
return None
else:
affs_list = etree.SubElement(heading_div,
'ul',
{'id': 'affiliations',
'class': 'simple'})
for aff in author_affs:
#Create a list item element to accept extracted content
aff_item = etree.SubElement(affs_list, 'li')
aff_item.attrib['id'] = aff.attrib['id']
#Get the first label node and the first addr-line node
label = aff.find('label')
addr_line = aff.find('addr-line')
if label is not None:
bold = etree.SubElement(aff_item, 'b')
bold.text = all_text(label) + ' '
if addr_line is not None:
append_new_text(aff_item, all_text(addr_line))
else:
append_new_text(aff_item, all_text(aff)) | python | def make_heading_affiliations(self, heading_div):
"""
Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter
"""
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
return None
else:
affs_list = etree.SubElement(heading_div,
'ul',
{'id': 'affiliations',
'class': 'simple'})
for aff in author_affs:
#Create a list item element to accept extracted content
aff_item = etree.SubElement(affs_list, 'li')
aff_item.attrib['id'] = aff.attrib['id']
#Get the first label node and the first addr-line node
label = aff.find('label')
addr_line = aff.find('addr-line')
if label is not None:
bold = etree.SubElement(aff_item, 'b')
bold.text = all_text(label) + ' '
if addr_line is not None:
append_new_text(aff_item, all_text(addr_line))
else:
append_new_text(aff_item, all_text(aff)) | [
"def",
"make_heading_affiliations",
"(",
"self",
",",
"heading_div",
")",
":",
"#Get all of the aff element tuples from the metadata",
"affs",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/aff'",
")",
"#Create a list of all those pertaining to the authors",
"author_affs",
"=",
"[",
"i",
"for",
"i",
"in",
"affs",
"if",
"'aff'",
"in",
"i",
".",
"attrib",
"[",
"'id'",
"]",
"]",
"#Count them, used for formatting",
"if",
"len",
"(",
"author_affs",
")",
"==",
"0",
":",
"return",
"None",
"else",
":",
"affs_list",
"=",
"etree",
".",
"SubElement",
"(",
"heading_div",
",",
"'ul'",
",",
"{",
"'id'",
":",
"'affiliations'",
",",
"'class'",
":",
"'simple'",
"}",
")",
"for",
"aff",
"in",
"author_affs",
":",
"#Create a span element to accept extracted content",
"aff_item",
"=",
"etree",
".",
"SubElement",
"(",
"affs_list",
",",
"'li'",
")",
"aff_item",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"aff",
".",
"attrib",
"[",
"'id'",
"]",
"#Get the first label node and the first addr-line node",
"label",
"=",
"aff",
".",
"find",
"(",
"'label'",
")",
"addr_line",
"=",
"aff",
".",
"find",
"(",
"'addr-line'",
")",
"if",
"label",
"is",
"not",
"None",
":",
"bold",
"=",
"etree",
".",
"SubElement",
"(",
"aff_item",
",",
"'b'",
")",
"bold",
".",
"text",
"=",
"all_text",
"(",
"label",
")",
"+",
"' '",
"if",
"addr_line",
"is",
"not",
"None",
":",
"append_new_text",
"(",
"aff_item",
",",
"all_text",
"(",
"addr_line",
")",
")",
"else",
":",
"append_new_text",
"(",
"aff_item",
",",
"all_text",
"(",
"aff",
")",
")"
] | Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter | [
"Makes",
"the",
"content",
"for",
"the",
"Author",
"Affiliations",
"displays",
"after",
"the",
"Authors",
"segment",
"in",
"the",
"Heading",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L253-L286 |
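
Building the affiliation list skeleton described above for one invented aff entry; lxml's tail attribute stands in for the append_new_text helper:

from lxml import etree

heading_div = etree.Element('div')
affs_list = etree.SubElement(heading_div, 'ul',
                             {'id': 'affiliations', 'class': 'simple'})
aff_item = etree.SubElement(affs_list, 'li', {'id': 'aff1'})
bold = etree.SubElement(aff_item, 'b')
bold.text = '1 '
bold.tail = 'Department of Examples, Sample University'
print(etree.tostring(heading_div))
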
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_heading_abstracts | def make_heading_abstracts(self, heading_div):
"""
An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter
"""
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
#Make a copy of the abstract
abstract_copy = deepcopy(abstract)
abstract_copy.tag = 'div'
#Abstracts are a rather diverse bunch, keep an eye on them!
title_text = abstract_copy.xpath('./title[1]/text()')
for title in abstract_copy.findall('.//title'):
remove(title)
#Create a header for the abstract
abstract_header = etree.Element('h2')
remove_all_attributes(abstract_copy)
#Set the header text and abstract id according to abstract type
abstract_type = abstract.attrib.get('abstract-type')
log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
if abstract_type == 'summary':
abstract_header.text = 'Author Summary'
abstract_copy.attrib['id'] = 'author-summary'
elif abstract_type == 'editors-summary':
abstract_header.text = 'Editors\' Summary'
abstract_copy.attrib['id'] = 'editor-summary'
elif abstract_type == 'synopsis':
abstract_header.text = 'Synopsis'
abstract_copy.attrib['id'] = 'synopsis'
elif abstract_type == 'alternate':
#Right now, these will only be included if there is a title to
#give it
if title_text:
abstract_header.text = title_text[0]
abstract_copy.attrib['id'] = 'alternate'
else:
continue
elif abstract_type is None:
abstract_header.text = 'Abstract'
abstract_copy.attrib['id'] = 'abstract'
elif abstract_type == 'toc': # We don't include these
continue
else: # Warn about these, then skip
log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
continue
#abstract_header.text = abstract_type
#abstract_copy.attrib['id'] = abstract_type
heading_div.append(abstract_header)
heading_div.append(abstract_copy) | python | def make_heading_abstracts(self, heading_div):
"""
An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter
"""
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
#Make a copy of the abstract
abstract_copy = deepcopy(abstract)
abstract_copy.tag = 'div'
#Abstracts are a rather diverse bunch, keep an eye on them!
title_text = abstract_copy.xpath('./title[1]/text()')
for title in abstract_copy.findall('.//title'):
remove(title)
#Create a header for the abstract
abstract_header = etree.Element('h2')
remove_all_attributes(abstract_copy)
#Set the header text and abstract id according to abstract type
abstract_type = abstract.attrib.get('abstract-type')
log.debug('Handling abstract with abstract-type="{0}"'.format(abstract_type))
if abstract_type == 'summary':
abstract_header.text = 'Author Summary'
abstract_copy.attrib['id'] = 'author-summary'
elif abstract_type == 'editors-summary':
abstract_header.text = 'Editors\' Summary'
abstract_copy.attrib['id'] = 'editor-summary'
elif abstract_type == 'synopsis':
abstract_header.text = 'Synopsis'
abstract_copy.attrib['id'] = 'synopsis'
elif abstract_type == 'alternate':
#Right now, these will only be included if there is a title to
#give it
if title_text:
abstract_header.text = title_text[0]
abstract_copy.attrib['id'] = 'alternate'
else:
continue
elif abstract_type is None:
abstract_header.text = 'Abstract'
abstract_copy.attrib['id'] = 'abstract'
elif abstract_type == 'toc': # We don't include these
continue
else: # Warn about these, then skip
log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
continue
#abstract_header.text = abstract_type
#abstract_copy.attrib['id'] = abstract_type
heading_div.append(abstract_header)
heading_div.append(abstract_copy) | [
"def",
"make_heading_abstracts",
"(",
"self",
",",
"heading_div",
")",
":",
"for",
"abstract",
"in",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/abstract'",
")",
":",
"#Make a copy of the abstract",
"abstract_copy",
"=",
"deepcopy",
"(",
"abstract",
")",
"abstract_copy",
".",
"tag",
"=",
"'div'",
"#Abstracts are a rather diverse bunch, keep an eye on them!",
"title_text",
"=",
"abstract_copy",
".",
"xpath",
"(",
"'./title[1]/text()'",
")",
"for",
"title",
"in",
"abstract_copy",
".",
"findall",
"(",
"'.//title'",
")",
":",
"remove",
"(",
"title",
")",
"#Create a header for the abstract",
"abstract_header",
"=",
"etree",
".",
"Element",
"(",
"'h2'",
")",
"remove_all_attributes",
"(",
"abstract_copy",
")",
"#Set the header text and abstract id according to abstract type",
"abstract_type",
"=",
"abstract",
".",
"attrib",
".",
"get",
"(",
"'abstract-type'",
")",
"log",
".",
"debug",
"(",
"'Handling Abstrace of with abstract-type=\"{0}\"'",
".",
"format",
"(",
"abstract_type",
")",
")",
"if",
"abstract_type",
"==",
"'summary'",
":",
"abstract_header",
".",
"text",
"=",
"'Author Summary'",
"abstract_copy",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'author-summary'",
"elif",
"abstract_type",
"==",
"'editors-summary'",
":",
"abstract_header",
".",
"text",
"=",
"'Editors\\' Summary'",
"abstract_copy",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'editor-summary'",
"elif",
"abstract_type",
"==",
"'synopsis'",
":",
"abstract_header",
".",
"text",
"=",
"'Synopsis'",
"abstract_copy",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'synopsis'",
"elif",
"abstract_type",
"==",
"'alternate'",
":",
"#Right now, these will only be included if there is a title to",
"#give it",
"if",
"title_text",
":",
"abstract_header",
".",
"text",
"=",
"title_text",
"[",
"0",
"]",
"abstract_copy",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'alternate'",
"else",
":",
"continue",
"elif",
"abstract_type",
"is",
"None",
":",
"abstract_header",
".",
"text",
"=",
"'Abstract'",
"abstract_copy",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'abstract'",
"elif",
"abstract_type",
"==",
"'toc'",
":",
"# We don't include these",
"continue",
"else",
":",
"# Warn about these, then skip",
"log",
".",
"warning",
"(",
"'No handling for abstract-type=\"{0}\"'",
".",
"format",
"(",
"abstract_type",
")",
")",
"continue",
"#abstract_header.text = abstract_type",
"#abstract_copy.attrib['id'] = abstract_type",
"heading_div",
".",
"append",
"(",
"abstract_header",
")",
"heading_div",
".",
"append",
"(",
"abstract_copy",
")"
] | An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter | [
"An",
"article",
"may",
"contain",
"data",
"for",
"various",
"kinds",
"of",
"abstracts",
".",
"This",
"method",
"works",
"on",
"those",
"that",
"are",
"included",
"in",
"the",
"Heading",
".",
"This",
"is",
"displayed",
"after",
"the",
"Authors",
"and",
"Affiliations",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L298-L348 |
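
The abstract-type dispatch above, reduced to a lookup table; 'toc' and unrecognized types are skipped, mirroring the record's continue branches.

headers = {'summary': ('Author Summary', 'author-summary'),
           'editors-summary': ("Editors' Summary", 'editor-summary'),
           'synopsis': ('Synopsis', 'synopsis'),
           None: ('Abstract', 'abstract')}
for abstract_type in ('summary', None, 'toc', 'mystery'):
    if abstract_type == 'toc' or abstract_type not in headers:
        continue
    header_text, div_id = headers[abstract_type]
    print(abstract_type, '->', header_text, div_id)
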
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info | def make_article_info(self):
"""
The Article Info contains the (self) Citation, Editors, Dates,
Copyright, Funding Statement, Competing Interests Statement,
Correspondence, and Footnotes. Maybe more...
This content follows the Heading and precedes the Main segment in the
output.
All generated output is inserted into the document body as new child nodes.
"""
body = self.main.getroot().find('body')
#Create a div for ArticleInfo, exposing it to linking and formatting
article_info_div = etree.Element('div', {'id': 'ArticleInfo'})
body.insert(1, article_info_div)
#Creation of the self Citation
article_info_div.append(self.make_article_info_citation())
#Creation of the Editors
editors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='editor']")
self.make_article_info_editors(editors, article_info_div)
#Creation of the important Dates segment
article_info_div.append(self.make_article_info_dates())
#Creation of the Copyright statement
self.make_article_info_copyright(article_info_div)
#Creation of the Funding statement
self.make_article_info_funding(article_info_div)
#Creation of the Competing Interests statement
self.make_article_info_competing_interests(article_info_div)
#Creation of the Correspondences (contact information) for the article
self.make_article_info_correspondences(article_info_div)
#Creation of the Footnotes (other) for the ArticleInfo
self.make_article_info_footnotes_other(article_info_div) | python | def make_article_info(self):
"""
The Article Info contains the (self) Citation, Editors, Dates,
Copyright, Funding Statement, Competing Interests Statement,
Correspondence, and Footnotes. Maybe more...
This content follows the Heading and precedes the Main segment in the
output.
All generated output is appended to a new ArticleInfo div, which is
inserted into the document body.
"""
body = self.main.getroot().find('body')
#Create a div for ArticleInfo, exposing it to linking and formatting
article_info_div = etree.Element('div', {'id': 'ArticleInfo'})
body.insert(1, article_info_div)
#Creation of the self Citation
article_info_div.append(self.make_article_info_citation())
#Creation of the Editors
editors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='editor']")
self.make_article_info_editors(editors, article_info_div)
#Creation of the important Dates segment
article_info_div.append(self.make_article_info_dates())
#Creation of the Copyright statement
self.make_article_info_copyright(article_info_div)
#Creation of the Funding statement
self.make_article_info_funding(article_info_div)
#Creation of the Competing Interests statement
self.make_article_info_competing_interests(article_info_div)
#Creation of the Correspondences (contact information) for the article
self.make_article_info_correspondences(article_info_div)
#Creation of the Footnotes (other) for the ArticleInfo
self.make_article_info_footnotes_other(article_info_div) | [
"def",
"make_article_info",
"(",
"self",
")",
":",
"body",
"=",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"find",
"(",
"'body'",
")",
"#Create a div for ArticleInfo, exposing it to linking and formatting",
"article_info_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'id'",
":",
"'ArticleInfo'",
"}",
")",
"body",
".",
"insert",
"(",
"1",
",",
"article_info_div",
")",
"#Creation of the self Citation",
"article_info_div",
".",
"append",
"(",
"self",
".",
"make_article_info_citation",
"(",
")",
")",
"#Creation of the Editors",
"editors",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"\"./front/article-meta/contrib-group/contrib[@contrib-type='editor']\"",
")",
"self",
".",
"make_article_info_editors",
"(",
"editors",
",",
"article_info_div",
")",
"#Creation of the important Dates segment",
"article_info_div",
".",
"append",
"(",
"self",
".",
"make_article_info_dates",
"(",
")",
")",
"#Creation of the Copyright statement",
"self",
".",
"make_article_info_copyright",
"(",
"article_info_div",
")",
"#Creation of the Funding statement",
"self",
".",
"make_article_info_funding",
"(",
"article_info_div",
")",
"#Creation of the Competing Interests statement",
"self",
".",
"make_article_info_competing_interests",
"(",
"article_info_div",
")",
"#Creation of the Correspondences (contact information) for the article",
"self",
".",
"make_article_info_correspondences",
"(",
"article_info_div",
")",
"#Creation of the Footnotes (other) for the ArticleInfo",
"self",
".",
"make_article_info_footnotes_other",
"(",
"article_info_div",
")"
] | The Article Info contains the (self) Citation, Editors, Dates,
Copyright, Funding Statement, Competing Interests Statement,
Correspondence, and Footnotes. Maybe more...
This content follows the Heading and precedes the Main segment in the
output.
All generated output is appended to a new ArticleInfo div, which is
inserted into the document body. | [
"The",
"Article",
"Info",
"contains",
"the",
"(",
"self",
")",
"Citation",
"Editors",
"Dates",
"Copyright",
"Funding",
"Statement",
"Competing",
"Interests",
"Statement",
"Correspondence",
"and",
"Footnotes",
".",
"Maybe",
"more",
"..."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L352-L384 |
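The insertion pattern in make_article_info is plain positional child insertion on the XHTML body. A minimal sketch, assuming lxml and an invented skeleton document:

from lxml import etree

root = etree.fromstring('<html><body><div id="Heading"/><div id="Main"/></body></html>')
body = root.find('body')
article_info_div = etree.Element('div', {'id': 'ArticleInfo'})
body.insert(1, article_info_div)  # index 1: after Heading, before Main
print(etree.tostring(root, pretty_print=True).decode())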
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_citation | def make_article_info_citation(self):
"""
Creates a self citation node for the ArticleInfo of the article.
This method uses code from this page as a reference implementation:
https://github.com/PLOS/ambra/blob/master/base/src/main/resources/articleTransform-v3.xsl
"""
citation_div = etree.Element('div', {'id': 'article-citation'})
b = etree.SubElement(citation_div, 'b')
b.text = 'Citation: '
#Add author stuff to the citation
authors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='author']")
for author in authors:
author_index = authors.index(author)
#At the 6th author, simply append an et al., then stop iterating
if author_index == 5:
append_new_text(citation_div, 'et al.', join_str='')
break
else:
#Check if the author contrib has a collab
collab = author.find('collab')
if collab is not None:
collab_copy = deepcopy(collab)
for contrib_group in collab_copy.findall('contrib_group'):
remove(contrib_group)
append_all_below(citation_div, collab, join_str='')
else: # Author element is not a collab
name = author.find('name')
#Note that this does not support eastern names
#Grab the surname information
surname = name.find('surname')
given_names = name.find('given-names')
suffix = name.find('suffix')
append_new_text(citation_div, surname.text, join_str='')
#Make initials from the given-name information
if given_names is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
#Split by whitespace and take first character
given_initials = [i[0] for i in given_names.text.split() if i]
for initial in given_initials:
append_new_text(citation_div, initial, join_str='')
#If there is a suffix, add its text, but don't include the
#trailing period if there is one
if suffix is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
suffix_text = suffix.text
#Check for the trailing period
if suffix_text[-1] == '.':
suffix_text = suffix_text[:-1]
append_new_text(citation_div, suffix_text, join_str='')
#If this is not the last author to be added, add a ", "
#This is satisfied by being less than the 6th author, or less
#than the length of the author_list - 1
if author_index < 5 or author_index < len(authors) - 1:
append_new_text(citation_div, ', ', join_str='')
#Add Publication Year to the citation
#Find pub-date elements, use pub-type=collection, or else pub-type=ppub
d = './front/article-meta/pub-date'
coll = self.article.root.xpath(d + "[@pub-type='collection']")
ppub = self.article.root.xpath(d + "[@pub-type='ppub']")
if coll:
pub_year = coll[0].find('year').text
elif ppub:
pub_year = ppub[0].find('year').text
append_new_text(citation_div, ' ({0}) '.format(pub_year), join_str='')
#Add the Article Title to the Citation
#As best as I can tell from the reference implementation, they
#serialize the article title to text-only, and expunge redundant spaces
#This might need later review
article_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title_text = serialize(article_title)
normalized = ' '.join(article_title_text.split()) # Remove redundant whitespace
#Add a period unless there is some other valid punctuation
if normalized[-1] not in '.?!':
normalized += '.'
append_new_text(citation_div, normalized + ' ', join_str='')
#Add the article's journal name using the journal-id of type "nlm-ta"
journal = self.article.root.xpath("./front/journal-meta/journal-id[@journal-id-type='nlm-ta']")
append_new_text(citation_div, journal[0].text + ' ', join_str='')
#Add the article's volume, issue, and elocation_id values
volume = self.article.root.xpath('./front/article-meta/volume')[0].text
issue = self.article.root.xpath('./front/article-meta/issue')[0].text
elocation_id = self.article.root.xpath('./front/article-meta/elocation-id')[0].text
form = '{0}({1}): {2}. '.format(volume, issue, elocation_id)
append_new_text(citation_div, form, join_str='')
append_new_text(citation_div, 'doi:{0}'.format(self.article.doi), join_str='')
return citation_div | python | def make_article_info_citation(self):
"""
Creates a self citation node for the ArticleInfo of the article.
This method uses code from this page as a reference implementation:
https://github.com/PLOS/ambra/blob/master/base/src/main/resources/articleTransform-v3.xsl
"""
citation_div = etree.Element('div', {'id': 'article-citation'})
b = etree.SubElement(citation_div, 'b')
b.text = 'Citation: '
#Add author stuff to the citation
authors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='author']")
for author in authors:
author_index = authors.index(author)
#At the 6th author, simply append an et al., then stop iterating
if author_index == 5:
append_new_text(citation_div, 'et al.', join_str='')
break
else:
#Check if the author contrib has a collab
collab = author.find('collab')
if collab is not None:
collab_copy = deepcopy(collab)
for contrib_group in collab_copy.findall('contrib_group'):
remove(contrib_group)
append_all_below(citation_div, collab, join_str='')
else: # Author element is not a collab
name = author.find('name')
#Note that this does not support eastern names
#Grab the surname information
surname = name.find('surname')
given_names = name.find('given-names')
suffix = name.find('suffix')
append_new_text(citation_div, surname.text, join_str='')
#Make initials from the given-name information
if given_names is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
#Split by whitespace and take first character
given_initials = [i[0] for i in given_names.text.split() if i]
for initial in given_initials:
append_new_text(citation_div, initial, join_str='')
#If there is a suffix, add its text, but don't include the
#trailing period if there is one
if suffix is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
suffix_text = suffix.text
#Check for the trailing period
if suffix_text[-1] == '.':
suffix_text = suffix_text[:-1]
append_new_text(citation_div, suffix_text, join_str='')
#If this is not the last author to be added, add a ", "
#This is satisfied by being less than the 6th author, or less
#than the length of the author_list - 1
if author_index < 5 or author_index < len(authors) - 1:
append_new_text(citation_div, ', ', join_str='')
#Add Publication Year to the citation
#Find pub-date elements, use pub-type=collection, or else pub-type=ppub
d = './front/article-meta/pub-date'
coll = self.article.root.xpath(d + "[@pub-type='collection']")
ppub = self.article.root.xpath(d + "[@pub-type='ppub']")
if coll:
pub_year = coll[0].find('year').text
elif ppub:
pub_year = ppub[0].find('year').text
append_new_text(citation_div, ' ({0}) '.format(pub_year), join_str='')
#Add the Article Title to the Citation
#As best as I can tell from the reference implementation, they
#serialize the article title to text-only, and expunge redundant spaces
#This might need later review
article_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title_text = serialize(article_title)
normalized = ' '.join(article_title_text.split()) # Remove redundant whitespace
#Add a period unless there is some other valid punctuation
if normalized[-1] not in '.?!':
normalized += '.'
append_new_text(citation_div, normalized + ' ', join_str='')
#Add the article's journal name using the journal-id of type "nlm-ta"
journal = self.article.root.xpath("./front/journal-meta/journal-id[@journal-id-type='nlm-ta']")
append_new_text(citation_div, journal[0].text + ' ', join_str='')
#Add the article's volume, issue, and elocation_id values
volume = self.article.root.xpath('./front/article-meta/volume')[0].text
issue = self.article.root.xpath('./front/article-meta/issue')[0].text
elocation_id = self.article.root.xpath('./front/article-meta/elocation-id')[0].text
form = '{0}({1}): {2}. '.format(volume, issue, elocation_id)
append_new_text(citation_div, form, join_str='')
append_new_text(citation_div, 'doi:{0}'.format(self.article.doi), join_str='')
return citation_div | [
"def",
"make_article_info_citation",
"(",
"self",
")",
":",
"citation_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'id'",
":",
"'article-citation'",
"}",
")",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"citation_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Citation: '",
"#Add author stuff to the citation",
"authors",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"\"./front/article-meta/contrib-group/contrib[@contrib-type='author']\"",
")",
"for",
"author",
"in",
"authors",
":",
"author_index",
"=",
"authors",
".",
"index",
"(",
"author",
")",
"#At the 6th author, simply append an et al., then stop iterating",
"if",
"author_index",
"==",
"5",
":",
"append_new_text",
"(",
"citation_div",
",",
"'et al.'",
",",
"join_str",
"=",
"''",
")",
"break",
"else",
":",
"#Check if the author contrib has a collab",
"collab",
"=",
"author",
".",
"find",
"(",
"'collab'",
")",
"if",
"collab",
"is",
"not",
"None",
":",
"collab_copy",
"=",
"deepcopy",
"(",
"collab",
")",
"for",
"contrib_group",
"in",
"collab_copy",
".",
"findall",
"(",
"'contrib_group'",
")",
":",
"remove",
"(",
"contrib_group",
")",
"append_all_below",
"(",
"citation_div",
",",
"collab",
",",
"join_str",
"=",
"''",
")",
"else",
":",
"# Author element is not a collab",
"name",
"=",
"author",
".",
"find",
"(",
"'name'",
")",
"#Note that this does not support eastern names",
"#Grab the surname information",
"surname",
"=",
"name",
".",
"find",
"(",
"'surname'",
")",
"given_names",
"=",
"name",
".",
"find",
"(",
"'given-names'",
")",
"suffix",
"=",
"name",
".",
"find",
"(",
"'suffix'",
")",
"append_new_text",
"(",
"citation_div",
",",
"surname",
".",
"text",
",",
"join_str",
"=",
"''",
")",
"#Make initials from the given-name information",
"if",
"given_names",
"is",
"not",
"None",
":",
"#Add a space",
"append_new_text",
"(",
"citation_div",
",",
"' '",
",",
"join_str",
"=",
"''",
")",
"#Split by whitespace and take first character",
"given_initials",
"=",
"[",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"given_names",
".",
"text",
".",
"split",
"(",
")",
"if",
"i",
"]",
"for",
"initial",
"in",
"given_initials",
":",
"append_new_text",
"(",
"citation_div",
",",
"initial",
",",
"join_str",
"=",
"''",
")",
"#If there is a suffix, add its text, but don't include the",
"#trailing period if there is one",
"if",
"suffix",
"is",
"not",
"None",
":",
"#Add a space",
"append_new_text",
"(",
"citation_div",
",",
"' '",
",",
"join_str",
"=",
"''",
")",
"suffix_text",
"=",
"suffix",
".",
"text",
"#Check for the trailing period",
"if",
"suffix_text",
"[",
"-",
"1",
"]",
"==",
"'.'",
":",
"suffix_text",
"=",
"suffix_text",
"[",
":",
"-",
"1",
"]",
"append_new_text",
"(",
"citation_div",
",",
"suffix_text",
",",
"join_str",
"=",
"''",
")",
"#If this is not the last author to be added, add a \", \"",
"#This is satisfied by being less than the 6th author, or less",
"#than the length of the author_list - 1",
"if",
"author_index",
"<",
"5",
"or",
"author_index",
"<",
"len",
"(",
"author_list",
")",
"-",
"1",
":",
"append_new_text",
"(",
"citation_div",
",",
"', '",
",",
"join_str",
"=",
"''",
")",
"#Add Publication Year to the citation",
"#Find pub-date elements, use pub-type=collection, or else pub-type=ppub",
"d",
"=",
"'./front/article-meta/pub-date'",
"coll",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@pub-type='collection']\"",
")",
"ppub",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@pub-type='ppub']\"",
")",
"if",
"coll",
":",
"pub_year",
"=",
"coll",
"[",
"0",
"]",
".",
"find",
"(",
"'year'",
")",
".",
"text",
"elif",
"ppub",
":",
"pub_year",
"=",
"ppub",
"[",
"0",
"]",
".",
"find",
"(",
"'year'",
")",
".",
"text",
"append_new_text",
"(",
"citation_div",
",",
"' ({0}) '",
".",
"format",
"(",
"pub_year",
")",
",",
"join_str",
"=",
"''",
")",
"#Add the Article Title to the Citation",
"#As best as I can tell from the reference implementation, they",
"#serialize the article title to text-only, and expunge redundant spaces",
"#This might need later review",
"article_title",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/title-group/article-title'",
")",
"[",
"0",
"]",
"article_title_text",
"=",
"serialize",
"(",
"article_title",
")",
"normalized",
"=",
"' '",
".",
"join",
"(",
"article_title_text",
".",
"split",
"(",
")",
")",
"# Remove redundant whitespace",
"#Add a period unless there is some other valid punctuation",
"if",
"normalized",
"[",
"-",
"1",
"]",
"not",
"in",
"'.?!'",
":",
"normalized",
"+=",
"'.'",
"append_new_text",
"(",
"citation_div",
",",
"normalized",
"+",
"' '",
",",
"join_str",
"=",
"''",
")",
"#Add the article's journal name using the journal-id of type \"nlm-ta\"",
"journal",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"\"./front/journal-meta/journal-id[@journal-id-type='nlm-ta']\"",
")",
"append_new_text",
"(",
"citation_div",
",",
"journal",
"[",
"0",
"]",
".",
"text",
"+",
"' '",
",",
"join_str",
"=",
"''",
")",
"#Add the article's volume, issue, and elocation_id values",
"volume",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/volume'",
")",
"[",
"0",
"]",
".",
"text",
"issue",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/issue'",
")",
"[",
"0",
"]",
".",
"text",
"elocation_id",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/elocation-id'",
")",
"[",
"0",
"]",
".",
"text",
"form",
"=",
"'{0}({1}): {2}. '",
".",
"format",
"(",
"volume",
",",
"issue",
",",
"elocation_id",
")",
"append_new_text",
"(",
"citation_div",
",",
"form",
",",
"join_str",
"=",
"''",
")",
"append_new_text",
"(",
"citation_div",
",",
"'doi:{0}'",
".",
"format",
"(",
"self",
".",
"article",
".",
"doi",
")",
",",
"join_str",
"=",
"''",
")",
"return",
"citation_div"
] | Creates a self citation node for the ArticleInfo of the article.
This method uses code from this page as a reference implementation:
https://github.com/PLOS/ambra/blob/master/base/src/main/resources/articleTransform-v3.xsl | [
"Creates",
"a",
"self",
"citation",
"node",
"for",
"the",
"ArticleInfo",
"of",
"the",
"article",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L386-L476 |
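The name-formatting portion of the citation builder can be isolated as a plain function. A sketch under the same Western-name assumption the code itself makes; the sample names are hypothetical:

def citation_name(surname, given_names=None, suffix=None):
    """Render 'Surname GN Suffix', using initials and a period-free suffix."""
    parts = [surname]
    if given_names:
        # Split on whitespace and keep the first character of each word
        parts.append(''.join(word[0] for word in given_names.split()))
    if suffix:
        # Drop a single trailing period, as the original does
        parts.append(suffix[:-1] if suffix.endswith('.') else suffix)
    return ' '.join(parts)

print(citation_name('Curie', 'Marie Salomea'))        # Curie MS
print(citation_name('Davis', 'Sammy', suffix='Jr.'))  # Davis S Jr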
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_dates | def make_article_info_dates(self):
"""
Makes the section containing important dates for the article: typically
Received, Accepted, and Published.
"""
dates_div = etree.Element('div', {'id': 'article-dates'})
d = './front/article-meta/history/date'
received = self.article.root.xpath(d + "[@date-type='received']")
accepted = self.article.root.xpath(d + "[@date-type='accepted']")
if received:
b = etree.SubElement(dates_div, 'b')
b.text = 'Received: '
dt = self.date_tuple_from_date(received[0], 'Received')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
if accepted:
b = etree.SubElement(dates_div, 'b')
b.text = 'Accepted: '
dt = self.date_tuple_from_date(accepted[0], 'Accepted')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
#Published date is required
pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
b = etree.SubElement(dates_div, 'b')
b.text = 'Published: '
dt = self.date_tuple_from_date(pub_date, 'Published')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string)
return dates_div | python | def make_article_info_dates(self):
"""
Makes the section containing important dates for the article: typically
Received, Accepted, and Published.
"""
dates_div = etree.Element('div', {'id': 'article-dates'})
d = './front/article-meta/history/date'
received = self.article.root.xpath(d + "[@date-type='received']")
accepted = self.article.root.xpath(d + "[@date-type='accepted']")
if received:
b = etree.SubElement(dates_div, 'b')
b.text = 'Received: '
dt = self.date_tuple_from_date(received[0], 'Received')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
if accepted:
b = etree.SubElement(dates_div, 'b')
b.text = 'Accepted: '
dt = self.date_tuple_from_date(accepted[0], 'Accepted')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
#Published date is required
pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
b = etree.SubElement(dates_div, 'b')
b.text = 'Published: '
dt = self.date_tuple_from_date(pub_date, 'Published')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string)
return dates_div | [
"def",
"make_article_info_dates",
"(",
"self",
")",
":",
"dates_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'id'",
":",
"'article-dates'",
"}",
")",
"d",
"=",
"'./front/article-meta/history/date'",
"received",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@date-type='received']\"",
")",
"accepted",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@date-type='accepted']\"",
")",
"if",
"received",
":",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Received: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"received",
"[",
"0",
"]",
",",
"'Received'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
"+",
"'; '",
")",
"if",
"accepted",
":",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Accepted: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"accepted",
"[",
"0",
"]",
",",
"'Accepted'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
"+",
"'; '",
")",
"#Published date is required",
"pub_date",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"\"./front/article-meta/pub-date[@pub-type='epub']\"",
")",
"[",
"0",
"]",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Published: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"pub_date",
",",
"'Published'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
")",
"return",
"dates_div"
] | Makes the section containing important dates for the article: typically
Received, Accepted, and Published. | [
"Makes",
"the",
"section",
"containing",
"important",
"dates",
"for",
"the",
"article",
":",
"typically",
"Received",
"Accepted",
"and",
"Published",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L524-L554 |
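date_tuple_from_date and format_date_string are defined elsewhere in the module and not shown in this excerpt; a plausible stand-in for the formatter, assuming the tuple carries integer day, month, and year values, could be:

import calendar

def format_date_string(day, month, year):
    # e.g. (3, 7, 2013) -> 'July 3, 2013'
    return '{0} {1}, {2}'.format(calendar.month_name[month], day, year)

print(format_date_string(3, 7, 2013))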
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_copyright | def make_article_info_copyright(self, article_info_div):
"""
Makes the copyright section for the ArticleInfo. For PLoS, this means
handling the information contained in the metadata <permissions>
element.
"""
perm = self.article.root.xpath('./front/article-meta/permissions')
if not perm:
return
copyright_div = etree.SubElement(article_info_div, 'div', {'id': 'copyright'})
cp_bold = etree.SubElement(copyright_div, 'b')
cp_bold.text = 'Copyright: '
copyright_string = '\u00A9 '
copyright_holder = perm[0].find('copyright-holder')
if copyright_holder is not None:
copyright_string += all_text(copyright_holder) + '. '
lic = perm[0].find('license')
if lic is not None:
copyright_string += all_text(lic.find('license-p'))
append_new_text(copyright_div, copyright_string) | python | def make_article_info_copyright(self, article_info_div):
"""
Makes the copyright section for the ArticleInfo. For PLoS, this means
handling the information contained in the metadata <permissions>
element.
"""
perm = self.article.root.xpath('./front/article-meta/permissions')
if not perm:
return
copyright_div = etree.SubElement(article_info_div, 'div', {'id': 'copyright'})
cp_bold = etree.SubElement(copyright_div, 'b')
cp_bold.text = 'Copyright: '
copyright_string = '\u00A9 '
copyright_holder = perm[0].find('copyright-holder')
if copyright_holder is not None:
copyright_string += all_text(copyright_holder) + '. '
lic = perm[0].find('license')
if lic is not None:
copyright_string += all_text(lic.find('license-p'))
append_new_text(copyright_div, copyright_string) | [
"def",
"make_article_info_copyright",
"(",
"self",
",",
"article_info_div",
")",
":",
"perm",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/permissions'",
")",
"if",
"not",
"perm",
":",
"return",
"copyright_div",
"=",
"etree",
".",
"SubElement",
"(",
"article_info_div",
",",
"'div'",
",",
"{",
"'id'",
":",
"'copyright'",
"}",
")",
"cp_bold",
"=",
"etree",
".",
"SubElement",
"(",
"copyright_div",
",",
"'b'",
")",
"cp_bold",
".",
"text",
"=",
"'Copyright: '",
"copyright_string",
"=",
"'\\u00A9 '",
"copyright_holder",
"=",
"perm",
"[",
"0",
"]",
".",
"find",
"(",
"'copyright-holder'",
")",
"if",
"copyright_holder",
"is",
"not",
"None",
":",
"copyright_string",
"+=",
"all_text",
"(",
"copyright_holder",
")",
"+",
"'. '",
"lic",
"=",
"perm",
"[",
"0",
"]",
".",
"find",
"(",
"'license'",
")",
"if",
"lic",
"is",
"not",
"None",
":",
"copyright_string",
"+=",
"all_text",
"(",
"lic",
".",
"find",
"(",
"'license-p'",
")",
")",
"append_new_text",
"(",
"copyright_div",
",",
"copyright_string",
")"
] | Makes the copyright section for the ArticleInfo. For PLoS, this means
handling the information contained in the metadata <permissions>
element. | [
"Makes",
"the",
"copyright",
"section",
"for",
"the",
"ArticleInfo",
".",
"For",
"PLoS",
"this",
"means",
"handling",
"the",
"information",
"contained",
"in",
"the",
"metadata",
"<permissions",
">",
"element",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L556-L575 |
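The copyright assembly is string concatenation over two optional children of <permissions>. A self-contained sketch with an invented JATS fragment, using plain .text where the original relies on an all_text() helper that gathers descendant text:

from lxml import etree

perm = etree.fromstring(
    '<permissions>'
    '<copyright-holder>Smith et al</copyright-holder>'
    '<license><license-p>This is an open-access article.</license-p></license>'
    '</permissions>')

copyright_string = '\u00A9 '
holder = perm.find('copyright-holder')
if holder is not None:
    copyright_string += holder.text + '. '
lic = perm.find('license')
if lic is not None:
    copyright_string += lic.find('license-p').text
print(copyright_string)  # © Smith et al. This is an open-access article.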
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_funding | def make_article_info_funding(self, article_info_div):
"""
Creates the element for declaring Funding in the article info.
"""
funding_group = self.article.root.xpath('./front/article-meta/funding-group')
if funding_group:
funding_div = etree.SubElement(article_info_div,
'div',
{'id': 'funding'})
funding_b = etree.SubElement(funding_div, 'b')
funding_b.text = 'Funding: '
#As far as I can tell, PLoS only uses one funding-statement
funding_statement = funding_group[0].find('funding-statement')
append_all_below(funding_div, funding_statement) | python | def make_article_info_funding(self, article_info_div):
"""
Creates the element for declaring Funding in the article info.
"""
funding_group = self.article.root.xpath('./front/article-meta/funding-group')
if funding_group:
funding_div = etree.SubElement(article_info_div,
'div',
{'id': 'funding'})
funding_b = etree.SubElement(funding_div, 'b')
funding_b.text = 'Funding: '
#As far as I can tell, PLoS only uses one funding-statement
funding_statement = funding_group[0].find('funding-statement')
append_all_below(funding_div, funding_statement) | [
"def",
"make_article_info_funding",
"(",
"self",
",",
"article_info_div",
")",
":",
"funding_group",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/funding-group'",
")",
"if",
"funding_group",
":",
"funding_div",
"=",
"etree",
".",
"SubElement",
"(",
"article_info_div",
",",
"'div'",
",",
"{",
"'id'",
":",
"'funding'",
"}",
")",
"funding_b",
"=",
"etree",
".",
"SubElement",
"(",
"funding_div",
",",
"'b'",
")",
"funding_b",
".",
"text",
"=",
"'Funding: '",
"#As far as I can tell, PLoS only uses one funding-statement",
"funding_statement",
"=",
"funding_group",
"[",
"0",
"]",
".",
"find",
"(",
"'funding-statement'",
")",
"append_all_below",
"(",
"funding_div",
",",
"funding_statement",
")"
] | Creates the element for declaring Funding in the article info. | [
"Creates",
"the",
"element",
"for",
"declaring",
"Funding",
"in",
"the",
"article",
"info",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L577-L590 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_competing_interests | def make_article_info_competing_interests(self, article_info_div):
"""
Creates the element for declaring competing interests in the article
info.
"""
#Check for author-notes
con_expr = "./front/article-meta/author-notes/fn[@fn-type='conflict']"
conflict = self.article.root.xpath(con_expr)
if not conflict:
return
conflict_div = etree.SubElement(article_info_div,
'div',
{'id': 'conflict'})
b = etree.SubElement(conflict_div, 'b')
b.text = 'Competing Interests: '
fn_p = conflict[0].find('p')
if fn_p is not None:
append_all_below(conflict_div, fn_p) | python | def make_article_info_competing_interests(self, article_info_div):
"""
Creates the element for declaring competing interests in the article
info.
"""
#Check for author-notes
con_expr = "./front/article-meta/author-notes/fn[@fn-type='conflict']"
conflict = self.article.root.xpath(con_expr)
if not conflict:
return
conflict_div = etree.SubElement(article_info_div,
'div',
{'id': 'conflict'})
b = etree.SubElement(conflict_div, 'b')
b.text = 'Competing Interests: '
fn_p = conflict[0].find('p')
if fn_p is not None:
append_all_below(conflict_div, fn_p) | [
"def",
"make_article_info_competing_interests",
"(",
"self",
",",
"article_info_div",
")",
":",
"#Check for author-notes",
"con_expr",
"=",
"\"./front/article-meta/author-notes/fn[@fn-type='conflict']\"",
"conflict",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"con_expr",
")",
"if",
"not",
"conflict",
":",
"return",
"conflict_div",
"=",
"etree",
".",
"SubElement",
"(",
"article_info_div",
",",
"'div'",
",",
"{",
"'id'",
":",
"'conflict'",
"}",
")",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"conflict_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Competing Interests: '",
"fn_p",
"=",
"conflict",
"[",
"0",
"]",
".",
"find",
"(",
"'p'",
")",
"if",
"fn_p",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"conflict_div",
",",
"fn_p",
")"
] | Creates the element for declaring competing interests in the article
info. | [
"Creates",
"the",
"element",
"for",
"declaring",
"competing",
"interests",
"in",
"the",
"article",
"info",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L592-L609 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_correspondences | def make_article_info_correspondences(self, article_info_div):
"""
Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content.
"""
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
corresp_div = etree.SubElement(article_info_div,
'div',
{'id': 'correspondence'})
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
'div',
{'id': corresp.attrib['id']})
append_all_below(sub_div, corresp) | python | def make_article_info_correspondences(self, article_info_div):
"""
Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content.
"""
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
corresp_div = etree.SubElement(article_info_div,
'div',
{'id': 'correspondence'})
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
'div',
{'id': corresp.attrib['id']})
append_all_below(sub_div, corresp) | [
"def",
"make_article_info_correspondences",
"(",
"self",
",",
"article_info_div",
")",
":",
"corresps",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./front/article-meta/author-notes/corresp'",
")",
"if",
"corresps",
":",
"corresp_div",
"=",
"etree",
".",
"SubElement",
"(",
"article_info_div",
",",
"'div'",
",",
"{",
"'id'",
":",
"'correspondence'",
"}",
")",
"for",
"corresp",
"in",
"corresps",
":",
"sub_div",
"=",
"etree",
".",
"SubElement",
"(",
"corresp_div",
",",
"'div'",
",",
"{",
"'id'",
":",
"corresp",
".",
"attrib",
"[",
"'id'",
"]",
"}",
")",
"append_all_below",
"(",
"sub_div",
",",
"corresp",
")"
] | Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content. | [
"Articles",
"generally",
"provide",
"a",
"first",
"contact",
"typically",
"an",
"email",
"address",
"for",
"one",
"of",
"the",
"authors",
".",
"This",
"will",
"supply",
"that",
"content",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L611-L625 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_footnotes_other | def make_article_info_footnotes_other(self, article_info_div):
"""
This will catch all of the footnotes of type 'other' in the <fn-group>
of the <back> element.
"""
other_fn_expr = "./back/fn-group/fn[@fn-type='other']"
other_fns = self.article.root.xpath(other_fn_expr)
if other_fns:
other_fn_div = etree.SubElement(article_info_div,
'div',
{'class': 'back-fn-other'})
for other_fn in other_fns:
append_all_below(other_fn_div, other_fn) | python | def make_article_info_footnotes_other(self, article_info_div):
"""
This will catch all of the footnotes of type 'other' in the <fn-group>
of the <back> element.
"""
other_fn_expr = "./back/fn-group/fn[@fn-type='other']"
other_fns = self.article.root.xpath(other_fn_expr)
if other_fns:
other_fn_div = etree.SubElement(article_info_div,
'div',
{'class': 'back-fn-other'})
for other_fn in other_fns:
append_all_below(other_fn_div, other_fn) | [
"def",
"make_article_info_footnotes_other",
"(",
"self",
",",
"article_info_div",
")",
":",
"other_fn_expr",
"=",
"\"./back/fn-group/fn[@fn-type='other']\"",
"other_fns",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"other_fn_expr",
")",
"if",
"other_fns",
":",
"other_fn_div",
"=",
"etree",
".",
"SubElement",
"(",
"article_info_div",
",",
"'div'",
",",
"{",
"'class'",
":",
"'back-fn-other'",
"}",
")",
"for",
"other_fn",
"in",
"other_fns",
":",
"append_all_below",
"(",
"other_fn_div",
",",
"other_fn",
")"
] | This will catch all of the footnotes of type 'other' in the <fn-group>
of the <back> element. | [
"This",
"will",
"catch",
"all",
"of",
"the",
"footnotes",
"of",
"type",
"other",
"in",
"the",
"<fn",
"-",
"group",
">",
"of",
"the",
"<back",
">",
"element",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L627-L639 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_back_matter | def make_back_matter(self):
"""
The <back> element may have 0 or 1 <label> elements and 0 or 1 <title>
elements. Then it may have any combination of the following: <ack>,
<app-group>, <bio>, <fn-group>, <glossary>, <ref-list>, <notes>, and
<sec>. <sec> is employed here as a catch-all for material that does not
fall under the other categories.
The Back should generally be thought of as a non-linear element, though
some of its content will be parsed to the linear flow of the document.
This can be thought of as critically important meta-information that
should accompany the main text (e.g. Acknowledgments and Contributions)
Because the content of <back> contains a set of tags that intersects
with that of the Body, this method should always be called before the
general post-processing steps; keep in mind that this is also the
opportunity to permit special handling of content in the Back
"""
#Back is technically metadata content that needs to be interpreted to
#presentable content
body = self.main.getroot().find('body')
if self.article.root.find('back') is None:
return
#The following things are ordered in such a way to adhere to what
#appears to be a consistent presentation order for PLoS
#Acknowledgments
back_ack = self.make_back_acknowledgments()
if back_ack is not None:
body.append(back_ack)
#Author Contributions
self.make_back_author_contributions(body)
#Glossaries
self.make_back_glossary(body)
#Notes
self.make_back_notes(body) | python | def make_back_matter(self):
"""
The <back> element may have 0 or 1 <label> elements and 0 or 1 <title>
elements. Then it may have any combination of the following: <ack>,
<app-group>, <bio>, <fn-group>, <glossary>, <ref-list>, <notes>, and
<sec>. <sec> is employed here as a catch-all for material that does not
fall under the other categories.
The Back should generally be thought of as a non-linear element, though
some of its content will be parsed to the linear flow of the document.
This can be thought of as critically important meta-information that
should accompany the main text (e.g. Acknowledgments and Contributions)
Because the content of <back> contains a set of tags that intersects
with that of the Body, this method should always be called before the
general post-processing steps; keep in mind that this is also the
opportunity to permit special handling of content in the Back
"""
#Back is technically metadata content that needs to be interpreted to
#presentable content
body = self.main.getroot().find('body')
if self.article.root.find('back') is None:
return
#The following things are ordered in such a way to adhere to what
#appears to be a consistent presentation order for PLoS
#Acknowledgments
back_ack = self.make_back_acknowledgments()
if back_ack is not None:
body.append(back_ack)
#Author Contributions
self.make_back_author_contributions(body)
#Glossaries
self.make_back_glossary(body)
#Notes
self.make_back_notes(body) | [
"def",
"make_back_matter",
"(",
"self",
")",
":",
"#Back is technically metadata content that needs to be interpreted to",
"#presentable content",
"body",
"=",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"find",
"(",
"'body'",
")",
"if",
"self",
".",
"article",
".",
"root",
".",
"find",
"(",
"'back'",
")",
"is",
"None",
":",
"return",
"#The following things are ordered in such a way to adhere to what",
"#appears to be a consistent presentation order for PLoS",
"#Acknowledgments",
"back_ack",
"=",
"self",
".",
"make_back_acknowledgments",
"(",
")",
"if",
"back_ack",
"is",
"not",
"None",
":",
"body",
".",
"append",
"(",
"back_ack",
")",
"#Author Contributions",
"self",
".",
"make_back_author_contributions",
"(",
"body",
")",
"#Glossaries",
"self",
".",
"make_back_glossary",
"(",
"body",
")",
"#Notes",
"self",
".",
"make_back_notes",
"(",
"body",
")"
] | The <back> element may have 0 or 1 <label> elements and 0 or 1 <title>
elements. Then it may have any combination of the following: <ack>,
<app-group>, <bio>, <fn-group>, <glossary>, <ref-list>, <notes>, and
<sec>. <sec> is employed here as a catch-all for material that does not
fall under the other categories.
The Back should generally be thought of as a non-linear element, though
some of its content will be parsed to the linear flow of the document.
This can be thought of as critically important meta-information that
should accompany the main text (e.g. Acknowledgments and Contributions)
Because the content of <back> contains a set of tags that intersects
with that of the Body, this method should always be called before the
general post-processing steps; keep in mind that this is also the
opportunity to permit special handling of content in the Back | [
"The",
"<back",
">",
"element",
"may",
"have",
"0",
"or",
"1",
"<label",
">",
"elements",
"and",
"0",
"or",
"1",
"<title",
">",
"elements",
".",
"Then",
"it",
"may",
"have",
"any",
"combination",
"of",
"the",
"following",
":",
"<ack",
">",
"<app",
"-",
"group",
">",
"<bio",
">",
"<fn",
"-",
"group",
">",
"<glossary",
">",
"<ref",
"-",
"list",
">",
"<notes",
">",
"and",
"<sec",
">",
".",
"<sec",
">",
"is",
"employed",
"here",
"as",
"a",
"catch",
"-",
"all",
"for",
"material",
"that",
"does",
"not",
"fall",
"under",
"the",
"other",
"categories",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L643-L678 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.move_back_boxed_texts | def move_back_boxed_texts(self):
"""
The only intended use for this function is to patch a problem seen in
at least one PLoS article (journal.pgen.0020002). This will move any
<boxed-text> elements over to the receiving element, which is probably
the main body.
"""
body = self.main.getroot().find('body')
back = self.article.root.find('back')
if back is None:
return
boxed_texts = back.xpath('.//boxed-text')
for boxed_text in boxed_texts:
body.append(deepcopy(boxed_text)) | python | def move_back_boxed_texts(self):
"""
The only intended use for this function is to patch a problem seen in
at least one PLoS article (journal.pgen.0020002). This will move any
<boxed-text> elements over to the receiving element, which is probably
the main body.
"""
body = self.main.getroot().find('body')
back = self.article.root.find('back')
if back is None:
return
boxed_texts = back.xpath('.//boxed-text')
for boxed_text in boxed_texts:
body.append(deepcopy(boxed_text)) | [
"def",
"move_back_boxed_texts",
"(",
"self",
")",
":",
"body",
"=",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"find",
"(",
"'body'",
")",
"back",
"=",
"self",
".",
"article",
".",
"root",
".",
"find",
"(",
"'back'",
")",
"if",
"back",
"is",
"None",
":",
"return",
"boxed_texts",
"=",
"back",
".",
"xpath",
"(",
"'.//boxed-text'",
")",
"for",
"boxed_text",
"in",
"boxed_texts",
":",
"body",
".",
"append",
"(",
"deepcopy",
"(",
"boxed_text",
")",
")"
] | The only intended use for this function is to patch a problem seen in
at least one PLoS article (journal.pgen.0020002). This will move any
<boxed-text> elements over to the receiving element, which is probably
the main body. | [
"The",
"only",
"intended",
"use",
"for",
"this",
"function",
"is",
"to",
"patch",
"a",
"problem",
"seen",
"in",
"at",
"least",
"one",
"PLoS",
"article",
"(",
"journal",
".",
"pgen",
".",
"0020002",
")",
".",
"This",
"will",
"move",
"any",
"<boxed",
"-",
"text",
">",
"elements",
"over",
"to",
"the",
"receiving",
"element",
"which",
"is",
"probably",
"the",
"main",
"body",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L682-L695 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_back_acknowledgments | def make_back_acknowledgments(self):
"""
The <ack> is an important piece of back matter information, and will be
included immediately after the main text.
This element should occur at most once for PLoS; if a need becomes
known, multiple instances may be supported.
"""
acks = self.article.root.xpath('./back/ack')
if not acks:
return
ack = deepcopy(acks[0])
#Modify the tag to div
ack.tag = 'div'
#Give it an id
ack.attrib['id'] = 'acknowledgments'
#Give it a title element--this is not an EPUB element but doing so will
#allow it to later be depth-formatted by self.convert_div_titles()
ack_title = etree.Element('title')
ack_title.text = 'Acknowledgments'
ack.insert(0, ack_title) # Make it the first element
return ack | python | def make_back_acknowledgments(self):
"""
The <ack> is an important piece of back matter information, and will be
included immediately after the main text.
This element should occur at most once for PLoS; if a need becomes
known, multiple instances may be supported.
"""
acks = self.article.root.xpath('./back/ack')
if not acks:
return
ack = deepcopy(acks[0])
#Modify the tag to div
ack.tag = 'div'
#Give it an id
ack.attrib['id'] = 'acknowledgments'
#Give it a title element--this is not an EPUB element but doing so will
#allow it to later be depth-formatted by self.convert_div_titles()
ack_title = etree.Element('title')
ack_title.text = 'Acknowledgments'
ack.insert(0, ack_title) # Make it the first element
return ack | [
"def",
"make_back_acknowledgments",
"(",
"self",
")",
":",
"acks",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./back/ack'",
")",
"if",
"not",
"acks",
":",
"return",
"ack",
"=",
"deepcopy",
"(",
"acks",
"[",
"0",
"]",
")",
"#Modify the tag to div",
"ack",
".",
"tag",
"=",
"'div'",
"#Give it an id",
"ack",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'acknowledgments'",
"#Give it a title element--this is not an EPUB element but doing so will",
"#allow it to later be depth-formatted by self.convert_div_titles()",
"ack_title",
"=",
"etree",
".",
"Element",
"(",
"'title'",
")",
"ack_title",
".",
"text",
"=",
"'Acknowledgments'",
"ack",
".",
"insert",
"(",
"0",
",",
"ack_title",
")",
"# Make it the first element",
"return",
"ack"
] | The <ack> is an important piece of back matter information, and will be
included immediately after the main text.
This element should occur at most once for PLoS; if a need becomes
known, multiple instances may be supported. | [
"The",
"<ack",
">",
"is",
"an",
"important",
"piece",
"of",
"back",
"matter",
"information",
"and",
"will",
"be",
"including",
"immediately",
"after",
"the",
"main",
"text",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L697-L718 |
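Re-tagging an element in place, as done for <ack> above, works because lxml element tags are writable; a sketch with an invented fragment:

from lxml import etree

ack = etree.fromstring('<ack><p>We thank the reviewers.</p></ack>')
ack.tag = 'div'                       # <ack> becomes <div> in place
ack.attrib['id'] = 'acknowledgments'
title = etree.Element('title')
title.text = 'Acknowledgments'
ack.insert(0, title)                  # title becomes the first child
print(etree.tostring(ack).decode())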
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_back_author_contributions | def make_back_author_contributions(self, body):
"""
Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests()
"""
cont_expr = "./front/article-meta/author-notes/fn[@fn-type='con']"
contribution = self.article.root.xpath(cont_expr)
if contribution:
author_contrib = deepcopy(contribution[0])
remove_all_attributes(author_contrib)
author_contrib.tag = 'div'
author_contrib.attrib['id'] = 'author-contributions'
#This title element will be parsed later
title = etree.Element('title')
title.text = 'Author Contributions'
author_contrib.insert(0, title)
body.append(author_contrib) | python | def make_back_author_contributions(self, body):
"""
Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests()
"""
cont_expr = "./front/article-meta/author-notes/fn[@fn-type='con']"
contribution = self.article.root.xpath(cont_expr)
if contribution:
author_contrib = deepcopy(contribution[0])
remove_all_attributes(author_contrib)
author_contrib.tag = 'div'
author_contrib.attrib['id'] = 'author-contributions'
#This title element will be parsed later
title = etree.Element('title')
title.text = 'Author Contributions'
author_contrib.insert(0, title)
body.append(author_contrib) | [
"def",
"make_back_author_contributions",
"(",
"self",
",",
"body",
")",
":",
"cont_expr",
"=",
"\"./front/article-meta/author-notes/fn[@fn-type='con']\"",
"contribution",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"cont_expr",
")",
"if",
"contribution",
":",
"author_contrib",
"=",
"deepcopy",
"(",
"contribution",
"[",
"0",
"]",
")",
"remove_all_attributes",
"(",
"author_contrib",
")",
"author_contrib",
".",
"tag",
"=",
"'div'",
"author_contrib",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'author-contributions'",
"#This title element will be parsed later",
"title",
"=",
"etree",
".",
"Element",
"(",
"'title'",
")",
"title",
".",
"text",
"=",
"'Author Contributions'",
"author_contrib",
".",
"insert",
"(",
"0",
",",
"title",
")",
"body",
".",
"append",
"(",
"author_contrib",
")"
] | Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests() | [
"Though",
"this",
"goes",
"in",
"the",
"back",
"of",
"the",
"document",
"with",
"the",
"rest",
"of",
"the",
"back",
"matter",
"it",
"is",
"not",
"an",
"element",
"found",
"under",
"<back",
">",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L720-L739 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_back_glossary | def make_back_glossary(self, body):
"""
Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can.
"""
for glossary in self.article.root.xpath('./back/glossary'):
gloss_copy = deepcopy(glossary)
gloss_copy.tag = 'div'
gloss_copy.attrib['class'] = 'back-glossary'
body.append(gloss_copy) | python | def make_back_glossary(self, body):
"""
Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can.
"""
for glossary in self.article.root.xpath('./back/glossary'):
gloss_copy = deepcopy(glossary)
gloss_copy.tag = 'div'
gloss_copy.attrib['class'] = 'back-glossary'
body.append(gloss_copy) | [
"def",
"make_back_glossary",
"(",
"self",
",",
"body",
")",
":",
"for",
"glossary",
"in",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./back/glossary'",
")",
":",
"gloss_copy",
"=",
"deepcopy",
"(",
"glossary",
")",
"gloss_copy",
".",
"tag",
"=",
"'div'",
"gloss_copy",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'back-glossary'",
"body",
".",
"append",
"(",
"gloss_copy",
")"
] | Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can. | [
"Glossaries",
"are",
"a",
"fairly",
"common",
"item",
"in",
"papers",
"for",
"PLoS",
"but",
"it",
"also",
"seems",
"that",
"they",
"are",
"rarely",
"incorporated",
"into",
"the",
"PLoS",
"web",
"-",
"site",
"or",
"PDF",
"formats",
".",
"They",
"are",
"included",
"in",
"the",
"ePub",
"output",
"however",
"because",
"they",
"are",
"helpful",
"and",
"because",
"we",
"can",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L741-L752 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_back_notes | def make_back_notes(self, body):
"""
The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p>
"""
for notes in self.article.root.xpath('./back/notes'):
notes_sec = deepcopy(notes.find('sec'))
notes_sec.tag = 'div'
notes_sec.attrib['class'] = 'back-notes'
body.append(notes_sec) | python | def make_back_notes(self, body):
"""
The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p>
"""
for notes in self.article.root.xpath('./back/notes'):
notes_sec = deepcopy(notes.find('sec'))
notes_sec.tag = 'div'
notes_sec.attrib['class'] = 'back-notes'
body.append(notes_sec) | [
"def",
"make_back_notes",
"(",
"self",
",",
"body",
")",
":",
"for",
"notes",
"in",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"'./back/notes'",
")",
":",
"notes_sec",
"=",
"deepcopy",
"(",
"notes",
".",
"find",
"(",
"'sec'",
")",
")",
"notes_sec",
".",
"tag",
"=",
"'div'",
"notes_sec",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'back-notes'",
"body",
".",
"append",
"(",
"notes_sec",
")"
] | The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p> | [
"The",
"notes",
"element",
"in",
"PLoS",
"articles",
"can",
"be",
"employed",
"for",
"posting",
"notices",
"of",
"corrections",
"or",
"adjustments",
"in",
"proof",
".",
"The",
"<notes",
">",
"element",
"has",
"a",
"very",
"diverse",
"content",
"model",
"but",
"PLoS",
"practice",
"appears",
"to",
"be",
"fairly",
"consistent",
":",
"a",
"single",
"<sec",
">",
"containing",
"a",
"<title",
">",
"and",
"a",
"<p",
">"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L754-L765 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_disp_formula_elements | def convert_disp_formula_elements(self):
"""
<disp-formula> elements must be converted to conforming elements
"""
for disp in self.main.getroot().findall('.//disp-formula'):
#find label element
label_el = disp.find('label')
graphic_el = disp.find('graphic')
if graphic_el is None: # No graphic, assume math as text instead
text_span = etree.Element('span', {'class': 'disp-formula'})
if 'id' in disp.attrib:
text_span.attrib['id'] = disp.attrib['id']
append_all_below(text_span, disp)
#Insert the text span before the disp-formula
insert_before(disp, text_span)
#If a label exists, modify and insert before text_span
if label_el is not None:
label_el.tag = 'b'
insert_before(text_span, label_el)
#Remove the disp-formula
remove(disp)
#Skip the rest, which deals with the graphic element
continue
#The graphic element is present
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the img element
img_element = etree.Element('img', {'alt': 'A Display Formula',
'class': 'disp-formula',
'src': img_path})
#Transfer the id attribute
if 'id' in disp.attrib:
img_element.attrib['id'] = disp.attrib['id']
#Insert the img element
insert_before(disp, img_element)
#Create content for the label
if label_el is not None:
label_el.tag = 'b'
insert_before(img_element, label_el)
#Remove the old disp-formula element
remove(disp) | python | def convert_disp_formula_elements(self):
"""
<disp-formula> elements must be converted to conforming elements
"""
for disp in self.main.getroot().findall('.//disp-formula'):
#find label element
label_el = disp.find('label')
graphic_el = disp.find('graphic')
if graphic_el is None: # No graphic, assume math as text instead
text_span = etree.Element('span', {'class': 'disp-formula'})
if 'id' in disp.attrib:
text_span.attrib['id'] = disp.attrib['id']
append_all_below(text_span, disp)
#Insert the text span before the disp-formula
insert_before(disp, text_span)
#If a label exists, modify and insert before text_span
if label_el is not None:
label_el.tag = 'b'
insert_before(text_span, label_el)
#Remove the disp-formula
remove(disp)
#Skip the rest, which deals with the graphic element
continue
#The graphic element is present
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the img element
img_element = etree.Element('img', {'alt': 'A Display Formula',
'class': 'disp-formula',
'src': img_path})
#Transfer the id attribute
if 'id' in disp.attrib:
img_element.attrib['id'] = disp.attrib['id']
#Insert the img element
insert_before(disp, img_element)
#Create content for the label
if label_el is not None:
label_el.tag = 'b'
insert_before(img_element, label_el)
#Remove the old disp-formula element
remove(disp) | [
"def",
"convert_disp_formula_elements",
"(",
"self",
")",
":",
"for",
"disp",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//disp-formula'",
")",
":",
"#find label element",
"label_el",
"=",
"disp",
".",
"find",
"(",
"'label'",
")",
"graphic_el",
"=",
"disp",
".",
"find",
"(",
"'graphic'",
")",
"if",
"graphic_el",
"is",
"None",
":",
"# No graphic, assume math as text instead",
"text_span",
"=",
"etree",
".",
"Element",
"(",
"'span'",
",",
"{",
"'class'",
":",
"'disp-formula'",
"}",
")",
"if",
"'id'",
"in",
"disp",
".",
"attrib",
":",
"text_span",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"disp",
".",
"attrib",
"[",
"'id'",
"]",
"append_all_below",
"(",
"text_span",
",",
"disp",
")",
"#Insert the text span before the disp-formula",
"insert_before",
"(",
"disp",
",",
"text_span",
")",
"#If a label exists, modify and insert before text_span",
"if",
"label_el",
"is",
"not",
"None",
":",
"label_el",
".",
"tag",
"=",
"'b'",
"insert_before",
"(",
"text_span",
",",
"label_el",
")",
"#Remove the disp-formula",
"remove",
"(",
"disp",
")",
"#Skip the rest, which deals with the graphic element",
"continue",
"#The graphic element is present",
"#Create a file reference for the image",
"xlink_href",
"=",
"ns_format",
"(",
"graphic_el",
",",
"'xlink:href'",
")",
"graphic_xlink_href",
"=",
"graphic_el",
".",
"attrib",
"[",
"xlink_href",
"]",
"file_name",
"=",
"graphic_xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"#Create the img element",
"img_element",
"=",
"etree",
".",
"Element",
"(",
"'img'",
",",
"{",
"'alt'",
":",
"'A Display Formula'",
",",
"'class'",
":",
"'disp-formula'",
",",
"'src'",
":",
"img_path",
"}",
")",
"#Transfer the id attribute",
"if",
"'id'",
"in",
"disp",
".",
"attrib",
":",
"img_element",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"disp",
".",
"attrib",
"[",
"'id'",
"]",
"#Insert the img element",
"insert_before",
"(",
"disp",
",",
"img_element",
")",
"#Create content for the label",
"if",
"label_el",
"is",
"not",
"None",
":",
"label_el",
".",
"tag",
"=",
"'b'",
"insert_before",
"(",
"img_element",
",",
"label_el",
")",
"#Remove the old disp-formula element",
"remove",
"(",
"disp",
")"
] | <disp-formula> elements must be converted to conforming elements | [
"<disp",
"-",
"formula",
">",
"elements",
"must",
"be",
"converted",
"to",
"conforming",
"elements"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L769-L814 |
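Editor's note: a minimal, self-contained sketch (not part of the dataset row) of the disp-formula conversion above on a tiny tree. The DOI, the 'images-pone.0012345' directory (standing in for 'images-' + self.doi_suffix()), and the use of addprevious/getparent-remove in place of the repo's insert_before/remove helpers are all assumptions for illustration.

from lxml import etree

XLINK_HREF = '{http://www.w3.org/1999/xlink}href'

body = etree.fromstring(
    '<body xmlns:xlink="http://www.w3.org/1999/xlink">'
    '<disp-formula id="eq1">'
    '<graphic xlink:href="info:doi/10.1371/journal.pone.0012345.e001"/>'
    '</disp-formula></body>')
disp = body.find('disp-formula')

#Derive the image file name exactly as the row's code does
href = disp.find('graphic').attrib[XLINK_HREF]
file_name = href.split('.')[-1] + '.png'          # -> 'e001.png'
img = etree.Element('img', {'alt': 'A Display Formula',
                            'class': 'disp-formula',
                            'src': 'images-pone.0012345/' + file_name,
                            'id': disp.attrib['id']})
disp.addprevious(img)   # stands in for insert_before(disp, img)
body.remove(disp)       # stands in for remove(disp)
print(etree.tostring(body).decode())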
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_inline_formula_elements | def convert_inline_formula_elements(self):
"""
<inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both.
"""
for inline in self.main.getroot().findall('.//inline-formula'):
#inline-formula elements will be modified in situ
remove_all_attributes(inline)
inline.tag = 'span'
inline.attrib['class'] = 'inline-formula'
inline_graphic = inline.find('inline-graphic')
if inline_graphic is None:
# Do nothing more if there is no graphic
continue
#Need to convert the inline-graphic element to an img element
inline_graphic.tag = 'img'
#Get a copy of the attributes, then remove them
inline_graphic_attributes = copy(inline_graphic.attrib)
remove_all_attributes(inline_graphic)
#Create a file reference for the image
xlink_href = ns_format(inline_graphic, 'xlink:href')
graphic_xlink_href = inline_graphic_attributes[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Set the source to the image path
inline_graphic.attrib['src'] = img_path
inline_graphic.attrib['class'] = 'inline-formula'
inline_graphic.attrib['alt'] = 'An Inline Formula' | python | def convert_inline_formula_elements(self):
"""
<inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both.
"""
for inline in self.main.getroot().findall('.//inline-formula'):
#inline-formula elements will be modified in situ
remove_all_attributes(inline)
inline.tag = 'span'
inline.attrib['class'] = 'inline-formula'
inline_graphic = inline.find('inline-graphic')
if inline_graphic is None:
# Do nothing more if there is no graphic
continue
#Need to convert the inline-graphic element to an img element
inline_graphic.tag = 'img'
#Get a copy of the attributes, then remove them
inline_graphic_attributes = copy(inline_graphic.attrib)
remove_all_attributes(inline_graphic)
#Create a file reference for the image
xlink_href = ns_format(inline_graphic, 'xlink:href')
graphic_xlink_href = inline_graphic_attributes[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Set the source to the image path
inline_graphic.attrib['src'] = img_path
inline_graphic.attrib['class'] = 'inline-formula'
inline_graphic.attrib['alt'] = 'An Inline Formula' | [
"def",
"convert_inline_formula_elements",
"(",
"self",
")",
":",
"for",
"inline",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//inline-formula'",
")",
":",
"#inline-formula elements will be modified in situ",
"remove_all_attributes",
"(",
"inline",
")",
"inline",
".",
"tag",
"=",
"'span'",
"inline",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'inline-formula'",
"inline_graphic",
"=",
"inline",
".",
"find",
"(",
"'inline-graphic'",
")",
"if",
"inline_graphic",
"is",
"None",
":",
"# Do nothing more if there is no graphic",
"continue",
"#Need to conver the inline-graphic element to an img element",
"inline_graphic",
".",
"tag",
"=",
"'img'",
"#Get a copy of the attributes, then remove them",
"inline_graphic_attributes",
"=",
"copy",
"(",
"inline_graphic",
".",
"attrib",
")",
"remove_all_attributes",
"(",
"inline_graphic",
")",
"#Create a file reference for the image",
"xlink_href",
"=",
"ns_format",
"(",
"inline_graphic",
",",
"'xlink:href'",
")",
"graphic_xlink_href",
"=",
"inline_graphic_attributes",
"[",
"xlink_href",
"]",
"file_name",
"=",
"graphic_xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"#Set the source to the image path",
"inline_graphic",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"img_path",
"inline_graphic",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'inline-formula'",
"inline_graphic",
".",
"attrib",
"[",
"'alt'",
"]",
"=",
"'An Inline Formula'"
] | <inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both. | [
"<inline",
"-",
"formula",
">",
"elements",
"must",
"be",
"converted",
"to",
"be",
"conforming"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L818-L848 |
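Editor's note: a similarly minimal sketch of the in-situ inline-formula rewrite above. dict(...) replaces the row's copy(...) for snapshotting attributes, attrib.clear() stands in for remove_all_attributes, and the DOI/image directory are hypothetical.

from lxml import etree

inline = etree.fromstring(
    '<inline-formula xmlns:xlink="http://www.w3.org/1999/xlink">'
    '<inline-graphic xlink:href="info:doi/10.1371/journal.pone.0012345.e002"/>'
    '</inline-formula>')
inline.tag = 'span'
inline.attrib.clear()            # remove_all_attributes equivalent
inline.set('class', 'inline-formula')

graphic = inline.find('inline-graphic')
old_attrs = dict(graphic.attrib)  # snapshot the attributes before clearing
graphic.attrib.clear()
graphic.tag = 'img'
href = old_attrs['{http://www.w3.org/1999/xlink}href']
graphic.set('src', 'images-pone.0012345/' + href.split('.')[-1] + '.png')
graphic.set('class', 'inline-formula')
graphic.set('alt', 'An Inline Formula')
print(etree.tostring(inline).decode())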
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_disp_quote_elements | def convert_disp_quote_elements(self):
"""
Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s.
"""
for disp_quote in self.main.getroot().findall('.//disp-quote'):
if disp_quote.getparent().tag == 'p':
elevate_element(disp_quote)
disp_quote.tag = 'div'
disp_quote.attrib['class'] = 'disp-quote' | python | def convert_disp_quote_elements(self):
"""
Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s.
"""
for disp_quote in self.main.getroot().findall('.//disp-quote'):
if disp_quote.getparent().tag == 'p':
elevate_element(disp_quote)
disp_quote.tag = 'div'
disp_quote.attrib['class'] = 'disp-quote' | [
"def",
"convert_disp_quote_elements",
"(",
"self",
")",
":",
"for",
"disp_quote",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//disp-quote'",
")",
":",
"if",
"disp_quote",
".",
"getparent",
"(",
")",
".",
"tag",
"==",
"'p'",
":",
"elevate_element",
"(",
"disp_quote",
")",
"disp_quote",
".",
"tag",
"=",
"'div'",
"disp_quote",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'disp-quote'"
] | Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s. | [
"Extract",
"or",
"extended",
"quoted",
"passage",
"from",
"another",
"work",
"usually",
"made",
"typographically",
"distinct",
"from",
"surrounding",
"text"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L852-L864 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_boxed_text_elements | def convert_boxed_text_elements(self):
"""
Textual material that is part of the body of text but outside the
flow of the narrative text, for example, a sidebar, marginalia, text
insert (whether enclosed in a box or not), caution, tip, note box, etc.
<boxed-text> elements for PLoS appear to all contain a single <sec>
element which frequently contains a <title> and various other content.
This method will elevate the <sec> element, adding class information as
well as processing the title.
"""
for boxed_text in self.main.getroot().findall('.//boxed-text'):
sec_el = boxed_text.find('sec')
if sec_el is not None:
sec_el.tag = 'div'
title = sec_el.find('title')
if title is not None:
title.tag = 'b'
sec_el.attrib['class'] = 'boxed-text'
if 'id' in boxed_text.attrib:
sec_el.attrib['id'] = boxed_text.attrib['id']
replace(boxed_text, sec_el)
else:
div_el = etree.Element('div', {'class': 'boxed-text'})
if 'id' in boxed_text.attrib:
div_el.attrib['id'] = boxed_text.attrib['id']
append_all_below(div_el, boxed_text)
replace(boxed_text, div_el) | python | def convert_boxed_text_elements(self):
"""
Textual material that is part of the body of text but outside the
flow of the narrative text, for example, a sidebar, marginalia, text
insert (whether enclosed in a box or not), caution, tip, note box, etc.
<boxed-text> elements for PLoS appear to all contain a single <sec>
element which frequently contains a <title> and various other content.
This method will elevate the <sec> element, adding class information as
well as processing the title.
"""
for boxed_text in self.main.getroot().findall('.//boxed-text'):
sec_el = boxed_text.find('sec')
if sec_el is not None:
sec_el.tag = 'div'
title = sec_el.find('title')
if title is not None:
title.tag = 'b'
sec_el.attrib['class'] = 'boxed-text'
if 'id' in boxed_text.attrib:
sec_el.attrib['id'] = boxed_text.attrib['id']
replace(boxed_text, sec_el)
else:
div_el = etree.Element('div', {'class': 'boxed-text'})
if 'id' in boxed_text.attrib:
div_el.attrib['id'] = boxed_text.attrib['id']
append_all_below(div_el, boxed_text)
replace(boxed_text, div_el) | [
"def",
"convert_boxed_text_elements",
"(",
"self",
")",
":",
"for",
"boxed_text",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//boxed-text'",
")",
":",
"sec_el",
"=",
"boxed_text",
".",
"find",
"(",
"'sec'",
")",
"if",
"sec_el",
"is",
"not",
"None",
":",
"sec_el",
".",
"tag",
"=",
"'div'",
"title",
"=",
"sec_el",
".",
"find",
"(",
"'title'",
")",
"if",
"title",
"is",
"not",
"None",
":",
"title",
".",
"tag",
"=",
"'b'",
"sec_el",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'boxed-text'",
"if",
"'id'",
"in",
"boxed_text",
".",
"attrib",
":",
"sec_el",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"boxed_text",
".",
"attrib",
"[",
"'id'",
"]",
"replace",
"(",
"boxed_text",
",",
"sec_el",
")",
"else",
":",
"div_el",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'boxed-text'",
"}",
")",
"if",
"'id'",
"in",
"boxed_text",
".",
"attrib",
":",
"div_el",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"boxed_text",
".",
"attrib",
"[",
"'id'",
"]",
"append_all_below",
"(",
"div_el",
",",
"boxed_text",
")",
"replace",
"(",
"boxed_text",
",",
"div_el",
")"
] | Textual material that is part of the body of text but outside the
flow of the narrative text, for example, a sidebar, marginalia, text
insert (whether enclosed in a box or not), caution, tip, note box, etc.
<boxed-text> elements for PLoS appear to all contain a single <sec>
element which frequently contains a <title> and various other content.
This method will elevate the <sec> element, adding class information as
well as processing the title. | [
"Textual",
"material",
"that",
"is",
"part",
"of",
"the",
"body",
"of",
"text",
"but",
"outside",
"the",
"flow",
"of",
"the",
"narrative",
"text",
"for",
"example",
"a",
"sidebar",
"marginalia",
"text",
"insert",
"(",
"whether",
"enclosed",
"in",
"a",
"box",
"or",
"not",
")",
"caution",
"tip",
"note",
"box",
"etc",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L868-L895 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_supplementary_material_elements | def convert_supplementary_material_elements(self):
"""
Supplementary materials are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure.
"""
for supplementary in self.main.getroot().findall('.//supplementary-material'):
#Create a div element to hold the supplementary content
suppl_div = etree.Element('div')
if 'id' in supplementary.attrib:
suppl_div.attrib['id'] = supplementary.attrib['id']
insert_before(supplementary, suppl_div)
#Get the sub elements
label = supplementary.find('label')
caption = supplementary.find('caption')
#Get the external resource URL for the supplementary information
ns_xlink_href = ns_format(supplementary, 'xlink:href')
xlink_href = supplementary.attrib[ns_xlink_href]
resource_url = self.fetch_single_representation(xlink_href)
if label is not None:
label.tag = 'a'
label.attrib['href'] = resource_url
append_new_text(label, '. ', join_str='')
suppl_div.append(label)
if caption is not None:
title = caption.find('title')
paragraphs = caption.findall('p')
if title is not None:
title.tag = 'b'
suppl_div.append(title)
for paragraph in paragraphs:
suppl_div.append(paragraph)
#This is a fix for odd articles with <p>s outside of <caption>
#See journal.pctr.0020006, PLoS themselves fail to format this for
#the website, though the .pdf is good
#It should be noted that journal.pctr.0020006 does not pass
#validation because it places a <p> before a <caption>
#By placing this at the end of the method, it conforms to the spec
#by expecting such p tags after caption. This causes a hiccup in
#the rendering for journal.pctr.0020006, but it's better than
#skipping the data entirely AND it should also work for conforming
#articles.
for paragraph in supplementary.findall('p'):
suppl_div.append(paragraph)
remove(supplementary) | python | def convert_supplementary_material_elements(self):
"""
Supplementary materials are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure.
"""
for supplementary in self.main.getroot().findall('.//supplementary-material'):
#Create a div element to hold the supplementary content
suppl_div = etree.Element('div')
if 'id' in supplementary.attrib:
suppl_div.attrib['id'] = supplementary.attrib['id']
insert_before(supplementary, suppl_div)
#Get the sub elements
label = supplementary.find('label')
caption = supplementary.find('caption')
#Get the external resource URL for the supplementary information
ns_xlink_href = ns_format(supplementary, 'xlink:href')
xlink_href = supplementary.attrib[ns_xlink_href]
resource_url = self.fetch_single_representation(xlink_href)
if label is not None:
label.tag = 'a'
label.attrib['href'] = resource_url
append_new_text(label, '. ', join_str='')
suppl_div.append(label)
if caption is not None:
title = caption.find('title')
paragraphs = caption.findall('p')
if title is not None:
title.tag = 'b'
suppl_div.append(title)
for paragraph in paragraphs:
suppl_div.append(paragraph)
#This is a fix for odd articles with <p>s outside of <caption>
#See journal.pctr.0020006, PLoS themselves fail to format this for
#the website, though the .pdf is good
#It should be noted that journal.pctr.0020006 does not pass
#validation because it places a <p> before a <caption>
#By placing this at the end of the method, it conforms to the spec
#by expecting such p tags after caption. This causes a hiccup in
#the rendering for journal.pctr.0020006, but it's better than
#skipping the data entirely AND it should also work for conforming
#articles.
for paragraph in supplementary.findall('p'):
suppl_div.append(paragraph)
remove(supplementary) | [
"def",
"convert_supplementary_material_elements",
"(",
"self",
")",
":",
"for",
"supplementary",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//supplementary-material'",
")",
":",
"#Create a div element to hold the supplementary content",
"suppl_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
")",
"if",
"'id'",
"in",
"supplementary",
".",
"attrib",
":",
"suppl_div",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"supplementary",
".",
"attrib",
"[",
"'id'",
"]",
"insert_before",
"(",
"supplementary",
",",
"suppl_div",
")",
"#Get the sub elements",
"label",
"=",
"supplementary",
".",
"find",
"(",
"'label'",
")",
"caption",
"=",
"supplementary",
".",
"find",
"(",
"'caption'",
")",
"#Get the external resource URL for the supplementary information",
"ns_xlink_href",
"=",
"ns_format",
"(",
"supplementary",
",",
"'xlink:href'",
")",
"xlink_href",
"=",
"supplementary",
".",
"attrib",
"[",
"ns_xlink_href",
"]",
"resource_url",
"=",
"self",
".",
"fetch_single_representation",
"(",
"xlink_href",
")",
"if",
"label",
"is",
"not",
"None",
":",
"label",
".",
"tag",
"=",
"'a'",
"label",
".",
"attrib",
"[",
"'href'",
"]",
"=",
"resource_url",
"append_new_text",
"(",
"label",
",",
"'. '",
",",
"join_str",
"=",
"''",
")",
"suppl_div",
".",
"append",
"(",
"label",
")",
"if",
"caption",
"is",
"not",
"None",
":",
"title",
"=",
"caption",
".",
"find",
"(",
"'title'",
")",
"paragraphs",
"=",
"caption",
".",
"findall",
"(",
"'p'",
")",
"if",
"title",
"is",
"not",
"None",
":",
"title",
".",
"tag",
"=",
"'b'",
"suppl_div",
".",
"append",
"(",
"title",
")",
"for",
"paragraph",
"in",
"paragraphs",
":",
"suppl_div",
".",
"append",
"(",
"paragraph",
")",
"#This is a fix for odd articles with <p>s outside of <caption>",
"#See journal.pctr.0020006, PLoS themselves fail to format this for",
"#the website, though the .pdf is good",
"#It should be noted that journal.pctr.0020006 does not pass",
"#validation because it places a <p> before a <caption>",
"#By placing this at the end of the method, it conforms to the spec",
"#by expecting such p tags after caption. This causes a hiccup in",
"#the rendering for journal.pctr.0020006, but it's better than",
"#skipping the data entirely AND it should also work for conforming",
"#articles.",
"for",
"paragraph",
"in",
"supplementary",
".",
"findall",
"(",
"'p'",
")",
":",
"suppl_div",
".",
"append",
"(",
"paragraph",
")",
"remove",
"(",
"supplementary",
")"
] | Supplementary materials are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure. | [
"Supplementary",
"material",
"are",
"not",
"nor",
"are",
"they",
"generally",
"expected",
"to",
"be",
"packaged",
"into",
"the",
"epub",
"file",
".",
"Though",
"this",
"is",
"a",
"technical",
"possibility",
"and",
"certain",
"epub",
"reading",
"systems",
"(",
"such",
"as",
"those",
"run",
"on",
"a",
"PC",
")",
"might",
"be",
"reasonably",
"capable",
"of",
"the",
"external",
"handling",
"of",
"diverse",
"file",
"formats",
"I",
"presume",
"that",
"supplementary",
"material",
"will",
"remain",
"separate",
"from",
"the",
"document",
".",
"So",
"special",
"cases",
"aside",
"external",
"links",
"to",
"supplementary",
"material",
"will",
"be",
"employed",
";",
"this",
"will",
"require",
"internet",
"connection",
"for",
"access",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L899-L952 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.fetch_single_representation | def fetch_single_representation(self, item_xlink_href):
"""
This function will render a formatted URL for accessing the PLoS server's
SingleRepresentation of an object.
"""
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
#Compose the address for fetchSingleRepresentation
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
return base_url.format(resource) | python | def fetch_single_representation(self, item_xlink_href):
"""
This function will render a formatted URL for accessing the PLoS server's
SingleRepresentation of an object.
"""
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
#Identify subjournal name for base URL
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
#Compose the address for fetchSingleRepresentation
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
return base_url.format(resource) | [
"def",
"fetch_single_representation",
"(",
"self",
",",
"item_xlink_href",
")",
":",
"#A dict of URLs for PLoS subjournals",
"journal_urls",
"=",
"{",
"'pgen'",
":",
"'http://www.plosgenetics.org/article/{0}'",
",",
"'pcbi'",
":",
"'http://www.ploscompbiol.org/article/{0}'",
",",
"'ppat'",
":",
"'http://www.plospathogens.org/article/{0}'",
",",
"'pntd'",
":",
"'http://www.plosntds.org/article/{0}'",
",",
"'pmed'",
":",
"'http://www.plosmedicine.org/article/{0}'",
",",
"'pbio'",
":",
"'http://www.plosbiology.org/article/{0}'",
",",
"'pone'",
":",
"'http://www.plosone.org/article/{0}'",
",",
"'pctr'",
":",
"'http://clinicaltrials.ploshubs.org/article/{0}'",
"}",
"#Identify subjournal name for base URl",
"subjournal_name",
"=",
"self",
".",
"article",
".",
"doi",
".",
"split",
"(",
"'.'",
")",
"[",
"2",
"]",
"base_url",
"=",
"journal_urls",
"[",
"subjournal_name",
"]",
"#Compose the address for fetchSingleRepresentation",
"resource",
"=",
"'fetchSingleRepresentation.action?uri='",
"+",
"item_xlink_href",
"return",
"base_url",
".",
"format",
"(",
"resource",
")"
This function will render a formatted URL for accessing the PLoS server's
SingleRepresentation of an object. | [
"This",
"function",
"will",
"render",
"a",
"formatted",
"URL",
"for",
"accessing",
"the",
"PLoS",
"server",
"SingleRepresentation",
"of",
"an",
"object",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L954-L973 |
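Editor's note: a worked example of the URL construction above; the DOI and the supplementary-item href are made up, but the index arithmetic and string assembly follow the row's code directly. doi.split('.')[2] picks the subjournal key ('pone') out of '10.1371/journal.pone.0012345'.

doi = '10.1371/journal.pone.0012345'                    # hypothetical article DOI
item_xlink_href = 'info:doi/10.1371/journal.pone.0012345.s001'

subjournal_name = doi.split('.')[2]                     # -> 'pone'
base_url = 'http://www.plosone.org/article/{0}'
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
print(base_url.format(resource))
# http://www.plosone.org/article/fetchSingleRepresentation.action?uri=info:doi/10.1371/journal.pone.0012345.s001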
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_fig_elements | def convert_fig_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
"""
for fig in self.main.getroot().findall('.//fig'):
if fig.getparent().tag == 'p':
elevate_element(fig)
for fig in self.main.getroot().findall('.//fig'):
#self.convert_fn_elements(fig)
#self.convert_disp_formula_elements(fig)
#Find label and caption
label_el = fig.find('label')
caption_el = fig.find('caption')
#Get the graphic node, this should be mandatory later on
graphic_el = fig.find('graphic')
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the content: using image path, label, and caption
img_el = etree.Element('img', {'alt': 'A Figure', 'src': img_path,
'class': 'figure'})
if 'id' in fig.attrib:
img_el.attrib['id'] = fig.attrib['id']
insert_before(fig, img_el)
#Create content for the label and caption
if caption_el is not None or label_el is not None:
img_caption_div = etree.Element('div', {'class': 'figure-caption'})
img_caption_div_b = etree.SubElement(img_caption_div, 'b')
if label_el is not None:
append_all_below(img_caption_div_b, label_el)
append_new_text(img_caption_div_b, '. ', join_str='')
if caption_el is not None:
caption_title = caption_el.find('title')
if caption_title is not None:
append_all_below(img_caption_div_b, caption_title)
append_new_text(img_caption_div_b, ' ', join_str='')
for each_p in caption_el.findall('p'):
append_all_below(img_caption_div, each_p)
insert_before(fig, img_caption_div)
#Remove the original <fig>
remove(fig) | python | def convert_fig_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
"""
for fig in self.main.getroot().findall('.//fig'):
if fig.getparent().tag == 'p':
elevate_element(fig)
for fig in self.main.getroot().findall('.//fig'):
#self.convert_fn_elements(fig)
#self.convert_disp_formula_elements(fig)
#Find label and caption
label_el = fig.find('label')
caption_el = fig.find('caption')
#Get the graphic node, this should be mandatory later on
graphic_el = fig.find('graphic')
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the content: using image path, label, and caption
img_el = etree.Element('img', {'alt': 'A Figure', 'src': img_path,
'class': 'figure'})
if 'id' in fig.attrib:
img_el.attrib['id'] = fig.attrib['id']
insert_before(fig, img_el)
#Create content for the label and caption
if caption_el is not None or label_el is not None:
img_caption_div = etree.Element('div', {'class': 'figure-caption'})
img_caption_div_b = etree.SubElement(img_caption_div, 'b')
if label_el is not None:
append_all_below(img_caption_div_b, label_el)
append_new_text(img_caption_div_b, '. ', join_str='')
if caption_el is not None:
caption_title = caption_el.find('title')
if caption_title is not None:
append_all_below(img_caption_div_b, caption_title)
append_new_text(img_caption_div_b, ' ', join_str='')
for each_p in caption_el.findall('p'):
append_all_below(img_caption_div, each_p)
insert_before(fig, img_caption_div)
#Remove the original <fig>
remove(fig) | [
"def",
"convert_fig_elements",
"(",
"self",
")",
":",
"for",
"fig",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//fig'",
")",
":",
"if",
"fig",
".",
"getparent",
"(",
")",
".",
"tag",
"==",
"'p'",
":",
"elevate_element",
"(",
"fig",
")",
"for",
"fig",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//fig'",
")",
":",
"#self.convert_fn_elements(fig)",
"#self.convert_disp_formula_elements(fig)",
"#Find label and caption",
"label_el",
"=",
"fig",
".",
"find",
"(",
"'label'",
")",
"caption_el",
"=",
"fig",
".",
"find",
"(",
"'caption'",
")",
"#Get the graphic node, this should be mandatory later on",
"graphic_el",
"=",
"fig",
".",
"find",
"(",
"'graphic'",
")",
"#Create a file reference for the image",
"xlink_href",
"=",
"ns_format",
"(",
"graphic_el",
",",
"'xlink:href'",
")",
"graphic_xlink_href",
"=",
"graphic_el",
".",
"attrib",
"[",
"xlink_href",
"]",
"file_name",
"=",
"graphic_xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"#Create the content: using image path, label, and caption",
"img_el",
"=",
"etree",
".",
"Element",
"(",
"'img'",
",",
"{",
"'alt'",
":",
"'A Figure'",
",",
"'src'",
":",
"img_path",
",",
"'class'",
":",
"'figure'",
"}",
")",
"if",
"'id'",
"in",
"fig",
".",
"attrib",
":",
"img_el",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"fig",
".",
"attrib",
"[",
"'id'",
"]",
"insert_before",
"(",
"fig",
",",
"img_el",
")",
"#Create content for the label and caption",
"if",
"caption_el",
"is",
"not",
"None",
"or",
"label_el",
"is",
"not",
"None",
":",
"img_caption_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'figure-caption'",
"}",
")",
"img_caption_div_b",
"=",
"etree",
".",
"SubElement",
"(",
"img_caption_div",
",",
"'b'",
")",
"if",
"label_el",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"img_caption_div_b",
",",
"label_el",
")",
"append_new_text",
"(",
"img_caption_div_b",
",",
"'. '",
",",
"join_str",
"=",
"''",
")",
"if",
"caption_el",
"is",
"not",
"None",
":",
"caption_title",
"=",
"caption_el",
".",
"find",
"(",
"'title'",
")",
"if",
"caption_title",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"img_caption_div_b",
",",
"caption_title",
")",
"append_new_text",
"(",
"img_caption_div_b",
",",
"' '",
",",
"join_str",
"=",
"''",
")",
"for",
"each_p",
"in",
"caption_el",
".",
"findall",
"(",
"'p'",
")",
":",
"append_all_below",
"(",
"img_caption_div",
",",
"each_p",
")",
"insert_before",
"(",
"fig",
",",
"img_caption_div",
")",
"#Remove the original <fig>",
"remove",
"(",
"fig",
")"
] | Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited. | [
"Responsible",
"for",
"the",
"correct",
"conversion",
"of",
"JPTS",
"3",
".",
"0",
"<fig",
">",
"elements",
"to",
"EPUB",
"xhtml",
".",
"Aside",
"from",
"translating",
"<fig",
">",
"to",
"<img",
">",
"the",
"content",
"model",
"must",
"be",
"edited",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L977-L1025 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_verse_group_elements | def convert_verse_group_elements(self):
"""
A song, poem, or verse
Implementor’s Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines.
"""
for verse_group in self.main.getroot().findall('.//verse-group'):
#Find some possible sub elements for the heading
label = verse_group.find('label')
title = verse_group.find('title')
subtitle = verse_group.find('subtitle')
#Modify the verse-group element
verse_group.tag = 'div'
verse_group.attrib['id'] = 'verse-group'
#Create a title for the verse_group
if label is not None or title is not None or subtitle is not None:
new_verse_title = etree.Element('b')
#Insert it at the beginning
verse_group.insert(0, new_verse_title)
#Induct the title elements into the new title
if label is not None:
append_all_below(new_verse_title, label)
remove(label)
if title is not None:
append_all_below(new_verse_title, title)
remove(title)
if subtitle is not None:
append_all_below(new_verse_title, subtitle)
remove(subtitle)
for verse_line in verse_group.findall('verse-line'):
verse_line.tag = 'p'
verse_line.attrib['class'] = 'verse-line' | python | def convert_verse_group_elements(self):
"""
A song, poem, or verse
Implementor’s Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines.
"""
for verse_group in self.main.getroot().findall('.//verse-group'):
#Find some possible sub elements for the heading
label = verse_group.find('label')
title = verse_group.find('title')
subtitle = verse_group.find('subtitle')
#Modify the verse-group element
verse_group.tag = 'div'
verse_group.attrib['id'] = 'verse-group'
#Create a title for the verse_group
if label is not None or title is not None or subtitle is not None:
new_verse_title = etree.Element('b')
#Insert it at the beginning
verse_group.insert(0, new_verse_title)
#Induct the title elements into the new title
if label is not None:
append_all_below(new_verse_title, label)
remove(label)
if title is not None:
append_all_below(new_verse_title, title)
remove(title)
if subtitle is not None:
append_all_below(new_verse_title, subtitle)
remove(subtitle)
for verse_line in verse_group.findall('verse-line'):
verse_line.tag = 'p'
verse_line.attrib['class'] = 'verse-line' | [
"def",
"convert_verse_group_elements",
"(",
"self",
")",
":",
"for",
"verse_group",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//verse-group'",
")",
":",
"#Find some possible sub elements for the heading",
"label",
"=",
"verse_group",
".",
"find",
"(",
"'label'",
")",
"title",
"=",
"verse_group",
".",
"find",
"(",
"'title'",
")",
"subtitle",
"=",
"verse_group",
".",
"find",
"(",
"'subtitle'",
")",
"#Modify the verse-group element",
"verse_group",
".",
"tag",
"=",
"'div'",
"verse_group",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"'verse-group'",
"#Create a title for the verse_group",
"if",
"label",
"is",
"not",
"None",
"or",
"title",
"is",
"not",
"None",
"or",
"subtitle",
"is",
"not",
"None",
":",
"new_verse_title",
"=",
"etree",
".",
"Element",
"(",
"'b'",
")",
"#Insert it at the beginning",
"verse_group",
".",
"insert",
"(",
"0",
",",
"new_verse_title",
")",
"#Induct the title elements into the new title",
"if",
"label",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"new_verse_title",
",",
"label",
")",
"remove",
"(",
"label",
")",
"if",
"title",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"new_verse_title",
",",
"title",
")",
"remove",
"(",
"title",
")",
"if",
"subtitle",
"is",
"not",
"None",
":",
"append_all_below",
"(",
"new_verse_title",
",",
"subtitle",
")",
"remove",
"(",
"subtitle",
")",
"for",
"verse_line",
"in",
"verse_group",
".",
"findall",
"(",
"'verse-line'",
")",
":",
"verse_line",
".",
"tag",
"=",
"'p'",
"verse_line",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'verse-line'"
] | A song, poem, or verse
Implementor’s Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines. | [
"A",
"song",
"poem",
"or",
"verse"
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1029-L1068 |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_fn_elements | def convert_fn_elements(self):
"""
<fn> elements may be used in the main text body outside of tables and
figures for purposes such as erratum notes. It appears that PLoS
practice is to not show erratum notes in the web or pdf formats after
the appropriate corrections have been made to the text. The erratum
notes are thus the only record that an error was made.
This method will attempt to display footnotes unless the note can be
identified as an Erratum, in which case it will be removed in
accordance with PLoS' apparent guidelines.
"""
for footnote in self.main.getroot().findall('.//fn'):
#Use only the first paragraph
paragraph = footnote.find('p')
#If no paragraph, move on
if paragraph is None:
remove(footnote)
continue
#Simply remove corrected errata items
paragraph_text = str(etree.tostring(paragraph, method='text', encoding='utf-8'), encoding='utf-8')
if paragraph_text.startswith('Erratum') and 'Corrected' in paragraph_text:
remove(footnote)
continue
#Transfer some attribute information from the fn element to the paragraph
if 'id' in footnote.attrib:
paragraph.attrib['id'] = footnote.attrib['id']
if 'fn-type' in footnote.attrib:
paragraph.attrib['class'] = 'fn-type-{0}'.format(footnote.attrib['fn-type'])
else:
paragraph.attrib['class'] = 'fn'
#Replace the fn element with the paragraph
replace(footnote, paragraph) | python | def convert_fn_elements(self):
"""
<fn> elements may be used in the main text body outside of tables and
figures for purposes such as erratum notes. It appears that PLoS
practice is to not show erratum notes in the web or pdf formats after
the appropriate corrections have been made to the text. The erratum
notes are thus the only record that an error was made.
This method will attempt to display footnotes unless the note can be
identified as an Erratum, in which case it will be removed in
accordance with PLoS' apparent guidelines.
"""
for footnote in self.main.getroot().findall('.//fn'):
#Use only the first paragraph
paragraph = footnote.find('p')
#If no paragraph, move on
if paragraph is None:
remove(footnote)
continue
#Simply remove corrected errata items
paragraph_text = str(etree.tostring(paragraph, method='text', encoding='utf-8'), encoding='utf-8')
if paragraph_text.startswith('Erratum') and 'Corrected' in paragraph_text:
remove(footnote)
continue
#Transfer some attribute information from the fn element to the paragraph
if 'id' in footnote.attrib:
paragraph.attrib['id'] = footnote.attrib['id']
if 'fn-type' in footnote.attrib:
paragraph.attrib['class'] = 'fn-type-{0}'.format(footnote.attrib['fn-type'])
else:
paragraph.attrib['class'] = 'fn'
#Replace the fn element with the paragraph
replace(footnote, paragraph) | [
"def",
"convert_fn_elements",
"(",
"self",
")",
":",
"for",
"footnote",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//fn'",
")",
":",
"#Use only the first paragraph",
"paragraph",
"=",
"footnote",
".",
"find",
"(",
"'p'",
")",
"#If no paragraph, move on",
"if",
"paragraph",
"is",
"None",
":",
"remove",
"(",
"footnote",
")",
"continue",
"#Simply remove corrected errata items",
"paragraph_text",
"=",
"str",
"(",
"etree",
".",
"tostring",
"(",
"paragraph",
",",
"method",
"=",
"'text'",
",",
"encoding",
"=",
"'utf-8'",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
"if",
"paragraph_text",
".",
"startswith",
"(",
"'Erratum'",
")",
"and",
"'Corrected'",
"in",
"paragraph_text",
":",
"remove",
"(",
"footnote",
")",
"continue",
"#Transfer some attribute information from the fn element to the paragraph",
"if",
"'id'",
"in",
"footnote",
".",
"attrib",
":",
"paragraph",
".",
"attrib",
"[",
"'id'",
"]",
"=",
"footnote",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"'fn-type'",
"in",
"footnote",
".",
"attrib",
":",
"paragraph",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'fn-type-{0}'",
".",
"footnote",
".",
"attrib",
"[",
"'fn-type'",
"]",
"else",
":",
"paragraph",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'fn'",
"#Replace the",
"replace",
"(",
"footnote",
",",
"paragraph",
")"
] | <fn> elements may be used in the main text body outside of tables and
figures for purposes such as erratum notes. It appears that PLoS
practice is to not show erratum notes in the web or pdf formats after
the appropriate corrections have been made to the text. The erratum
notes are thus the only record that an error was made.
This method will attempt to display footnotes unless the note can be
identified as an Erratum, in which case it will be removed in
accordance with PLoS' apparent guidelines. | [
"<fn",
">",
"elements",
"may",
"be",
"used",
"in",
"the",
"main",
"text",
"body",
"outside",
"of",
"tables",
"and",
"figures",
"for",
"purposes",
"such",
"as",
"erratum",
"notes",
".",
"It",
"appears",
"that",
"PLoS",
"practice",
"is",
"to",
"not",
"show",
"erratum",
"notes",
"in",
"the",
"web",
"or",
"pdf",
"formats",
"after",
"the",
"appropriate",
"corrections",
"have",
"been",
"made",
"to",
"the",
"text",
".",
"The",
"erratum",
"notes",
"are",
"thus",
"the",
"only",
"record",
"that",
"an",
"error",
"was",
"made",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1072-L1104 |
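Editor's note: to illustrate the erratum filter above (the footnote text is invented), serializing the first paragraph with method='text' flattens any inline markup before the string tests run; encoding='unicode' is the one-step equivalent of the row's bytes-then-decode construction.

from lxml import etree

fn = etree.fromstring(
    '<fn id="fn1"><p>Erratum: Figure 2 was mislabeled. '
    '<bold>Corrected</bold> in the text.</p></fn>')
paragraph = fn.find('p')
text = etree.tostring(paragraph, method='text', encoding='unicode')
#True -> this footnote would be removed rather than displayed
print(text.startswith('Erratum') and 'Corrected' in text)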
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.convert_list_elements | def convert_list_elements(self):
"""
A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion.
"""
#I have yet to gather many examples of this element, and may have to
#write a recursive method for the processing of lists depending on how
#PLoS produces their XML, for now this method is ignorant of nesting
#TODO: prefix-words, one possible solution would be to have this method
#edit the CSS to provide formatting support for arbitrary prefixes...
#This is a block level element, so elevate it if found in p
for list_el in self.main.getroot().findall('.//list'):
if list_el.getparent().tag == 'p':
elevate_element(list_el)
#list_el is used instead of list (list is reserved)
for list_el in self.main.getroot().findall('.//list'):
if 'list-type' not in list_el.attrib:
list_el_type = 'order'
else:
list_el_type = list_el.attrib['list-type']
#Unordered lists
if list_el_type in ['', 'bullet', 'simple']:
list_el.tag = 'ul'
#CSS must be used to recognize the class and suppress bullets
if list_el_type == 'simple':
list_el.attrib['class'] = 'simple'
#Ordered lists
else:
list_el.tag = 'ol'
list_el.attrib['class'] = list_el_type
#Convert the list-item element tags to 'li'
for list_item in list_el.findall('list-item'):
list_item.tag = 'li'
remove_all_attributes(list_el, exclude=['id', 'class']) | python | def convert_list_elements(self):
"""
A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion.
"""
#I have yet to gather many examples of this element, and may have to
#write a recursive method for the processing of lists depending on how
#PLoS produces their XML, for now this method is ignorant of nesting
#TODO: prefix-words, one possible solution would be to have this method
#edit the CSS to provide formatting support for arbitrary prefixes...
#This is a block level element, so elevate it if found in p
for list_el in self.main.getroot().findall('.//list'):
if list_el.getparent().tag == 'p':
elevate_element(list_el)
#list_el is used instead of list (list is reserved)
for list_el in self.main.getroot().findall('.//list'):
if 'list-type' not in list_el.attrib:
list_el_type = 'order'
else:
list_el_type = list_el.attrib['list-type']
#Unordered lists
if list_el_type in ['', 'bullet', 'simple']:
list_el.tag = 'ul'
#CSS must be used to recognize the class and suppress bullets
if list_el_type == 'simple':
list_el.attrib['class'] = 'simple'
#Ordered lists
else:
list_el.tag = 'ol'
list_el.attrib['class'] = list_el_type
#Convert the list-item element tags to 'li'
for list_item in list_el.findall('list-item'):
list_item.tag = 'li'
remove_all_attributes(list_el, exclude=['id', 'class']) | [
"def",
"convert_list_elements",
"(",
"self",
")",
":",
"#I have yet to gather many examples of this element, and may have to",
"#write a recursive method for the processing of lists depending on how",
"#PLoS produces their XML, for now this method is ignorant of nesting",
"#TODO: prefix-words, one possible solution would be to have this method",
"#edit the CSS to provide formatting support for arbitrary prefixes...",
"#This is a block level element, so elevate it if found in p",
"for",
"list_el",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//list'",
")",
":",
"if",
"list_el",
".",
"getparent",
"(",
")",
".",
"tag",
"==",
"'p'",
":",
"elevate_element",
"(",
"list_el",
")",
"#list_el is used instead of list (list is reserved)",
"for",
"list_el",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//list'",
")",
":",
"if",
"'list-type'",
"not",
"in",
"list_el",
".",
"attrib",
":",
"list_el_type",
"=",
"'order'",
"else",
":",
"list_el_type",
"=",
"list_el",
".",
"attrib",
"[",
"'list-type'",
"]",
"#Unordered lists",
"if",
"list_el_type",
"in",
"[",
"''",
",",
"'bullet'",
",",
"'simple'",
"]",
":",
"list_el",
".",
"tag",
"=",
"'ul'",
"#CSS must be used to recognize the class and suppress bullets",
"if",
"list_el_type",
"==",
"'simple'",
":",
"list_el",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'simple'",
"#Ordered lists",
"else",
":",
"list_el",
".",
"tag",
"=",
"'ol'",
"list_el",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"list_el_type",
"#Convert the list-item element tags to 'li'",
"for",
"list_item",
"in",
"list_el",
".",
"findall",
"(",
"'list-item'",
")",
":",
"list_item",
".",
"tag",
"=",
"'li'",
"remove_all_attributes",
"(",
"list_el",
",",
"exclude",
"=",
"[",
"'id'",
",",
"'class'",
"]",
")"
] | A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion. | [
"A",
"sequence",
"of",
"two",
"or",
"more",
"items",
"which",
"may",
"or",
"may",
"not",
"be",
"ordered",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1108-L1149 |
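Editor's note: a self-contained sketch of the list-type mapping above on two made-up inputs; attrib.pop stands in for remove_all_attributes(list_el, exclude=['id', 'class']).

from lxml import etree

samples = ('<list list-type="bullet"><list-item><p>a</p></list-item></list>',
           '<list list-type="alpha-lower"><list-item><p>b</p></list-item></list>')
for source in samples:
    list_el = etree.fromstring(source)
    list_type = list_el.get('list-type', 'order')
    if list_type in ('', 'bullet', 'simple'):
        list_el.tag = 'ul'
        #CSS would suppress bullets for the 'simple' class
        if list_type == 'simple':
            list_el.set('class', 'simple')
    else:
        list_el.tag = 'ol'
        list_el.set('class', list_type)
    for item in list_el.findall('list-item'):
        item.tag = 'li'
    list_el.attrib.pop('list-type', None)   # mimic remove_all_attributes
    print(etree.tostring(list_el).decode())
# <ul><li><p>a</p></li></ul>
# <ol class="alpha-lower"><li><p>b</p></li></ol>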