Dataset schema (11 columns):
- repository_name: string, lengths 7 to 55
- func_path_in_repository: string, lengths 4 to 223
- func_name: string, lengths 1 to 134
- whole_func_string: string, lengths 75 to 104k
- language: string, 1 distinct value
- func_code_string: string, lengths 75 to 104k
- func_code_tokens: sequence, lengths 19 to 28.4k
- func_documentation_string: string, lengths 1 to 46.9k
- func_documentation_tokens: sequence, lengths 1 to 1.97k
- split_name: string, 1 distinct value
- func_code_url: string, lengths 87 to 315
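The column set above matches the CodeSearchNet corpus as distributed on the Hugging Face Hub, so the sketch below shows how records with this schema might be loaded and inspected. The dataset id "code_search_net", the "python" config, and the trust_remote_code flag are assumptions about the source of this dump, not details stated in it.

# Minimal sketch, assuming the source is the code_search_net dataset on the
# Hugging Face Hub (python config, train split).
from datasets import load_dataset

# code_search_net is a script-based dataset; recent versions of the `datasets`
# library require trust_remote_code=True to execute its loading script.
ds = load_dataset("code_search_net", "python", split="train", trust_remote_code=True)

record = ds[0]
print(record["repository_name"])            # e.g. "canonical-ols/acceptable"
print(record["func_name"])                  # e.g. "AcceptableAPI.changelog"
print(record["func_documentation_string"])  # docstring paired with the code
print(record["func_code_url"])              # GitHub permalink to the source lines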
canonical-ols/acceptable
acceptable/_service.py
AcceptableAPI.changelog
def changelog(self, api_version, doc):
    """Add a changelog entry for this api."""
    doc = textwrap.dedent(doc).strip()
    self._changelog[api_version] = doc
    self._changelog_locations[api_version] = get_callsite_location()
python
def changelog(self, api_version, doc):
    """Add a changelog entry for this api."""
    doc = textwrap.dedent(doc).strip()
    self._changelog[api_version] = doc
    self._changelog_locations[api_version] = get_callsite_location()
[ "def", "changelog", "(", "self", ",", "api_version", ",", "doc", ")", ":", "doc", "=", "textwrap", ".", "dedent", "(", "doc", ")", ".", "strip", "(", ")", "self", ".", "_changelog", "[", "api_version", "]", "=", "doc", "self", ".", "_changelog_locations", "[", "api_version", "]", "=", "get_callsite_location", "(", ")" ]
Add a changelog entry for this api.
[ "Add", "a", "changelog", "entry", "for", "this", "api", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_service.py#L375-L379
elifesciences/elife-tools
elifetools/parseJATS.py
title_prefix
def title_prefix(soup):
    "titlePrefix for article JSON is only articles with certain display_channel values"
    prefix = None
    display_channel_match_list = ['feature article', 'insight', 'editorial']
    for d_channel in display_channel(soup):
        if d_channel.lower() in display_channel_match_list:
            if raw_parser.sub_display_channel(soup):
                prefix = node_text(first(raw_parser.sub_display_channel(soup)))
    return prefix
python
def title_prefix(soup):
    "titlePrefix for article JSON is only articles with certain display_channel values"
    prefix = None
    display_channel_match_list = ['feature article', 'insight', 'editorial']
    for d_channel in display_channel(soup):
        if d_channel.lower() in display_channel_match_list:
            if raw_parser.sub_display_channel(soup):
                prefix = node_text(first(raw_parser.sub_display_channel(soup)))
    return prefix
[ "def", "title_prefix", "(", "soup", ")", ":", "prefix", "=", "None", "display_channel_match_list", "=", "[", "'feature article'", ",", "'insight'", ",", "'editorial'", "]", "for", "d_channel", "in", "display_channel", "(", "soup", ")", ":", "if", "d_channel", ".", "lower", "(", ")", "in", "display_channel_match_list", ":", "if", "raw_parser", ".", "sub_display_channel", "(", "soup", ")", ":", "prefix", "=", "node_text", "(", "first", "(", "raw_parser", ".", "sub_display_channel", "(", "soup", ")", ")", ")", "return", "prefix" ]
titlePrefix for article JSON is only articles with certain display_channel values
[ "titlePrefix", "for", "article", "JSON", "is", "only", "articles", "with", "certain", "display_channel", "values" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L37-L45
elifesciences/elife-tools
elifetools/parseJATS.py
title_prefix_json
def title_prefix_json(soup):
    "titlePrefix with capitalisation changed"
    prefix = title_prefix(soup)
    prefix_rewritten = elifetools.json_rewrite.rewrite_json("title_prefix_json", soup, prefix)
    return prefix_rewritten
python
def title_prefix_json(soup):
    "titlePrefix with capitalisation changed"
    prefix = title_prefix(soup)
    prefix_rewritten = elifetools.json_rewrite.rewrite_json("title_prefix_json", soup, prefix)
    return prefix_rewritten
[ "def", "title_prefix_json", "(", "soup", ")", ":", "prefix", "=", "title_prefix", "(", "soup", ")", "prefix_rewritten", "=", "elifetools", ".", "json_rewrite", ".", "rewrite_json", "(", "\"title_prefix_json\"", ",", "soup", ",", "prefix", ")", "return", "prefix_rewritten" ]
titlePrefix with capitalisation changed
[ "titlePrefix", "with", "capitalisation", "changed" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L47-L51
elifesciences/elife-tools
elifetools/parseJATS.py
research_organism
def research_organism(soup):
    "Find the research-organism from the set of kwd-group tags"
    if not raw_parser.research_organism_keywords(soup):
        return []
    return list(map(node_text, raw_parser.research_organism_keywords(soup)))
python
def research_organism(soup):
    "Find the research-organism from the set of kwd-group tags"
    if not raw_parser.research_organism_keywords(soup):
        return []
    return list(map(node_text, raw_parser.research_organism_keywords(soup)))
[ "def", "research_organism", "(", "soup", ")", ":", "if", "not", "raw_parser", ".", "research_organism_keywords", "(", "soup", ")", ":", "return", "[", "]", "return", "list", "(", "map", "(", "node_text", ",", "raw_parser", ".", "research_organism_keywords", "(", "soup", ")", ")", ")" ]
Find the research-organism from the set of kwd-group tags
[ "Find", "the", "research", "-", "organism", "from", "the", "set", "of", "kwd", "-", "group", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L86-L90
elifesciences/elife-tools
elifetools/parseJATS.py
full_research_organism
def full_research_organism(soup):
    "research-organism list including inline tags, such as italic"
    if not raw_parser.research_organism_keywords(soup):
        return []
    return list(map(node_contents_str, raw_parser.research_organism_keywords(soup)))
python
def full_research_organism(soup):
    "research-organism list including inline tags, such as italic"
    if not raw_parser.research_organism_keywords(soup):
        return []
    return list(map(node_contents_str, raw_parser.research_organism_keywords(soup)))
[ "def", "full_research_organism", "(", "soup", ")", ":", "if", "not", "raw_parser", ".", "research_organism_keywords", "(", "soup", ")", ":", "return", "[", "]", "return", "list", "(", "map", "(", "node_contents_str", ",", "raw_parser", ".", "research_organism_keywords", "(", "soup", ")", ")", ")" ]
research-organism list including inline tags, such as italic
[ "research", "-", "organism", "list", "including", "inline", "tags", "such", "as", "italic" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L92-L96
elifesciences/elife-tools
elifetools/parseJATS.py
keywords
def keywords(soup):
    """
    Find the keywords from the set of kwd-group tags
    which are typically labelled as the author keywords
    """
    if not raw_parser.author_keywords(soup):
        return []
    return list(map(node_text, raw_parser.author_keywords(soup)))
python
def keywords(soup):
    """
    Find the keywords from the set of kwd-group tags
    which are typically labelled as the author keywords
    """
    if not raw_parser.author_keywords(soup):
        return []
    return list(map(node_text, raw_parser.author_keywords(soup)))
[ "def", "keywords", "(", "soup", ")", ":", "if", "not", "raw_parser", ".", "author_keywords", "(", "soup", ")", ":", "return", "[", "]", "return", "list", "(", "map", "(", "node_text", ",", "raw_parser", ".", "author_keywords", "(", "soup", ")", ")", ")" ]
Find the keywords from the set of kwd-group tags which are typically labelled as the author keywords
[ "Find", "the", "keywords", "from", "the", "set", "of", "kwd", "-", "group", "tags", "which", "are", "typically", "labelled", "as", "the", "author", "keywords" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L98-L105
elifesciences/elife-tools
elifetools/parseJATS.py
full_keywords
def full_keywords(soup):
    "author keywords list including inline tags, such as italic"
    if not raw_parser.author_keywords(soup):
        return []
    return list(map(node_contents_str, raw_parser.author_keywords(soup)))
python
def full_keywords(soup):
    "author keywords list including inline tags, such as italic"
    if not raw_parser.author_keywords(soup):
        return []
    return list(map(node_contents_str, raw_parser.author_keywords(soup)))
[ "def", "full_keywords", "(", "soup", ")", ":", "if", "not", "raw_parser", ".", "author_keywords", "(", "soup", ")", ":", "return", "[", "]", "return", "list", "(", "map", "(", "node_contents_str", ",", "raw_parser", ".", "author_keywords", "(", "soup", ")", ")", ")" ]
author keywords list including inline tags, such as italic
[ "author", "keywords", "list", "including", "inline", "tags", "such", "as", "italic" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L107-L111
elifesciences/elife-tools
elifetools/parseJATS.py
version_history
def version_history(soup, html_flag=True):
    "extract the article version history details"
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    version_history = []
    related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
    for tag in related_object_tags:
        article_version = OrderedDict()
        date_tag = first(raw_parser.date(tag))
        if date_tag:
            copy_attribute(date_tag.attrs, 'date-type', article_version, 'version')
            (day, month, year) = ymd(date_tag)
            article_version['day'] = day
            article_version['month'] = month
            article_version['year'] = year
            article_version['date'] = date_struct_nn(year, month, day)
        copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href')
        set_if_value(article_version, "comment", convert(node_contents_str(first(raw_parser.comment(tag)))))
        version_history.append(article_version)
    return version_history
python
def version_history(soup, html_flag=True):
    "extract the article version history details"
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    version_history = []
    related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
    for tag in related_object_tags:
        article_version = OrderedDict()
        date_tag = first(raw_parser.date(tag))
        if date_tag:
            copy_attribute(date_tag.attrs, 'date-type', article_version, 'version')
            (day, month, year) = ymd(date_tag)
            article_version['day'] = day
            article_version['month'] = month
            article_version['year'] = year
            article_version['date'] = date_struct_nn(year, month, day)
        copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href')
        set_if_value(article_version, "comment", convert(node_contents_str(first(raw_parser.comment(tag)))))
        version_history.append(article_version)
    return version_history
[ "def", "version_history", "(", "soup", ",", "html_flag", "=", "True", ")", ":", "convert", "=", "lambda", "xml_string", ":", "xml_to_html", "(", "html_flag", ",", "xml_string", ")", "version_history", "=", "[", "]", "related_object_tags", "=", "raw_parser", ".", "related_object", "(", "raw_parser", ".", "article_meta", "(", "soup", ")", ")", "for", "tag", "in", "related_object_tags", ":", "article_version", "=", "OrderedDict", "(", ")", "date_tag", "=", "first", "(", "raw_parser", ".", "date", "(", "tag", ")", ")", "if", "date_tag", ":", "copy_attribute", "(", "date_tag", ".", "attrs", ",", "'date-type'", ",", "article_version", ",", "'version'", ")", "(", "day", ",", "month", ",", "year", ")", "=", "ymd", "(", "date_tag", ")", "article_version", "[", "'day'", "]", "=", "day", "article_version", "[", "'month'", "]", "=", "month", "article_version", "[", "'year'", "]", "=", "year", "article_version", "[", "'date'", "]", "=", "date_struct_nn", "(", "year", ",", "month", ",", "day", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "article_version", ",", "'xlink_href'", ")", "set_if_value", "(", "article_version", ",", "\"comment\"", ",", "convert", "(", "node_contents_str", "(", "first", "(", "raw_parser", ".", "comment", "(", "tag", ")", ")", ")", ")", ")", "version_history", ".", "append", "(", "article_version", ")", "return", "version_history" ]
extract the article version history details
[ "extract", "the", "article", "version", "history", "details" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L131-L150
elifesciences/elife-tools
elifetools/parseJATS.py
article_id_list
def article_id_list(soup):
    """return a list of article-id data"""
    id_list = []
    for article_id_tag in raw_parser.article_id(soup):
        id_details = OrderedDict()
        set_if_value(id_details, "type", article_id_tag.get("pub-id-type"))
        set_if_value(id_details, "value", article_id_tag.text)
        set_if_value(id_details, "assigning-authority", article_id_tag.get("assigning-authority"))
        id_list.append(id_details)
    return id_list
python
def article_id_list(soup):
    """return a list of article-id data"""
    id_list = []
    for article_id_tag in raw_parser.article_id(soup):
        id_details = OrderedDict()
        set_if_value(id_details, "type", article_id_tag.get("pub-id-type"))
        set_if_value(id_details, "value", article_id_tag.text)
        set_if_value(id_details, "assigning-authority", article_id_tag.get("assigning-authority"))
        id_list.append(id_details)
    return id_list
[ "def", "article_id_list", "(", "soup", ")", ":", "id_list", "=", "[", "]", "for", "article_id_tag", "in", "raw_parser", ".", "article_id", "(", "soup", ")", ":", "id_details", "=", "OrderedDict", "(", ")", "set_if_value", "(", "id_details", ",", "\"type\"", ",", "article_id_tag", ".", "get", "(", "\"pub-id-type\"", ")", ")", "set_if_value", "(", "id_details", ",", "\"value\"", ",", "article_id_tag", ".", "text", ")", "set_if_value", "(", "id_details", ",", "\"assigning-authority\"", ",", "article_id_tag", ".", "get", "(", "\"assigning-authority\"", ")", ")", "id_list", ".", "append", "(", "id_details", ")", "return", "id_list" ]
return a list of article-id data
[ "return", "a", "list", "of", "article", "-", "id", "data" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L163-L172
elifesciences/elife-tools
elifetools/parseJATS.py
copyright_holder_json
def copyright_holder_json(soup):
    "for json output add a full stop if ends in et al"
    holder = None
    permissions_tag = raw_parser.article_permissions(soup)
    if permissions_tag:
        holder = node_text(raw_parser.copyright_holder(permissions_tag))
    if holder is not None and holder.endswith('et al'):
        holder = holder + '.'
    return holder
python
def copyright_holder_json(soup):
    "for json output add a full stop if ends in et al"
    holder = None
    permissions_tag = raw_parser.article_permissions(soup)
    if permissions_tag:
        holder = node_text(raw_parser.copyright_holder(permissions_tag))
    if holder is not None and holder.endswith('et al'):
        holder = holder + '.'
    return holder
[ "def", "copyright_holder_json", "(", "soup", ")", ":", "holder", "=", "None", "permissions_tag", "=", "raw_parser", ".", "article_permissions", "(", "soup", ")", "if", "permissions_tag", ":", "holder", "=", "node_text", "(", "raw_parser", ".", "copyright_holder", "(", "permissions_tag", ")", ")", "if", "holder", "is", "not", "None", "and", "holder", ".", "endswith", "(", "'et al'", ")", ":", "holder", "=", "holder", "+", "'.'", "return", "holder" ]
for json output add a full stop if ends in et al
[ "for", "json", "output", "add", "a", "full", "stop", "if", "ends", "in", "et", "al" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L241-L249
elifesciences/elife-tools
elifetools/parseJATS.py
subject_area
def subject_area(soup):
    """
    Find the subject areas from article-categories subject tags
    """
    subject_area = []

    tags = raw_parser.subject_area(soup)
    for tag in tags:
        subject_area.append(node_text(tag))

    return subject_area
python
def subject_area(soup):
    """
    Find the subject areas from article-categories subject tags
    """
    subject_area = []

    tags = raw_parser.subject_area(soup)
    for tag in tags:
        subject_area.append(node_text(tag))

    return subject_area
[ "def", "subject_area", "(", "soup", ")", ":", "subject_area", "=", "[", "]", "tags", "=", "raw_parser", ".", "subject_area", "(", "soup", ")", "for", "tag", "in", "tags", ":", "subject_area", ".", "append", "(", "node_text", "(", "tag", ")", ")", "return", "subject_area" ]
Find the subject areas from article-categories subject tags
[ "Find", "the", "subject", "areas", "from", "article", "-", "categories", "subject", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L298-L308
elifesciences/elife-tools
elifetools/parseJATS.py
display_channel
def display_channel(soup):
    """
    Find the subject areas of type display-channel
    """
    display_channel = []

    tags = raw_parser.display_channel(soup)
    for tag in tags:
        display_channel.append(node_text(tag))

    return display_channel
python
def display_channel(soup):
    """
    Find the subject areas of type display-channel
    """
    display_channel = []

    tags = raw_parser.display_channel(soup)
    for tag in tags:
        display_channel.append(node_text(tag))

    return display_channel
[ "def", "display_channel", "(", "soup", ")", ":", "display_channel", "=", "[", "]", "tags", "=", "raw_parser", ".", "display_channel", "(", "soup", ")", "for", "tag", "in", "tags", ":", "display_channel", ".", "append", "(", "node_text", "(", "tag", ")", ")", "return", "display_channel" ]
Find the subject areas of type display-channel
[ "Find", "the", "subject", "areas", "of", "type", "display", "-", "channel" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L321-L331
elifesciences/elife-tools
elifetools/parseJATS.py
category
def category(soup):
    """
    Find the category from subject areas
    """
    category = []

    tags = raw_parser.category(soup)
    for tag in tags:
        category.append(node_text(tag))

    return category
python
def category(soup):
    """
    Find the category from subject areas
    """
    category = []

    tags = raw_parser.category(soup)
    for tag in tags:
        category.append(node_text(tag))

    return category
[ "def", "category", "(", "soup", ")", ":", "category", "=", "[", "]", "tags", "=", "raw_parser", ".", "category", "(", "soup", ")", "for", "tag", "in", "tags", ":", "category", ".", "append", "(", "node_text", "(", "tag", ")", ")", "return", "category" ]
Find the category from subject areas
[ "Find", "the", "category", "from", "subject", "areas" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L333-L343
elifesciences/elife-tools
elifetools/parseJATS.py
ymd
def ymd(soup):
    """
    Get the year, month and day from child tags
    """
    day = node_text(raw_parser.day(soup))
    month = node_text(raw_parser.month(soup))
    year = node_text(raw_parser.year(soup))
    return (day, month, year)
python
def ymd(soup):
    """
    Get the year, month and day from child tags
    """
    day = node_text(raw_parser.day(soup))
    month = node_text(raw_parser.month(soup))
    year = node_text(raw_parser.year(soup))
    return (day, month, year)
[ "def", "ymd", "(", "soup", ")", ":", "day", "=", "node_text", "(", "raw_parser", ".", "day", "(", "soup", ")", ")", "month", "=", "node_text", "(", "raw_parser", ".", "month", "(", "soup", ")", ")", "year", "=", "node_text", "(", "raw_parser", ".", "year", "(", "soup", ")", ")", "return", "(", "day", ",", "month", ",", "year", ")" ]
Get the year, month and day from child tags
[ "Get", "the", "year", "month", "and", "day", "from", "child", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L345-L352
elifesciences/elife-tools
elifetools/parseJATS.py
pub_date
def pub_date(soup):
    """
    Return the publishing date in struct format
    pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
    Default date_type is pub
    """
    pub_date = first(raw_parser.pub_date(soup, date_type="pub"))
    if pub_date is None:
        pub_date = first(raw_parser.pub_date(soup, date_type="publication"))
    if pub_date is None:
        return None
    (day, month, year) = ymd(pub_date)
    return date_struct(year, month, day)
python
def pub_date(soup):
    """
    Return the publishing date in struct format
    pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp
    Default date_type is pub
    """
    pub_date = first(raw_parser.pub_date(soup, date_type="pub"))
    if pub_date is None:
        pub_date = first(raw_parser.pub_date(soup, date_type="publication"))
    if pub_date is None:
        return None
    (day, month, year) = ymd(pub_date)
    return date_struct(year, month, day)
[ "def", "pub_date", "(", "soup", ")", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "date_type", "=", "\"pub\"", ")", ")", "if", "pub_date", "is", "None", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "date_type", "=", "\"publication\"", ")", ")", "if", "pub_date", "is", "None", ":", "return", "None", "(", "day", ",", "month", ",", "year", ")", "=", "ymd", "(", "pub_date", ")", "return", "date_struct", "(", "year", ",", "month", ",", "day", ")" ]
Return the publishing date in struct format pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub
[ "Return", "the", "publishing", "date", "in", "struct", "format", "pub_date_date", "pub_date_day", "pub_date_month", "pub_date_year", "pub_date_timestamp", "Default", "date_type", "is", "pub" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L354-L366
elifesciences/elife-tools
elifetools/parseJATS.py
pub_dates
def pub_dates(soup):
    """
    return a list of all the pub dates
    """
    pub_dates = []
    tags = raw_parser.pub_date(soup)
    for tag in tags:
        pub_date = OrderedDict()
        copy_attribute(tag.attrs, 'publication-format', pub_date)
        copy_attribute(tag.attrs, 'date-type', pub_date)
        copy_attribute(tag.attrs, 'pub-type', pub_date)
        for tag_attr in ["date-type", "pub-type"]:
            if tag_attr in tag.attrs:
                (day, month, year) = ymd(tag)
                pub_date['day'] = day
                pub_date['month'] = month
                pub_date['year'] = year
                pub_date['date'] = date_struct_nn(year, month, day)
        pub_dates.append(pub_date)
    return pub_dates
python
def pub_dates(soup):
    """
    return a list of all the pub dates
    """
    pub_dates = []
    tags = raw_parser.pub_date(soup)
    for tag in tags:
        pub_date = OrderedDict()
        copy_attribute(tag.attrs, 'publication-format', pub_date)
        copy_attribute(tag.attrs, 'date-type', pub_date)
        copy_attribute(tag.attrs, 'pub-type', pub_date)
        for tag_attr in ["date-type", "pub-type"]:
            if tag_attr in tag.attrs:
                (day, month, year) = ymd(tag)
                pub_date['day'] = day
                pub_date['month'] = month
                pub_date['year'] = year
                pub_date['date'] = date_struct_nn(year, month, day)
        pub_dates.append(pub_date)
    return pub_dates
[ "def", "pub_dates", "(", "soup", ")", ":", "pub_dates", "=", "[", "]", "tags", "=", "raw_parser", ".", "pub_date", "(", "soup", ")", "for", "tag", "in", "tags", ":", "pub_date", "=", "OrderedDict", "(", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'publication-format'", ",", "pub_date", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'date-type'", ",", "pub_date", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'pub-type'", ",", "pub_date", ")", "for", "tag_attr", "in", "[", "\"date-type\"", ",", "\"pub-type\"", "]", ":", "if", "tag_attr", "in", "tag", ".", "attrs", ":", "(", "day", ",", "month", ",", "year", ")", "=", "ymd", "(", "tag", ")", "pub_date", "[", "'day'", "]", "=", "day", "pub_date", "[", "'month'", "]", "=", "month", "pub_date", "[", "'year'", "]", "=", "year", "pub_date", "[", "'date'", "]", "=", "date_struct_nn", "(", "year", ",", "month", ",", "day", ")", "pub_dates", ".", "append", "(", "pub_date", ")", "return", "pub_dates" ]
return a list of all the pub dates
[ "return", "a", "list", "of", "all", "the", "pub", "dates" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L368-L387
elifesciences/elife-tools
elifetools/parseJATS.py
history_date
def history_date(soup, date_type = None):
    """
    Find a date in the history tag for the specific date_type
    typical date_type values: received, accepted
    """
    if(date_type == None):
        return None

    history_date = raw_parser.history_date(soup, date_type)
    if history_date is None:
        return None
    (day, month, year) = ymd(history_date)
    return date_struct(year, month, day)
python
def history_date(soup, date_type = None):
    """
    Find a date in the history tag for the specific date_type
    typical date_type values: received, accepted
    """
    if(date_type == None):
        return None

    history_date = raw_parser.history_date(soup, date_type)
    if history_date is None:
        return None
    (day, month, year) = ymd(history_date)
    return date_struct(year, month, day)
[ "def", "history_date", "(", "soup", ",", "date_type", "=", "None", ")", ":", "if", "(", "date_type", "==", "None", ")", ":", "return", "None", "history_date", "=", "raw_parser", ".", "history_date", "(", "soup", ",", "date_type", ")", "if", "history_date", "is", "None", ":", "return", "None", "(", "day", ",", "month", ",", "year", ")", "=", "ymd", "(", "history_date", ")", "return", "date_struct", "(", "year", ",", "month", ",", "day", ")" ]
Find a date in the history tag for the specific date_type typical date_type values: received, accepted
[ "Find", "a", "date", "in", "the", "history", "tag", "for", "the", "specific", "date_type", "typical", "date_type", "values", ":", "received", "accepted" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L389-L401
elifesciences/elife-tools
elifetools/parseJATS.py
collection_year
def collection_year(soup):
    """
    Pub date of type collection will hold a year element for VOR articles
    """
    pub_date = first(raw_parser.pub_date(soup, pub_type="collection"))
    if not pub_date:
        pub_date = first(raw_parser.pub_date(soup, date_type="collection"))
    if not pub_date:
        return None

    year = None
    year_tag = raw_parser.year(pub_date)
    if year_tag:
        year = int(node_text(year_tag))

    return year
python
def collection_year(soup):
    """
    Pub date of type collection will hold a year element for VOR articles
    """
    pub_date = first(raw_parser.pub_date(soup, pub_type="collection"))
    if not pub_date:
        pub_date = first(raw_parser.pub_date(soup, date_type="collection"))
    if not pub_date:
        return None

    year = None
    year_tag = raw_parser.year(pub_date)
    if year_tag:
        year = int(node_text(year_tag))

    return year
[ "def", "collection_year", "(", "soup", ")", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "pub_type", "=", "\"collection\"", ")", ")", "if", "not", "pub_date", ":", "pub_date", "=", "first", "(", "raw_parser", ".", "pub_date", "(", "soup", ",", "date_type", "=", "\"collection\"", ")", ")", "if", "not", "pub_date", ":", "return", "None", "year", "=", "None", "year_tag", "=", "raw_parser", ".", "year", "(", "pub_date", ")", "if", "year_tag", ":", "year", "=", "int", "(", "node_text", "(", "year_tag", ")", ")", "return", "year" ]
Pub date of type collection will hold a year element for VOR articles
[ "Pub", "date", "of", "type", "collection", "will", "hold", "a", "year", "element", "for", "VOR", "articles" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L493-L508
elifesciences/elife-tools
elifetools/parseJATS.py
abstracts
def abstracts(soup):
    """
    Find the article abstract and format it
    """
    abstracts = []

    abstract_tags = raw_parser.abstract(soup)

    for tag in abstract_tags:
        abstract = {}

        abstract["abstract_type"] = tag.get("abstract-type")
        title_tag = raw_parser.title(tag)
        if title_tag:
            abstract["title"] = node_text(title_tag)
        abstract["content"] = None
        if raw_parser.paragraph(tag):
            abstract["content"] = ""
            abstract["full_content"] = ""

            good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag))

            # Plain text content
            glue = ""
            for p_tag in good_paragraphs:
                abstract["content"] += glue + node_text(p_tag)
                glue = " "

            # Content including markup tags
            # When more than one paragraph, wrap each in a <p> tag
            for p_tag in good_paragraphs:
                abstract["full_content"] += '<p>' + node_contents_str(p_tag) + '</p>'

        abstracts.append(abstract)

    return abstracts
python
def abstracts(soup):
    """
    Find the article abstract and format it
    """
    abstracts = []

    abstract_tags = raw_parser.abstract(soup)

    for tag in abstract_tags:
        abstract = {}

        abstract["abstract_type"] = tag.get("abstract-type")
        title_tag = raw_parser.title(tag)
        if title_tag:
            abstract["title"] = node_text(title_tag)
        abstract["content"] = None
        if raw_parser.paragraph(tag):
            abstract["content"] = ""
            abstract["full_content"] = ""

            good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag))

            # Plain text content
            glue = ""
            for p_tag in good_paragraphs:
                abstract["content"] += glue + node_text(p_tag)
                glue = " "

            # Content including markup tags
            # When more than one paragraph, wrap each in a <p> tag
            for p_tag in good_paragraphs:
                abstract["full_content"] += '<p>' + node_contents_str(p_tag) + '</p>'

        abstracts.append(abstract)

    return abstracts
[ "def", "abstracts", "(", "soup", ")", ":", "abstracts", "=", "[", "]", "abstract_tags", "=", "raw_parser", ".", "abstract", "(", "soup", ")", "for", "tag", "in", "abstract_tags", ":", "abstract", "=", "{", "}", "abstract", "[", "\"abstract_type\"", "]", "=", "tag", ".", "get", "(", "\"abstract-type\"", ")", "title_tag", "=", "raw_parser", ".", "title", "(", "tag", ")", "if", "title_tag", ":", "abstract", "[", "\"title\"", "]", "=", "node_text", "(", "title_tag", ")", "abstract", "[", "\"content\"", "]", "=", "None", "if", "raw_parser", ".", "paragraph", "(", "tag", ")", ":", "abstract", "[", "\"content\"", "]", "=", "\"\"", "abstract", "[", "\"full_content\"", "]", "=", "\"\"", "good_paragraphs", "=", "remove_doi_paragraph", "(", "raw_parser", ".", "paragraph", "(", "tag", ")", ")", "# Plain text content", "glue", "=", "\"\"", "for", "p_tag", "in", "good_paragraphs", ":", "abstract", "[", "\"content\"", "]", "+=", "glue", "+", "node_text", "(", "p_tag", ")", "glue", "=", "\" \"", "# Content including markup tags", "# When more than one paragraph, wrap each in a <p> tag", "for", "p_tag", "in", "good_paragraphs", ":", "abstract", "[", "\"full_content\"", "]", "+=", "'<p>'", "+", "node_contents_str", "(", "p_tag", ")", "+", "'</p>'", "abstracts", ".", "append", "(", "abstract", ")", "return", "abstracts" ]
Find the article abstract and format it
[ "Find", "the", "article", "abstract", "and", "format", "it" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L520-L557
elifesciences/elife-tools
elifetools/parseJATS.py
component_doi
def component_doi(soup):
    """
    Look for all object-id of pub-type-id = doi, these are the component DOI tags
    """
    component_doi = []

    object_id_tags = raw_parser.object_id(soup, pub_id_type = "doi")

    # Get components too for later
    component_list = components(soup)

    position = 1

    for tag in object_id_tags:
        component_object = {}
        component_object["doi"] = doi_uri_to_doi(tag.text)
        component_object["position"] = position

        # Try to find the type of component
        for component in component_list:
            if "doi" in component and component["doi"] == component_object["doi"]:
                component_object["type"] = component["type"]

        component_doi.append(component_object)

        position = position + 1

    return component_doi
python
def component_doi(soup):
    """
    Look for all object-id of pub-type-id = doi, these are the component DOI tags
    """
    component_doi = []

    object_id_tags = raw_parser.object_id(soup, pub_id_type = "doi")

    # Get components too for later
    component_list = components(soup)

    position = 1

    for tag in object_id_tags:
        component_object = {}
        component_object["doi"] = doi_uri_to_doi(tag.text)
        component_object["position"] = position

        # Try to find the type of component
        for component in component_list:
            if "doi" in component and component["doi"] == component_object["doi"]:
                component_object["type"] = component["type"]

        component_doi.append(component_object)

        position = position + 1

    return component_doi
[ "def", "component_doi", "(", "soup", ")", ":", "component_doi", "=", "[", "]", "object_id_tags", "=", "raw_parser", ".", "object_id", "(", "soup", ",", "pub_id_type", "=", "\"doi\"", ")", "# Get components too for later", "component_list", "=", "components", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "object_id_tags", ":", "component_object", "=", "{", "}", "component_object", "[", "\"doi\"", "]", "=", "doi_uri_to_doi", "(", "tag", ".", "text", ")", "component_object", "[", "\"position\"", "]", "=", "position", "# Try to find the type of component", "for", "component", "in", "component_list", ":", "if", "\"doi\"", "in", "component", "and", "component", "[", "\"doi\"", "]", "==", "component_object", "[", "\"doi\"", "]", ":", "component_object", "[", "\"type\"", "]", "=", "component", "[", "\"type\"", "]", "component_doi", ".", "append", "(", "component_object", ")", "position", "=", "position", "+", "1", "return", "component_doi" ]
Look for all object-id of pub-type-id = doi, these are the component DOI tags
[ "Look", "for", "all", "object", "-", "id", "of", "pub", "-", "type", "-", "id", "=", "doi", "these", "are", "the", "component", "DOI", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L648-L675
elifesciences/elife-tools
elifetools/parseJATS.py
tag_details
def tag_details(tag, nodenames):
    """
    Used in media and graphics to extract data from their parent tags
    """
    details = {}

    details['type'] = tag.name
    details['ordinal'] = tag_ordinal(tag)

    # Ordinal value
    if tag_details_sibling_ordinal(tag):
        details['sibling_ordinal'] = tag_details_sibling_ordinal(tag)

    # Asset name
    if tag_details_asset(tag):
        details['asset'] = tag_details_asset(tag)

    object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi"))
    if object_id_tag:
        details['component_doi'] = extract_component_doi(tag, nodenames)

    return details
python
def tag_details(tag, nodenames):
    """
    Used in media and graphics to extract data from their parent tags
    """
    details = {}

    details['type'] = tag.name
    details['ordinal'] = tag_ordinal(tag)

    # Ordinal value
    if tag_details_sibling_ordinal(tag):
        details['sibling_ordinal'] = tag_details_sibling_ordinal(tag)

    # Asset name
    if tag_details_asset(tag):
        details['asset'] = tag_details_asset(tag)

    object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi"))
    if object_id_tag:
        details['component_doi'] = extract_component_doi(tag, nodenames)

    return details
[ "def", "tag_details", "(", "tag", ",", "nodenames", ")", ":", "details", "=", "{", "}", "details", "[", "'type'", "]", "=", "tag", ".", "name", "details", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "# Ordinal value", "if", "tag_details_sibling_ordinal", "(", "tag", ")", ":", "details", "[", "'sibling_ordinal'", "]", "=", "tag_details_sibling_ordinal", "(", "tag", ")", "# Asset name", "if", "tag_details_asset", "(", "tag", ")", ":", "details", "[", "'asset'", "]", "=", "tag_details_asset", "(", "tag", ")", "object_id_tag", "=", "first", "(", "raw_parser", ".", "object_id", "(", "tag", ",", "pub_id_type", "=", "\"doi\"", ")", ")", "if", "object_id_tag", ":", "details", "[", "'component_doi'", "]", "=", "extract_component_doi", "(", "tag", ",", "nodenames", ")", "return", "details" ]
Used in media and graphics to extract data from their parent tags
[ "Used", "in", "media", "and", "graphics", "to", "extract", "data", "from", "their", "parent", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L726-L747
elifesciences/elife-tools
elifetools/parseJATS.py
media
def media(soup):
    """
    All media tags and some associated data about the related component doi
    and the parent of that doi (not always present)
    """
    media = []

    media_tags = raw_parser.media(soup)

    position = 1

    for tag in media_tags:
        media_item = {}

        copy_attribute(tag.attrs, 'mime-subtype', media_item)
        copy_attribute(tag.attrs, 'mimetype', media_item)
        copy_attribute(tag.attrs, 'xlink:href', media_item, 'xlink_href')
        copy_attribute(tag.attrs, 'content-type', media_item)

        nodenames = ["sub-article", "media", "fig-group", "fig", "supplementary-material"]

        details = tag_details(tag, nodenames)
        copy_attribute(details, 'component_doi', media_item)
        copy_attribute(details, 'type', media_item)
        copy_attribute(details, 'sibling_ordinal', media_item)

        # Try to get the component DOI of the parent tag
        parent_tag = first_parent(tag, nodenames)
        if parent_tag:
            acting_parent_tag = component_acting_parent_tag(parent_tag, tag)
            if acting_parent_tag:
                details = tag_details(acting_parent_tag, nodenames)
                copy_attribute(details, 'type', media_item, 'parent_type')
                copy_attribute(details, 'ordinal', media_item, 'parent_ordinal')
                copy_attribute(details, 'asset', media_item, 'parent_asset')
                copy_attribute(details, 'sibling_ordinal', media_item, 'parent_sibling_ordinal')
                copy_attribute(details, 'component_doi', media_item, 'parent_component_doi')

            # Try to get the parent parent
            p_parent_tag = first_parent(parent_tag, nodenames)
            if p_parent_tag:
                acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag)
                if acting_p_parent_tag:
                    details = tag_details(acting_p_parent_tag, nodenames)
                    copy_attribute(details, 'type', media_item, 'p_parent_type')
                    copy_attribute(details, 'ordinal', media_item, 'p_parent_ordinal')
                    copy_attribute(details, 'asset', media_item, 'p_parent_asset')
                    copy_attribute(details, 'sibling_ordinal', media_item, 'p_parent_sibling_ordinal')
                    copy_attribute(details, 'component_doi', media_item, 'p_parent_component_doi')

                # Try to get the parent parent parent
                p_p_parent_tag = first_parent(p_parent_tag, nodenames)
                if p_p_parent_tag:
                    acting_p_p_parent_tag = component_acting_parent_tag(p_p_parent_tag, p_parent_tag)
                    if acting_p_p_parent_tag:
                        details = tag_details(acting_p_p_parent_tag, nodenames)
                        copy_attribute(details, 'type', media_item, 'p_p_parent_type')
                        copy_attribute(details, 'ordinal', media_item, 'p_p_parent_ordinal')
                        copy_attribute(details, 'asset', media_item, 'p_p_parent_asset')
                        copy_attribute(details, 'sibling_ordinal', media_item, 'p_p_parent_sibling_ordinal')
                        copy_attribute(details, 'component_doi', media_item, 'p_p_parent_component_doi')

        # Increment the position
        media_item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        media_item['ordinal'] = tag_ordinal(tag)

        media.append(media_item)

        position += 1

    return media
python
def media(soup):
    """
    All media tags and some associated data about the related component doi
    and the parent of that doi (not always present)
    """
    media = []

    media_tags = raw_parser.media(soup)

    position = 1

    for tag in media_tags:
        media_item = {}

        copy_attribute(tag.attrs, 'mime-subtype', media_item)
        copy_attribute(tag.attrs, 'mimetype', media_item)
        copy_attribute(tag.attrs, 'xlink:href', media_item, 'xlink_href')
        copy_attribute(tag.attrs, 'content-type', media_item)

        nodenames = ["sub-article", "media", "fig-group", "fig", "supplementary-material"]

        details = tag_details(tag, nodenames)
        copy_attribute(details, 'component_doi', media_item)
        copy_attribute(details, 'type', media_item)
        copy_attribute(details, 'sibling_ordinal', media_item)

        # Try to get the component DOI of the parent tag
        parent_tag = first_parent(tag, nodenames)
        if parent_tag:
            acting_parent_tag = component_acting_parent_tag(parent_tag, tag)
            if acting_parent_tag:
                details = tag_details(acting_parent_tag, nodenames)
                copy_attribute(details, 'type', media_item, 'parent_type')
                copy_attribute(details, 'ordinal', media_item, 'parent_ordinal')
                copy_attribute(details, 'asset', media_item, 'parent_asset')
                copy_attribute(details, 'sibling_ordinal', media_item, 'parent_sibling_ordinal')
                copy_attribute(details, 'component_doi', media_item, 'parent_component_doi')

            # Try to get the parent parent
            p_parent_tag = first_parent(parent_tag, nodenames)
            if p_parent_tag:
                acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag)
                if acting_p_parent_tag:
                    details = tag_details(acting_p_parent_tag, nodenames)
                    copy_attribute(details, 'type', media_item, 'p_parent_type')
                    copy_attribute(details, 'ordinal', media_item, 'p_parent_ordinal')
                    copy_attribute(details, 'asset', media_item, 'p_parent_asset')
                    copy_attribute(details, 'sibling_ordinal', media_item, 'p_parent_sibling_ordinal')
                    copy_attribute(details, 'component_doi', media_item, 'p_parent_component_doi')

                # Try to get the parent parent parent
                p_p_parent_tag = first_parent(p_parent_tag, nodenames)
                if p_p_parent_tag:
                    acting_p_p_parent_tag = component_acting_parent_tag(p_p_parent_tag, p_parent_tag)
                    if acting_p_p_parent_tag:
                        details = tag_details(acting_p_p_parent_tag, nodenames)
                        copy_attribute(details, 'type', media_item, 'p_p_parent_type')
                        copy_attribute(details, 'ordinal', media_item, 'p_p_parent_ordinal')
                        copy_attribute(details, 'asset', media_item, 'p_p_parent_asset')
                        copy_attribute(details, 'sibling_ordinal', media_item, 'p_p_parent_sibling_ordinal')
                        copy_attribute(details, 'component_doi', media_item, 'p_p_parent_component_doi')

        # Increment the position
        media_item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        media_item['ordinal'] = tag_ordinal(tag)

        media.append(media_item)

        position += 1

    return media
[ "def", "media", "(", "soup", ")", ":", "media", "=", "[", "]", "media_tags", "=", "raw_parser", ".", "media", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "media_tags", ":", "media_item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'mime-subtype'", ",", "media_item", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'mimetype'", ",", "media_item", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "media_item", ",", "'xlink_href'", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'content-type'", ",", "media_item", ")", "nodenames", "=", "[", "\"sub-article\"", ",", "\"media\"", ",", "\"fig-group\"", ",", "\"fig\"", ",", "\"supplementary-material\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "media_item", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "media_item", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "media_item", ")", "# Try to get the component DOI of the parent tag", "parent_tag", "=", "first_parent", "(", "tag", ",", "nodenames", ")", "if", "parent_tag", ":", "acting_parent_tag", "=", "component_acting_parent_tag", "(", "parent_tag", ",", "tag", ")", "if", "acting_parent_tag", ":", "details", "=", "tag_details", "(", "acting_parent_tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "media_item", ",", "'parent_type'", ")", "copy_attribute", "(", "details", ",", "'ordinal'", ",", "media_item", ",", "'parent_ordinal'", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "media_item", ",", "'parent_asset'", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "media_item", ",", "'parent_sibling_ordinal'", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "media_item", ",", "'parent_component_doi'", ")", "# Try to get the parent parent", "p_parent_tag", "=", "first_parent", "(", "parent_tag", ",", "nodenames", ")", "if", "p_parent_tag", ":", "acting_p_parent_tag", "=", "component_acting_parent_tag", "(", "p_parent_tag", ",", "parent_tag", ")", "if", "acting_p_parent_tag", ":", "details", "=", "tag_details", "(", "acting_p_parent_tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "media_item", ",", "'p_parent_type'", ")", "copy_attribute", "(", "details", ",", "'ordinal'", ",", "media_item", ",", "'p_parent_ordinal'", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "media_item", ",", "'p_parent_asset'", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "media_item", ",", "'p_parent_sibling_ordinal'", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "media_item", ",", "'p_parent_component_doi'", ")", "# Try to get the parent parent parent", "p_p_parent_tag", "=", "first_parent", "(", "p_parent_tag", ",", "nodenames", ")", "if", "p_p_parent_tag", ":", "acting_p_p_parent_tag", "=", "component_acting_parent_tag", "(", "p_p_parent_tag", ",", "p_parent_tag", ")", "if", "acting_p_p_parent_tag", ":", "details", "=", "tag_details", "(", "acting_p_p_parent_tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "media_item", ",", "'p_p_parent_type'", ")", "copy_attribute", "(", "details", ",", "'ordinal'", ",", "media_item", ",", "'p_p_parent_ordinal'", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "media_item", ",", "'p_p_parent_asset'", ")", "copy_attribute", "(", "details", ",", 
"'sibling_ordinal'", ",", "media_item", ",", "'p_p_parent_sibling_ordinal'", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "media_item", ",", "'p_p_parent_component_doi'", ")", "# Increment the position", "media_item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "media_item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "media", ".", "append", "(", "media_item", ")", "position", "+=", "1", "return", "media" ]
All media tags and some associated data about the related component doi and the parent of that doi (not always present)
[ "All", "media", "tags", "and", "some", "associated", "data", "about", "the", "related", "component", "doi", "and", "the", "parent", "of", "that", "doi", "(", "not", "always", "present", ")" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L750-L821
elifesciences/elife-tools
elifetools/parseJATS.py
graphics
def graphics(soup):
    """
    All graphic tags and some associated data about the related component doi
    and the parent of that doi (not always present), and whether it is
    part of a figure supplement
    """
    graphics = []

    graphic_tags = raw_parser.graphic(soup)

    position = 1

    for tag in graphic_tags:
        graphic_item = {}

        copy_attribute(tag.attrs, 'xlink:href', graphic_item, 'xlink_href')

        # Get the tag type
        nodenames = ["sub-article", "fig-group", "fig", "app"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', graphic_item)

        parent_tag = first_parent(tag, nodenames)
        if parent_tag:
            details = tag_details(parent_tag, nodenames)
            copy_attribute(details, 'type', graphic_item, 'parent_type')
            copy_attribute(details, 'ordinal', graphic_item, 'parent_ordinal')
            copy_attribute(details, 'asset', graphic_item, 'parent_asset')
            copy_attribute(details, 'sibling_ordinal', graphic_item, 'parent_sibling_ordinal')
            copy_attribute(details, 'component_doi', graphic_item, 'parent_component_doi')

            # Try to get the parent parent - special for looking at fig tags
            # use component_acting_parent_tag
            p_parent_tag = first_parent(parent_tag, nodenames)
            if p_parent_tag:
                acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag)
                if acting_p_parent_tag:
                    details = tag_details(acting_p_parent_tag, nodenames)
                    copy_attribute(details, 'type', graphic_item, 'p_parent_type')
                    copy_attribute(details, 'ordinal', graphic_item, 'p_parent_ordinal')
                    copy_attribute(details, 'asset', graphic_item, 'p_parent_asset')
                    copy_attribute(details, 'sibling_ordinal', graphic_item, 'p_parent_sibling_ordinal')
                    copy_attribute(details, 'component_doi', graphic_item, 'p_parent_component_doi')

        # Increment the position
        graphic_item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        graphic_item['ordinal'] = tag_ordinal(tag)

        graphics.append(graphic_item)

        position += 1

    return graphics
python
def graphics(soup):
    """
    All graphic tags and some associated data about the related component doi
    and the parent of that doi (not always present), and whether it is
    part of a figure supplement
    """
    graphics = []

    graphic_tags = raw_parser.graphic(soup)

    position = 1

    for tag in graphic_tags:
        graphic_item = {}

        copy_attribute(tag.attrs, 'xlink:href', graphic_item, 'xlink_href')

        # Get the tag type
        nodenames = ["sub-article", "fig-group", "fig", "app"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', graphic_item)

        parent_tag = first_parent(tag, nodenames)
        if parent_tag:
            details = tag_details(parent_tag, nodenames)
            copy_attribute(details, 'type', graphic_item, 'parent_type')
            copy_attribute(details, 'ordinal', graphic_item, 'parent_ordinal')
            copy_attribute(details, 'asset', graphic_item, 'parent_asset')
            copy_attribute(details, 'sibling_ordinal', graphic_item, 'parent_sibling_ordinal')
            copy_attribute(details, 'component_doi', graphic_item, 'parent_component_doi')

            # Try to get the parent parent - special for looking at fig tags
            # use component_acting_parent_tag
            p_parent_tag = first_parent(parent_tag, nodenames)
            if p_parent_tag:
                acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag)
                if acting_p_parent_tag:
                    details = tag_details(acting_p_parent_tag, nodenames)
                    copy_attribute(details, 'type', graphic_item, 'p_parent_type')
                    copy_attribute(details, 'ordinal', graphic_item, 'p_parent_ordinal')
                    copy_attribute(details, 'asset', graphic_item, 'p_parent_asset')
                    copy_attribute(details, 'sibling_ordinal', graphic_item, 'p_parent_sibling_ordinal')
                    copy_attribute(details, 'component_doi', graphic_item, 'p_parent_component_doi')

        # Increment the position
        graphic_item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        graphic_item['ordinal'] = tag_ordinal(tag)

        graphics.append(graphic_item)

        position += 1

    return graphics
[ "def", "graphics", "(", "soup", ")", ":", "graphics", "=", "[", "]", "graphic_tags", "=", "raw_parser", ".", "graphic", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "graphic_tags", ":", "graphic_item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "graphic_item", ",", "'xlink_href'", ")", "# Get the tag type", "nodenames", "=", "[", "\"sub-article\"", ",", "\"fig-group\"", ",", "\"fig\"", ",", "\"app\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "graphic_item", ")", "parent_tag", "=", "first_parent", "(", "tag", ",", "nodenames", ")", "if", "parent_tag", ":", "details", "=", "tag_details", "(", "parent_tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "graphic_item", ",", "'parent_type'", ")", "copy_attribute", "(", "details", ",", "'ordinal'", ",", "graphic_item", ",", "'parent_ordinal'", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "graphic_item", ",", "'parent_asset'", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "graphic_item", ",", "'parent_sibling_ordinal'", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "graphic_item", ",", "'parent_component_doi'", ")", "# Try to get the parent parent - special for looking at fig tags", "# use component_acting_parent_tag", "p_parent_tag", "=", "first_parent", "(", "parent_tag", ",", "nodenames", ")", "if", "p_parent_tag", ":", "acting_p_parent_tag", "=", "component_acting_parent_tag", "(", "p_parent_tag", ",", "parent_tag", ")", "if", "acting_p_parent_tag", ":", "details", "=", "tag_details", "(", "acting_p_parent_tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "graphic_item", ",", "'p_parent_type'", ")", "copy_attribute", "(", "details", ",", "'ordinal'", ",", "graphic_item", ",", "'p_parent_ordinal'", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "graphic_item", ",", "'p_parent_asset'", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "graphic_item", ",", "'p_parent_sibling_ordinal'", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "graphic_item", ",", "'p_parent_component_doi'", ")", "# Increment the position", "graphic_item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "graphic_item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "graphics", ".", "append", "(", "graphic_item", ")", "position", "+=", "1", "return", "graphics" ]
All graphic tags and some associated data about the related component doi and the parent of that doi (not always present), and whether it is part of a figure supplement
[ "All", "graphic", "tags", "and", "some", "associated", "data", "about", "the", "related", "component", "doi", "and", "the", "parent", "of", "that", "doi", "(", "not", "always", "present", ")", "and", "whether", "it", "is", "part", "of", "a", "figure", "supplement" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L824-L877
elifesciences/elife-tools
elifetools/parseJATS.py
inline_graphics
def inline_graphics(soup):
    """
    inline-graphic tags
    """
    inline_graphics = []

    inline_graphic_tags = raw_parser.inline_graphic(soup)

    position = 1

    for tag in inline_graphic_tags:
        item = {}

        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')

        # Get the tag type
        nodenames = ["sub-article"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)

        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)

        inline_graphics.append(item)

    return inline_graphics
python
def inline_graphics(soup):
    """
    inline-graphic tags
    """
    inline_graphics = []

    inline_graphic_tags = raw_parser.inline_graphic(soup)

    position = 1

    for tag in inline_graphic_tags:
        item = {}

        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')

        # Get the tag type
        nodenames = ["sub-article"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)

        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)

        inline_graphics.append(item)

    return inline_graphics
[ "def", "inline_graphics", "(", "soup", ")", ":", "inline_graphics", "=", "[", "]", "inline_graphic_tags", "=", "raw_parser", ".", "inline_graphic", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "inline_graphic_tags", ":", "item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "item", ",", "'xlink_href'", ")", "# Get the tag type", "nodenames", "=", "[", "\"sub-article\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "item", ")", "# Increment the position", "item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "inline_graphics", ".", "append", "(", "item", ")", "return", "inline_graphics" ]
inline-graphic tags
[ "inline", "-", "graphic", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L879-L906
elifesciences/elife-tools
elifetools/parseJATS.py
self_uri
def self_uri(soup):
    """
    self-uri tags
    """
    self_uri = []

    self_uri_tags = raw_parser.self_uri(soup)

    position = 1

    for tag in self_uri_tags:
        item = {}

        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
        copy_attribute(tag.attrs, 'content-type', item)

        # Get the tag type
        nodenames = ["sub-article"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)

        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)

        self_uri.append(item)

    return self_uri
python
def self_uri(soup):
    """
    self-uri tags
    """
    self_uri = []

    self_uri_tags = raw_parser.self_uri(soup)

    position = 1

    for tag in self_uri_tags:
        item = {}

        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
        copy_attribute(tag.attrs, 'content-type', item)

        # Get the tag type
        nodenames = ["sub-article"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)

        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)

        self_uri.append(item)

    return self_uri
[ "def", "self_uri", "(", "soup", ")", ":", "self_uri", "=", "[", "]", "self_uri_tags", "=", "raw_parser", ".", "self_uri", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "self_uri_tags", ":", "item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "item", ",", "'xlink_href'", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'content-type'", ",", "item", ")", "# Get the tag type", "nodenames", "=", "[", "\"sub-article\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "item", ")", "# Increment the position", "item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "self_uri", ".", "append", "(", "item", ")", "return", "self_uri" ]
self-uri tags
[ "self", "-", "uri", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L908-L934
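As with inline_graphics, a small sketch of self_uri on an invented fragment, assuming the same direct BeautifulSoup XML parsing:

from bs4 import BeautifulSoup
from elifetools.parseJATS import self_uri

xml = ('<article xmlns:xlink="http://www.w3.org/1999/xlink">'
       '<self-uri content-type="pdf" xlink:href="elife-00666-v1.pdf"/></article>')
soup = BeautifulSoup(xml, "lxml-xml")
item = self_uri(soup)[0]
print(item["xlink_href"], item["content-type"])  # e.g. elife-00666-v1.pdf pdf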
elifesciences/elife-tools
elifetools/parseJATS.py
supplementary_material
def supplementary_material(soup):
    """
    supplementary-material tags
    """
    supplementary_material = []
    supplementary_material_tags = raw_parser.supplementary_material(soup)
    position = 1
    for tag in supplementary_material_tags:
        item = {}
        copy_attribute(tag.attrs, 'id', item)
        # Get the tag type
        nodenames = ["supplementary-material"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)
        copy_attribute(details, 'asset', item)
        copy_attribute(details, 'component_doi', item)
        copy_attribute(details, 'sibling_ordinal', item)
        if raw_parser.label(tag):
            item['label'] = node_text(raw_parser.label(tag))
            item['full_label'] = node_contents_str(raw_parser.label(tag))
        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)
        supplementary_material.append(item)
    return supplementary_material
python
def supplementary_material(soup):
    """
    supplementary-material tags
    """
    supplementary_material = []
    supplementary_material_tags = raw_parser.supplementary_material(soup)
    position = 1
    for tag in supplementary_material_tags:
        item = {}
        copy_attribute(tag.attrs, 'id', item)
        # Get the tag type
        nodenames = ["supplementary-material"]
        details = tag_details(tag, nodenames)
        copy_attribute(details, 'type', item)
        copy_attribute(details, 'asset', item)
        copy_attribute(details, 'component_doi', item)
        copy_attribute(details, 'sibling_ordinal', item)
        if raw_parser.label(tag):
            item['label'] = node_text(raw_parser.label(tag))
            item['full_label'] = node_contents_str(raw_parser.label(tag))
        # Increment the position
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)
        supplementary_material.append(item)
    return supplementary_material
[ "def", "supplementary_material", "(", "soup", ")", ":", "supplementary_material", "=", "[", "]", "supplementary_material_tags", "=", "raw_parser", ".", "supplementary_material", "(", "soup", ")", "position", "=", "1", "for", "tag", "in", "supplementary_material_tags", ":", "item", "=", "{", "}", "copy_attribute", "(", "tag", ".", "attrs", ",", "'id'", ",", "item", ")", "# Get the tag type", "nodenames", "=", "[", "\"supplementary-material\"", "]", "details", "=", "tag_details", "(", "tag", ",", "nodenames", ")", "copy_attribute", "(", "details", ",", "'type'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'asset'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'component_doi'", ",", "item", ")", "copy_attribute", "(", "details", ",", "'sibling_ordinal'", ",", "item", ")", "if", "raw_parser", ".", "label", "(", "tag", ")", ":", "item", "[", "'label'", "]", "=", "node_text", "(", "raw_parser", ".", "label", "(", "tag", ")", ")", "item", "[", "'full_label'", "]", "=", "node_contents_str", "(", "raw_parser", ".", "label", "(", "tag", ")", ")", "# Increment the position", "item", "[", "'position'", "]", "=", "position", "# Ordinal should be the same as position in this case but set it anyway", "item", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "supplementary_material", ".", "append", "(", "item", ")", "return", "supplementary_material" ]
supplementary-material tags
[ "supplementary", "-", "material", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L936-L970
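A sketch of supplementary_material, again on an invented fragment; note that label and full_label are only set when a label tag is present:

from bs4 import BeautifulSoup
from elifetools.parseJATS import supplementary_material

xml = ('<article><supplementary-material id="SD1-data">'
       '<label>Supplementary file 1.</label></supplementary-material></article>')
soup = BeautifulSoup(xml, "lxml-xml")
item = supplementary_material(soup)[0]
print(item["id"], item["label"], item["ordinal"])  # e.g. SD1-data Supplementary file 1. 1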
elifesciences/elife-tools
elifetools/parseJATS.py
contrib_email
def contrib_email(contrib_tag):
    """
    Given a contrib tag, look for an email tag, and
    only return the value if it is not inside an aff tag
    """
    email = []
    for email_tag in extract_nodes(contrib_tag, "email"):
        if email_tag.parent.name != "aff":
            email.append(email_tag.text)
    return email if len(email) > 0 else None
python
def contrib_email(contrib_tag):
    """
    Given a contrib tag, look for an email tag, and
    only return the value if it is not inside an aff tag
    """
    email = []
    for email_tag in extract_nodes(contrib_tag, "email"):
        if email_tag.parent.name != "aff":
            email.append(email_tag.text)
    return email if len(email) > 0 else None
[ "def", "contrib_email", "(", "contrib_tag", ")", ":", "email", "=", "[", "]", "for", "email_tag", "in", "extract_nodes", "(", "contrib_tag", ",", "\"email\"", ")", ":", "if", "email_tag", ".", "parent", ".", "name", "!=", "\"aff\"", ":", "email", ".", "append", "(", "email_tag", ".", "text", ")", "return", "email", "if", "len", "(", "email", ")", ">", "0", "else", "None" ]
Given a contrib tag, look for an email tag, and only return the value if it is not inside an aff tag
[ "Given", "a", "contrib", "tag", "look", "for", "an", "email", "tag", "and", "only", "return", "the", "value", "if", "it", "is", "not", "inside", "an", "aff", "tag" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L979-L988
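A sketch showing the aff-exclusion behaviour of contrib_email on an invented contrib fragment:

from bs4 import BeautifulSoup
from elifetools.parseJATS import contrib_email

xml = ('<contrib><email>author@example.org</email>'
       '<aff><email>department@example.org</email></aff></contrib>')
contrib_tag = BeautifulSoup(xml, "lxml-xml").contrib
# the email nested inside the aff tag is excluded
print(contrib_email(contrib_tag))  # ['author@example.org']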
elifesciences/elife-tools
elifetools/parseJATS.py
contrib_phone
def contrib_phone(contrib_tag):
    """
    Given a contrib tag, look for a phone tag
    """
    phone = None
    if raw_parser.phone(contrib_tag):
        phone = first(raw_parser.phone(contrib_tag)).text
    return phone
python
def contrib_phone(contrib_tag):
    """
    Given a contrib tag, look for a phone tag
    """
    phone = None
    if raw_parser.phone(contrib_tag):
        phone = first(raw_parser.phone(contrib_tag)).text
    return phone
[ "def", "contrib_phone", "(", "contrib_tag", ")", ":", "phone", "=", "None", "if", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ":", "phone", "=", "first", "(", "raw_parser", ".", "phone", "(", "contrib_tag", ")", ")", ".", "text", "return", "phone" ]
Given a contrib tag, look for a phone tag
[ "Given", "a", "contrib", "tag", "look", "for", "an", "phone", "tag" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L990-L997
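The corresponding sketch for contrib_phone, which returns only the first phone value found (number invented for illustration):

from bs4 import BeautifulSoup
from elifetools.parseJATS import contrib_phone

contrib_tag = BeautifulSoup('<contrib><phone>+1-555-0100</phone></contrib>', "lxml-xml").contrib
print(contrib_phone(contrib_tag))  # +1-555-0100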
elifesciences/elife-tools
elifetools/parseJATS.py
contrib_inline_aff
def contrib_inline_aff(contrib_tag):
    """
    Given a contrib tag, look for an aff tag directly inside it
    """
    aff_tags = []
    for child_tag in contrib_tag:
        if child_tag and child_tag.name and child_tag.name == "aff":
            aff_tags.append(child_tag)
    return aff_tags
python
def contrib_inline_aff(contrib_tag):
    """
    Given a contrib tag, look for an aff tag directly inside it
    """
    aff_tags = []
    for child_tag in contrib_tag:
        if child_tag and child_tag.name and child_tag.name == "aff":
            aff_tags.append(child_tag)
    return aff_tags
[ "def", "contrib_inline_aff", "(", "contrib_tag", ")", ":", "aff_tags", "=", "[", "]", "for", "child_tag", "in", "contrib_tag", ":", "if", "child_tag", "and", "child_tag", ".", "name", "and", "child_tag", ".", "name", "==", "\"aff\"", ":", "aff_tags", ".", "append", "(", "child_tag", ")", "return", "aff_tags" ]
Given a contrib tag, look for an aff tag directly inside it
[ "Given", "a", "contrib", "tag", "look", "for", "an", "aff", "tag", "directly", "inside", "it" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L999-L1007
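A sketch of contrib_inline_aff; only aff tags that are direct children of the contrib tag are returned:

from bs4 import BeautifulSoup
from elifetools.parseJATS import contrib_inline_aff

xml = '<contrib><aff>University of Example</aff></contrib>'
contrib_tag = BeautifulSoup(xml, "lxml-xml").contrib
print([aff.text for aff in contrib_inline_aff(contrib_tag)])  # ['University of Example']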
elifesciences/elife-tools
elifetools/parseJATS.py
contrib_xref
def contrib_xref(contrib_tag, ref_type):
    """
    Given a contrib tag, look for an xref tag of type ref_type
    directly inside the contrib tag
    """
    aff_tags = []
    for child_tag in contrib_tag:
        if (child_tag and child_tag.name and child_tag.name == "xref"
                and child_tag.get('ref-type') and child_tag.get('ref-type') == ref_type):
            aff_tags.append(child_tag)
    return aff_tags
python
def contrib_xref(contrib_tag, ref_type):
    """
    Given a contrib tag, look for an xref tag of type ref_type
    directly inside the contrib tag
    """
    aff_tags = []
    for child_tag in contrib_tag:
        if (child_tag and child_tag.name and child_tag.name == "xref"
                and child_tag.get('ref-type') and child_tag.get('ref-type') == ref_type):
            aff_tags.append(child_tag)
    return aff_tags
[ "def", "contrib_xref", "(", "contrib_tag", ",", "ref_type", ")", ":", "aff_tags", "=", "[", "]", "for", "child_tag", "in", "contrib_tag", ":", "if", "(", "child_tag", "and", "child_tag", ".", "name", "and", "child_tag", ".", "name", "==", "\"xref\"", "and", "child_tag", ".", "get", "(", "'ref-type'", ")", "and", "child_tag", ".", "get", "(", "'ref-type'", ")", "==", "ref_type", ")", ":", "aff_tags", ".", "append", "(", "child_tag", ")", "return", "aff_tags" ]
Given a contrib tag, look for an xref tag of type ref_type directly inside the contrib tag
[ "Given", "a", "contrib", "tag", "look", "for", "an", "xref", "tag", "of", "type", "ref_type", "directly", "inside", "the", "contrib", "tag" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1009-L1018
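A sketch of contrib_xref filtering direct-child xref tags by their ref-type attribute:

from bs4 import BeautifulSoup
from elifetools.parseJATS import contrib_xref

xml = ('<contrib><xref ref-type="aff" rid="aff1"/>'
       '<xref ref-type="corresp" rid="cor1"/></contrib>')
contrib_tag = BeautifulSoup(xml, "lxml-xml").contrib
print([x.get("rid") for x in contrib_xref(contrib_tag, "aff")])  # ['aff1']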
elifesciences/elife-tools
elifetools/parseJATS.py
all_contributors
def all_contributors(soup, detail="brief"): "find all contributors not contrained to only the ones in article meta" contrib_tags = raw_parser.contributors(soup) contributors = format_authors(soup, contrib_tags, detail) return contributors
python
def all_contributors(soup, detail="brief"): "find all contributors not contrained to only the ones in article meta" contrib_tags = raw_parser.contributors(soup) contributors = format_authors(soup, contrib_tags, detail) return contributors
[ "def", "all_contributors", "(", "soup", ",", "detail", "=", "\"brief\"", ")", ":", "contrib_tags", "=", "raw_parser", ".", "contributors", "(", "soup", ")", "contributors", "=", "format_authors", "(", "soup", ",", "contrib_tags", ",", "detail", ")", "return", "contributors" ]
find all contributors not constrained to only the ones in article meta
[ "find", "all", "contributors", "not", "contrained", "to", "only", "the", "ones", "in", "article", "meta" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1205-L1209
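A usage sketch for all_contributors; the file path is hypothetical, and parse_document is assumed to be the module's own document loader:

from elifetools.parseJATS import all_contributors, parse_document

soup = parse_document("elife-00666-v1.xml")  # hypothetical local file
for person in all_contributors(soup, detail="full"):
    print(person.get("type"), person.get("surname"), person.get("collab"))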
elifesciences/elife-tools
elifetools/parseJATS.py
authors_non_byline
def authors_non_byline(soup, detail="full"): """Non-byline authors for group author members""" # Get a filtered list of contributors, in order to get their group-author-id contrib_type = "author non-byline" contributors_ = contributors(soup, detail) non_byline_authors = [author for author in contributors_ if author.get('type', None) == contrib_type] # Then renumber their position attribute position = 1 for author in non_byline_authors: author["position"] = position position = position + 1 return non_byline_authors
python
def authors_non_byline(soup, detail="full"): """Non-byline authors for group author members""" # Get a filtered list of contributors, in order to get their group-author-id contrib_type = "author non-byline" contributors_ = contributors(soup, detail) non_byline_authors = [author for author in contributors_ if author.get('type', None) == contrib_type] # Then renumber their position attribute position = 1 for author in non_byline_authors: author["position"] = position position = position + 1 return non_byline_authors
[ "def", "authors_non_byline", "(", "soup", ",", "detail", "=", "\"full\"", ")", ":", "# Get a filtered list of contributors, in order to get their group-author-id", "contrib_type", "=", "\"author non-byline\"", "contributors_", "=", "contributors", "(", "soup", ",", "detail", ")", "non_byline_authors", "=", "[", "author", "for", "author", "in", "contributors_", "if", "author", ".", "get", "(", "'type'", ",", "None", ")", "==", "contrib_type", "]", "# Then renumber their position attribute", "position", "=", "1", "for", "author", "in", "non_byline_authors", ":", "author", "[", "\"position\"", "]", "=", "position", "position", "=", "position", "+", "1", "return", "non_byline_authors" ]
Non-byline authors for group author members
[ "Non", "-", "byline", "authors", "for", "group", "author", "members" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1222-L1234
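A sketch of authors_non_byline; positions are renumbered from 1 within the filtered subset regardless of the contributors' original positions (soup and parse_document as in the previous sketch):

from elifetools.parseJATS import authors_non_byline, parse_document

soup = parse_document("elife-00666-v1.xml")  # hypothetical local file
for author in authors_non_byline(soup):
    print(author["position"], author.get("surname"))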
elifesciences/elife-tools
elifetools/parseJATS.py
refs
def refs(soup):
    """Find and return all the references"""
    tags = raw_parser.ref_list(soup)
    refs = []
    position = 1

    article_doi = doi(soup)

    for tag in tags:
        ref = {}
        ref['ref'] = ref_text(tag)

        # ref_id
        copy_attribute(tag.attrs, "id", ref)

        # article_title
        if raw_parser.article_title(tag):
            ref['article_title'] = node_text(raw_parser.article_title(tag))
            ref['full_article_title'] = node_contents_str(raw_parser.article_title(tag))

        if raw_parser.pub_id(tag, "pmid"):
            ref['pmid'] = node_contents_str(first(raw_parser.pub_id(tag, "pmid")))

        if raw_parser.pub_id(tag, "isbn"):
            ref['isbn'] = node_contents_str(first(raw_parser.pub_id(tag, "isbn")))

        if raw_parser.pub_id(tag, "doi"):
            ref['reference_id'] = node_contents_str(first(raw_parser.pub_id(tag, "doi")))
            ref['doi'] = doi_uri_to_doi(node_contents_str(first(raw_parser.pub_id(tag, "doi"))))

        uri_tag = None
        if raw_parser.ext_link(tag, "uri"):
            uri_tag = first(raw_parser.ext_link(tag, "uri"))
        elif raw_parser.uri(tag):
            uri_tag = first(raw_parser.uri(tag))
        if uri_tag:
            set_if_value(ref, "uri", uri_tag.get('xlink:href'))
            set_if_value(ref, "uri_text", node_contents_str(uri_tag))
        # look for a pub-id tag if no uri yet
        if not ref.get('uri') and raw_parser.pub_id(tag, "archive"):
            pub_id_tag = first(raw_parser.pub_id(tag, pub_id_type="archive"))
            set_if_value(ref, "uri", pub_id_tag.get('xlink:href'))

        # accession, could be in either of two tags
        set_if_value(ref, "accession", node_contents_str(first(raw_parser.object_id(tag, "art-access-id"))))
        if not ref.get('accession'):
            set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="accession"))))
        if not ref.get('accession'):
            set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="archive"))))

        if(raw_parser.year(tag)):
            set_if_value(ref, "year", node_text(raw_parser.year(tag)))
            set_if_value(ref, "year-iso-8601-date", raw_parser.year(tag).get('iso-8601-date'))

        if(raw_parser.date_in_citation(tag)):
            set_if_value(ref, "date-in-citation", node_text(first(raw_parser.date_in_citation(tag))))
            set_if_value(ref, "iso-8601-date", first(raw_parser.date_in_citation(tag)).get('iso-8601-date'))

        if(raw_parser.patent(tag)):
            set_if_value(ref, "patent", node_text(first(raw_parser.patent(tag))))
            set_if_value(ref, "country", first(raw_parser.patent(tag)).get('country'))

        set_if_value(ref, "source", node_text(first(raw_parser.source(tag))))
        set_if_value(ref, "elocation-id", node_text(first(raw_parser.elocation_id(tag))))
        if raw_parser.element_citation(tag):
            copy_attribute(first(raw_parser.element_citation(tag)).attrs, "publication-type", ref)
        if "publication-type" not in ref and raw_parser.mixed_citations(tag):
            copy_attribute(first(raw_parser.mixed_citations(tag)).attrs, "publication-type", ref)

        # authors
        person_group = raw_parser.person_group(tag)
        authors = []

        for group in person_group:
            author_type = None
            if "person-group-type" in group.attrs:
                author_type = group["person-group-type"]

            # Read name or collab tag in the order they are listed
            for name_or_collab_tag in extract_nodes(group, ["name", "string-name", "collab"]):
                author = {}

                # Shared tag attribute
                set_if_value(author, "group-type", author_type)

                # name tag attributes
                if name_or_collab_tag.name in ["name", "string-name"]:
                    set_if_value(author, "surname", node_text(first(raw_parser.surname(name_or_collab_tag))))
                    set_if_value(author, "given-names", node_text(first(raw_parser.given_names(name_or_collab_tag))))
                    set_if_value(author, "suffix", node_text(first(raw_parser.suffix(name_or_collab_tag))))

                # collab tag attribute
                if name_or_collab_tag.name == "collab":
                    set_if_value(author, "collab", node_contents_str(name_or_collab_tag))

                if len(author) > 0:
                    authors.append(author)

            # etal for the person group
            if first(raw_parser.etal(group)):
                author = {}
                author['etal'] = True
                set_if_value(author, "group-type", author_type)
                authors.append(author)

        # Check for collab tag not wrapped in a person-group for backwards compatibility
        if len(person_group) == 0:
            collab_tags = raw_parser.collab(tag)
            for collab_tag in collab_tags:
                author = {}
                set_if_value(author, "group-type", "author")
                set_if_value(author, "collab", node_contents_str(collab_tag))
                if len(author) > 0:
                    authors.append(author)

        if len(authors) > 0:
            ref['authors'] = authors

        set_if_value(ref, "volume", node_text(first(raw_parser.volume(tag))))
        set_if_value(ref, "issue", node_text(first(raw_parser.issue(tag))))
        set_if_value(ref, "fpage", node_text(first(raw_parser.fpage(tag))))
        set_if_value(ref, "lpage", node_text(first(raw_parser.lpage(tag))))
        set_if_value(ref, "collab", node_text(first(raw_parser.collab(tag))))
        set_if_value(ref, "publisher_loc", node_text(first(raw_parser.publisher_loc(tag))))
        set_if_value(ref, "publisher_name", node_text(first(raw_parser.publisher_name(tag))))
        set_if_value(ref, "edition", node_contents_str(first(raw_parser.edition(tag))))
        set_if_value(ref, "version", node_contents_str(first(raw_parser.version(tag))))
        set_if_value(ref, "chapter-title", node_contents_str(first(raw_parser.chapter_title(tag))))
        set_if_value(ref, "comment", node_text(first(raw_parser.comment(tag))))
        set_if_value(ref, "data-title", node_contents_str(first(raw_parser.data_title(tag))))
        set_if_value(ref, "conf-name", node_text(first(raw_parser.conf_name(tag))))

        # If not empty, add position value, append, then increment the position counter
        if(len(ref) > 0):
            ref['article_doi'] = article_doi
            ref['position'] = position
            refs.append(ref)
            position += 1

    return refs
python
def refs(soup):
    """Find and return all the references"""
    tags = raw_parser.ref_list(soup)
    refs = []
    position = 1

    article_doi = doi(soup)

    for tag in tags:
        ref = {}
        ref['ref'] = ref_text(tag)

        # ref_id
        copy_attribute(tag.attrs, "id", ref)

        # article_title
        if raw_parser.article_title(tag):
            ref['article_title'] = node_text(raw_parser.article_title(tag))
            ref['full_article_title'] = node_contents_str(raw_parser.article_title(tag))

        if raw_parser.pub_id(tag, "pmid"):
            ref['pmid'] = node_contents_str(first(raw_parser.pub_id(tag, "pmid")))

        if raw_parser.pub_id(tag, "isbn"):
            ref['isbn'] = node_contents_str(first(raw_parser.pub_id(tag, "isbn")))

        if raw_parser.pub_id(tag, "doi"):
            ref['reference_id'] = node_contents_str(first(raw_parser.pub_id(tag, "doi")))
            ref['doi'] = doi_uri_to_doi(node_contents_str(first(raw_parser.pub_id(tag, "doi"))))

        uri_tag = None
        if raw_parser.ext_link(tag, "uri"):
            uri_tag = first(raw_parser.ext_link(tag, "uri"))
        elif raw_parser.uri(tag):
            uri_tag = first(raw_parser.uri(tag))
        if uri_tag:
            set_if_value(ref, "uri", uri_tag.get('xlink:href'))
            set_if_value(ref, "uri_text", node_contents_str(uri_tag))
        # look for a pub-id tag if no uri yet
        if not ref.get('uri') and raw_parser.pub_id(tag, "archive"):
            pub_id_tag = first(raw_parser.pub_id(tag, pub_id_type="archive"))
            set_if_value(ref, "uri", pub_id_tag.get('xlink:href'))

        # accession, could be in either of two tags
        set_if_value(ref, "accession", node_contents_str(first(raw_parser.object_id(tag, "art-access-id"))))
        if not ref.get('accession'):
            set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="accession"))))
        if not ref.get('accession'):
            set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="archive"))))

        if(raw_parser.year(tag)):
            set_if_value(ref, "year", node_text(raw_parser.year(tag)))
            set_if_value(ref, "year-iso-8601-date", raw_parser.year(tag).get('iso-8601-date'))

        if(raw_parser.date_in_citation(tag)):
            set_if_value(ref, "date-in-citation", node_text(first(raw_parser.date_in_citation(tag))))
            set_if_value(ref, "iso-8601-date", first(raw_parser.date_in_citation(tag)).get('iso-8601-date'))

        if(raw_parser.patent(tag)):
            set_if_value(ref, "patent", node_text(first(raw_parser.patent(tag))))
            set_if_value(ref, "country", first(raw_parser.patent(tag)).get('country'))

        set_if_value(ref, "source", node_text(first(raw_parser.source(tag))))
        set_if_value(ref, "elocation-id", node_text(first(raw_parser.elocation_id(tag))))
        if raw_parser.element_citation(tag):
            copy_attribute(first(raw_parser.element_citation(tag)).attrs, "publication-type", ref)
        if "publication-type" not in ref and raw_parser.mixed_citations(tag):
            copy_attribute(first(raw_parser.mixed_citations(tag)).attrs, "publication-type", ref)

        # authors
        person_group = raw_parser.person_group(tag)
        authors = []

        for group in person_group:
            author_type = None
            if "person-group-type" in group.attrs:
                author_type = group["person-group-type"]

            # Read name or collab tag in the order they are listed
            for name_or_collab_tag in extract_nodes(group, ["name", "string-name", "collab"]):
                author = {}

                # Shared tag attribute
                set_if_value(author, "group-type", author_type)

                # name tag attributes
                if name_or_collab_tag.name in ["name", "string-name"]:
                    set_if_value(author, "surname", node_text(first(raw_parser.surname(name_or_collab_tag))))
                    set_if_value(author, "given-names", node_text(first(raw_parser.given_names(name_or_collab_tag))))
                    set_if_value(author, "suffix", node_text(first(raw_parser.suffix(name_or_collab_tag))))

                # collab tag attribute
                if name_or_collab_tag.name == "collab":
                    set_if_value(author, "collab", node_contents_str(name_or_collab_tag))

                if len(author) > 0:
                    authors.append(author)

            # etal for the person group
            if first(raw_parser.etal(group)):
                author = {}
                author['etal'] = True
                set_if_value(author, "group-type", author_type)
                authors.append(author)

        # Check for collab tag not wrapped in a person-group for backwards compatibility
        if len(person_group) == 0:
            collab_tags = raw_parser.collab(tag)
            for collab_tag in collab_tags:
                author = {}
                set_if_value(author, "group-type", "author")
                set_if_value(author, "collab", node_contents_str(collab_tag))
                if len(author) > 0:
                    authors.append(author)

        if len(authors) > 0:
            ref['authors'] = authors

        set_if_value(ref, "volume", node_text(first(raw_parser.volume(tag))))
        set_if_value(ref, "issue", node_text(first(raw_parser.issue(tag))))
        set_if_value(ref, "fpage", node_text(first(raw_parser.fpage(tag))))
        set_if_value(ref, "lpage", node_text(first(raw_parser.lpage(tag))))
        set_if_value(ref, "collab", node_text(first(raw_parser.collab(tag))))
        set_if_value(ref, "publisher_loc", node_text(first(raw_parser.publisher_loc(tag))))
        set_if_value(ref, "publisher_name", node_text(first(raw_parser.publisher_name(tag))))
        set_if_value(ref, "edition", node_contents_str(first(raw_parser.edition(tag))))
        set_if_value(ref, "version", node_contents_str(first(raw_parser.version(tag))))
        set_if_value(ref, "chapter-title", node_contents_str(first(raw_parser.chapter_title(tag))))
        set_if_value(ref, "comment", node_text(first(raw_parser.comment(tag))))
        set_if_value(ref, "data-title", node_contents_str(first(raw_parser.data_title(tag))))
        set_if_value(ref, "conf-name", node_text(first(raw_parser.conf_name(tag))))

        # If not empty, add position value, append, then increment the position counter
        if(len(ref) > 0):
            ref['article_doi'] = article_doi
            ref['position'] = position
            refs.append(ref)
            position += 1

    return refs
[ "def", "refs", "(", "soup", ")", ":", "tags", "=", "raw_parser", ".", "ref_list", "(", "soup", ")", "refs", "=", "[", "]", "position", "=", "1", "article_doi", "=", "doi", "(", "soup", ")", "for", "tag", "in", "tags", ":", "ref", "=", "{", "}", "ref", "[", "'ref'", "]", "=", "ref_text", "(", "tag", ")", "# ref_id", "copy_attribute", "(", "tag", ".", "attrs", ",", "\"id\"", ",", "ref", ")", "# article_title", "if", "raw_parser", ".", "article_title", "(", "tag", ")", ":", "ref", "[", "'article_title'", "]", "=", "node_text", "(", "raw_parser", ".", "article_title", "(", "tag", ")", ")", "ref", "[", "'full_article_title'", "]", "=", "node_contents_str", "(", "raw_parser", ".", "article_title", "(", "tag", ")", ")", "if", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"pmid\"", ")", ":", "ref", "[", "'pmid'", "]", "=", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"pmid\"", ")", ")", ")", "if", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"isbn\"", ")", ":", "ref", "[", "'isbn'", "]", "=", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"isbn\"", ")", ")", ")", "if", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"doi\"", ")", ":", "ref", "[", "'reference_id'", "]", "=", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"doi\"", ")", ")", ")", "ref", "[", "'doi'", "]", "=", "doi_uri_to_doi", "(", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"doi\"", ")", ")", ")", ")", "uri_tag", "=", "None", "if", "raw_parser", ".", "ext_link", "(", "tag", ",", "\"uri\"", ")", ":", "uri_tag", "=", "first", "(", "raw_parser", ".", "ext_link", "(", "tag", ",", "\"uri\"", ")", ")", "elif", "raw_parser", ".", "uri", "(", "tag", ")", ":", "uri_tag", "=", "first", "(", "raw_parser", ".", "uri", "(", "tag", ")", ")", "if", "uri_tag", ":", "set_if_value", "(", "ref", ",", "\"uri\"", ",", "uri_tag", ".", "get", "(", "'xlink:href'", ")", ")", "set_if_value", "(", "ref", ",", "\"uri_text\"", ",", "node_contents_str", "(", "uri_tag", ")", ")", "# look for a pub-id tag if no uri yet", "if", "not", "ref", ".", "get", "(", "'uri'", ")", "and", "raw_parser", ".", "pub_id", "(", "tag", ",", "\"archive\"", ")", ":", "pub_id_tag", "=", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "pub_id_type", "=", "\"archive\"", ")", ")", "set_if_value", "(", "ref", ",", "\"uri\"", ",", "pub_id_tag", ".", "get", "(", "'xlink:href'", ")", ")", "# accession, could be in either of two tags", "set_if_value", "(", "ref", ",", "\"accession\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "object_id", "(", "tag", ",", "\"art-access-id\"", ")", ")", ")", ")", "if", "not", "ref", ".", "get", "(", "'accession'", ")", ":", "set_if_value", "(", "ref", ",", "\"accession\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "pub_id_type", "=", "\"accession\"", ")", ")", ")", ")", "if", "not", "ref", ".", "get", "(", "'accession'", ")", ":", "set_if_value", "(", "ref", ",", "\"accession\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "pub_id", "(", "tag", ",", "pub_id_type", "=", "\"archive\"", ")", ")", ")", ")", "if", "(", "raw_parser", ".", "year", "(", "tag", ")", ")", ":", "set_if_value", "(", "ref", ",", "\"year\"", ",", "node_text", "(", "raw_parser", ".", "year", "(", "tag", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"year-iso-8601-date\"", ",", "raw_parser", ".", "year", "(", 
"tag", ")", ".", "get", "(", "'iso-8601-date'", ")", ")", "if", "(", "raw_parser", ".", "date_in_citation", "(", "tag", ")", ")", ":", "set_if_value", "(", "ref", ",", "\"date-in-citation\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "date_in_citation", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"iso-8601-date\"", ",", "first", "(", "raw_parser", ".", "date_in_citation", "(", "tag", ")", ")", ".", "get", "(", "'iso-8601-date'", ")", ")", "if", "(", "raw_parser", ".", "patent", "(", "tag", ")", ")", ":", "set_if_value", "(", "ref", ",", "\"patent\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "patent", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"country\"", ",", "first", "(", "raw_parser", ".", "patent", "(", "tag", ")", ")", ".", "get", "(", "'country'", ")", ")", "set_if_value", "(", "ref", ",", "\"source\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "source", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"elocation-id\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "elocation_id", "(", "tag", ")", ")", ")", ")", "if", "raw_parser", ".", "element_citation", "(", "tag", ")", ":", "copy_attribute", "(", "first", "(", "raw_parser", ".", "element_citation", "(", "tag", ")", ")", ".", "attrs", ",", "\"publication-type\"", ",", "ref", ")", "if", "\"publication-type\"", "not", "in", "ref", "and", "raw_parser", ".", "mixed_citations", "(", "tag", ")", ":", "copy_attribute", "(", "first", "(", "raw_parser", ".", "mixed_citations", "(", "tag", ")", ")", ".", "attrs", ",", "\"publication-type\"", ",", "ref", ")", "# authors", "person_group", "=", "raw_parser", ".", "person_group", "(", "tag", ")", "authors", "=", "[", "]", "for", "group", "in", "person_group", ":", "author_type", "=", "None", "if", "\"person-group-type\"", "in", "group", ".", "attrs", ":", "author_type", "=", "group", "[", "\"person-group-type\"", "]", "# Read name or collab tag in the order they are listed", "for", "name_or_collab_tag", "in", "extract_nodes", "(", "group", ",", "[", "\"name\"", ",", "\"string-name\"", ",", "\"collab\"", "]", ")", ":", "author", "=", "{", "}", "# Shared tag attribute", "set_if_value", "(", "author", ",", "\"group-type\"", ",", "author_type", ")", "# name tag attributes", "if", "name_or_collab_tag", ".", "name", "in", "[", "\"name\"", ",", "\"string-name\"", "]", ":", "set_if_value", "(", "author", ",", "\"surname\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "surname", "(", "name_or_collab_tag", ")", ")", ")", ")", "set_if_value", "(", "author", ",", "\"given-names\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "given_names", "(", "name_or_collab_tag", ")", ")", ")", ")", "set_if_value", "(", "author", ",", "\"suffix\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "suffix", "(", "name_or_collab_tag", ")", ")", ")", ")", "# collab tag attribute", "if", "name_or_collab_tag", ".", "name", "==", "\"collab\"", ":", "set_if_value", "(", "author", ",", "\"collab\"", ",", "node_contents_str", "(", "name_or_collab_tag", ")", ")", "if", "len", "(", "author", ")", ">", "0", ":", "authors", ".", "append", "(", "author", ")", "# etal for the person group", "if", "first", "(", "raw_parser", ".", "etal", "(", "group", ")", ")", ":", "author", "=", "{", "}", "author", "[", "'etal'", "]", "=", "True", "set_if_value", "(", "author", ",", "\"group-type\"", ",", "author_type", ")", "authors", ".", "append", "(", "author", ")", "# Check for collab tag not 
wrapped in a person-group for backwards compatibility", "if", "len", "(", "person_group", ")", "==", "0", ":", "collab_tags", "=", "raw_parser", ".", "collab", "(", "tag", ")", "for", "collab_tag", "in", "collab_tags", ":", "author", "=", "{", "}", "set_if_value", "(", "author", ",", "\"group-type\"", ",", "\"author\"", ")", "set_if_value", "(", "author", ",", "\"collab\"", ",", "node_contents_str", "(", "collab_tag", ")", ")", "if", "len", "(", "author", ")", ">", "0", ":", "authors", ".", "append", "(", "author", ")", "if", "len", "(", "authors", ")", ">", "0", ":", "ref", "[", "'authors'", "]", "=", "authors", "set_if_value", "(", "ref", ",", "\"volume\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "volume", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"issue\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "issue", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"fpage\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "fpage", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"lpage\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "lpage", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"collab\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "collab", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"publisher_loc\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "publisher_loc", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"publisher_name\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "publisher_name", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"edition\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "edition", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"version\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "version", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"chapter-title\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "chapter_title", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"comment\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "comment", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"data-title\"", ",", "node_contents_str", "(", "first", "(", "raw_parser", ".", "data_title", "(", "tag", ")", ")", ")", ")", "set_if_value", "(", "ref", ",", "\"conf-name\"", ",", "node_text", "(", "first", "(", "raw_parser", ".", "conf_name", "(", "tag", ")", ")", ")", ")", "# If not empty, add position value, append, then increment the position counter", "if", "(", "len", "(", "ref", ")", ">", "0", ")", ":", "ref", "[", "'article_doi'", "]", "=", "article_doi", "ref", "[", "'position'", "]", "=", "position", "refs", ".", "append", "(", "ref", ")", "position", "+=", "1", "return", "refs" ]
Find and return all the references
[ "Find", "and", "return", "all", "the", "references" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1340-L1484
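A sketch of refs on a single invented journal citation; real JATS ref-lists carry many more fields, all picked up by the same set_if_value pattern:

from bs4 import BeautifulSoup
from elifetools.parseJATS import refs

xml = ('<article><ref-list><ref id="bib1">'
       '<element-citation publication-type="journal">'
       '<person-group person-group-type="author"><name>'
       '<surname>Harrison</surname><given-names>M</given-names>'
       '</name></person-group>'
       '<year iso-8601-date="2012">2012</year>'
       '<article-title>An invented title</article-title>'
       '<source>eLife</source></element-citation></ref></ref-list></article>')
ref = refs(BeautifulSoup(xml, "lxml-xml"))[0]
print(ref["publication-type"], ref["year"], ref["authors"][0]["surname"])  # e.g. journal 2012 Harrison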
elifesciences/elife-tools
elifetools/parseJATS.py
extract_component_doi
def extract_component_doi(tag, nodenames):
    """
    Used to get component DOI from a tag and confirm it is actually for
    that tag and it is not for one of its children in the list of nodenames
    """
    component_doi = None

    if(tag.name == "sub-article"):
        component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type="doi"))))
    else:
        object_id_tag = first(raw_parser.object_id(tag, pub_id_type="doi"))
        # Tweak: if it is media and has no object_id_tag then it is not a "component"
        if tag.name == "media" and not object_id_tag:
            component_doi = None
        else:
            # Check the object id is for this tag and not one of its children
            # This happens for example when boxed text has a child figure,
            # the boxed text does not have a DOI, the figure does have one
            if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name:
                component_doi = doi_uri_to_doi(node_text(object_id_tag))

    return component_doi
python
def extract_component_doi(tag, nodenames):
    """
    Used to get component DOI from a tag and confirm it is actually for
    that tag and it is not for one of its children in the list of nodenames
    """
    component_doi = None

    if(tag.name == "sub-article"):
        component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type="doi"))))
    else:
        object_id_tag = first(raw_parser.object_id(tag, pub_id_type="doi"))
        # Tweak: if it is media and has no object_id_tag then it is not a "component"
        if tag.name == "media" and not object_id_tag:
            component_doi = None
        else:
            # Check the object id is for this tag and not one of its children
            # This happens for example when boxed text has a child figure,
            # the boxed text does not have a DOI, the figure does have one
            if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name:
                component_doi = doi_uri_to_doi(node_text(object_id_tag))

    return component_doi
[ "def", "extract_component_doi", "(", "tag", ",", "nodenames", ")", ":", "component_doi", "=", "None", "if", "(", "tag", ".", "name", "==", "\"sub-article\"", ")", ":", "component_doi", "=", "doi_uri_to_doi", "(", "node_text", "(", "first", "(", "raw_parser", ".", "article_id", "(", "tag", ",", "pub_id_type", "=", "\"doi\"", ")", ")", ")", ")", "else", ":", "object_id_tag", "=", "first", "(", "raw_parser", ".", "object_id", "(", "tag", ",", "pub_id_type", "=", "\"doi\"", ")", ")", "# Tweak: if it is media and has no object_id_tag then it is not a \"component\"", "if", "tag", ".", "name", "==", "\"media\"", "and", "not", "object_id_tag", ":", "component_doi", "=", "None", "else", ":", "# Check the object id is for this tag and not one of its children", "# This happens for example when boxed text has a child figure,", "# the boxed text does not have a DOI, the figure does have one", "if", "object_id_tag", "and", "first_parent", "(", "object_id_tag", ",", "nodenames", ")", ".", "name", "==", "tag", ".", "name", ":", "component_doi", "=", "doi_uri_to_doi", "(", "node_text", "(", "object_id_tag", ")", ")", "return", "component_doi" ]
Used to get component DOI from a tag and confirm it is actually for that tag and it is not for one of its children in the list of nodenames
[ "Used", "to", "get", "component", "DOI", "from", "a", "tag", "and", "confirm", "it", "is", "actually", "for", "that", "tag", "and", "it", "is", "not", "for", "one", "of", "its", "children", "in", "the", "list", "of", "nodenames" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1486-L1507
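A sketch of extract_component_doi on an invented fig fragment; the object-id DOI belongs to the fig tag itself here, so it is returned, whereas a DOI nested in a child component would be skipped:

from bs4 import BeautifulSoup
from elifetools.parseJATS import extract_component_doi

xml = ('<fig id="fig1"><object-id pub-id-type="doi">'
       '10.7554/eLife.00666.002</object-id></fig>')
fig_tag = BeautifulSoup(xml, "lxml-xml").fig
print(extract_component_doi(fig_tag, ["fig", "boxed-text"]))  # 10.7554/eLife.00666.002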
elifesciences/elife-tools
elifetools/parseJATS.py
components
def components(soup):
    """
    Find the components, i.e. those parts that would be assigned
    a unique component DOI, such as figures, tables, etc.
    - position is in what order the tag appears in the entire set of nodes
    - ordinal is in what order it is for all the tags of its own type
    """
    components = []

    nodenames = ["abstract", "fig", "table-wrap", "media",
                 "chem-struct-wrap", "sub-article", "supplementary-material",
                 "boxed-text", "app"]

    # Count node order overall
    position = 1

    position_by_type = {}
    for nodename in nodenames:
        position_by_type[nodename] = 1

    article_doi = doi(soup)

    # Find all tags for all component_types, allows the order
    # in which they are found to be preserved
    component_tags = extract_nodes(soup, nodenames)

    for tag in component_tags:

        component = OrderedDict()

        # Component type is the tag's name
        ctype = tag.name

        # First find the doi if present
        component_doi = extract_component_doi(tag, nodenames)
        if component_doi is None:
            continue
        else:
            component['doi'] = doi_uri_to_doi(component_doi)
            component['doi_url'] = doi_to_doi_uri(component['doi'])

        copy_attribute(tag.attrs, 'id', component)

        if(ctype == "sub-article"):
            title_tag = raw_parser.article_title(tag)
        elif(ctype == "boxed-text"):
            title_tag = title_tag_inspected(tag, tag.name, direct_sibling_only=True)
            if not title_tag:
                title_tag = title_tag_inspected(tag, "caption", "boxed-text")
            # New kitchen sink has boxed-text inside app tags, tag the sec tag title if so
            # but do not take it if there is a caption
            if (not title_tag and tag.parent and tag.parent.name in ["sec", "app"]
                    and not caption_tag_inspected(tag, tag.name)):
                title_tag = title_tag_inspected(tag.parent, tag.parent.name, direct_sibling_only=True)
        else:
            title_tag = raw_parser.title(tag)

        if title_tag:
            component['title'] = node_text(title_tag)
            component['full_title'] = node_contents_str(title_tag)

        if ctype == "boxed-text":
            label_tag = label_tag_inspected(tag, "boxed-text")
        else:
            label_tag = raw_parser.label(tag)

        if label_tag:
            component['label'] = node_text(label_tag)
            component['full_label'] = node_contents_str(label_tag)

        if raw_parser.caption(tag):
            first_paragraph = first(paragraphs(raw_parser.caption(tag)))
            # fix a problem with the new kitchen sink of caption within caption tag
            if first_paragraph:
                nested_caption = raw_parser.caption(first_paragraph)
                if nested_caption:
                    nested_paragraphs = paragraphs(nested_caption)
                    first_paragraph = first(nested_paragraphs) or first_paragraph
            if first_paragraph and not starts_with_doi(first_paragraph):
                # Remove the supplementary tag from the paragraph if present
                if raw_parser.supplementary_material(first_paragraph):
                    first_paragraph = remove_tag_from_tag(first_paragraph, 'supplementary-material')
                if node_text(first_paragraph).strip():
                    component['caption'] = node_text(first_paragraph)
                    component['full_caption'] = node_contents_str(first_paragraph)

        if raw_parser.permissions(tag):
            component['permissions'] = []
            for permissions_tag in raw_parser.permissions(tag):
                permissions_item = {}
                if raw_parser.copyright_statement(permissions_tag):
                    permissions_item['copyright_statement'] = \
                        node_text(raw_parser.copyright_statement(permissions_tag))
                if raw_parser.copyright_year(permissions_tag):
                    permissions_item['copyright_year'] = \
                        node_text(raw_parser.copyright_year(permissions_tag))
                if raw_parser.copyright_holder(permissions_tag):
                    permissions_item['copyright_holder'] = \
                        node_text(raw_parser.copyright_holder(permissions_tag))
                if raw_parser.licence_p(permissions_tag):
                    permissions_item['license'] = \
                        node_text(first(raw_parser.licence_p(permissions_tag)))
                    permissions_item['full_license'] = \
                        node_contents_str(first(raw_parser.licence_p(permissions_tag)))
                component['permissions'].append(permissions_item)

        if raw_parser.contributors(tag):
            component['contributors'] = []
            for contributor_tag in raw_parser.contributors(tag):
                component['contributors'].append(format_contributor(contributor_tag, soup))

        # There are only some parent tags we care about for components
        # and only check two levels of parentage
        parent_nodenames = ["sub-article", "fig-group", "fig", "boxed-text", "table-wrap", "app", "media"]
        parent_tag = first_parent(tag, parent_nodenames)

        if parent_tag:

            # For fig-group we actually want the first fig of the fig-group as the parent
            acting_parent_tag = component_acting_parent_tag(parent_tag, tag)

            # Only counts if the acting parent tag has a DOI
            if (acting_parent_tag and \
                extract_component_doi(acting_parent_tag, parent_nodenames) is not None):

                component['parent_type'] = acting_parent_tag.name
                component['parent_ordinal'] = tag_ordinal(acting_parent_tag)
                component['parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag)
                component['parent_asset'] = tag_details_asset(acting_parent_tag)

            # Look for parent parent, if available
            parent_parent_tag = first_parent(parent_tag, parent_nodenames)

            if parent_parent_tag:

                acting_parent_tag = component_acting_parent_tag(parent_parent_tag, parent_tag)

                if (acting_parent_tag and \
                    extract_component_doi(acting_parent_tag, parent_nodenames) is not None):

                    component['parent_parent_type'] = acting_parent_tag.name
                    component['parent_parent_ordinal'] = tag_ordinal(acting_parent_tag)
                    component['parent_parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag)
                    component['parent_parent_asset'] = tag_details_asset(acting_parent_tag)

        content = ""
        for p_tag in extract_nodes(tag, "p"):
            if content != "":
                # Add a space before each new paragraph for now
                content = content + " "
            content = content + node_text(p_tag)

        if(content != ""):
            component['content'] = content

        # mime type
        media_tag = None
        if(ctype == "media"):
            media_tag = tag
        elif(ctype == "supplementary-material"):
            media_tag = first(raw_parser.media(tag))
        if media_tag:
            component['mimetype'] = media_tag.get("mimetype")
            component['mime-subtype'] = media_tag.get("mime-subtype")

        if(len(component) > 0):
            component['article_doi'] = article_doi
            component['type'] = ctype
            component['position'] = position
            # Ordinal is based on all tags of the same type even if they have no DOI
            component['ordinal'] = tag_ordinal(tag)
            component['sibling_ordinal'] = tag_details_sibling_ordinal(tag)
            component['asset'] = tag_details_asset(tag)
            #component['ordinal'] = position_by_type[ctype]

            components.append(component)

            position += 1
            position_by_type[ctype] += 1

    return components
python
def components(soup):
    """
    Find the components, i.e. those parts that would be assigned
    a unique component DOI, such as figures, tables, etc.
    - position is in what order the tag appears in the entire set of nodes
    - ordinal is in what order it is for all the tags of its own type
    """
    components = []

    nodenames = ["abstract", "fig", "table-wrap", "media",
                 "chem-struct-wrap", "sub-article", "supplementary-material",
                 "boxed-text", "app"]

    # Count node order overall
    position = 1

    position_by_type = {}
    for nodename in nodenames:
        position_by_type[nodename] = 1

    article_doi = doi(soup)

    # Find all tags for all component_types, allows the order
    # in which they are found to be preserved
    component_tags = extract_nodes(soup, nodenames)

    for tag in component_tags:

        component = OrderedDict()

        # Component type is the tag's name
        ctype = tag.name

        # First find the doi if present
        component_doi = extract_component_doi(tag, nodenames)
        if component_doi is None:
            continue
        else:
            component['doi'] = doi_uri_to_doi(component_doi)
            component['doi_url'] = doi_to_doi_uri(component['doi'])

        copy_attribute(tag.attrs, 'id', component)

        if(ctype == "sub-article"):
            title_tag = raw_parser.article_title(tag)
        elif(ctype == "boxed-text"):
            title_tag = title_tag_inspected(tag, tag.name, direct_sibling_only=True)
            if not title_tag:
                title_tag = title_tag_inspected(tag, "caption", "boxed-text")
            # New kitchen sink has boxed-text inside app tags, tag the sec tag title if so
            # but do not take it if there is a caption
            if (not title_tag and tag.parent and tag.parent.name in ["sec", "app"]
                    and not caption_tag_inspected(tag, tag.name)):
                title_tag = title_tag_inspected(tag.parent, tag.parent.name, direct_sibling_only=True)
        else:
            title_tag = raw_parser.title(tag)

        if title_tag:
            component['title'] = node_text(title_tag)
            component['full_title'] = node_contents_str(title_tag)

        if ctype == "boxed-text":
            label_tag = label_tag_inspected(tag, "boxed-text")
        else:
            label_tag = raw_parser.label(tag)

        if label_tag:
            component['label'] = node_text(label_tag)
            component['full_label'] = node_contents_str(label_tag)

        if raw_parser.caption(tag):
            first_paragraph = first(paragraphs(raw_parser.caption(tag)))
            # fix a problem with the new kitchen sink of caption within caption tag
            if first_paragraph:
                nested_caption = raw_parser.caption(first_paragraph)
                if nested_caption:
                    nested_paragraphs = paragraphs(nested_caption)
                    first_paragraph = first(nested_paragraphs) or first_paragraph
            if first_paragraph and not starts_with_doi(first_paragraph):
                # Remove the supplementary tag from the paragraph if present
                if raw_parser.supplementary_material(first_paragraph):
                    first_paragraph = remove_tag_from_tag(first_paragraph, 'supplementary-material')
                if node_text(first_paragraph).strip():
                    component['caption'] = node_text(first_paragraph)
                    component['full_caption'] = node_contents_str(first_paragraph)

        if raw_parser.permissions(tag):
            component['permissions'] = []
            for permissions_tag in raw_parser.permissions(tag):
                permissions_item = {}
                if raw_parser.copyright_statement(permissions_tag):
                    permissions_item['copyright_statement'] = \
                        node_text(raw_parser.copyright_statement(permissions_tag))
                if raw_parser.copyright_year(permissions_tag):
                    permissions_item['copyright_year'] = \
                        node_text(raw_parser.copyright_year(permissions_tag))
                if raw_parser.copyright_holder(permissions_tag):
                    permissions_item['copyright_holder'] = \
                        node_text(raw_parser.copyright_holder(permissions_tag))
                if raw_parser.licence_p(permissions_tag):
                    permissions_item['license'] = \
                        node_text(first(raw_parser.licence_p(permissions_tag)))
                    permissions_item['full_license'] = \
                        node_contents_str(first(raw_parser.licence_p(permissions_tag)))
                component['permissions'].append(permissions_item)

        if raw_parser.contributors(tag):
            component['contributors'] = []
            for contributor_tag in raw_parser.contributors(tag):
                component['contributors'].append(format_contributor(contributor_tag, soup))

        # There are only some parent tags we care about for components
        # and only check two levels of parentage
        parent_nodenames = ["sub-article", "fig-group", "fig", "boxed-text", "table-wrap", "app", "media"]
        parent_tag = first_parent(tag, parent_nodenames)

        if parent_tag:

            # For fig-group we actually want the first fig of the fig-group as the parent
            acting_parent_tag = component_acting_parent_tag(parent_tag, tag)

            # Only counts if the acting parent tag has a DOI
            if (acting_parent_tag and \
                extract_component_doi(acting_parent_tag, parent_nodenames) is not None):

                component['parent_type'] = acting_parent_tag.name
                component['parent_ordinal'] = tag_ordinal(acting_parent_tag)
                component['parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag)
                component['parent_asset'] = tag_details_asset(acting_parent_tag)

            # Look for parent parent, if available
            parent_parent_tag = first_parent(parent_tag, parent_nodenames)

            if parent_parent_tag:

                acting_parent_tag = component_acting_parent_tag(parent_parent_tag, parent_tag)

                if (acting_parent_tag and \
                    extract_component_doi(acting_parent_tag, parent_nodenames) is not None):

                    component['parent_parent_type'] = acting_parent_tag.name
                    component['parent_parent_ordinal'] = tag_ordinal(acting_parent_tag)
                    component['parent_parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag)
                    component['parent_parent_asset'] = tag_details_asset(acting_parent_tag)

        content = ""
        for p_tag in extract_nodes(tag, "p"):
            if content != "":
                # Add a space before each new paragraph for now
                content = content + " "
            content = content + node_text(p_tag)

        if(content != ""):
            component['content'] = content

        # mime type
        media_tag = None
        if(ctype == "media"):
            media_tag = tag
        elif(ctype == "supplementary-material"):
            media_tag = first(raw_parser.media(tag))
        if media_tag:
            component['mimetype'] = media_tag.get("mimetype")
            component['mime-subtype'] = media_tag.get("mime-subtype")

        if(len(component) > 0):
            component['article_doi'] = article_doi
            component['type'] = ctype
            component['position'] = position
            # Ordinal is based on all tags of the same type even if they have no DOI
            component['ordinal'] = tag_ordinal(tag)
            component['sibling_ordinal'] = tag_details_sibling_ordinal(tag)
            component['asset'] = tag_details_asset(tag)
            #component['ordinal'] = position_by_type[ctype]

            components.append(component)

            position += 1
            position_by_type[ctype] += 1

    return components
[ "def", "components", "(", "soup", ")", ":", "components", "=", "[", "]", "nodenames", "=", "[", "\"abstract\"", ",", "\"fig\"", ",", "\"table-wrap\"", ",", "\"media\"", ",", "\"chem-struct-wrap\"", ",", "\"sub-article\"", ",", "\"supplementary-material\"", ",", "\"boxed-text\"", ",", "\"app\"", "]", "# Count node order overall", "position", "=", "1", "position_by_type", "=", "{", "}", "for", "nodename", "in", "nodenames", ":", "position_by_type", "[", "nodename", "]", "=", "1", "article_doi", "=", "doi", "(", "soup", ")", "# Find all tags for all component_types, allows the order", "# in which they are found to be preserved", "component_tags", "=", "extract_nodes", "(", "soup", ",", "nodenames", ")", "for", "tag", "in", "component_tags", ":", "component", "=", "OrderedDict", "(", ")", "# Component type is the tag's name", "ctype", "=", "tag", ".", "name", "# First find the doi if present", "component_doi", "=", "extract_component_doi", "(", "tag", ",", "nodenames", ")", "if", "component_doi", "is", "None", ":", "continue", "else", ":", "component", "[", "'doi'", "]", "=", "doi_uri_to_doi", "(", "component_doi", ")", "component", "[", "'doi_url'", "]", "=", "doi_to_doi_uri", "(", "component", "[", "'doi'", "]", ")", "copy_attribute", "(", "tag", ".", "attrs", ",", "'id'", ",", "component", ")", "if", "(", "ctype", "==", "\"sub-article\"", ")", ":", "title_tag", "=", "raw_parser", ".", "article_title", "(", "tag", ")", "elif", "(", "ctype", "==", "\"boxed-text\"", ")", ":", "title_tag", "=", "title_tag_inspected", "(", "tag", ",", "tag", ".", "name", ",", "direct_sibling_only", "=", "True", ")", "if", "not", "title_tag", ":", "title_tag", "=", "title_tag_inspected", "(", "tag", ",", "\"caption\"", ",", "\"boxed-text\"", ")", "# New kitchen sink has boxed-text inside app tags, tag the sec tag title if so", "# but do not take it if there is a caption", "if", "(", "not", "title_tag", "and", "tag", ".", "parent", "and", "tag", ".", "parent", ".", "name", "in", "[", "\"sec\"", ",", "\"app\"", "]", "and", "not", "caption_tag_inspected", "(", "tag", ",", "tag", ".", "name", ")", ")", ":", "title_tag", "=", "title_tag_inspected", "(", "tag", ".", "parent", ",", "tag", ".", "parent", ".", "name", ",", "direct_sibling_only", "=", "True", ")", "else", ":", "title_tag", "=", "raw_parser", ".", "title", "(", "tag", ")", "if", "title_tag", ":", "component", "[", "'title'", "]", "=", "node_text", "(", "title_tag", ")", "component", "[", "'full_title'", "]", "=", "node_contents_str", "(", "title_tag", ")", "if", "ctype", "==", "\"boxed-text\"", ":", "label_tag", "=", "label_tag_inspected", "(", "tag", ",", "\"boxed-text\"", ")", "else", ":", "label_tag", "=", "raw_parser", ".", "label", "(", "tag", ")", "if", "label_tag", ":", "component", "[", "'label'", "]", "=", "node_text", "(", "label_tag", ")", "component", "[", "'full_label'", "]", "=", "node_contents_str", "(", "label_tag", ")", "if", "raw_parser", ".", "caption", "(", "tag", ")", ":", "first_paragraph", "=", "first", "(", "paragraphs", "(", "raw_parser", ".", "caption", "(", "tag", ")", ")", ")", "# fix a problem with the new kitchen sink of caption within caption tag", "if", "first_paragraph", ":", "nested_caption", "=", "raw_parser", ".", "caption", "(", "first_paragraph", ")", "if", "nested_caption", ":", "nested_paragraphs", "=", "paragraphs", "(", "nested_caption", ")", "first_paragraph", "=", "first", "(", "nested_paragraphs", ")", "or", "first_paragraph", "if", "first_paragraph", "and", "not", "starts_with_doi", "(", "first_paragraph", 
")", ":", "# Remove the supplementary tag from the paragraph if present", "if", "raw_parser", ".", "supplementary_material", "(", "first_paragraph", ")", ":", "first_paragraph", "=", "remove_tag_from_tag", "(", "first_paragraph", ",", "'supplementary-material'", ")", "if", "node_text", "(", "first_paragraph", ")", ".", "strip", "(", ")", ":", "component", "[", "'caption'", "]", "=", "node_text", "(", "first_paragraph", ")", "component", "[", "'full_caption'", "]", "=", "node_contents_str", "(", "first_paragraph", ")", "if", "raw_parser", ".", "permissions", "(", "tag", ")", ":", "component", "[", "'permissions'", "]", "=", "[", "]", "for", "permissions_tag", "in", "raw_parser", ".", "permissions", "(", "tag", ")", ":", "permissions_item", "=", "{", "}", "if", "raw_parser", ".", "copyright_statement", "(", "permissions_tag", ")", ":", "permissions_item", "[", "'copyright_statement'", "]", "=", "node_text", "(", "raw_parser", ".", "copyright_statement", "(", "permissions_tag", ")", ")", "if", "raw_parser", ".", "copyright_year", "(", "permissions_tag", ")", ":", "permissions_item", "[", "'copyright_year'", "]", "=", "node_text", "(", "raw_parser", ".", "copyright_year", "(", "permissions_tag", ")", ")", "if", "raw_parser", ".", "copyright_holder", "(", "permissions_tag", ")", ":", "permissions_item", "[", "'copyright_holder'", "]", "=", "node_text", "(", "raw_parser", ".", "copyright_holder", "(", "permissions_tag", ")", ")", "if", "raw_parser", ".", "licence_p", "(", "permissions_tag", ")", ":", "permissions_item", "[", "'license'", "]", "=", "node_text", "(", "first", "(", "raw_parser", ".", "licence_p", "(", "permissions_tag", ")", ")", ")", "permissions_item", "[", "'full_license'", "]", "=", "node_contents_str", "(", "first", "(", "raw_parser", ".", "licence_p", "(", "permissions_tag", ")", ")", ")", "component", "[", "'permissions'", "]", ".", "append", "(", "permissions_item", ")", "if", "raw_parser", ".", "contributors", "(", "tag", ")", ":", "component", "[", "'contributors'", "]", "=", "[", "]", "for", "contributor_tag", "in", "raw_parser", ".", "contributors", "(", "tag", ")", ":", "component", "[", "'contributors'", "]", ".", "append", "(", "format_contributor", "(", "contributor_tag", ",", "soup", ")", ")", "# There are only some parent tags we care about for components", "# and only check two levels of parentage", "parent_nodenames", "=", "[", "\"sub-article\"", ",", "\"fig-group\"", ",", "\"fig\"", ",", "\"boxed-text\"", ",", "\"table-wrap\"", ",", "\"app\"", ",", "\"media\"", "]", "parent_tag", "=", "first_parent", "(", "tag", ",", "parent_nodenames", ")", "if", "parent_tag", ":", "# For fig-group we actually want the first fig of the fig-group as the parent", "acting_parent_tag", "=", "component_acting_parent_tag", "(", "parent_tag", ",", "tag", ")", "# Only counts if the acting parent tag has a DOI", "if", "(", "acting_parent_tag", "and", "extract_component_doi", "(", "acting_parent_tag", ",", "parent_nodenames", ")", "is", "not", "None", ")", ":", "component", "[", "'parent_type'", "]", "=", "acting_parent_tag", ".", "name", "component", "[", "'parent_ordinal'", "]", "=", "tag_ordinal", "(", "acting_parent_tag", ")", "component", "[", "'parent_sibling_ordinal'", "]", "=", "tag_details_sibling_ordinal", "(", "acting_parent_tag", ")", "component", "[", "'parent_asset'", "]", "=", "tag_details_asset", "(", "acting_parent_tag", ")", "# Look for parent parent, if available", "parent_parent_tag", "=", "first_parent", "(", "parent_tag", ",", "parent_nodenames", ")", "if", 
"parent_parent_tag", ":", "acting_parent_tag", "=", "component_acting_parent_tag", "(", "parent_parent_tag", ",", "parent_tag", ")", "if", "(", "acting_parent_tag", "and", "extract_component_doi", "(", "acting_parent_tag", ",", "parent_nodenames", ")", "is", "not", "None", ")", ":", "component", "[", "'parent_parent_type'", "]", "=", "acting_parent_tag", ".", "name", "component", "[", "'parent_parent_ordinal'", "]", "=", "tag_ordinal", "(", "acting_parent_tag", ")", "component", "[", "'parent_parent_sibling_ordinal'", "]", "=", "tag_details_sibling_ordinal", "(", "acting_parent_tag", ")", "component", "[", "'parent_parent_asset'", "]", "=", "tag_details_asset", "(", "acting_parent_tag", ")", "content", "=", "\"\"", "for", "p_tag", "in", "extract_nodes", "(", "tag", ",", "\"p\"", ")", ":", "if", "content", "!=", "\"\"", ":", "# Add a space before each new paragraph for now", "content", "=", "content", "+", "\" \"", "content", "=", "content", "+", "node_text", "(", "p_tag", ")", "if", "(", "content", "!=", "\"\"", ")", ":", "component", "[", "'content'", "]", "=", "content", "# mime type", "media_tag", "=", "None", "if", "(", "ctype", "==", "\"media\"", ")", ":", "media_tag", "=", "tag", "elif", "(", "ctype", "==", "\"supplementary-material\"", ")", ":", "media_tag", "=", "first", "(", "raw_parser", ".", "media", "(", "tag", ")", ")", "if", "media_tag", ":", "component", "[", "'mimetype'", "]", "=", "media_tag", ".", "get", "(", "\"mimetype\"", ")", "component", "[", "'mime-subtype'", "]", "=", "media_tag", ".", "get", "(", "\"mime-subtype\"", ")", "if", "(", "len", "(", "component", ")", ">", "0", ")", ":", "component", "[", "'article_doi'", "]", "=", "article_doi", "component", "[", "'type'", "]", "=", "ctype", "component", "[", "'position'", "]", "=", "position", "# Ordinal is based on all tags of the same type even if they have no DOI", "component", "[", "'ordinal'", "]", "=", "tag_ordinal", "(", "tag", ")", "component", "[", "'sibling_ordinal'", "]", "=", "tag_details_sibling_ordinal", "(", "tag", ")", "component", "[", "'asset'", "]", "=", "tag_details_asset", "(", "tag", ")", "#component['ordinal'] = position_by_type[ctype]", "components", ".", "append", "(", "component", ")", "position", "+=", "1", "position_by_type", "[", "ctype", "]", "+=", "1", "return", "components" ]
Find the components, i.e. those parts that would be assigned a unique component DOI, such as figures, tables, etc. - position is in what order the tag appears in the entire set of nodes - ordinal is in what order it is for all the tags of its own type
[ "Find", "the", "components", "i", ".", "e", ".", "those", "parts", "that", "would", "be", "assigned", "a", "unique", "component", "DOI", "such", "as", "figures", "tables", "etc", ".", "-", "position", "is", "in", "what", "order", "the", "tag", "appears", "in", "the", "entire", "set", "of", "nodes", "-", "ordinal", "is", "in", "what", "order", "it", "is", "for", "all", "the", "tags", "of", "its", "own", "type" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1509-L1695
elifesciences/elife-tools
elifetools/parseJATS.py
correspondence
def correspondence(soup): """ Find the corresp tags included in author-notes for primary correspondence """ correspondence = [] author_notes_nodes = raw_parser.author_notes(soup) if author_notes_nodes: corresp_nodes = raw_parser.corresp(author_notes_nodes) for tag in corresp_nodes: correspondence.append(tag.text) return correspondence
python
def correspondence(soup): """ Find the corresp tags included in author-notes for primary correspondence """ correspondence = [] author_notes_nodes = raw_parser.author_notes(soup) if author_notes_nodes: corresp_nodes = raw_parser.corresp(author_notes_nodes) for tag in corresp_nodes: correspondence.append(tag.text) return correspondence
[ "def", "correspondence", "(", "soup", ")", ":", "correspondence", "=", "[", "]", "author_notes_nodes", "=", "raw_parser", ".", "author_notes", "(", "soup", ")", "if", "author_notes_nodes", ":", "corresp_nodes", "=", "raw_parser", ".", "corresp", "(", "author_notes_nodes", ")", "for", "tag", "in", "corresp_nodes", ":", "correspondence", ".", "append", "(", "tag", ".", "text", ")", "return", "correspondence" ]
Find the corresp tags included in author-notes for primary correspondence
[ "Find", "the", "corresp", "tags", "included", "in", "author", "-", "notes", "for", "primary", "correspondence" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1699-L1713
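A minimal usage sketch for the `correspondence` record above (an editor's illustration, not part of the dataset): it assumes the module's `parse_document` helper and a hypothetical local JATS file named `sample-article.xml`.

    from elifetools import parseJATS

    # Parse a JATS XML file into a BeautifulSoup object (hypothetical file)
    soup = parseJATS.parse_document("sample-article.xml")
    # Each item is the text of one <corresp> tag found in <author-notes>
    for corresp_text in parseJATS.correspondence(soup):
        print(corresp_text)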
elifesciences/elife-tools
elifetools/parseJATS.py
author_notes
def author_notes(soup): """ Find the fn tags included in author-notes """ author_notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) for tag in fn_nodes: if 'fn-type' in tag.attrs: if(tag['fn-type'] != 'present-address'): author_notes.append(node_text(tag)) return author_notes
python
def author_notes(soup): """ Find the fn tags included in author-notes """ author_notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) for tag in fn_nodes: if 'fn-type' in tag.attrs: if(tag['fn-type'] != 'present-address'): author_notes.append(node_text(tag)) return author_notes
[ "def", "author_notes", "(", "soup", ")", ":", "author_notes", "=", "[", "]", "author_notes_section", "=", "raw_parser", ".", "author_notes", "(", "soup", ")", "if", "author_notes_section", ":", "fn_nodes", "=", "raw_parser", ".", "fn", "(", "author_notes_section", ")", "for", "tag", "in", "fn_nodes", ":", "if", "'fn-type'", "in", "tag", ".", "attrs", ":", "if", "(", "tag", "[", "'fn-type'", "]", "!=", "'present-address'", ")", ":", "author_notes", ".", "append", "(", "node_text", "(", "tag", ")", ")", "return", "author_notes" ]
Find the fn tags included in author-notes
[ "Find", "the", "fn", "tags", "included", "in", "author", "-", "notes" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1740-L1754
elifesciences/elife-tools
elifetools/parseJATS.py
full_author_notes
def full_author_notes(soup, fntype_filter=None): """ Find the fn tags included in author-notes """ notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) notes = footnotes(fn_nodes, fntype_filter) return notes
python
def full_author_notes(soup, fntype_filter=None): """ Find the fn tags included in author-notes """ notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) notes = footnotes(fn_nodes, fntype_filter) return notes
[ "def", "full_author_notes", "(", "soup", ",", "fntype_filter", "=", "None", ")", ":", "notes", "=", "[", "]", "author_notes_section", "=", "raw_parser", ".", "author_notes", "(", "soup", ")", "if", "author_notes_section", ":", "fn_nodes", "=", "raw_parser", ".", "fn", "(", "author_notes_section", ")", "notes", "=", "footnotes", "(", "fn_nodes", ",", "fntype_filter", ")", "return", "notes" ]
Find the fn tags included in author-notes
[ "Find", "the", "fn", "tags", "included", "in", "author", "-", "notes" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1757-L1768
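A hedged sketch of `full_author_notes` showing the optional `fntype_filter` argument; the filter's exact shape (a container of fn-type strings) is an assumption based on how it is passed through to `footnotes()`, and the input file is hypothetical.

    from elifetools import parseJATS

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    # No filter: every <fn> inside <author-notes> is returned
    all_notes = parseJATS.full_author_notes(soup)
    # Assumed usage: keep only footnotes whose fn-type is in the container
    con_notes = parseJATS.full_author_notes(soup, fntype_filter=["con"])
    print(len(all_notes), len(con_notes))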
elifesciences/elife-tools
elifetools/parseJATS.py
competing_interests
def competing_interests(soup, fntype_filter): """ Find the fn tags included in the competing interest """ competing_interests_section = extract_nodes(soup, "fn-group", attr="content-type", value="competing-interest") if not competing_interests_section: return None fn = extract_nodes(first(competing_interests_section), "fn") interests = footnotes(fn, fntype_filter) return interests
python
def competing_interests(soup, fntype_filter): """ Find the fn tags included in the competing interest """ competing_interests_section = extract_nodes(soup, "fn-group", attr="content-type", value="competing-interest") if not competing_interests_section: return None fn = extract_nodes(first(competing_interests_section), "fn") interests = footnotes(fn, fntype_filter) return interests
[ "def", "competing_interests", "(", "soup", ",", "fntype_filter", ")", ":", "competing_interests_section", "=", "extract_nodes", "(", "soup", ",", "\"fn-group\"", ",", "attr", "=", "\"content-type\"", ",", "value", "=", "\"competing-interest\"", ")", "if", "not", "competing_interests_section", ":", "return", "None", "fn", "=", "extract_nodes", "(", "first", "(", "competing_interests_section", ")", ",", "\"fn\"", ")", "interests", "=", "footnotes", "(", "fn", ",", "fntype_filter", ")", "return", "interests" ]
Find the fn tags included in the competing interest
[ "Find", "the", "fn", "tags", "included", "in", "the", "competing", "interest" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1771-L1782
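A sketch for `competing_interests`: note the function returns None when no fn-group with content-type "competing-interest" exists, so callers should guard for that. The file name and filter value are assumptions.

    from elifetools import parseJATS

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    interests = parseJATS.competing_interests(soup, fntype_filter=None)
    if interests is None:
        print("no competing-interest fn-group in this article")
    else:
        for interest in interests:
            print(interest)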
elifesciences/elife-tools
elifetools/parseJATS.py
author_contributions
def author_contributions(soup, fntype_filter): """ Find the fn tags included in the author contributions """ author_contributions_section = extract_nodes(soup, "fn-group", attr="content-type", value="author-contribution") if not author_contributions_section: return None fn = extract_nodes(first(author_contributions_section), "fn") cons = footnotes(fn, fntype_filter) return cons
python
def author_contributions(soup, fntype_filter): """ Find the fn tags included in the author contributions """ author_contributions_section = extract_nodes(soup, "fn-group", attr="content-type", value="author-contribution") if not author_contributions_section: return None fn = extract_nodes(first(author_contributions_section), "fn") cons = footnotes(fn, fntype_filter) return cons
[ "def", "author_contributions", "(", "soup", ",", "fntype_filter", ")", ":", "author_contributions_section", "=", "extract_nodes", "(", "soup", ",", "\"fn-group\"", ",", "attr", "=", "\"content-type\"", ",", "value", "=", "\"author-contribution\"", ")", "if", "not", "author_contributions_section", ":", "return", "None", "fn", "=", "extract_nodes", "(", "first", "(", "author_contributions_section", ")", ",", "\"fn\"", ")", "cons", "=", "footnotes", "(", "fn", ",", "fntype_filter", ")", "return", "cons" ]
Find the fn tags included in the author contributions
[ "Find", "the", "fn", "tags", "included", "in", "the", "competing", "interest" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1806-L1817
elifesciences/elife-tools
elifetools/parseJATS.py
full_award_groups
def full_award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") # counter for auto generated id values, if required generated_id_counter = 1 for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: if 'id' in ag.attrs: ref = ag['id'] else: # hack: generate and increment an id value if none is available ref = "award-group-{id}".format(id=generated_id_counter) generated_id_counter += 1 award_group = {} award_group_id = award_group_award_id(ag) if award_group_id is not None: award_group['award-id'] = first(award_group_id) funding_sources = full_award_group_funding_source(ag) source = first(funding_sources) if source is not None: copy_attribute(source, 'institution', award_group) copy_attribute(source, 'institution-id', award_group, 'id') copy_attribute(source, 'institution-id-type', award_group, destination_key='id-type') award_group_by_ref = {} award_group_by_ref[ref] = award_group award_groups.append(award_group_by_ref) return award_groups
python
def full_award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") # counter for auto generated id values, if required generated_id_counter = 1 for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: if 'id' in ag.attrs: ref = ag['id'] else: # hack: generate and increment an id value if none is available ref = "award-group-{id}".format(id=generated_id_counter) generated_id_counter += 1 award_group = {} award_group_id = award_group_award_id(ag) if award_group_id is not None: award_group['award-id'] = first(award_group_id) funding_sources = full_award_group_funding_source(ag) source = first(funding_sources) if source is not None: copy_attribute(source, 'institution', award_group) copy_attribute(source, 'institution-id', award_group, 'id') copy_attribute(source, 'institution-id-type', award_group, destination_key='id-type') award_group_by_ref = {} award_group_by_ref[ref] = award_group award_groups.append(award_group_by_ref) return award_groups
[ "def", "full_award_groups", "(", "soup", ")", ":", "award_groups", "=", "[", "]", "funding_group_section", "=", "extract_nodes", "(", "soup", ",", "\"funding-group\"", ")", "# counter for auto generated id values, if required", "generated_id_counter", "=", "1", "for", "fg", "in", "funding_group_section", ":", "award_group_tags", "=", "extract_nodes", "(", "fg", ",", "\"award-group\"", ")", "for", "ag", "in", "award_group_tags", ":", "if", "'id'", "in", "ag", ".", "attrs", ":", "ref", "=", "ag", "[", "'id'", "]", "else", ":", "# hack: generate and increment an id value none is available", "ref", "=", "\"award-group-{id}\"", ".", "format", "(", "id", "=", "generated_id_counter", ")", "generated_id_counter", "+=", "1", "award_group", "=", "{", "}", "award_group_id", "=", "award_group_award_id", "(", "ag", ")", "if", "award_group_id", "is", "not", "None", ":", "award_group", "[", "'award-id'", "]", "=", "first", "(", "award_group_id", ")", "funding_sources", "=", "full_award_group_funding_source", "(", "ag", ")", "source", "=", "first", "(", "funding_sources", ")", "if", "source", "is", "not", "None", ":", "copy_attribute", "(", "source", ",", "'institution'", ",", "award_group", ")", "copy_attribute", "(", "source", ",", "'institution-id'", ",", "award_group", ",", "'id'", ")", "copy_attribute", "(", "source", ",", "'institution-id-type'", ",", "award_group", ",", "destination_key", "=", "'id-type'", ")", "award_group_by_ref", "=", "{", "}", "award_group_by_ref", "[", "ref", "]", "=", "award_group", "award_groups", ".", "append", "(", "award_group_by_ref", ")", "return", "award_groups" ]
Find the award-group items and return a list of details
[ "Find", "the", "award", "-", "group", "items", "and", "return", "a", "list", "of", "details" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1836-L1871
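A sketch of how the return shape of `full_award_groups` can be consumed: each list item maps one award-group id (or the generated "award-group-N" fallback) to a details dict. The input file is hypothetical.

    from elifetools import parseJATS

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    for group in parseJATS.full_award_groups(soup):
        for ref, details in group.items():
            # keys are present only when the XML supplies them
            print(ref, details.get("award-id"), details.get("institution"))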
elifesciences/elife-tools
elifetools/parseJATS.py
award_groups
def award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: award_group = {} award_group['funding_source'] = award_group_funding_source(ag) award_group['recipient'] = award_group_principal_award_recipient(ag) award_group['award_id'] = award_group_award_id(ag) award_groups.append(award_group) return award_groups
python
def award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: award_group = {} award_group['funding_source'] = award_group_funding_source(ag) award_group['recipient'] = award_group_principal_award_recipient(ag) award_group['award_id'] = award_group_award_id(ag) award_groups.append(award_group) return award_groups
[ "def", "award_groups", "(", "soup", ")", ":", "award_groups", "=", "[", "]", "funding_group_section", "=", "extract_nodes", "(", "soup", ",", "\"funding-group\"", ")", "for", "fg", "in", "funding_group_section", ":", "award_group_tags", "=", "extract_nodes", "(", "fg", ",", "\"award-group\"", ")", "for", "ag", "in", "award_group_tags", ":", "award_group", "=", "{", "}", "award_group", "[", "'funding_source'", "]", "=", "award_group_funding_source", "(", "ag", ")", "award_group", "[", "'recipient'", "]", "=", "award_group_principal_award_recipient", "(", "ag", ")", "award_group", "[", "'award_id'", "]", "=", "award_group_award_id", "(", "ag", ")", "award_groups", ".", "append", "(", "award_group", ")", "return", "award_groups" ]
Find the award-group items and return a list of details
[ "Find", "the", "award", "-", "group", "items", "and", "return", "a", "list", "of", "details" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1875-L1896
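For contrast with `full_award_groups` above, a sketch of the flat `award_groups` variant, whose three keys each hold a list of strings (hypothetical input file):

    from elifetools import parseJATS

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    for group in parseJATS.award_groups(soup):
        # funding_source, recipient and award_id are each lists of strings
        print(group["funding_source"], group["recipient"], group["award_id"])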
elifesciences/elife-tools
elifetools/parseJATS.py
award_group_funding_source
def award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_source = [] funding_source_tags = extract_nodes(tag, "funding-source") for t in funding_source_tags: award_group_funding_source.append(t.text) return award_group_funding_source
python
def award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_source = [] funding_source_tags = extract_nodes(tag, "funding-source") for t in funding_source_tags: award_group_funding_source.append(t.text) return award_group_funding_source
[ "def", "award_group_funding_source", "(", "tag", ")", ":", "award_group_funding_source", "=", "[", "]", "funding_source_tags", "=", "extract_nodes", "(", "tag", ",", "\"funding-source\"", ")", "for", "t", "in", "funding_source_tags", ":", "award_group_funding_source", ".", "append", "(", "t", ".", "text", ")", "return", "award_group_funding_source" ]
Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section
[ "Given", "a", "funding", "group", "element", "Find", "the", "award", "group", "funding", "sources", "one", "for", "each", "item", "found", "in", "the", "get_funding_group", "section" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1900-L1910
elifesciences/elife-tools
elifetools/parseJATS.py
full_award_group_funding_source
def full_award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_sources = [] funding_source_nodes = extract_nodes(tag, "funding-source") for funding_source_node in funding_source_nodes: award_group_funding_source = {} institution_nodes = extract_nodes(funding_source_node, 'institution') institution_node = first(institution_nodes) if institution_node: award_group_funding_source['institution'] = node_text(institution_node) if 'content-type' in institution_node.attrs: award_group_funding_source['institution-type'] = institution_node['content-type'] institution_id_nodes = extract_nodes(funding_source_node, 'institution-id') institution_id_node = first(institution_id_nodes) if institution_id_node: award_group_funding_source['institution-id'] = node_text(institution_id_node) if 'institution-id-type' in institution_id_node.attrs: award_group_funding_source['institution-id-type'] = institution_id_node['institution-id-type'] award_group_funding_sources.append(award_group_funding_source) return award_group_funding_sources
python
def full_award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_sources = [] funding_source_nodes = extract_nodes(tag, "funding-source") for funding_source_node in funding_source_nodes: award_group_funding_source = {} institution_nodes = extract_nodes(funding_source_node, 'institution') institution_node = first(institution_nodes) if institution_node: award_group_funding_source['institution'] = node_text(institution_node) if 'content-type' in institution_node.attrs: award_group_funding_source['institution-type'] = institution_node['content-type'] institution_id_nodes = extract_nodes(funding_source_node, 'institution-id') institution_id_node = first(institution_id_nodes) if institution_id_node: award_group_funding_source['institution-id'] = node_text(institution_id_node) if 'institution-id-type' in institution_id_node.attrs: award_group_funding_source['institution-id-type'] = institution_id_node['institution-id-type'] award_group_funding_sources.append(award_group_funding_source) return award_group_funding_sources
[ "def", "full_award_group_funding_source", "(", "tag", ")", ":", "award_group_funding_sources", "=", "[", "]", "funding_source_nodes", "=", "extract_nodes", "(", "tag", ",", "\"funding-source\"", ")", "for", "funding_source_node", "in", "funding_source_nodes", ":", "award_group_funding_source", "=", "{", "}", "institution_nodes", "=", "extract_nodes", "(", "funding_source_node", ",", "'institution'", ")", "institution_node", "=", "first", "(", "institution_nodes", ")", "if", "institution_node", ":", "award_group_funding_source", "[", "'institution'", "]", "=", "node_text", "(", "institution_node", ")", "if", "'content-type'", "in", "institution_node", ".", "attrs", ":", "award_group_funding_source", "[", "'institution-type'", "]", "=", "institution_node", "[", "'content-type'", "]", "institution_id_nodes", "=", "extract_nodes", "(", "funding_source_node", ",", "'institution-id'", ")", "institution_id_node", "=", "first", "(", "institution_id_nodes", ")", "if", "institution_id_node", ":", "award_group_funding_source", "[", "'institution-id'", "]", "=", "node_text", "(", "institution_id_node", ")", "if", "'institution-id-type'", "in", "institution_id_node", ".", "attrs", ":", "award_group_funding_source", "[", "'institution-id-type'", "]", "=", "institution_id_node", "[", "'institution-id-type'", "]", "award_group_funding_sources", ".", "append", "(", "award_group_funding_source", ")", "return", "award_group_funding_sources" ]
Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section
[ "Given", "a", "funding", "group", "element", "Find", "the", "award", "group", "funding", "sources", "one", "for", "each", "item", "found", "in", "the", "get_funding_group", "section" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1913-L1942
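This function takes an individual award-group tag rather than the whole soup; a sketch assuming `extract_nodes` from `elifetools.utils` (the same helper the function itself uses) and a hypothetical file:

    from elifetools import parseJATS
    from elifetools.utils import extract_nodes

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    for ag in extract_nodes(soup, "award-group"):
        for source in parseJATS.full_award_group_funding_source(ag):
            # keys are present only when the XML supplies them
            print(source.get("institution"), source.get("institution-id"))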
elifesciences/elife-tools
elifetools/parseJATS.py
award_group_award_id
def award_group_award_id(tag): """ Find the award group award id, one for each item found in the get_funding_group section """ award_group_award_id = [] award_id_tags = extract_nodes(tag, "award-id") for t in award_id_tags: award_group_award_id.append(t.text) return award_group_award_id
python
def award_group_award_id(tag): """ Find the award group award id, one for each item found in the get_funding_group section """ award_group_award_id = [] award_id_tags = extract_nodes(tag, "award-id") for t in award_id_tags: award_group_award_id.append(t.text) return award_group_award_id
[ "def", "award_group_award_id", "(", "tag", ")", ":", "award_group_award_id", "=", "[", "]", "award_id_tags", "=", "extract_nodes", "(", "tag", ",", "\"award-id\"", ")", "for", "t", "in", "award_id_tags", ":", "award_group_award_id", ".", "append", "(", "t", ".", "text", ")", "return", "award_group_award_id" ]
Find the award group award id, one for each item found in the get_funding_group section
[ "Find", "the", "award", "group", "award", "id", "one", "for", "each", "item", "found", "in", "the", "get_funding_group", "section" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1946-L1955
elifesciences/elife-tools
elifetools/parseJATS.py
award_group_principal_award_recipient
def award_group_principal_award_recipient(tag): """ Find the award group principal award recipient, one for each item found in the get_funding_group section """ award_group_principal_award_recipient = [] principal_award_recipients = extract_nodes(tag, "principal-award-recipient") for t in principal_award_recipients: principal_award_recipient_text = "" institution = node_text(first(extract_nodes(t, "institution"))) surname = node_text(first(extract_nodes(t, "surname"))) given_names = node_text(first(extract_nodes(t, "given-names"))) string_name = node_text(first(raw_parser.string_name(t))) # Concatenate name and institution values if found # while filtering out excess whitespace if(given_names): principal_award_recipient_text += given_names if(principal_award_recipient_text != ""): principal_award_recipient_text += " " if(surname): principal_award_recipient_text += surname if(institution): principal_award_recipient_text += institution if(string_name): principal_award_recipient_text += string_name award_group_principal_award_recipient.append(principal_award_recipient_text) return award_group_principal_award_recipient
python
def award_group_principal_award_recipient(tag): """ Find the award group principal award recipient, one for each item found in the get_funding_group section """ award_group_principal_award_recipient = [] principal_award_recipients = extract_nodes(tag, "principal-award-recipient") for t in principal_award_recipients: principal_award_recipient_text = "" institution = node_text(first(extract_nodes(t, "institution"))) surname = node_text(first(extract_nodes(t, "surname"))) given_names = node_text(first(extract_nodes(t, "given-names"))) string_name = node_text(first(raw_parser.string_name(t))) # Concatenate name and institution values if found # while filtering out excess whitespace if(given_names): principal_award_recipient_text += given_names if(principal_award_recipient_text != ""): principal_award_recipient_text += " " if(surname): principal_award_recipient_text += surname if(institution): principal_award_recipient_text += institution if(string_name): principal_award_recipient_text += string_name award_group_principal_award_recipient.append(principal_award_recipient_text) return award_group_principal_award_recipient
[ "def", "award_group_principal_award_recipient", "(", "tag", ")", ":", "award_group_principal_award_recipient", "=", "[", "]", "principal_award_recipients", "=", "extract_nodes", "(", "tag", ",", "\"principal-award-recipient\"", ")", "for", "t", "in", "principal_award_recipients", ":", "principal_award_recipient_text", "=", "\"\"", "institution", "=", "node_text", "(", "first", "(", "extract_nodes", "(", "t", ",", "\"institution\"", ")", ")", ")", "surname", "=", "node_text", "(", "first", "(", "extract_nodes", "(", "t", ",", "\"surname\"", ")", ")", ")", "given_names", "=", "node_text", "(", "first", "(", "extract_nodes", "(", "t", ",", "\"given-names\"", ")", ")", ")", "string_name", "=", "node_text", "(", "first", "(", "raw_parser", ".", "string_name", "(", "t", ")", ")", ")", "# Concatenate name and institution values if found", "# while filtering out excess whitespace", "if", "(", "given_names", ")", ":", "principal_award_recipient_text", "+=", "given_names", "if", "(", "principal_award_recipient_text", "!=", "\"\"", ")", ":", "principal_award_recipient_text", "+=", "\" \"", "if", "(", "surname", ")", ":", "principal_award_recipient_text", "+=", "surname", "if", "(", "institution", ")", ":", "principal_award_recipient_text", "+=", "institution", "if", "(", "string_name", ")", ":", "principal_award_recipient_text", "+=", "string_name", "award_group_principal_award_recipient", ".", "append", "(", "principal_award_recipient_text", ")", "return", "award_group_principal_award_recipient" ]
Find the award group principal award recipient, one for each item found in the get_funding_group section
[ "Find", "the", "award", "group", "principal", "award", "recipient", "one", "for", "each", "item", "found", "in", "the", "get_funding_group", "section" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1958-L1987
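A sketch for `award_group_principal_award_recipient`, again operating on individual award-group tags; each result is the concatenated name or institution text of one principal-award-recipient element (assumed file and helper as above):

    from elifetools import parseJATS
    from elifetools.utils import extract_nodes

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    for ag in extract_nodes(soup, "award-group"):
        # one concatenated string per <principal-award-recipient> tag
        print(parseJATS.award_group_principal_award_recipient(ag))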
elifesciences/elife-tools
elifetools/parseJATS.py
object_id_doi
def object_id_doi(tag, parent_tag_name=None): """DOI in an object-id tag found inside the tag""" doi = None object_id = None object_ids = raw_parser.object_id(tag, "doi") if object_ids: object_id = first([id_ for id_ in object_ids]) if parent_tag_name and object_id and object_id.parent.name != parent_tag_name: object_id = None if object_id: doi = node_contents_str(object_id) return doi
python
def object_id_doi(tag, parent_tag_name=None): """DOI in an object-id tag found inside the tag""" doi = None object_id = None object_ids = raw_parser.object_id(tag, "doi") if object_ids: object_id = first([id_ for id_ in object_ids]) if parent_tag_name and object_id and object_id.parent.name != parent_tag_name: object_id = None if object_id: doi = node_contents_str(object_id) return doi
[ "def", "object_id_doi", "(", "tag", ",", "parent_tag_name", "=", "None", ")", ":", "doi", "=", "None", "object_id", "=", "None", "object_ids", "=", "raw_parser", ".", "object_id", "(", "tag", ",", "\"doi\"", ")", "if", "object_ids", ":", "object_id", "=", "first", "(", "[", "id_", "for", "id_", "in", "object_ids", "]", ")", "if", "parent_tag_name", "and", "object_id", "and", "object_id", ".", "parent", ".", "name", "!=", "parent_tag_name", ":", "object_id", "=", "None", "if", "object_id", ":", "doi", "=", "node_contents_str", "(", "object_id", ")", "return", "doi" ]
DOI in an object-id tag found inside the tag
[ "DOI", "in", "an", "object", "-", "id", "tag", "found", "inside", "the", "tag" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1989-L2000
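A self-contained sketch for `object_id_doi` using a small hand-written fragment (the DOI value is illustrative); `parent_tag_name` restricts the match to object-id tags whose direct parent has that name.

    from bs4 import BeautifulSoup
    from elifetools import parseJATS

    xml = ('<fig id="fig1"><object-id pub-id-type="doi">'
           '10.7554/eLife.00666.024</object-id></fig>')
    tag = BeautifulSoup(xml, "lxml-xml").fig
    print(parseJATS.object_id_doi(tag, parent_tag_name="fig"))
    # -> 10.7554/eLife.00666.024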
elifesciences/elife-tools
elifetools/parseJATS.py
title_tag_inspected
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the title tag and sometimes inspect its parents""" title_tag = None if direct_sibling_only is True: for sibling_tag in tag: if sibling_tag.name and sibling_tag.name == "title": title_tag = sibling_tag else: title_tag = raw_parser.title(tag) if parent_tag_name and p_parent_tag_name: if (title_tag and title_tag.parent.name and title_tag.parent.parent.name and title_tag.parent.name == parent_tag_name and title_tag.parent.parent.name == p_parent_tag_name): pass else: title_tag = None return title_tag
python
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the title tag and sometimes inspect its parents""" title_tag = None if direct_sibling_only is True: for sibling_tag in tag: if sibling_tag.name and sibling_tag.name == "title": title_tag = sibling_tag else: title_tag = raw_parser.title(tag) if parent_tag_name and p_parent_tag_name: if (title_tag and title_tag.parent.name and title_tag.parent.parent.name and title_tag.parent.name == parent_tag_name and title_tag.parent.parent.name == p_parent_tag_name): pass else: title_tag = None return title_tag
[ "def", "title_tag_inspected", "(", "tag", ",", "parent_tag_name", "=", "None", ",", "p_parent_tag_name", "=", "None", ",", "direct_sibling_only", "=", "False", ")", ":", "title_tag", "=", "None", "if", "direct_sibling_only", "is", "True", ":", "for", "sibling_tag", "in", "tag", ":", "if", "sibling_tag", ".", "name", "and", "sibling_tag", ".", "name", "==", "\"title\"", ":", "title_tag", "=", "sibling_tag", "else", ":", "title_tag", "=", "raw_parser", ".", "title", "(", "tag", ")", "if", "parent_tag_name", "and", "p_parent_tag_name", ":", "if", "(", "title_tag", "and", "title_tag", ".", "parent", ".", "name", "and", "title_tag", ".", "parent", ".", "parent", ".", "name", "and", "title_tag", ".", "parent", ".", "name", "==", "parent_tag_name", "and", "title_tag", ".", "parent", ".", "parent", ".", "name", "==", "p_parent_tag_name", ")", ":", "pass", "else", ":", "title_tag", "=", "None", "return", "title_tag" ]
Extract the title tag and sometimes inspect its parents
[ "Extract", "the", "title", "tag", "and", "sometimes", "inspect", "its", "parents" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2002-L2021
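A sketch of the `direct_sibling_only` mode of `title_tag_inspected` on a hand-written fragment: only the tag's immediate children are scanned, so the nested sec title is ignored.

    from bs4 import BeautifulSoup
    from elifetools import parseJATS

    xml = '<boxed-text><title>Box 1.</title><sec><title>Inner</title></sec></boxed-text>'
    tag = BeautifulSoup(xml, "lxml-xml").find("boxed-text")
    # Only direct children are scanned in this mode
    print(parseJATS.title_tag_inspected(tag, direct_sibling_only=True))
    # -> <title>Box 1.</title>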
elifesciences/elife-tools
elifetools/parseJATS.py
title_text
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the text of a title tag and sometimes inspect its parents""" title = None title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only) if title_tag: title = node_contents_str(title_tag) return title
python
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the text of a title tag and sometimes inspect its parents""" title = None title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only) if title_tag: title = node_contents_str(title_tag) return title
[ "def", "title_text", "(", "tag", ",", "parent_tag_name", "=", "None", ",", "p_parent_tag_name", "=", "None", ",", "direct_sibling_only", "=", "False", ")", ":", "title", "=", "None", "title_tag", "=", "title_tag_inspected", "(", "tag", ",", "parent_tag_name", ",", "p_parent_tag_name", ",", "direct_sibling_only", ")", "if", "title_tag", ":", "title", "=", "node_contents_str", "(", "title_tag", ")", "return", "title" ]
Extract the text of a title tag and sometimes inspect its parents
[ "Extract", "the", "text", "of", "a", "title", "tag", "and", "sometimes", "inspect", "its", "parents" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2023-L2031
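`title_text` wraps `title_tag_inspected` and returns the tag contents as a string (or None when no title matches); a matching sketch:

    from bs4 import BeautifulSoup
    from elifetools import parseJATS

    xml = '<app><title>Appendix 1</title><p>Body text.</p></app>'
    tag = BeautifulSoup(xml, "lxml-xml").app
    print(parseJATS.title_text(tag, direct_sibling_only=True))  # -> Appendix 1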
elifesciences/elife-tools
elifetools/parseJATS.py
boxed_text_to_image_block
def boxed_text_to_image_block(tag): "convert boxed-text to an image block containing an inline-graphic" tag_block = OrderedDict() image_content = body_block_image_content(first(raw_parser.inline_graphic(tag))) tag_block["type"] = "image" set_if_value(tag_block, "doi", doi_uri_to_doi(object_id_doi(tag, tag.name))) set_if_value(tag_block, "id", tag.get("id")) set_if_value(tag_block, "image", image_content) # render paragraphs into a caption p_tags = raw_parser.paragraph(tag) caption_content = [] for p_tag in p_tags: if not raw_parser.inline_graphic(p_tag): caption_content.append(body_block_content(p_tag)) set_if_value(tag_block, "caption", caption_content) return tag_block
python
def boxed_text_to_image_block(tag): "convert boxed-text to an image block containing an inline-graphic" tag_block = OrderedDict() image_content = body_block_image_content(first(raw_parser.inline_graphic(tag))) tag_block["type"] = "image" set_if_value(tag_block, "doi", doi_uri_to_doi(object_id_doi(tag, tag.name))) set_if_value(tag_block, "id", tag.get("id")) set_if_value(tag_block, "image", image_content) # render paragraphs into a caption p_tags = raw_parser.paragraph(tag) caption_content = [] for p_tag in p_tags: if not raw_parser.inline_graphic(p_tag): caption_content.append(body_block_content(p_tag)) set_if_value(tag_block, "caption", caption_content) return tag_block
[ "def", "boxed_text_to_image_block", "(", "tag", ")", ":", "tag_block", "=", "OrderedDict", "(", ")", "image_content", "=", "body_block_image_content", "(", "first", "(", "raw_parser", ".", "inline_graphic", "(", "tag", ")", ")", ")", "tag_block", "[", "\"type\"", "]", "=", "\"image\"", "set_if_value", "(", "tag_block", ",", "\"doi\"", ",", "doi_uri_to_doi", "(", "object_id_doi", "(", "tag", ",", "tag", ".", "name", ")", ")", ")", "set_if_value", "(", "tag_block", ",", "\"id\"", ",", "tag", ".", "get", "(", "\"id\"", ")", ")", "set_if_value", "(", "tag_block", ",", "\"image\"", ",", "image_content", ")", "# render paragraphs into a caption", "p_tags", "=", "raw_parser", ".", "paragraph", "(", "tag", ")", "caption_content", "=", "[", "]", "for", "p_tag", "in", "p_tags", ":", "if", "not", "raw_parser", ".", "inline_graphic", "(", "p_tag", ")", ":", "caption_content", ".", "append", "(", "body_block_content", "(", "p_tag", ")", ")", "set_if_value", "(", "tag_block", ",", "\"caption\"", ",", "caption_content", ")", "return", "tag_block" ]
convert boxed-text to an image block containing an inline-graphic
[ "covert", "boxed", "-", "text", "to", "an", "image", "block", "containing", "an", "inline", "-", "graphic" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2077-L2092
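A sketch of the boxed-text conversion above on a hand-written fragment; the xmlns:xlink declaration and file name are illustrative. Paragraphs holding the inline-graphic are excluded from the caption, as the code shows.

    from bs4 import BeautifulSoup
    from elifetools import parseJATS

    xml = ('<boxed-text id="B1" xmlns:xlink="http://www.w3.org/1999/xlink">'
           '<p><inline-graphic xlink:href="elife-00666-box1.jpg"/></p>'
           '<p>A caption paragraph.</p></boxed-text>')
    tag = BeautifulSoup(xml, "lxml-xml").find("boxed-text")
    block = parseJATS.boxed_text_to_image_block(tag)
    print(block["type"], block.get("id"), block.get("image"))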
elifesciences/elife-tools
elifetools/parseJATS.py
body_json
def body_json(soup, base_url=None): """ Get body json and then alter it with section wrapping and removing boxed-text """ body_content = body(soup, remove_key_info_box=True, base_url=base_url) # Wrap in a section if the first block is not a section if (body_content and len(body_content) > 0 and "type" in body_content[0] and body_content[0]["type"] != "section"): # Wrap this one new_body_section = OrderedDict() new_body_section["type"] = "section" new_body_section["id"] = "s0" new_body_section["title"] = "Main text" new_body_section["content"] = [] for body_block in body_content: new_body_section["content"].append(body_block) new_body = [] new_body.append(new_body_section) body_content = new_body body_content_rewritten = elifetools.json_rewrite.rewrite_json("body_json", soup, body_content) return body_content_rewritten
python
def body_json(soup, base_url=None): """ Get body json and then alter it with section wrapping and removing boxed-text """ body_content = body(soup, remove_key_info_box=True, base_url=base_url) # Wrap in a section if the first block is not a section if (body_content and len(body_content) > 0 and "type" in body_content[0] and body_content[0]["type"] != "section"): # Wrap this one new_body_section = OrderedDict() new_body_section["type"] = "section" new_body_section["id"] = "s0" new_body_section["title"] = "Main text" new_body_section["content"] = [] for body_block in body_content: new_body_section["content"].append(body_block) new_body = [] new_body.append(new_body_section) body_content = new_body body_content_rewritten = elifetools.json_rewrite.rewrite_json("body_json", soup, body_content) return body_content_rewritten
[ "def", "body_json", "(", "soup", ",", "base_url", "=", "None", ")", ":", "body_content", "=", "body", "(", "soup", ",", "remove_key_info_box", "=", "True", ",", "base_url", "=", "base_url", ")", "# Wrap in a section if the first block is not a section", "if", "(", "body_content", "and", "len", "(", "body_content", ")", ">", "0", "and", "\"type\"", "in", "body_content", "[", "0", "]", "and", "body_content", "[", "0", "]", "[", "\"type\"", "]", "!=", "\"section\"", ")", ":", "# Wrap this one", "new_body_section", "=", "OrderedDict", "(", ")", "new_body_section", "[", "\"type\"", "]", "=", "\"section\"", "new_body_section", "[", "\"id\"", "]", "=", "\"s0\"", "new_body_section", "[", "\"title\"", "]", "=", "\"Main text\"", "new_body_section", "[", "\"content\"", "]", "=", "[", "]", "for", "body_block", "in", "body_content", ":", "new_body_section", "[", "\"content\"", "]", ".", "append", "(", "body_block", ")", "new_body", "=", "[", "]", "new_body", ".", "append", "(", "new_body_section", ")", "body_content", "=", "new_body", "body_content_rewritten", "=", "elifetools", ".", "json_rewrite", ".", "rewrite_json", "(", "\"body_json\"", ",", "soup", ",", "body_content", ")", "return", "body_content_rewritten" ]
Get body json and then alter it with section wrapping and removing boxed-text
[ "Get", "body", "json", "and", "then", "alter", "it", "with", "section", "wrapping", "and", "removing", "boxed", "-", "text" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2108-L2126
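A sketch for `body_json`: because of the wrapping step above, the first block (when any body exists) is always a section, either a native one or the synthesized "s0"/"Main text" wrapper. The file name is hypothetical.

    from elifetools import parseJATS

    soup = parseJATS.parse_document("sample-article.xml")  # hypothetical file
    blocks = parseJATS.body_json(soup)
    if blocks:
        # native section, or the synthesized "s0" / "Main text" wrapper
        print(blocks[0]["type"], blocks[0].get("title"))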
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_content_render
def body_block_content_render(tag, recursive=False, base_url=None): """ Render the tag as body content and call recursively if the tag has child tags """ block_content_list = [] tag_content = OrderedDict() if tag.name == "p": for block_content in body_block_paragraph_render(tag, base_url=base_url): if block_content != {}: block_content_list.append(block_content) else: tag_content = body_block_content(tag, base_url=base_url) nodenames = body_block_nodenames() tag_content_content = [] # Collect the content of the tag but only for some tags if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]: for child_tag in tag: if not(hasattr(child_tag, 'name')): continue if child_tag.name == "p": # Ignore paragraphs that start with DOI: if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0: continue for block_content in body_block_paragraph_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) elif child_tag.name == "fig" and tag.name == "fig-group": # Do not include a fig inside fig-group a second time pass elif child_tag.name == "media" and tag.name == "fig-group": # Do not include a media video inside fig-group a second time if child_tag.get("mimetype") == "video": pass else: for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) if len(tag_content_content) > 0: if tag.name in nodenames or recursive is False: tag_content["content"] = [] for block_content in tag_content_content: tag_content["content"].append(block_content) block_content_list.append(tag_content) else: # Not a block tag, e.g. a caption tag, let the content pass through block_content_list = tag_content_content else: block_content_list.append(tag_content) return block_content_list
python
def body_block_content_render(tag, recursive=False, base_url=None): """ Render the tag as body content and call recursively if the tag has child tags """ block_content_list = [] tag_content = OrderedDict() if tag.name == "p": for block_content in body_block_paragraph_render(tag, base_url=base_url): if block_content != {}: block_content_list.append(block_content) else: tag_content = body_block_content(tag, base_url=base_url) nodenames = body_block_nodenames() tag_content_content = [] # Collect the content of the tag but only for some tags if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]: for child_tag in tag: if not(hasattr(child_tag, 'name')): continue if child_tag.name == "p": # Ignore paragraphs that start with DOI: if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0: continue for block_content in body_block_paragraph_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) elif child_tag.name == "fig" and tag.name == "fig-group": # Do not include a fig inside fig-group a second time pass elif child_tag.name == "media" and tag.name == "fig-group": # Do not include a media video inside fig-group a second time if child_tag.get("mimetype") == "video": pass else: for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) if len(tag_content_content) > 0: if tag.name in nodenames or recursive is False: tag_content["content"] = [] for block_content in tag_content_content: tag_content["content"].append(block_content) block_content_list.append(tag_content) else: # Not a block tag, e.g. a caption tag, let the content pass through block_content_list = tag_content_content else: block_content_list.append(tag_content) return block_content_list
[ "def", "body_block_content_render", "(", "tag", ",", "recursive", "=", "False", ",", "base_url", "=", "None", ")", ":", "block_content_list", "=", "[", "]", "tag_content", "=", "OrderedDict", "(", ")", "if", "tag", ".", "name", "==", "\"p\"", ":", "for", "block_content", "in", "body_block_paragraph_render", "(", "tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "block_content_list", ".", "append", "(", "block_content", ")", "else", ":", "tag_content", "=", "body_block_content", "(", "tag", ",", "base_url", "=", "base_url", ")", "nodenames", "=", "body_block_nodenames", "(", ")", "tag_content_content", "=", "[", "]", "# Collect the content of the tag but only for some tags", "if", "tag", ".", "name", "not", "in", "[", "\"p\"", ",", "\"fig\"", ",", "\"table-wrap\"", ",", "\"list\"", ",", "\"media\"", ",", "\"disp-quote\"", ",", "\"code\"", "]", ":", "for", "child_tag", "in", "tag", ":", "if", "not", "(", "hasattr", "(", "child_tag", ",", "'name'", ")", ")", ":", "continue", "if", "child_tag", ".", "name", "==", "\"p\"", ":", "# Ignore paragraphs that start with DOI:", "if", "node_text", "(", "child_tag", ")", "and", "len", "(", "remove_doi_paragraph", "(", "[", "child_tag", "]", ")", ")", "<=", "0", ":", "continue", "for", "block_content", "in", "body_block_paragraph_render", "(", "child_tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "tag_content_content", ".", "append", "(", "block_content", ")", "elif", "child_tag", ".", "name", "==", "\"fig\"", "and", "tag", ".", "name", "==", "\"fig-group\"", ":", "# Do not fig inside fig-group a second time", "pass", "elif", "child_tag", ".", "name", "==", "\"media\"", "and", "tag", ".", "name", "==", "\"fig-group\"", ":", "# Do not include a media video inside fig-group a second time", "if", "child_tag", ".", "get", "(", "\"mimetype\"", ")", "==", "\"video\"", ":", "pass", "else", ":", "for", "block_content", "in", "body_block_content_render", "(", "child_tag", ",", "recursive", "=", "True", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "tag_content_content", ".", "append", "(", "block_content", ")", "if", "len", "(", "tag_content_content", ")", ">", "0", ":", "if", "tag", ".", "name", "in", "nodenames", "or", "recursive", "is", "False", ":", "tag_content", "[", "\"content\"", "]", "=", "[", "]", "for", "block_content", "in", "tag_content_content", ":", "tag_content", "[", "\"content\"", "]", ".", "append", "(", "block_content", ")", "block_content_list", ".", "append", "(", "tag_content", ")", "else", ":", "# Not a block tag, e.g. a caption tag, let the content pass through", "block_content_list", "=", "tag_content_content", "else", ":", "block_content_list", ".", "append", "(", "tag_content", ")", "return", "block_content_list" ]
Render the tag as body content and call recursively if the tag has child tags
[ "Render", "the", "tag", "as", "body", "content", "and", "call", "recursively", "if", "the", "tag", "has", "child", "tags" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2175-L2232
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_paragraph_render
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None): """ paragraphs may wrap some other body block content. This is separated out so it can be called from more than one place """ # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url) block_content_list = [] tag_content_content = [] nodenames = body_block_nodenames() paragraph_content = u'' for child_tag in p_tag: if child_tag.name is None or body_block_content(child_tag) == {}: paragraph_content = paragraph_content + unicode_value(child_tag) else: # Add previous paragraph content first if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) paragraph_content = u'' if child_tag.name is not None and body_block_content(child_tag) != {}: for block_content in body_block_content_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) # finish up if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) if len(tag_content_content) > 0: for block_content in tag_content_content: block_content_list.append(block_content) return block_content_list
python
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None): """ paragraphs may wrap some other body block content. This is separated out so it can be called from more than one place """ # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url) block_content_list = [] tag_content_content = [] nodenames = body_block_nodenames() paragraph_content = u'' for child_tag in p_tag: if child_tag.name is None or body_block_content(child_tag) == {}: paragraph_content = paragraph_content + unicode_value(child_tag) else: # Add previous paragraph content first if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) paragraph_content = u'' if child_tag.name is not None and body_block_content(child_tag) != {}: for block_content in body_block_content_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) # finish up if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) if len(tag_content_content) > 0: for block_content in tag_content_content: block_content_list.append(block_content) return block_content_list
[ "def", "body_block_paragraph_render", "(", "p_tag", ",", "html_flag", "=", "True", ",", "base_url", "=", "None", ")", ":", "# Configure the XML to HTML conversion preference for shorthand use below", "convert", "=", "lambda", "xml_string", ":", "xml_to_html", "(", "html_flag", ",", "xml_string", ",", "base_url", ")", "block_content_list", "=", "[", "]", "tag_content_content", "=", "[", "]", "nodenames", "=", "body_block_nodenames", "(", ")", "paragraph_content", "=", "u''", "for", "child_tag", "in", "p_tag", ":", "if", "child_tag", ".", "name", "is", "None", "or", "body_block_content", "(", "child_tag", ")", "==", "{", "}", ":", "paragraph_content", "=", "paragraph_content", "+", "unicode_value", "(", "child_tag", ")", "else", ":", "# Add previous paragraph content first", "if", "paragraph_content", ".", "strip", "(", ")", "!=", "''", ":", "tag_content_content", ".", "append", "(", "body_block_paragraph_content", "(", "convert", "(", "paragraph_content", ")", ")", ")", "paragraph_content", "=", "u''", "if", "child_tag", ".", "name", "is", "not", "None", "and", "body_block_content", "(", "child_tag", ")", "!=", "{", "}", ":", "for", "block_content", "in", "body_block_content_render", "(", "child_tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "tag_content_content", ".", "append", "(", "block_content", ")", "# finish up", "if", "paragraph_content", ".", "strip", "(", ")", "!=", "''", ":", "tag_content_content", ".", "append", "(", "body_block_paragraph_content", "(", "convert", "(", "paragraph_content", ")", ")", ")", "if", "len", "(", "tag_content_content", ")", ">", "0", ":", "for", "block_content", "in", "tag_content_content", ":", "block_content_list", ".", "append", "(", "block_content", ")", "return", "block_content_list" ]
paragraphs may wrap some other body block content. This is separated out so it can be called from more than one place
[ "paragraphs", "may", "wrap", "some", "other", "body", "block", "content", "this", "is", "separated", "out", "so", "it", "can", "be", "called", "from", "more", "than", "one", "place" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2234-L2271
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_caption_render
def body_block_caption_render(caption_tags, base_url=None): """fig and media tag captions are similar so use this common function""" caption_content = [] supplementary_material_tags = [] for block_tag in remove_doi_paragraph(caption_tags): # Note then skip p tags with supplementary-material inside if raw_parser.supplementary_material(block_tag): for supp_tag in raw_parser.supplementary_material(block_tag): supplementary_material_tags.append(supp_tag) continue for block_content in body_block_content_render(block_tag, base_url=base_url): if block_content != {}: caption_content.append(block_content) return caption_content, supplementary_material_tags
python
def body_block_caption_render(caption_tags, base_url=None): """fig and media tag captions are similar so use this common function""" caption_content = [] supplementary_material_tags = [] for block_tag in remove_doi_paragraph(caption_tags): # Note then skip p tags with supplementary-material inside if raw_parser.supplementary_material(block_tag): for supp_tag in raw_parser.supplementary_material(block_tag): supplementary_material_tags.append(supp_tag) continue for block_content in body_block_content_render(block_tag, base_url=base_url): if block_content != {}: caption_content.append(block_content) return caption_content, supplementary_material_tags
[ "def", "body_block_caption_render", "(", "caption_tags", ",", "base_url", "=", "None", ")", ":", "caption_content", "=", "[", "]", "supplementary_material_tags", "=", "[", "]", "for", "block_tag", "in", "remove_doi_paragraph", "(", "caption_tags", ")", ":", "# Note then skip p tags with supplementary-material inside", "if", "raw_parser", ".", "supplementary_material", "(", "block_tag", ")", ":", "for", "supp_tag", "in", "raw_parser", ".", "supplementary_material", "(", "block_tag", ")", ":", "supplementary_material_tags", ".", "append", "(", "supp_tag", ")", "continue", "for", "block_content", "in", "body_block_content_render", "(", "block_tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "caption_content", ".", "append", "(", "block_content", ")", "return", "caption_content", ",", "supplementary_material_tags" ]
fig and media tag captions are similar so use this common function
[ "fig", "and", "media", "tag", "captions", "are", "similar", "so", "use", "this", "common", "function" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2273-L2290
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_supplementary_material_render
def body_block_supplementary_material_render(supp_tags, base_url=None): """fig and media tag caption may have supplementary material""" source_data = [] for supp_tag in supp_tags: for block_content in body_block_content_render(supp_tag, base_url=base_url): if block_content != {}: if "content" in block_content: del block_content["content"] source_data.append(block_content) return source_data
python
def body_block_supplementary_material_render(supp_tags, base_url=None): """fig and media tag caption may have supplementary material""" source_data = [] for supp_tag in supp_tags: for block_content in body_block_content_render(supp_tag, base_url=base_url): if block_content != {}: if "content" in block_content: del block_content["content"] source_data.append(block_content) return source_data
[ "def", "body_block_supplementary_material_render", "(", "supp_tags", ",", "base_url", "=", "None", ")", ":", "source_data", "=", "[", "]", "for", "supp_tag", "in", "supp_tags", ":", "for", "block_content", "in", "body_block_content_render", "(", "supp_tag", ",", "base_url", "=", "base_url", ")", ":", "if", "block_content", "!=", "{", "}", ":", "if", "\"content\"", "in", "block_content", ":", "del", "block_content", "[", "\"content\"", "]", "source_data", ".", "append", "(", "block_content", ")", "return", "source_data" ]
fig and media tag caption may have supplementary material
[ "fig", "and", "media", "tag", "caption", "may", "have", "supplementary", "material" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2292-L2301
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_paragraph_content
def body_block_paragraph_content(text): "for formatting of simple paragraphs of text only, checking that it is not all whitespace" tag_content = OrderedDict() if text and text != '': tag_content["type"] = "paragraph" tag_content["text"] = clean_whitespace(text) return tag_content
python
def body_block_paragraph_content(text): "for formatting of simple paragraphs of text only, checking that it is not all whitespace" tag_content = OrderedDict() if text and text != '': tag_content["type"] = "paragraph" tag_content["text"] = clean_whitespace(text) return tag_content
[ "def", "body_block_paragraph_content", "(", "text", ")", ":", "tag_content", "=", "OrderedDict", "(", ")", "if", "text", "and", "text", "!=", "''", ":", "tag_content", "[", "\"type\"", "]", "=", "\"paragraph\"", "tag_content", "[", "\"text\"", "]", "=", "clean_whitespace", "(", "text", ")", "return", "tag_content" ]
for formatting of simple paragraphs of text only, checking that it is not all whitespace
[ "for", "formatting", "of", "simple", "paragraphs", "of", "text", "only", "and", "check", "if", "it", "is", "all", "whitespace" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2303-L2309
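A quick sketch of the paragraph helper's two outcomes:

    from elifetools import parseJATS

    # Non-empty text becomes a paragraph block with its whitespace cleaned
    print(parseJATS.body_block_paragraph_content("A <i>short</i> paragraph."))
    # Empty text yields an empty OrderedDict
    print(parseJATS.body_block_paragraph_content(""))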
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_image_content
def body_block_image_content(tag): "format a graphic or inline-graphic into a body block json format" image_content = OrderedDict() if tag: copy_attribute(tag.attrs, 'xlink:href', image_content, 'uri') if "uri" in image_content: # todo!! alt set_if_value(image_content, "alt", "") return image_content
python
def body_block_image_content(tag): "format a graphic or inline-graphic into a body block json format" image_content = OrderedDict() if tag: copy_attribute(tag.attrs, 'xlink:href', image_content, 'uri') if "uri" in image_content: # todo!! alt set_if_value(image_content, "alt", "") return image_content
[ "def", "body_block_image_content", "(", "tag", ")", ":", "image_content", "=", "OrderedDict", "(", ")", "if", "tag", ":", "copy_attribute", "(", "tag", ".", "attrs", ",", "'xlink:href'", ",", "image_content", ",", "'uri'", ")", "if", "\"uri\"", "in", "image_content", ":", "# todo!! alt", "set_if_value", "(", "image_content", ",", "\"alt\"", ",", "\"\"", ")", "return", "image_content" ]
format a graphic or inline-graphic into a body block json format
[ "format", "a", "graphic", "or", "inline", "-", "graphic", "into", "a", "body", "block", "json", "format" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2311-L2319
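A sketch for `body_block_image_content` on a hand-written graphic tag; the xlink namespace declaration is needed for the xlink:href lookup, and the file name is illustrative.

    from bs4 import BeautifulSoup
    from elifetools import parseJATS

    xml = ('<graphic xmlns:xlink="http://www.w3.org/1999/xlink" '
           'xlink:href="elife-00666-fig1.jpg"/>')
    tag = BeautifulSoup(xml, "lxml-xml").graphic
    # xlink:href is copied to "uri" and "alt" is stubbed to an empty string
    print(parseJATS.body_block_image_content(tag))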
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_title_label_caption
def body_block_title_label_caption(tag_content, title_value, label_value, caption_content, set_caption=True, prefer_title=False, prefer_label=False): """set the title, label and caption values in a consistent way set_caption: insert a "caption" field prefer_title: when only one value is available, set title rather than label. If False, set label rather than title""" set_if_value(tag_content, "label", rstrip_punctuation(label_value)) set_if_value(tag_content, "title", title_value) if set_caption is True and caption_content and len(caption_content) > 0: tag_content["caption"] = caption_content if prefer_title: if "title" not in tag_content and label_value: set_if_value(tag_content, "title", label_value) del(tag_content["label"]) if prefer_label: if "label" not in tag_content and title_value: set_if_value(tag_content, "label", rstrip_punctuation(title_value)) del(tag_content["title"])
python
def body_block_title_label_caption(tag_content, title_value, label_value, caption_content, set_caption=True, prefer_title=False, prefer_label=False): """set the title, label and caption values in a consistent way set_caption: insert a "caption" field prefer_title: when only one value is available, set title rather than label. If False, set label rather than title""" set_if_value(tag_content, "label", rstrip_punctuation(label_value)) set_if_value(tag_content, "title", title_value) if set_caption is True and caption_content and len(caption_content) > 0: tag_content["caption"] = caption_content if prefer_title: if "title" not in tag_content and label_value: set_if_value(tag_content, "title", label_value) del(tag_content["label"]) if prefer_label: if "label" not in tag_content and title_value: set_if_value(tag_content, "label", rstrip_punctuation(title_value)) del(tag_content["title"])
[ "def", "body_block_title_label_caption", "(", "tag_content", ",", "title_value", ",", "label_value", ",", "caption_content", ",", "set_caption", "=", "True", ",", "prefer_title", "=", "False", ",", "prefer_label", "=", "False", ")", ":", "set_if_value", "(", "tag_content", ",", "\"label\"", ",", "rstrip_punctuation", "(", "label_value", ")", ")", "set_if_value", "(", "tag_content", ",", "\"title\"", ",", "title_value", ")", "if", "set_caption", "is", "True", "and", "caption_content", "and", "len", "(", "caption_content", ")", ">", "0", ":", "tag_content", "[", "\"caption\"", "]", "=", "caption_content", "if", "prefer_title", ":", "if", "\"title\"", "not", "in", "tag_content", "and", "label_value", ":", "set_if_value", "(", "tag_content", ",", "\"title\"", ",", "label_value", ")", "del", "(", "tag_content", "[", "\"label\"", "]", ")", "if", "prefer_label", ":", "if", "\"label\"", "not", "in", "tag_content", "and", "title_value", ":", "set_if_value", "(", "tag_content", ",", "\"label\"", ",", "rstrip_punctuation", "(", "title_value", ")", ")", "del", "(", "tag_content", "[", "\"title\"", "]", ")" ]
set the title, label and caption values in a consistent way set_caption: insert a "caption" field prefer_title: when only one value is available, set title rather than label. If False, set label rather than title
[ "set", "the", "title", "label", "and", "caption", "values", "in", "a", "consistent", "way" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2321-L2338
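A sketch of the prefer_title fallback, assuming rstrip_punctuation trims the trailing full stop. Note the fallback title keeps the original punctuation because it copies label_value, not the stripped label:

from collections import OrderedDict
from elifetools import parseJATS as parser

tag_content = OrderedDict()
parser.body_block_title_label_caption(
    tag_content, title_value=None, label_value="Appendix 1.",
    caption_content=None, prefer_title=True)
print(tag_content)
# OrderedDict([('title', 'Appendix 1.')])  (the stripped "label" was set, then removed)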
elifesciences/elife-tools
elifetools/parseJATS.py
body_block_attribution
def body_block_attribution(tag): "extract the attribution content for figures, tables, videos" attributions = [] if raw_parser.attrib(tag): for attrib_tag in raw_parser.attrib(tag): attributions.append(node_contents_str(attrib_tag)) if raw_parser.permissions(tag): # concatenate content from the permissions tag for permissions_tag in raw_parser.permissions(tag): attrib_string = '' # add the copyright statement if found attrib_string = join_sentences(attrib_string, node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.') # add the license paragraphs if raw_parser.licence_p(permissions_tag): for licence_p_tag in raw_parser.licence_p(permissions_tag): attrib_string = join_sentences(attrib_string, node_contents_str(licence_p_tag), '.') if attrib_string != '': attributions.append(attrib_string) return attributions
python
def body_block_attribution(tag): "extract the attribution content for figures, tables, videos" attributions = [] if raw_parser.attrib(tag): for attrib_tag in raw_parser.attrib(tag): attributions.append(node_contents_str(attrib_tag)) if raw_parser.permissions(tag): # concatenate content from the permissions tag for permissions_tag in raw_parser.permissions(tag): attrib_string = '' # add the copyright statement if found attrib_string = join_sentences(attrib_string, node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.') # add the license paragraphs if raw_parser.licence_p(permissions_tag): for licence_p_tag in raw_parser.licence_p(permissions_tag): attrib_string = join_sentences(attrib_string, node_contents_str(licence_p_tag), '.') if attrib_string != '': attributions.append(attrib_string) return attributions
[ "def", "body_block_attribution", "(", "tag", ")", ":", "attributions", "=", "[", "]", "if", "raw_parser", ".", "attrib", "(", "tag", ")", ":", "for", "attrib_tag", "in", "raw_parser", ".", "attrib", "(", "tag", ")", ":", "attributions", ".", "append", "(", "node_contents_str", "(", "attrib_tag", ")", ")", "if", "raw_parser", ".", "permissions", "(", "tag", ")", ":", "# concatenate content from the permissions tag", "for", "permissions_tag", "in", "raw_parser", ".", "permissions", "(", "tag", ")", ":", "attrib_string", "=", "''", "# add the copyright statement if found", "attrib_string", "=", "join_sentences", "(", "attrib_string", ",", "node_contents_str", "(", "raw_parser", ".", "copyright_statement", "(", "permissions_tag", ")", ")", ",", "'.'", ")", "# add the license paragraphs", "if", "raw_parser", ".", "licence_p", "(", "permissions_tag", ")", ":", "for", "licence_p_tag", "in", "raw_parser", ".", "licence_p", "(", "permissions_tag", ")", ":", "attrib_string", "=", "join_sentences", "(", "attrib_string", ",", "node_contents_str", "(", "licence_p_tag", ")", ",", "'.'", ")", "if", "attrib_string", "!=", "''", ":", "attributions", ".", "append", "(", "attrib_string", ")", "return", "attributions" ]
extract the attribution content for figures, tables, videos
[ "extract", "the", "attribution", "content", "for", "figures", "tables", "videos" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2340-L2360
elifesciences/elife-tools
elifetools/parseJATS.py
body_blocks
def body_blocks(soup): """ Note: for some reason this works while few other attempted methods do Search for certain node types, find the first node's siblings of the same type Add the first sibling and the other siblings to a list and return them """ nodenames = body_block_nodenames() body_block_tags = [] if not soup: return body_block_tags first_sibling_node = firstnn(soup.find_all()) if first_sibling_node is None: return body_block_tags sibling_tags = first_sibling_node.find_next_siblings(nodenames) # Add the first component tag and the ResultSet tags together body_block_tags.append(first_sibling_node) for tag in sibling_tags: body_block_tags.append(tag) return body_block_tags
python
def body_blocks(soup): """ Note: for some reason this works while few other attempted methods do Search for certain node types, find the first node's siblings of the same type Add the first sibling and the other siblings to a list and return them """ nodenames = body_block_nodenames() body_block_tags = [] if not soup: return body_block_tags first_sibling_node = firstnn(soup.find_all()) if first_sibling_node is None: return body_block_tags sibling_tags = first_sibling_node.find_next_siblings(nodenames) # Add the first component tag and the ResultSet tags together body_block_tags.append(first_sibling_node) for tag in sibling_tags: body_block_tags.append(tag) return body_block_tags
[ "def", "body_blocks", "(", "soup", ")", ":", "nodenames", "=", "body_block_nodenames", "(", ")", "body_block_tags", "=", "[", "]", "if", "not", "soup", ":", "return", "body_block_tags", "first_sibling_node", "=", "firstnn", "(", "soup", ".", "find_all", "(", ")", ")", "if", "first_sibling_node", "is", "None", ":", "return", "body_block_tags", "sibling_tags", "=", "first_sibling_node", ".", "find_next_siblings", "(", "nodenames", ")", "# Add the first component tag and the ResultSet tags together", "body_block_tags", ".", "append", "(", "first_sibling_node", ")", "for", "tag", "in", "sibling_tags", ":", "body_block_tags", ".", "append", "(", "tag", ")", "return", "body_block_tags" ]
Note: for some reason this works while few other attempted methods do Search for certain node types, find the first node's siblings of the same type Add the first sibling and the other siblings to a list and return them
[ "Note", ":", "for", "some", "reason", "this", "works", "while", "few", "other", "attempted", "methods", "do", "Search", "for", "certain", "node", "types", "find", "the", "first", "node's", "siblings", "of", "the", "same", "type", "Add", "the", "first", "sibling", "and", "the", "other", "siblings", "to", "a", "list", "and", "return", "them" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2655-L2681
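A rough sketch with BeautifulSoup and the lxml-xml parser, assuming 'p' and 'fig' are among the names returned by body_block_nodenames() and that firstnn returns the first non-None element:

from bs4 import BeautifulSoup
from elifetools import parseJATS as parser

soup = BeautifulSoup(
    "<body><p>First paragraph</p><fig id='f1'/><p>Second paragraph</p></body>",
    "lxml-xml")
body = soup.find("body")
print([tag.name for tag in parser.body_blocks(body)])
# ['p', 'fig', 'p']  (the first block tag plus its same-level siblings)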
elifesciences/elife-tools
elifetools/parseJATS.py
abstract_json
def abstract_json(soup): """abstract in article json format""" abstract_tags = raw_parser.abstract(soup) abstract_json = None for tag in abstract_tags: if tag.get("abstract-type") is None: abstract_json = render_abstract_json(tag) return abstract_json
python
def abstract_json(soup): """abstract in article json format""" abstract_tags = raw_parser.abstract(soup) abstract_json = None for tag in abstract_tags: if tag.get("abstract-type") is None: abstract_json = render_abstract_json(tag) return abstract_json
[ "def", "abstract_json", "(", "soup", ")", ":", "abstract_tags", "=", "raw_parser", ".", "abstract", "(", "soup", ")", "abstract_json", "=", "None", "for", "tag", "in", "abstract_tags", ":", "if", "tag", ".", "get", "(", "\"abstract-type\"", ")", "is", "None", ":", "abstract_json", "=", "render_abstract_json", "(", "tag", ")", "return", "abstract_json" ]
abstract in article json format
[ "abstract", "in", "article", "json", "format" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2763-L2770
elifesciences/elife-tools
elifetools/parseJATS.py
digest_json
def digest_json(soup): """digest in article json format""" abstract_tags = raw_parser.abstract(soup, abstract_type="executive-summary") abstract_json = None for tag in abstract_tags: abstract_json = render_abstract_json(tag) return abstract_json
python
def digest_json(soup): """digest in article json format""" abstract_tags = raw_parser.abstract(soup, abstract_type="executive-summary") abstract_json = None for tag in abstract_tags: abstract_json = render_abstract_json(tag) return abstract_json
[ "def", "digest_json", "(", "soup", ")", ":", "abstract_tags", "=", "raw_parser", ".", "abstract", "(", "soup", ",", "abstract_type", "=", "\"executive-summary\"", ")", "abstract_json", "=", "None", "for", "tag", "in", "abstract_tags", ":", "abstract_json", "=", "render_abstract_json", "(", "tag", ")", "return", "abstract_json" ]
digest in article json format
[ "digest", "in", "article", "json", "format" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2773-L2779
elifesciences/elife-tools
elifetools/parseJATS.py
author_affiliations
def author_affiliations(author, html_flag=True): """compile author affiliations for json output""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) affilations = [] if author.get("affiliations"): for affiliation in author.get("affiliations"): affiliation_json = OrderedDict() affiliation_json["name"] = [] if affiliation.get("dept"): affiliation_json["name"].append(convert(affiliation.get("dept"))) if affiliation.get("institution") and affiliation.get("institution").strip() != '': affiliation_json["name"].append(convert(affiliation.get("institution"))) # Remove if empty if affiliation_json["name"] == []: del affiliation_json["name"] if ((affiliation.get("city") and affiliation.get("city").strip() != '') or affiliation.get("country") and affiliation.get("country").strip() != ''): affiliation_address = OrderedDict() affiliation_address["formatted"] = [] affiliation_address["components"] = OrderedDict() if affiliation.get("city") and affiliation.get("city").strip() != '': affiliation_address["formatted"].append(affiliation.get("city")) affiliation_address["components"]["locality"] = [] affiliation_address["components"]["locality"].append(affiliation.get("city")) if affiliation.get("country") and affiliation.get("country").strip() != '': affiliation_address["formatted"].append(affiliation.get("country")) affiliation_address["components"]["country"] = affiliation.get("country") # Add if not empty if affiliation_address != {}: affiliation_json["address"] = affiliation_address # Add if not empty if affiliation_json != {}: affilations.append(affiliation_json) if affilations != []: return affilations else: return None
python
def author_affiliations(author, html_flag=True): """compile author affiliations for json output""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) affilations = [] if author.get("affiliations"): for affiliation in author.get("affiliations"): affiliation_json = OrderedDict() affiliation_json["name"] = [] if affiliation.get("dept"): affiliation_json["name"].append(convert(affiliation.get("dept"))) if affiliation.get("institution") and affiliation.get("institution").strip() != '': affiliation_json["name"].append(convert(affiliation.get("institution"))) # Remove if empty if affiliation_json["name"] == []: del affiliation_json["name"] if ((affiliation.get("city") and affiliation.get("city").strip() != '') or affiliation.get("country") and affiliation.get("country").strip() != ''): affiliation_address = OrderedDict() affiliation_address["formatted"] = [] affiliation_address["components"] = OrderedDict() if affiliation.get("city") and affiliation.get("city").strip() != '': affiliation_address["formatted"].append(affiliation.get("city")) affiliation_address["components"]["locality"] = [] affiliation_address["components"]["locality"].append(affiliation.get("city")) if affiliation.get("country") and affiliation.get("country").strip() != '': affiliation_address["formatted"].append(affiliation.get("country")) affiliation_address["components"]["country"] = affiliation.get("country") # Add if not empty if affiliation_address != {}: affiliation_json["address"] = affiliation_address # Add if not empty if affiliation_json != {}: affilations.append(affiliation_json) if affilations != []: return affilations else: return None
[ "def", "author_affiliations", "(", "author", ",", "html_flag", "=", "True", ")", ":", "# Configure the XML to HTML conversion preference for shorthand use below", "convert", "=", "lambda", "xml_string", ":", "xml_to_html", "(", "html_flag", ",", "xml_string", ")", "affilations", "=", "[", "]", "if", "author", ".", "get", "(", "\"affiliations\"", ")", ":", "for", "affiliation", "in", "author", ".", "get", "(", "\"affiliations\"", ")", ":", "affiliation_json", "=", "OrderedDict", "(", ")", "affiliation_json", "[", "\"name\"", "]", "=", "[", "]", "if", "affiliation", ".", "get", "(", "\"dept\"", ")", ":", "affiliation_json", "[", "\"name\"", "]", ".", "append", "(", "convert", "(", "affiliation", ".", "get", "(", "\"dept\"", ")", ")", ")", "if", "affiliation", ".", "get", "(", "\"institution\"", ")", "and", "affiliation", ".", "get", "(", "\"institution\"", ")", ".", "strip", "(", ")", "!=", "''", ":", "affiliation_json", "[", "\"name\"", "]", ".", "append", "(", "convert", "(", "affiliation", ".", "get", "(", "\"institution\"", ")", ")", ")", "# Remove if empty", "if", "affiliation_json", "[", "\"name\"", "]", "==", "[", "]", ":", "del", "affiliation_json", "[", "\"name\"", "]", "if", "(", "(", "affiliation", ".", "get", "(", "\"city\"", ")", "and", "affiliation", ".", "get", "(", "\"city\"", ")", ".", "strip", "(", ")", "!=", "''", ")", "or", "affiliation", ".", "get", "(", "\"country\"", ")", "and", "affiliation", ".", "get", "(", "\"country\"", ")", ".", "strip", "(", ")", "!=", "''", ")", ":", "affiliation_address", "=", "OrderedDict", "(", ")", "affiliation_address", "[", "\"formatted\"", "]", "=", "[", "]", "affiliation_address", "[", "\"components\"", "]", "=", "OrderedDict", "(", ")", "if", "affiliation", ".", "get", "(", "\"city\"", ")", "and", "affiliation", ".", "get", "(", "\"city\"", ")", ".", "strip", "(", ")", "!=", "''", ":", "affiliation_address", "[", "\"formatted\"", "]", ".", "append", "(", "affiliation", ".", "get", "(", "\"city\"", ")", ")", "affiliation_address", "[", "\"components\"", "]", "[", "\"locality\"", "]", "=", "[", "]", "affiliation_address", "[", "\"components\"", "]", "[", "\"locality\"", "]", ".", "append", "(", "affiliation", ".", "get", "(", "\"city\"", ")", ")", "if", "affiliation", ".", "get", "(", "\"country\"", ")", "and", "affiliation", ".", "get", "(", "\"country\"", ")", ".", "strip", "(", ")", "!=", "''", ":", "affiliation_address", "[", "\"formatted\"", "]", ".", "append", "(", "affiliation", ".", "get", "(", "\"country\"", ")", ")", "affiliation_address", "[", "\"components\"", "]", "[", "\"country\"", "]", "=", "affiliation", ".", "get", "(", "\"country\"", ")", "# Add if not empty", "if", "affiliation_address", "!=", "{", "}", ":", "affiliation_json", "[", "\"address\"", "]", "=", "affiliation_address", "# Add if not empty", "if", "affiliation_json", "!=", "{", "}", ":", "affilations", ".", "append", "(", "affiliation_json", ")", "if", "affilations", "!=", "[", "]", ":", "return", "affilations", "else", ":", "return", "None" ]
compile author affiliations for json output
[ "compile", "author", "affiliations", "for", "json", "output" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2782-L2825
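A usage sketch; the names and places are made up, and xml_to_html is assumed to pass plain strings through unchanged:

from elifetools import parseJATS as parser

author = {"affiliations": [{
    "dept": "Department of Example Studies",
    "institution": "University of Somewhere",
    "city": "Cambridge",
    "country": "United Kingdom",
}]}
result = parser.author_affiliations(author)
print(result[0]["name"])
# ['Department of Example Studies', 'University of Somewhere']
print(result[0]["address"]["formatted"])
# ['Cambridge', 'United Kingdom']
print(result[0]["address"]["components"]["locality"])
# ['Cambridge']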
elifesciences/elife-tools
elifetools/parseJATS.py
author_json_details
def author_json_details(author, author_json, contributions, correspondence, competing_interests, equal_contributions_map, present_address_data, foot_notes_data, html_flag=True): """add more author json""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) if author_affiliations(author): author_json["affiliations"] = author_affiliations(author) # foot notes or additionalInformation if author_foot_notes(author, foot_notes_data): author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data) # email if author_email_addresses(author, correspondence): author_json["emailAddresses"] = author_email_addresses(author, correspondence) # phone if author_phone_numbers(author, correspondence): author_json["phoneNumbers"] = author_phone_numbers_json(author, correspondence) # contributions if author_contribution(author, contributions): author_json["contribution"] = convert(author_contribution(author, contributions)) # competing interests if author_competing_interests(author, competing_interests): author_json["competingInterests"] = convert( author_competing_interests(author, competing_interests)) # equal-contributions if author_equal_contribution(author, equal_contributions_map): author_json["equalContributionGroups"] = author_equal_contribution(author, equal_contributions_map) # postalAddress if author_present_address(author, present_address_data): author_json["postalAddresses"] = author_present_address(author, present_address_data) return author_json
python
def author_json_details(author, author_json, contributions, correspondence, competing_interests, equal_contributions_map, present_address_data, foot_notes_data, html_flag=True): """add more author json""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) if author_affiliations(author): author_json["affiliations"] = author_affiliations(author) # foot notes or additionalInformation if author_foot_notes(author, foot_notes_data): author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data) # email if author_email_addresses(author, correspondence): author_json["emailAddresses"] = author_email_addresses(author, correspondence) # phone if author_phone_numbers(author, correspondence): author_json["phoneNumbers"] = author_phone_numbers_json(author, correspondence) # contributions if author_contribution(author, contributions): author_json["contribution"] = convert(author_contribution(author, contributions)) # competing interests if author_competing_interests(author, competing_interests): author_json["competingInterests"] = convert( author_competing_interests(author, competing_interests)) # equal-contributions if author_equal_contribution(author, equal_contributions_map): author_json["equalContributionGroups"] = author_equal_contribution(author, equal_contributions_map) # postalAddress if author_present_address(author, present_address_data): author_json["postalAddresses"] = author_present_address(author, present_address_data) return author_json
[ "def", "author_json_details", "(", "author", ",", "author_json", ",", "contributions", ",", "correspondence", ",", "competing_interests", ",", "equal_contributions_map", ",", "present_address_data", ",", "foot_notes_data", ",", "html_flag", "=", "True", ")", ":", "# Configure the XML to HTML conversion preference for shorthand use below", "convert", "=", "lambda", "xml_string", ":", "xml_to_html", "(", "html_flag", ",", "xml_string", ")", "if", "author_affiliations", "(", "author", ")", ":", "author_json", "[", "\"affiliations\"", "]", "=", "author_affiliations", "(", "author", ")", "# foot notes or additionalInformation", "if", "author_foot_notes", "(", "author", ",", "foot_notes_data", ")", ":", "author_json", "[", "\"additionalInformation\"", "]", "=", "author_foot_notes", "(", "author", ",", "foot_notes_data", ")", "# email", "if", "author_email_addresses", "(", "author", ",", "correspondence", ")", ":", "author_json", "[", "\"emailAddresses\"", "]", "=", "author_email_addresses", "(", "author", ",", "correspondence", ")", "# phone", "if", "author_phone_numbers", "(", "author", ",", "correspondence", ")", ":", "author_json", "[", "\"phoneNumbers\"", "]", "=", "author_phone_numbers_json", "(", "author", ",", "correspondence", ")", "# contributions", "if", "author_contribution", "(", "author", ",", "contributions", ")", ":", "author_json", "[", "\"contribution\"", "]", "=", "convert", "(", "author_contribution", "(", "author", ",", "contributions", ")", ")", "# competing interests", "if", "author_competing_interests", "(", "author", ",", "competing_interests", ")", ":", "author_json", "[", "\"competingInterests\"", "]", "=", "convert", "(", "author_competing_interests", "(", "author", ",", "competing_interests", ")", ")", "# equal-contributions", "if", "author_equal_contribution", "(", "author", ",", "equal_contributions_map", ")", ":", "author_json", "[", "\"equalContributionGroups\"", "]", "=", "author_equal_contribution", "(", "author", ",", "equal_contributions_map", ")", "# postalAddress", "if", "author_present_address", "(", "author", ",", "present_address_data", ")", ":", "author_json", "[", "\"postalAddresses\"", "]", "=", "author_present_address", "(", "author", ",", "present_address_data", ")", "return", "author_json" ]
add more author json
[ "add", "more", "author", "json" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2963-L3002
elifesciences/elife-tools
elifetools/parseJATS.py
collab_to_group_author_key_map
def collab_to_group_author_key_map(authors): """compile a map of author collab to group-author-key""" collab_map = {} for author in authors: if author.get("collab"): collab_map[author.get("collab")] = author.get("group-author-key") return collab_map
python
def collab_to_group_author_key_map(authors): """compile a map of author collab to group-author-key""" collab_map = {} for author in authors: if author.get("collab"): collab_map[author.get("collab")] = author.get("group-author-key") return collab_map
[ "def", "collab_to_group_author_key_map", "(", "authors", ")", ":", "collab_map", "=", "{", "}", "for", "author", "in", "authors", ":", "if", "author", ".", "get", "(", "\"collab\"", ")", ":", "collab_map", "[", "author", ".", "get", "(", "\"collab\"", ")", "]", "=", "author", ".", "get", "(", "\"group-author-key\"", ")", "return", "collab_map" ]
compile a map of author collab to group-author-key
[ "compile", "a", "map", "of", "author", "collab", "to", "group", "-", "author", "-", "key" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3049-L3055
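A minimal sketch with hypothetical author dicts:

from elifetools import parseJATS as parser

authors = [
    {"collab": "The Example Consortium", "group-author-key": "group1"},
    {"surname": "Doe"},  # no collab, so this entry is skipped
]
print(parser.collab_to_group_author_key_map(authors))
# {'The Example Consortium': 'group1'}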
elifesciences/elife-tools
elifetools/parseJATS.py
map_equal_contributions
def map_equal_contributions(contributors): """assign numeric values to each unique equal-contrib id""" equal_contribution_map = {} equal_contribution_keys = [] for contributor in contributors: if contributor.get("references") and "equal-contrib" in contributor.get("references"): for key in contributor["references"]["equal-contrib"]: if key not in equal_contribution_keys: equal_contribution_keys.append(key) # Do a basic sort equal_contribution_keys = sorted(equal_contribution_keys) # Assign keys based on sorted values for i, equal_contribution_key in enumerate(equal_contribution_keys): equal_contribution_map[equal_contribution_key] = i+1 return equal_contribution_map
python
def map_equal_contributions(contributors): """assign numeric values to each unique equal-contrib id""" equal_contribution_map = {} equal_contribution_keys = [] for contributor in contributors: if contributor.get("references") and "equal-contrib" in contributor.get("references"): for key in contributor["references"]["equal-contrib"]: if key not in equal_contribution_keys: equal_contribution_keys.append(key) # Do a basic sort equal_contribution_keys = sorted(equal_contribution_keys) # Assign keys based on sorted values for i, equal_contribution_key in enumerate(equal_contribution_keys): equal_contribution_map[equal_contribution_key] = i+1 return equal_contribution_map
[ "def", "map_equal_contributions", "(", "contributors", ")", ":", "equal_contribution_map", "=", "{", "}", "equal_contribution_keys", "=", "[", "]", "for", "contributor", "in", "contributors", ":", "if", "contributor", ".", "get", "(", "\"references\"", ")", "and", "\"equal-contrib\"", "in", "contributor", ".", "get", "(", "\"references\"", ")", ":", "for", "key", "in", "contributor", "[", "\"references\"", "]", "[", "\"equal-contrib\"", "]", ":", "if", "key", "not", "in", "equal_contribution_keys", ":", "equal_contribution_keys", ".", "append", "(", "key", ")", "# Do a basic sort", "equal_contribution_keys", "=", "sorted", "(", "equal_contribution_keys", ")", "# Assign keys based on sorted values", "for", "i", ",", "equal_contribution_key", "in", "enumerate", "(", "equal_contribution_keys", ")", ":", "equal_contribution_map", "[", "equal_contribution_key", "]", "=", "i", "+", "1", "return", "equal_contribution_map" ]
assign numeric values to each unique equal-contrib id
[ "assign", "numeric", "values", "to", "each", "unique", "equal", "-", "contrib", "id" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3057-L3071
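A minimal sketch; the ids are sorted alphabetically before numbering starts at 1:

from elifetools import parseJATS as parser

contributors = [
    {"references": {"equal-contrib": ["equal-contrib2"]}},
    {"references": {"equal-contrib": ["equal-contrib1", "equal-contrib2"]}},
]
print(parser.map_equal_contributions(contributors))
# {'equal-contrib1': 1, 'equal-contrib2': 2}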
elifesciences/elife-tools
elifetools/parseJATS.py
authors_json
def authors_json(soup): """authors list in article json format""" authors_json_data = [] contributors_data = contributors(soup, "full") author_contributions_data = author_contributions(soup, None) author_competing_interests_data = competing_interests(soup, None) author_correspondence_data = full_correspondence(soup) authors_non_byline_data = authors_non_byline(soup) equal_contributions_map = map_equal_contributions(contributors_data) present_address_data = present_addresses(soup) foot_notes_data = other_foot_notes(soup) # First line authors builds basic structure for contributor in contributors_data: author_json = None if contributor["type"] == "author" and contributor.get("collab"): author_json = author_group(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) elif contributor.get("on-behalf-of"): author_json = author_on_behalf_of(contributor) elif contributor["type"] == "author" and not contributor.get("group-author-key"): author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if author_json: authors_json_data.append(author_json) # Second, add byline author data collab_map = collab_to_group_author_key_map(contributors_data) for contributor in [elem for elem in contributors_data if elem.get("group-author-key") and not elem.get("collab")]: for group_author in [elem for elem in authors_json_data if elem.get('type') == 'group']: group_author_key = None if group_author["name"] in collab_map: group_author_key = collab_map[group_author["name"]] if contributor.get("group-author-key") == group_author_key: author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if contributor.get("sub-group"): if "groups" not in group_author: group_author["groups"] = OrderedDict() if contributor.get("sub-group") not in group_author["groups"]: group_author["groups"][contributor.get("sub-group")] = [] group_author["groups"][contributor.get("sub-group")].append(author_json) else: if "people" not in group_author: group_author["people"] = [] group_author["people"].append(author_json) authors_json_data_rewritten = elifetools.json_rewrite.rewrite_json("authors_json", soup, authors_json_data) return authors_json_data_rewritten
python
def authors_json(soup): """authors list in article json format""" authors_json_data = [] contributors_data = contributors(soup, "full") author_contributions_data = author_contributions(soup, None) author_competing_interests_data = competing_interests(soup, None) author_correspondence_data = full_correspondence(soup) authors_non_byline_data = authors_non_byline(soup) equal_contributions_map = map_equal_contributions(contributors_data) present_address_data = present_addresses(soup) foot_notes_data = other_foot_notes(soup) # First line authors builds basic structure for contributor in contributors_data: author_json = None if contributor["type"] == "author" and contributor.get("collab"): author_json = author_group(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) elif contributor.get("on-behalf-of"): author_json = author_on_behalf_of(contributor) elif contributor["type"] == "author" and not contributor.get("group-author-key"): author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if author_json: authors_json_data.append(author_json) # Second, add byline author data collab_map = collab_to_group_author_key_map(contributors_data) for contributor in [elem for elem in contributors_data if elem.get("group-author-key") and not elem.get("collab")]: for group_author in [elem for elem in authors_json_data if elem.get('type') == 'group']: group_author_key = None if group_author["name"] in collab_map: group_author_key = collab_map[group_author["name"]] if contributor.get("group-author-key") == group_author_key: author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if contributor.get("sub-group"): if "groups" not in group_author: group_author["groups"] = OrderedDict() if contributor.get("sub-group") not in group_author["groups"]: group_author["groups"][contributor.get("sub-group")] = [] group_author["groups"][contributor.get("sub-group")].append(author_json) else: if "people" not in group_author: group_author["people"] = [] group_author["people"].append(author_json) authors_json_data_rewritten = elifetools.json_rewrite.rewrite_json("authors_json", soup, authors_json_data) return authors_json_data_rewritten
[ "def", "authors_json", "(", "soup", ")", ":", "authors_json_data", "=", "[", "]", "contributors_data", "=", "contributors", "(", "soup", ",", "\"full\"", ")", "author_contributions_data", "=", "author_contributions", "(", "soup", ",", "None", ")", "author_competing_interests_data", "=", "competing_interests", "(", "soup", ",", "None", ")", "author_correspondence_data", "=", "full_correspondence", "(", "soup", ")", "authors_non_byline_data", "=", "authors_non_byline", "(", "soup", ")", "equal_contributions_map", "=", "map_equal_contributions", "(", "contributors_data", ")", "present_address_data", "=", "present_addresses", "(", "soup", ")", "foot_notes_data", "=", "other_foot_notes", "(", "soup", ")", "# First line authors builds basic structure", "for", "contributor", "in", "contributors_data", ":", "author_json", "=", "None", "if", "contributor", "[", "\"type\"", "]", "==", "\"author\"", "and", "contributor", ".", "get", "(", "\"collab\"", ")", ":", "author_json", "=", "author_group", "(", "contributor", ",", "author_contributions_data", ",", "author_correspondence_data", ",", "author_competing_interests_data", ",", "equal_contributions_map", ",", "present_address_data", ",", "foot_notes_data", ")", "elif", "contributor", ".", "get", "(", "\"on-behalf-of\"", ")", ":", "author_json", "=", "author_on_behalf_of", "(", "contributor", ")", "elif", "contributor", "[", "\"type\"", "]", "==", "\"author\"", "and", "not", "contributor", ".", "get", "(", "\"group-author-key\"", ")", ":", "author_json", "=", "author_person", "(", "contributor", ",", "author_contributions_data", ",", "author_correspondence_data", ",", "author_competing_interests_data", ",", "equal_contributions_map", ",", "present_address_data", ",", "foot_notes_data", ")", "if", "author_json", ":", "authors_json_data", ".", "append", "(", "author_json", ")", "# Second, add byline author data", "collab_map", "=", "collab_to_group_author_key_map", "(", "contributors_data", ")", "for", "contributor", "in", "[", "elem", "for", "elem", "in", "contributors_data", "if", "elem", ".", "get", "(", "\"group-author-key\"", ")", "and", "not", "elem", ".", "get", "(", "\"collab\"", ")", "]", ":", "for", "group_author", "in", "[", "elem", "for", "elem", "in", "authors_json_data", "if", "elem", ".", "get", "(", "'type'", ")", "==", "'group'", "]", ":", "group_author_key", "=", "None", "if", "group_author", "[", "\"name\"", "]", "in", "collab_map", ":", "group_author_key", "=", "collab_map", "[", "group_author", "[", "\"name\"", "]", "]", "if", "contributor", ".", "get", "(", "\"group-author-key\"", ")", "==", "group_author_key", ":", "author_json", "=", "author_person", "(", "contributor", ",", "author_contributions_data", ",", "author_correspondence_data", ",", "author_competing_interests_data", ",", "equal_contributions_map", ",", "present_address_data", ",", "foot_notes_data", ")", "if", "contributor", ".", "get", "(", "\"sub-group\"", ")", ":", "if", "\"groups\"", "not", "in", "group_author", ":", "group_author", "[", "\"groups\"", "]", "=", "OrderedDict", "(", ")", "if", "contributor", ".", "get", "(", "\"sub-group\"", ")", "not", "in", "group_author", "[", "\"groups\"", "]", ":", "group_author", "[", "\"groups\"", "]", "[", "contributor", ".", "get", "(", "\"sub-group\"", ")", "]", "=", "[", "]", "group_author", "[", "\"groups\"", "]", "[", "contributor", ".", "get", "(", "\"sub-group\"", ")", "]", ".", "append", "(", "author_json", ")", "else", ":", "if", "\"people\"", "not", "in", "group_author", ":", "group_author", "[", "\"people\"", 
"]", "=", "[", "]", "group_author", "[", "\"people\"", "]", ".", "append", "(", "author_json", ")", "authors_json_data_rewritten", "=", "elifetools", ".", "json_rewrite", ".", "rewrite_json", "(", "\"authors_json\"", ",", "soup", ",", "authors_json_data", ")", "return", "authors_json_data_rewritten" ]
authors list in article json format
[ "authors", "list", "in", "article", "json", "format" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3088-L3141
elifesciences/elife-tools
elifetools/parseJATS.py
author_line
def author_line(soup): """take preferred names from authors json and format them into an author line""" author_line = None authors_json_data = authors_json(soup) author_names = extract_author_line_names(authors_json_data) if len(author_names) > 0: author_line = format_author_line(author_names) return author_line
python
def author_line(soup): """take preferred names from authors json and format them into an author line""" author_line = None authors_json_data = authors_json(soup) author_names = extract_author_line_names(authors_json_data) if len(author_names) > 0: author_line = format_author_line(author_names) return author_line
[ "def", "author_line", "(", "soup", ")", ":", "author_line", "=", "None", "authors_json_data", "=", "authors_json", "(", "soup", ")", "author_names", "=", "extract_author_line_names", "(", "authors_json_data", ")", "if", "len", "(", "author_names", ")", ">", "0", ":", "author_line", "=", "format_author_line", "(", "author_names", ")", "return", "author_line" ]
take preferred names from authors json and format them into an author line
[ "take", "preferred", "names", "from", "authors", "json", "and", "format", "them", "into", "an", "author", "line" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3143-L3150
elifesciences/elife-tools
elifetools/parseJATS.py
format_author_line
def format_author_line(author_names): """authorLine format depends on if there is 1, 2 or more than 2 authors""" author_line = None if not author_names: return author_line if len(author_names) <= 2: author_line = ", ".join(author_names) elif len(author_names) > 2: author_line = author_names[0] + " et al." return author_line
python
def format_author_line(author_names): """authorLine format depends on if there is 1, 2 or more than 2 authors""" author_line = None if not author_names: return author_line if len(author_names) <= 2: author_line = ", ".join(author_names) elif len(author_names) > 2: author_line = author_names[0] + " et al." return author_line
[ "def", "format_author_line", "(", "author_names", ")", ":", "author_line", "=", "None", "if", "not", "author_names", ":", "return", "author_line", "if", "len", "(", "author_names", ")", "<=", "2", ":", "author_line", "=", "\", \"", ".", "join", "(", "author_names", ")", "elif", "len", "(", "author_names", ")", ">", "2", ":", "author_line", "=", "author_names", "[", "0", "]", "+", "\" et al.\"", "return", "author_line" ]
authorLine format depends on if there is 1, 2 or more than 2 authors
[ "authorLine", "format", "depends", "on", "if", "there", "is", "1", "2", "or", "more", "than", "2", "authors" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3165-L3174
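The three branches in a quick sketch (the names are hypothetical):

from elifetools import parseJATS as parser

print(parser.format_author_line(["Jane Doe"]))                         # Jane Doe
print(parser.format_author_line(["Jane Doe", "John Roe"]))             # Jane Doe, John Roe
print(parser.format_author_line(["Jane Doe", "John Roe", "Ann Poe"]))  # Jane Doe et al.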
elifesciences/elife-tools
elifetools/parseJATS.py
references_date
def references_date(year=None): "Handle year value parsing for some edge cases" date = None discriminator = None in_press = None if year and "in press" in year.lower().strip(): in_press = True elif year and re.match("^[0-9]+$", year): date = year elif year: discriminator_match = re.match("^([0-9]+?)([a-z]+?)$", year) if discriminator_match: date = discriminator_match.group(1) discriminator = discriminator_match.group(2) else: date = year return (date, discriminator, in_press)
python
def references_date(year=None): "Handle year value parsing for some edge cases" date = None discriminator = None in_press = None if year and "in press" in year.lower().strip(): in_press = True elif year and re.match("^[0-9]+$", year): date = year elif year: discriminator_match = re.match("^([0-9]+?)([a-z]+?)$", year) if discriminator_match: date = discriminator_match.group(1) discriminator = discriminator_match.group(2) else: date = year return (date, discriminator, in_press)
[ "def", "references_date", "(", "year", "=", "None", ")", ":", "date", "=", "None", "discriminator", "=", "None", "in_press", "=", "None", "if", "year", "and", "\"in press\"", "in", "year", ".", "lower", "(", ")", ".", "strip", "(", ")", ":", "in_press", "=", "True", "elif", "year", "and", "re", ".", "match", "(", "\"^[0-9]+$\"", ",", "year", ")", ":", "date", "=", "year", "elif", "year", ":", "discriminator_match", "=", "re", ".", "match", "(", "\"^([0-9]+?)([a-z]+?)$\"", ",", "year", ")", "if", "discriminator_match", ":", "date", "=", "discriminator_match", ".", "group", "(", "1", ")", "discriminator", "=", "discriminator_match", ".", "group", "(", "2", ")", "else", ":", "date", "=", "year", "return", "(", "date", ",", "discriminator", ",", "in_press", ")" ]
Handle year value parsing for some edge cases
[ "Handle", "year", "value", "parsing", "for", "some", "edge", "cases" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3210-L3226
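The three edge cases in a quick sketch:

from elifetools import parseJATS as parser

print(parser.references_date("2012"))      # ('2012', None, None)
print(parser.references_date("2011a"))     # ('2011', 'a', None)  year plus discriminator
print(parser.references_date("in press"))  # (None, None, True)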
elifesciences/elife-tools
elifetools/parseJATS.py
references_json_authors
def references_json_authors(ref_authors, ref_content): "build the authors for references json here for testability" all_authors = references_authors(ref_authors) if all_authors != {}: if ref_content.get("type") in ["conference-proceeding", "journal", "other", "periodical", "preprint", "report", "web"]: for author_type in ["authors", "authorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["book", "book-chapter"]: for author_type in ["authors", "authorsEtAl", "editors", "editorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["clinical-trial"]: # Always set as authors, once, then add the authorsType for author_type in ["authors", "collaborators", "sponsors"]: if "authorsType" not in ref_content and all_authors.get(author_type): set_if_value(ref_content, "authors", all_authors.get(author_type)) set_if_value(ref_content, "authorsEtAl", all_authors.get(author_type + "EtAl")) ref_content["authorsType"] = author_type elif ref_content.get("type") in ["data", "software"]: for author_type in ["authors", "authorsEtAl", "compilers", "compilersEtAl", "curators", "curatorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["patent"]: for author_type in ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["thesis"]: # Convert list to a non-list if all_authors.get("authors") and len(all_authors.get("authors")) > 0: ref_content["author"] = all_authors.get("authors")[0] return ref_content
python
def references_json_authors(ref_authors, ref_content): "build the authors for references json here for testability" all_authors = references_authors(ref_authors) if all_authors != {}: if ref_content.get("type") in ["conference-proceeding", "journal", "other", "periodical", "preprint", "report", "web"]: for author_type in ["authors", "authorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["book", "book-chapter"]: for author_type in ["authors", "authorsEtAl", "editors", "editorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["clinical-trial"]: # Always set as authors, once, then add the authorsType for author_type in ["authors", "collaborators", "sponsors"]: if "authorsType" not in ref_content and all_authors.get(author_type): set_if_value(ref_content, "authors", all_authors.get(author_type)) set_if_value(ref_content, "authorsEtAl", all_authors.get(author_type + "EtAl")) ref_content["authorsType"] = author_type elif ref_content.get("type") in ["data", "software"]: for author_type in ["authors", "authorsEtAl", "compilers", "compilersEtAl", "curators", "curatorsEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["patent"]: for author_type in ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]: set_if_value(ref_content, author_type, all_authors.get(author_type)) elif ref_content.get("type") in ["thesis"]: # Convert list to a non-list if all_authors.get("authors") and len(all_authors.get("authors")) > 0: ref_content["author"] = all_authors.get("authors")[0] return ref_content
[ "def", "references_json_authors", "(", "ref_authors", ",", "ref_content", ")", ":", "all_authors", "=", "references_authors", "(", "ref_authors", ")", "if", "all_authors", "!=", "{", "}", ":", "if", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"conference-proceeding\"", ",", "\"journal\"", ",", "\"other\"", ",", "\"periodical\"", ",", "\"preprint\"", ",", "\"report\"", ",", "\"web\"", "]", ":", "for", "author_type", "in", "[", "\"authors\"", ",", "\"authorsEtAl\"", "]", ":", "set_if_value", "(", "ref_content", ",", "author_type", ",", "all_authors", ".", "get", "(", "author_type", ")", ")", "elif", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"book\"", ",", "\"book-chapter\"", "]", ":", "for", "author_type", "in", "[", "\"authors\"", ",", "\"authorsEtAl\"", ",", "\"editors\"", ",", "\"editorsEtAl\"", "]", ":", "set_if_value", "(", "ref_content", ",", "author_type", ",", "all_authors", ".", "get", "(", "author_type", ")", ")", "elif", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"clinical-trial\"", "]", ":", "# Always set as authors, once, then add the authorsType", "for", "author_type", "in", "[", "\"authors\"", ",", "\"collaborators\"", ",", "\"sponsors\"", "]", ":", "if", "\"authorsType\"", "not", "in", "ref_content", "and", "all_authors", ".", "get", "(", "author_type", ")", ":", "set_if_value", "(", "ref_content", ",", "\"authors\"", ",", "all_authors", ".", "get", "(", "author_type", ")", ")", "set_if_value", "(", "ref_content", ",", "\"authorsEtAl\"", ",", "all_authors", ".", "get", "(", "author_type", "+", "\"EtAl\"", ")", ")", "ref_content", "[", "\"authorsType\"", "]", "=", "author_type", "elif", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"data\"", ",", "\"software\"", "]", ":", "for", "author_type", "in", "[", "\"authors\"", ",", "\"authorsEtAl\"", ",", "\"compilers\"", ",", "\"compilersEtAl\"", ",", "\"curators\"", ",", "\"curatorsEtAl\"", "]", ":", "set_if_value", "(", "ref_content", ",", "author_type", ",", "all_authors", ".", "get", "(", "author_type", ")", ")", "elif", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"patent\"", "]", ":", "for", "author_type", "in", "[", "\"inventors\"", ",", "\"inventorsEtAl\"", ",", "\"assignees\"", ",", "\"assigneesEtAl\"", "]", ":", "set_if_value", "(", "ref_content", ",", "author_type", ",", "all_authors", ".", "get", "(", "author_type", ")", ")", "elif", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"thesis\"", "]", ":", "# Convert list to a non-list", "if", "all_authors", ".", "get", "(", "\"authors\"", ")", "and", "len", "(", "all_authors", ".", "get", "(", "\"authors\"", ")", ")", ">", "0", ":", "ref_content", "[", "\"author\"", "]", "=", "all_authors", ".", "get", "(", "\"authors\"", ")", "[", "0", "]", "return", "ref_content" ]
build the authors for references json here for testability
[ "build", "the", "authors", "for", "references", "json", "here", "for", "testability" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3256-L3285
elifesciences/elife-tools
elifetools/parseJATS.py
convert_references_json
def convert_references_json(ref_content, soup=None): "Check for references that will not pass schema validation, fix or convert them to unknown" # Convert reference to unknown if still missing important values if ( (ref_content.get("type") == "other") or (ref_content.get("type") == "book-chapter" and "editors" not in ref_content) or (ref_content.get("type") == "journal" and "articleTitle" not in ref_content) or (ref_content.get("type") in ["journal", "book-chapter"] and "pages" not in ref_content) or (ref_content.get("type") == "journal" and "journal" not in ref_content) or (ref_content.get("type") in ["book", "book-chapter", "report", "thesis", "software"] and "publisher" not in ref_content) or (ref_content.get("type") == "book" and "bookTitle" not in ref_content) or (ref_content.get("type") == "data" and "source" not in ref_content) or (ref_content.get("type") == "conference-proceeding" and "conference" not in ref_content) ): ref_content = references_json_to_unknown(ref_content, soup) return ref_content
python
def convert_references_json(ref_content, soup=None): "Check for references that will not pass schema validation, fix or convert them to unknown" # Convert reference to unknown if still missing important values if ( (ref_content.get("type") == "other") or (ref_content.get("type") == "book-chapter" and "editors" not in ref_content) or (ref_content.get("type") == "journal" and "articleTitle" not in ref_content) or (ref_content.get("type") in ["journal", "book-chapter"] and "pages" not in ref_content) or (ref_content.get("type") == "journal" and "journal" not in ref_content) or (ref_content.get("type") in ["book", "book-chapter", "report", "thesis", "software"] and "publisher" not in ref_content) or (ref_content.get("type") == "book" and "bookTitle" not in ref_content) or (ref_content.get("type") == "data" and "source" not in ref_content) or (ref_content.get("type") == "conference-proceeding" and "conference" not in ref_content) ): ref_content = references_json_to_unknown(ref_content, soup) return ref_content
[ "def", "convert_references_json", "(", "ref_content", ",", "soup", "=", "None", ")", ":", "# Convert reference to unknown if still missing important values", "if", "(", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"other\"", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"book-chapter\"", "and", "\"editors\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"journal\"", "and", "\"articleTitle\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"journal\"", ",", "\"book-chapter\"", "]", "and", "\"pages\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"journal\"", "and", "\"journal\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "in", "[", "\"book\"", ",", "\"book-chapter\"", ",", "\"report\"", ",", "\"thesis\"", ",", "\"software\"", "]", "and", "\"publisher\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"book\"", "and", "\"bookTitle\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"data\"", "and", "\"source\"", "not", "in", "ref_content", ")", "or", "(", "ref_content", ".", "get", "(", "\"type\"", ")", "==", "\"conference-proceeding\"", "and", "\"conference\"", "not", "in", "ref_content", ")", ")", ":", "ref_content", "=", "references_json_to_unknown", "(", "ref_content", ",", "soup", ")", "return", "ref_content" ]
Check for references that will not pass schema validation, fix or convert them to unknown
[ "Check", "for", "references", "that", "will", "not", "pass", "schema", "validation", "fix", "or", "convert", "them", "to", "unknown" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3480-L3507
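A sketch of the pass-through case with a made-up journal reference; deleting any of the required keys (for example articleTitle) would instead route it through references_json_to_unknown:

from elifetools import parseJATS as parser

journal_ref = {
    "type": "journal",
    "articleTitle": "An example article",
    "journal": "Journal of Examples",
    "pages": {"range": "1-10"},
}
# no condition matches, so the same dict comes back unchanged
print(parser.convert_references_json(journal_ref) is journal_ref)  # True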
elifesciences/elife-tools
elifetools/parseJATS.py
references_json_unknown_details
def references_json_unknown_details(ref_content, soup=None): "Extract detail value for references of type unknown" details = "" # Try adding pages values first if "pages" in ref_content: if "range" in ref_content["pages"]: details += ref_content["pages"]["range"] else: details += ref_content["pages"] if soup: # Attempt to find the XML element by id, and convert it to details if "id" in ref_content: ref_tag = first(soup.select("ref#" + ref_content["id"])) if ref_tag: # Remove tags that are already part of the unknown reference for remove_tag in ["person-group", "year", "article-title", "elocation-id", "fpage", "lpage"]: ref_tag = remove_tag_from_tag(ref_tag, remove_tag) # Add the remaining tag content comma separated for tag in first(raw_parser.element_citation(ref_tag)): if node_text(tag) is not None: if details != "": details += ", " details += node_text(tag) if details == "": return None else: return details
python
def references_json_unknown_details(ref_content, soup=None): "Extract detail value for references of type unknown" details = "" # Try adding pages values first if "pages" in ref_content: if "range" in ref_content["pages"]: details += ref_content["pages"]["range"] else: details += ref_content["pages"] if soup: # Attempt to find the XML element by id, and convert it to details if "id" in ref_content: ref_tag = first(soup.select("ref#" + ref_content["id"])) if ref_tag: # Remove tags that are already part of the unknown reference for remove_tag in ["person-group", "year", "article-title", "elocation-id", "fpage", "lpage"]: ref_tag = remove_tag_from_tag(ref_tag, remove_tag) # Add the remaining tag content comma separated for tag in first(raw_parser.element_citation(ref_tag)): if node_text(tag) is not None: if details != "": details += ", " details += node_text(tag) if details == "": return None else: return details
[ "def", "references_json_unknown_details", "(", "ref_content", ",", "soup", "=", "None", ")", ":", "details", "=", "\"\"", "# Try adding pages values first", "if", "\"pages\"", "in", "ref_content", ":", "if", "\"range\"", "in", "ref_content", "[", "\"pages\"", "]", ":", "details", "+=", "ref_content", "[", "\"pages\"", "]", "[", "\"range\"", "]", "else", ":", "details", "+=", "ref_content", "[", "\"pages\"", "]", "if", "soup", ":", "# Attempt to find the XML element by id, and convert it to details", "if", "\"id\"", "in", "ref_content", ":", "ref_tag", "=", "first", "(", "soup", ".", "select", "(", "\"ref#\"", "+", "ref_content", "[", "\"id\"", "]", ")", ")", "if", "ref_tag", ":", "# Remove tags that are already part of the unknown reference", "for", "remove_tag", "in", "[", "\"person-group\"", ",", "\"year\"", ",", "\"article-title\"", ",", "\"elocation-id\"", ",", "\"fpage\"", ",", "\"lpage\"", "]", ":", "ref_tag", "=", "remove_tag_from_tag", "(", "ref_tag", ",", "remove_tag", ")", "# Add the remaining tag content comma separated", "for", "tag", "in", "first", "(", "raw_parser", ".", "element_citation", "(", "ref_tag", ")", ")", ":", "if", "node_text", "(", "tag", ")", "is", "not", "None", ":", "if", "details", "!=", "\"\"", ":", "details", "+=", "\", \"", "details", "+=", "node_text", "(", "tag", ")", "if", "details", "==", "\"\"", ":", "return", "None", "else", ":", "return", "details" ]
Extract detail value for references of type unknown
[ "Extract", "detail", "value", "for", "references", "of", "type", "unknown" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3540-L3569
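With soup left as None only the pages branch can contribute, which makes the fallback easy to sketch:

from elifetools import parseJATS as parser

print(parser.references_json_unknown_details({"pages": {"range": "120-126"}}))
# 120-126
print(parser.references_json_unknown_details({"id": "bib9"}))
# None  (nothing to collect without pages or soup)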
elifesciences/elife-tools
elifetools/parseJATS.py
unwrap_appendix_box
def unwrap_appendix_box(json_content): """for use in removing unwanted boxed-content from appendices json""" if json_content.get("content") and len(json_content["content"]) > 0: first_block = json_content["content"][0] if (first_block.get("type") and first_block.get("type") == "box" and first_block.get("content")): if first_block.get("doi") and not json_content.get("doi"): json_content["doi"] = first_block.get("doi") json_content["content"] = first_block["content"] return json_content
python
def unwrap_appendix_box(json_content): """for use in removing unwanted boxed-content from appendices json""" if json_content.get("content") and len(json_content["content"]) > 0: first_block = json_content["content"][0] if (first_block.get("type") and first_block.get("type") == "box" and first_block.get("content")): if first_block.get("doi") and not json_content.get("doi"): json_content["doi"] = first_block.get("doi") json_content["content"] = first_block["content"] return json_content
[ "def", "unwrap_appendix_box", "(", "json_content", ")", ":", "if", "json_content", ".", "get", "(", "\"content\"", ")", "and", "len", "(", "json_content", "[", "\"content\"", "]", ")", ">", "0", ":", "first_block", "=", "json_content", "[", "\"content\"", "]", "[", "0", "]", "if", "(", "first_block", ".", "get", "(", "\"type\"", ")", "and", "first_block", ".", "get", "(", "\"type\"", ")", "==", "\"box\"", "and", "first_block", ".", "get", "(", "\"content\"", ")", ")", ":", "if", "first_block", ".", "get", "(", "\"doi\"", ")", "and", "not", "json_content", ".", "get", "(", "\"doi\"", ")", ":", "json_content", "[", "\"doi\"", "]", "=", "first_block", ".", "get", "(", "\"doi\"", ")", "json_content", "[", "\"content\"", "]", "=", "first_block", "[", "\"content\"", "]", "return", "json_content" ]
for use in removing unwanted boxed-content from appendices json
[ "for", "use", "in", "removing", "unwanted", "boxed", "-", "content", "from", "appendices", "json" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L3593-L3603
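A self-contained check of the unwrapping behaviour, with a hypothetical appendix structure and DOI:

def unwrap_appendix_box(json_content):
    # As above: hoist a leading box block's DOI (if the appendix has none)
    # and replace the appendix content with the box's own content.
    if json_content.get("content") and len(json_content["content"]) > 0:
        first_block = json_content["content"][0]
        if first_block.get("type") == "box" and first_block.get("content"):
            if first_block.get("doi") and not json_content.get("doi"):
                json_content["doi"] = first_block.get("doi")
            json_content["content"] = first_block["content"]
    return json_content

appendix = {"content": [{"type": "box", "doi": "10.7554/eLife.00000.001",
                         "content": [{"type": "paragraph", "text": "Appendix text"}]}]}
print(unwrap_appendix_box(appendix))
# {'content': [{'type': 'paragraph', 'text': 'Appendix text'}], 'doi': '10.7554/eLife.00000.001'}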
canonical-ols/acceptable
acceptable/_build_doubles.py
extract_schemas_from_file
def extract_schemas_from_file(source_path): """Extract schemas from 'source_path'. :returns: a list of ViewSchema objects on success, None if no schemas could be extracted. """ logging.info("Extracting schemas from %s", source_path) try: with open(source_path, 'r') as source_file: source = source_file.read() except (FileNotFoundError, PermissionError) as e: logging.error("Cannot extract schemas: %s", e.strerror) else: try: schemas = extract_schemas_from_source(source, source_path) except SyntaxError as e: logging.error("Cannot extract schemas: %s", str(e)) else: logging.info( "Extracted %d %s", len(schemas), "schema" if len(schemas) == 1 else "schemas") return schemas
python
def extract_schemas_from_file(source_path): """Extract schemas from 'source_path'. :returns: a list of ViewSchema objects on success, None if no schemas could be extracted. """ logging.info("Extracting schemas from %s", source_path) try: with open(source_path, 'r') as source_file: source = source_file.read() except (FileNotFoundError, PermissionError) as e: logging.error("Cannot extract schemas: %s", e.strerror) else: try: schemas = extract_schemas_from_source(source, source_path) except SyntaxError as e: logging.error("Cannot extract schemas: %s", str(e)) else: logging.info( "Extracted %d %s", len(schemas), "schema" if len(schemas) == 1 else "schemas") return schemas
[ "def", "extract_schemas_from_file", "(", "source_path", ")", ":", "logging", ".", "info", "(", "\"Extracting schemas from %s\"", ",", "source_path", ")", "try", ":", "with", "open", "(", "source_path", ",", "'r'", ")", "as", "source_file", ":", "source", "=", "source_file", ".", "read", "(", ")", "except", "(", "FileNotFoundError", ",", "PermissionError", ")", "as", "e", ":", "logging", ".", "error", "(", "\"Cannot extract schemas: %s\"", ",", "e", ".", "strerror", ")", "else", ":", "try", ":", "schemas", "=", "extract_schemas_from_source", "(", "source", ",", "source_path", ")", "except", "SyntaxError", "as", "e", ":", "logging", ".", "error", "(", "\"Cannot extract schemas: %s\"", ",", "str", "(", "e", ")", ")", "else", ":", "logging", ".", "info", "(", "\"Extracted %d %s\"", ",", "len", "(", "schemas", ")", ",", "\"schema\"", "if", "len", "(", "schemas", ")", "==", "1", "else", "\"schemas\"", ")", "return", "schemas" ]
Extract schemas from 'source_path'. :returns: a list of ViewSchema objects on success, None if no schemas could be extracted.
[ "Extract", "schemas", "from", "source_path", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L129-L151
canonical-ols/acceptable
acceptable/_build_doubles.py
_get_simple_assignments
def _get_simple_assignments(tree): """Get simple assignments from node tree.""" result = {} for node in ast.walk(tree): if isinstance(node, ast.Assign): for target in node.targets: if isinstance(target, ast.Name): result[target.id] = node.value return result
python
def _get_simple_assignments(tree): """Get simple assignments from node tree.""" result = {} for node in ast.walk(tree): if isinstance(node, ast.Assign): for target in node.targets: if isinstance(target, ast.Name): result[target.id] = node.value return result
[ "def", "_get_simple_assignments", "(", "tree", ")", ":", "result", "=", "{", "}", "for", "node", "in", "ast", ".", "walk", "(", "tree", ")", ":", "if", "isinstance", "(", "node", ",", "ast", ".", "Assign", ")", ":", "for", "target", "in", "node", ".", "targets", ":", "if", "isinstance", "(", "target", ",", "ast", ".", "Name", ")", ":", "result", "[", "target", ".", "id", "]", "=", "node", ".", "value", "return", "result" ]
Get simple assignments from node tree.
[ "Get", "simple", "assignments", "from", "node", "tree", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L154-L162
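To see what the helper above returns, a short self-contained run over two module-level constants (the source string is made up):

import ast

def _get_simple_assignments(tree):
    # Map each plain-name assignment target to the AST node of its value.
    result = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    result[target.id] = node.value
    return result

tree = ast.parse("URL = '/v1/things'\nSCHEMA = {'type': 'object'}")
names = _get_simple_assignments(tree)
print({name: ast.literal_eval(node) for name, node in names.items()})
# {'URL': '/v1/things', 'SCHEMA': {'type': 'object'}}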
canonical-ols/acceptable
acceptable/_build_doubles.py
extract_schemas_from_source
def extract_schemas_from_source(source, filename='<unknown>'): """Extract schemas from 'source'. The 'source' parameter must be a string, and should be valid python source. If 'source' is not valid python source, a SyntaxError will be raised. :returns: a list of ViewSchema objects. """ # Track which acceptable services have been configured. acceptable_services = set() # Track which acceptable views have been configured: acceptable_views = {} schemas_found = [] ast_tree = ast.parse(source, filename) simple_names = _get_simple_assignments(ast_tree) assigns = [n for n in ast_tree.body if isinstance(n, ast.Assign)] call_assigns = [n for n in assigns if isinstance(n.value, ast.Call)] # We need to extract the AcceptableService-related views. We parse the # assignations twice: The first time to extract the AcceptableService # instances, the second to extract the views created on those services. for assign in call_assigns: if isinstance(assign.value.func, ast.Attribute): continue if assign.value.func.id == 'AcceptableService': for target in assign.targets: acceptable_services.add(target.id) for assign in call_assigns: # only consider calls which are attribute accesses, AND # calls where the object being accessed is in acceptable_services, AND # calls where the attribute being accessed is the 'api' method. if isinstance(assign.value.func, ast.Attribute) and \ assign.value.func.value.id in acceptable_services and \ assign.value.func.attr == 'api': # this is a view. We need to extract the url and methods specified. # they may be specified positionally or via a keyword. url = None name = None # methods has a default value: methods = ['GET'] # This is a view - the URL is the first positional argument: args = assign.value.args if len(args) >= 1: url = ast.literal_eval(args[0]) if len(args) >= 2: name = ast.literal_eval(args[1]) kwargs = assign.value.keywords for kwarg in kwargs: if kwarg.arg == 'url': url = ast.literal_eval(kwarg.value) if kwarg.arg == 'methods': methods = ast.literal_eval(kwarg.value) if kwarg.arg == 'view_name': name = ast.literal_eval(kwarg.value) if url and name: for target in assign.targets: acceptable_views[target.id] = { 'url': url, 'name': name, 'methods': methods, } # iterate over all functions, attempting to find the views. functions = [n for n in ast_tree.body if isinstance(n, ast.FunctionDef)] for function in functions: input_schema = None output_schema = None doc = ast.get_docstring(function) api_options_list = [] for decorator in function.decorator_list: if not isinstance(decorator, ast.Call): continue if isinstance(decorator.func, ast.Attribute): decorator_name = decorator.func.value.id # extract version this view was introduced at, which can be # specified as an arg or a kwarg: version = None for kwarg in decorator.keywords: if kwarg.arg == 'introduced_at': version = ast.literal_eval(kwarg.value) break if len(decorator.args) == 1: version = ast.literal_eval(decorator.args[0]) if decorator_name in acceptable_views: api_options = acceptable_views[decorator_name] api_options['version'] = version api_options_list.append(api_options) else: decorator_name = decorator.func.id if decorator_name == 'validate_body': _SimpleNamesResolver(simple_names).visit(decorator.args[0]) input_schema = ast.literal_eval(decorator.args[0]) if decorator_name == 'validate_output': _SimpleNamesResolver(simple_names).visit(decorator.args[0]) output_schema = ast.literal_eval(decorator.args[0]) for api_options in api_options_list: schema = ViewSchema( view_name=api_options['name'], version=api_options['version'], input_schema=input_schema, output_schema=output_schema, methods=api_options['methods'], url=api_options['url'], doc=doc, ) schemas_found.append(schema) return schemas_found
python
def extract_schemas_from_source(source, filename='<unknown>'): """Extract schemas from 'source'. The 'source' parameter must be a string, and should be valid python source. If 'source' is not valid python source, a SyntaxError will be raised. :returns: a list of ViewSchema objects. """ # Track which acceptable services have been configured. acceptable_services = set() # Track which acceptable views have been configured: acceptable_views = {} schemas_found = [] ast_tree = ast.parse(source, filename) simple_names = _get_simple_assignments(ast_tree) assigns = [n for n in ast_tree.body if isinstance(n, ast.Assign)] call_assigns = [n for n in assigns if isinstance(n.value, ast.Call)] # We need to extract the AcceptableService-related views. We parse the # assignations twice: The first time to extract the AcceptableService # instances, the second to extract the views created on those services. for assign in call_assigns: if isinstance(assign.value.func, ast.Attribute): continue if assign.value.func.id == 'AcceptableService': for target in assign.targets: acceptable_services.add(target.id) for assign in call_assigns: # only consider calls which are attribute accesses, AND # calls where the object being accessed is in acceptable_services, AND # calls where the attribute being accessed is the 'api' method. if isinstance(assign.value.func, ast.Attribute) and \ assign.value.func.value.id in acceptable_services and \ assign.value.func.attr == 'api': # this is a view. We need to extract the url and methods specified. # they may be specified positionally or via a keyword. url = None name = None # methods has a default value: methods = ['GET'] # This is a view - the URL is the first positional argument: args = assign.value.args if len(args) >= 1: url = ast.literal_eval(args[0]) if len(args) >= 2: name = ast.literal_eval(args[1]) kwargs = assign.value.keywords for kwarg in kwargs: if kwarg.arg == 'url': url = ast.literal_eval(kwarg.value) if kwarg.arg == 'methods': methods = ast.literal_eval(kwarg.value) if kwarg.arg == 'view_name': name = ast.literal_eval(kwarg.value) if url and name: for target in assign.targets: acceptable_views[target.id] = { 'url': url, 'name': name, 'methods': methods, } # iterate over all functions, attempting to find the views. functions = [n for n in ast_tree.body if isinstance(n, ast.FunctionDef)] for function in functions: input_schema = None output_schema = None doc = ast.get_docstring(function) api_options_list = [] for decorator in function.decorator_list: if not isinstance(decorator, ast.Call): continue if isinstance(decorator.func, ast.Attribute): decorator_name = decorator.func.value.id # extract version this view was introduced at, which can be # specified as an arg or a kwarg: version = None for kwarg in decorator.keywords: if kwarg.arg == 'introduced_at': version = ast.literal_eval(kwarg.value) break if len(decorator.args) == 1: version = ast.literal_eval(decorator.args[0]) if decorator_name in acceptable_views: api_options = acceptable_views[decorator_name] api_options['version'] = version api_options_list.append(api_options) else: decorator_name = decorator.func.id if decorator_name == 'validate_body': _SimpleNamesResolver(simple_names).visit(decorator.args[0]) input_schema = ast.literal_eval(decorator.args[0]) if decorator_name == 'validate_output': _SimpleNamesResolver(simple_names).visit(decorator.args[0]) output_schema = ast.literal_eval(decorator.args[0]) for api_options in api_options_list: schema = ViewSchema( view_name=api_options['name'], version=api_options['version'], input_schema=input_schema, output_schema=output_schema, methods=api_options['methods'], url=api_options['url'], doc=doc, ) schemas_found.append(schema) return schemas_found
[ "def", "extract_schemas_from_source", "(", "source", ",", "filename", "=", "'<unknown>'", ")", ":", "# Track which acceptable services have been configured.", "acceptable_services", "=", "set", "(", ")", "# Track which acceptable views have been configured:", "acceptable_views", "=", "{", "}", "schemas_found", "=", "[", "]", "ast_tree", "=", "ast", ".", "parse", "(", "source", ",", "filename", ")", "simple_names", "=", "_get_simple_assignments", "(", "ast_tree", ")", "assigns", "=", "[", "n", "for", "n", "in", "ast_tree", ".", "body", "if", "isinstance", "(", "n", ",", "ast", ".", "Assign", ")", "]", "call_assigns", "=", "[", "n", "for", "n", "in", "assigns", "if", "isinstance", "(", "n", ".", "value", ",", "ast", ".", "Call", ")", "]", "# We need to extract the AcceptableService-related views. We parse the", "# assignations twice: The first time to extract the AcceptableService", "# instances, the second to extract the views created on those services.", "for", "assign", "in", "call_assigns", ":", "if", "isinstance", "(", "assign", ".", "value", ".", "func", ",", "ast", ".", "Attribute", ")", ":", "continue", "if", "assign", ".", "value", ".", "func", ".", "id", "==", "'AcceptableService'", ":", "for", "target", "in", "assign", ".", "targets", ":", "acceptable_services", ".", "add", "(", "target", ".", "id", ")", "for", "assign", "in", "call_assigns", ":", "# only consider calls which are attribute accesses, AND", "# calls where the object being accessed is in acceptable_services, AND", "# calls where the attribute being accessed is the 'api' method.", "if", "isinstance", "(", "assign", ".", "value", ".", "func", ",", "ast", ".", "Attribute", ")", "and", "assign", ".", "value", ".", "func", ".", "value", ".", "id", "in", "acceptable_services", "and", "assign", ".", "value", ".", "func", ".", "attr", "==", "'api'", ":", "# this is a view. 
We need to extract the url and methods specified.", "# they may be specified positionally or via a keyword.", "url", "=", "None", "name", "=", "None", "# methods has a default value:", "methods", "=", "[", "'GET'", "]", "# This is a view - the URL is the first positional argument:", "args", "=", "assign", ".", "value", ".", "args", "if", "len", "(", "args", ")", ">=", "1", ":", "url", "=", "ast", ".", "literal_eval", "(", "args", "[", "0", "]", ")", "if", "len", "(", "args", ")", ">=", "2", ":", "name", "=", "ast", ".", "literal_eval", "(", "args", "[", "1", "]", ")", "kwargs", "=", "assign", ".", "value", ".", "keywords", "for", "kwarg", "in", "kwargs", ":", "if", "kwarg", ".", "arg", "==", "'url'", ":", "url", "=", "ast", ".", "literal_eval", "(", "kwarg", ".", "value", ")", "if", "kwarg", ".", "arg", "==", "'methods'", ":", "methods", "=", "ast", ".", "literal_eval", "(", "kwarg", ".", "value", ")", "if", "kwarg", ".", "arg", "==", "'view_name'", ":", "name", "=", "ast", ".", "literal_eval", "(", "kwarg", ".", "value", ")", "if", "url", "and", "name", ":", "for", "target", "in", "assign", ".", "targets", ":", "acceptable_views", "[", "target", ".", "id", "]", "=", "{", "'url'", ":", "url", ",", "'name'", ":", "name", ",", "'methods'", ":", "methods", ",", "}", "# iterate over all functions, attempting to find the views.", "functions", "=", "[", "n", "for", "n", "in", "ast_tree", ".", "body", "if", "isinstance", "(", "n", ",", "ast", ".", "FunctionDef", ")", "]", "for", "function", "in", "functions", ":", "input_schema", "=", "None", "output_schema", "=", "None", "doc", "=", "ast", ".", "get_docstring", "(", "function", ")", "api_options_list", "=", "[", "]", "for", "decorator", "in", "function", ".", "decorator_list", ":", "if", "not", "isinstance", "(", "decorator", ",", "ast", ".", "Call", ")", ":", "continue", "if", "isinstance", "(", "decorator", ".", "func", ",", "ast", ".", "Attribute", ")", ":", "decorator_name", "=", "decorator", ".", "func", ".", "value", ".", "id", "# extract version this view was introduced at, which can be", "# specified as an arg or a kwarg:", "version", "=", "None", "for", "kwarg", "in", "decorator", ".", "keywords", ":", "if", "kwarg", ".", "arg", "==", "'introduced_at'", ":", "version", "=", "ast", ".", "literal_eval", "(", "kwarg", ".", "value", ")", "break", "if", "len", "(", "decorator", ".", "args", ")", "==", "1", ":", "version", "=", "ast", ".", "literal_eval", "(", "decorator", ".", "args", "[", "0", "]", ")", "if", "decorator_name", "in", "acceptable_views", ":", "api_options", "=", "acceptable_views", "[", "decorator_name", "]", "api_options", "[", "'version'", "]", "=", "version", "api_options_list", ".", "append", "(", "api_options", ")", "else", ":", "decorator_name", "=", "decorator", ".", "func", ".", "id", "if", "decorator_name", "==", "'validate_body'", ":", "_SimpleNamesResolver", "(", "simple_names", ")", ".", "visit", "(", "decorator", ".", "args", "[", "0", "]", ")", "input_schema", "=", "ast", ".", "literal_eval", "(", "decorator", ".", "args", "[", "0", "]", ")", "if", "decorator_name", "==", "'validate_output'", ":", "_SimpleNamesResolver", "(", "simple_names", ")", ".", "visit", "(", "decorator", ".", "args", "[", "0", "]", ")", "output_schema", "=", "ast", ".", "literal_eval", "(", "decorator", ".", "args", "[", "0", "]", ")", "for", "api_options", "in", "api_options_list", ":", "schema", "=", "ViewSchema", "(", "view_name", "=", "api_options", "[", "'name'", "]", ",", "version", "=", "api_options", "[", "'version'", "]", ",", 
"input_schema", "=", "input_schema", ",", "output_schema", "=", "output_schema", ",", "methods", "=", "api_options", "[", "'methods'", "]", ",", "url", "=", "api_options", "[", "'url'", "]", ",", "doc", "=", "doc", ",", ")", "schemas_found", ".", "append", "(", "schema", ")", "return", "schemas_found" ]
Extract schemas from 'source'. The 'source' parameter must be a string, and should be valid python source. If 'source' is not valid python source, a SyntaxError will be raised. :returns: a list of ViewSchema objects.
[ "Extract", "schemas", "from", "source", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L177-L289
canonical-ols/acceptable
acceptable/_build_doubles.py
render_value
def render_value(value): """Render a value, ensuring that any nested dicts are sorted by key.""" if isinstance(value, list): return '[' + ', '.join(render_value(v) for v in value) + ']' elif isinstance(value, dict): return ( '{' + ', '.join('{k!r}: {v}'.format( k=k, v=render_value(v)) for k, v in sorted(value.items())) + '}') else: return repr(value)
python
def render_value(value): """Render a value, ensuring that any nested dicts are sorted by key.""" if isinstance(value, list): return '[' + ', '.join(render_value(v) for v in value) + ']' elif isinstance(value, dict): return ( '{' + ', '.join('{k!r}: {v}'.format( k=k, v=render_value(v)) for k, v in sorted(value.items())) + '}') else: return repr(value)
[ "def", "render_value", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "'['", "+", "', '", ".", "join", "(", "render_value", "(", "v", ")", "for", "v", "in", "value", ")", "+", "']'", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "(", "'{'", "+", "', '", ".", "join", "(", "'{k!r}: {v}'", ".", "format", "(", "k", "=", "k", ",", "v", "=", "render_value", "(", "v", ")", ")", "for", "k", ",", "v", "in", "sorted", "(", "value", ".", "items", "(", ")", ")", ")", "+", "'}'", ")", "else", ":", "return", "repr", "(", "value", ")" ]
Render a value, ensuring that any nested dicts are sorted by key.
[ "Render", "a", "value", "ensuring", "that", "any", "nested", "dicts", "are", "sorted", "by", "key", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L292-L303
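A quick demonstration that render_value is deterministic regardless of dict insertion order:

def render_value(value):
    # Same logic as above: lists render element-wise, dict items sort by key.
    if isinstance(value, list):
        return '[' + ', '.join(render_value(v) for v in value) + ']'
    elif isinstance(value, dict):
        return ('{' + ', '.join('{k!r}: {v}'.format(k=k, v=render_value(v))
                                for k, v in sorted(value.items())) + '}')
    else:
        return repr(value)

print(render_value({'b': 1, 'a': [True, {'d': None, 'c': 'x'}]}))
# {'a': [True, {'c': 'x', 'd': None}], 'b': 1}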
canonical-ols/acceptable
acceptable/_build_doubles.py
write_service_double_file
def write_service_double_file(target_root, service_name, rendered): """Render syntactically valid python service double code.""" target_path = os.path.join( target_root, 'snapstore_schemas', 'service_doubles', '%s.py' % service_name ) with open(target_path, 'w') as target_file: target_file.write(rendered)
python
def write_service_double_file(target_root, service_name, rendered): """Render syntactically valid python service double code.""" target_path = os.path.join( target_root, 'snapstore_schemas', 'service_doubles', '%s.py' % service_name ) with open(target_path, 'w') as target_file: target_file.write(rendered)
[ "def", "write_service_double_file", "(", "target_root", ",", "service_name", ",", "rendered", ")", ":", "target_path", "=", "os", ".", "path", ".", "join", "(", "target_root", ",", "'snapstore_schemas'", ",", "'service_doubles'", ",", "'%s.py'", "%", "service_name", ")", "with", "open", "(", "target_path", ",", "'w'", ")", "as", "target_file", ":", "target_file", ".", "write", "(", "rendered", ")" ]
Render syntactically valid python service double code.
[ "Render", "syntactically", "valid", "python", "service", "double", "code", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_build_doubles.py#L346-L353
canonical-ols/acceptable
acceptable/util.py
clean_docstring
def clean_docstring(docstring): """Dedent docstring, special casing the first line.""" docstring = docstring.strip() if '\n' in docstring: # multiline docstring if docstring[0].isspace(): # whole docstring is indented return textwrap.dedent(docstring) else: # first line not indented, rest maybe first, _, rest = docstring.partition('\n') return first + '\n' + textwrap.dedent(rest) return docstring
python
def clean_docstring(docstring): """Dedent docstring, special casing the first line.""" docstring = docstring.strip() if '\n' in docstring: # multiline docstring if docstring[0].isspace(): # whole docstring is indented return textwrap.dedent(docstring) else: # first line not indented, rest maybe first, _, rest = docstring.partition('\n') return first + '\n' + textwrap.dedent(rest) return docstring
[ "def", "clean_docstring", "(", "docstring", ")", ":", "docstring", "=", "docstring", ".", "strip", "(", ")", "if", "'\\n'", "in", "docstring", ":", "# multiline docstring", "if", "docstring", "[", "0", "]", ".", "isspace", "(", ")", ":", "# whole docstring is indented", "return", "textwrap", ".", "dedent", "(", "docstring", ")", "else", ":", "# first line not indented, rest maybe", "first", ",", "_", ",", "rest", "=", "docstring", ".", "partition", "(", "'\\n'", ")", "return", "first", "+", "'\\n'", "+", "textwrap", ".", "dedent", "(", "rest", ")", "return", "docstring" ]
Dedent docstring, special casing the first line.
[ "Dedent", "docstring", "special", "casing", "the", "first", "line", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/util.py#L24-L36
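A small check of the first-line special case, with a made-up docstring:

import textwrap

def clean_docstring(docstring):
    # As above: strip, then dedent everything after an unindented first line.
    docstring = docstring.strip()
    if '\n' in docstring:
        if docstring[0].isspace():
            return textwrap.dedent(docstring)
        else:
            first, _, rest = docstring.partition('\n')
            return first + '\n' + textwrap.dedent(rest)
    return docstring

raw = """Summary line.

        Indented detail paragraph.
    """
print(clean_docstring(raw))
# Summary line.
#
# Indented detail paragraph.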
canonical-ols/acceptable
acceptable/util.py
_sort_schema
def _sort_schema(schema): """Recursively sorts a JSON schema by dict key.""" if isinstance(schema, dict): for k, v in sorted(schema.items()): if isinstance(v, dict): yield k, OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield k, list(_sort_schema(v)) else: yield k, v elif isinstance(schema, list): for v in schema: if isinstance(v, dict): yield OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield list(_sort_schema(v)) else: yield v else: yield schema
python
def _sort_schema(schema): """Recursively sorts a JSON schema by dict key.""" if isinstance(schema, dict): for k, v in sorted(schema.items()): if isinstance(v, dict): yield k, OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield k, list(_sort_schema(v)) else: yield k, v elif isinstance(schema, list): for v in schema: if isinstance(v, dict): yield OrderedDict(_sort_schema(v)) elif isinstance(v, list): yield list(_sort_schema(v)) else: yield v else: yield schema
[ "def", "_sort_schema", "(", "schema", ")", ":", "if", "isinstance", "(", "schema", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "sorted", "(", "schema", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "yield", "k", ",", "OrderedDict", "(", "_sort_schema", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "yield", "k", ",", "list", "(", "_sort_schema", "(", "v", ")", ")", "else", ":", "yield", "k", ",", "v", "elif", "isinstance", "(", "schema", ",", "list", ")", ":", "for", "v", "in", "schema", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "yield", "OrderedDict", "(", "_sort_schema", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "yield", "list", "(", "_sort_schema", "(", "v", ")", ")", "else", ":", "yield", "v", "else", ":", "yield", "d" ]
Recursively sorts a JSON schema by dict key.
[ "Recursively", "sorts", "a", "JSON", "schema", "by", "dict", "key", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/util.py#L39-L59
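A self-contained check that nested keys come out sorted (the sample schema is arbitrary):

import json
from collections import OrderedDict

def _sort_schema(schema):
    # Identical to the generator above; scalars pass through unchanged.
    if isinstance(schema, dict):
        for k, v in sorted(schema.items()):
            if isinstance(v, dict):
                yield k, OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield k, list(_sort_schema(v))
            else:
                yield k, v
    elif isinstance(schema, list):
        for v in schema:
            if isinstance(v, dict):
                yield OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield list(_sort_schema(v))
            else:
                yield v
    else:
        yield schema

schema = {'type': 'object', 'properties': {'b': {'type': 'string'}, 'a': {'type': 'integer'}}}
print(json.dumps(OrderedDict(_sort_schema(schema))))
# {"properties": {"a": {"type": "integer"}, "b": {"type": "string"}}, "type": "object"}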
canonical-ols/acceptable
acceptable/djangoutil.py
urlmap
def urlmap(patterns): """Recursively build a map of (group, name) => url patterns. Group is either the resolver namespace or app name for the url config. The urls are joined with any prefixes, and cleaned up of extraneous regex specific syntax.""" for pattern in patterns: group = getattr(pattern, 'namespace', None) if group is None: group = getattr(pattern, 'app_name', None) path = '/' + get_pattern(pattern).lstrip('^').rstrip('$') if isinstance(pattern, PATTERNS): yield (group, pattern.name), path elif isinstance(pattern, RESOLVERS): subpatterns = pattern.url_patterns for (_, name), subpath in urlmap(subpatterns): yield (group, name), path.rstrip('/') + subpath
python
def urlmap(patterns): """Recursively build a map of (group, name) => url patterns. Group is either the resolver namespace or app name for the url config. The urls are joined with any prefixes, and cleaned up of extraneous regex specific syntax.""" for pattern in patterns: group = getattr(pattern, 'namespace', None) if group is None: group = getattr(pattern, 'app_name', None) path = '/' + get_pattern(pattern).lstrip('^').rstrip('$') if isinstance(pattern, PATTERNS): yield (group, pattern.name), path elif isinstance(pattern, RESOLVERS): subpatterns = pattern.url_patterns for (_, name), subpath in urlmap(subpatterns): yield (group, name), path.rstrip('/') + subpath
[ "def", "urlmap", "(", "patterns", ")", ":", "for", "pattern", "in", "patterns", ":", "group", "=", "getattr", "(", "pattern", ",", "'namespace'", ",", "None", ")", "if", "group", "is", "None", ":", "group", "=", "getattr", "(", "pattern", ",", "'app_name'", ",", "None", ")", "path", "=", "'/'", "+", "get_pattern", "(", "pattern", ")", ".", "lstrip", "(", "'^'", ")", ".", "rstrip", "(", "'$'", ")", "if", "isinstance", "(", "pattern", ",", "PATTERNS", ")", ":", "yield", "(", "group", ",", "pattern", ".", "name", ")", ",", "path", "elif", "isinstance", "(", "pattern", ",", "RESOLVERS", ")", ":", "subpatterns", "=", "pattern", ".", "url_patterns", "for", "(", "_", ",", "name", ")", ",", "subpath", "in", "urlmap", "(", "subpatterns", ")", ":", "yield", "(", "group", ",", "name", ")", ",", "path", ".", "rstrip", "(", "'/'", ")", "+", "subpath" ]
Recursively build a map of (group, name) => url patterns. Group is either the resolver namespace or app name for the url config. The urls are joined with any prefixes, and cleaned up of extraneous regex specific syntax.
[ "Recursively", "build", "a", "map", "of", "(", "group", "name", ")", "=", ">", "url", "patterns", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/djangoutil.py#L64-L81
canonical-ols/acceptable
acceptable/djangoutil.py
get_field_schema
def get_field_schema(name, field): """Returns a JSON Schema representation of a form field.""" field_schema = { 'type': 'string', } if field.label: field_schema['title'] = str(field.label) # force translation if field.help_text: field_schema['description'] = str(field.help_text) # force translation if isinstance(field, (fields.URLField, fields.FileField)): field_schema['format'] = 'uri' elif isinstance(field, fields.EmailField): field_schema['format'] = 'email' elif isinstance(field, fields.DateTimeField): field_schema['format'] = 'date-time' elif isinstance(field, fields.DateField): field_schema['format'] = 'date' elif isinstance(field, (fields.DecimalField, fields.FloatField)): field_schema['type'] = 'number' elif isinstance(field, fields.IntegerField): field_schema['type'] = 'integer' elif isinstance(field, fields.NullBooleanField): field_schema['type'] = 'boolean' elif isinstance(field.widget, widgets.CheckboxInput): field_schema['type'] = 'boolean' if getattr(field, 'choices', []): field_schema['enum'] = sorted([choice[0] for choice in field.choices]) # check for multiple values if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)): if field.widget.allow_multiple_selected: # promote to array of <type>, move details into the items field field_schema['items'] = { 'type': field_schema['type'], } if 'enum' in field_schema: field_schema['items']['enum'] = field_schema.pop('enum') field_schema['type'] = 'array' return field_schema
python
def get_field_schema(name, field): """Returns a JSON Schema representation of a form field.""" field_schema = { 'type': 'string', } if field.label: field_schema['title'] = str(field.label) # force translation if field.help_text: field_schema['description'] = str(field.help_text) # force translation if isinstance(field, (fields.URLField, fields.FileField)): field_schema['format'] = 'uri' elif isinstance(field, fields.EmailField): field_schema['format'] = 'email' elif isinstance(field, fields.DateTimeField): field_schema['format'] = 'date-time' elif isinstance(field, fields.DateField): field_schema['format'] = 'date' elif isinstance(field, (fields.DecimalField, fields.FloatField)): field_schema['type'] = 'number' elif isinstance(field, fields.IntegerField): field_schema['type'] = 'integer' elif isinstance(field, fields.NullBooleanField): field_schema['type'] = 'boolean' elif isinstance(field.widget, widgets.CheckboxInput): field_schema['type'] = 'boolean' if getattr(field, 'choices', []): field_schema['enum'] = sorted([choice[0] for choice in field.choices]) # check for multiple values if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)): if field.widget.allow_multiple_selected: # promote to array of <type>, move details into the items field field_schema['items'] = { 'type': field_schema['type'], } if 'enum' in field_schema: field_schema['items']['enum'] = field_schema.pop('enum') field_schema['type'] = 'array' return field_schema
[ "def", "get_field_schema", "(", "name", ",", "field", ")", ":", "field_schema", "=", "{", "'type'", ":", "'string'", ",", "}", "if", "field", ".", "label", ":", "field_schema", "[", "'title'", "]", "=", "str", "(", "field", ".", "label", ")", "# force translation", "if", "field", ".", "help_text", ":", "field_schema", "[", "'description'", "]", "=", "str", "(", "field", ".", "help_text", ")", "# force translation", "if", "isinstance", "(", "field", ",", "(", "fields", ".", "URLField", ",", "fields", ".", "FileField", ")", ")", ":", "field_schema", "[", "'format'", "]", "=", "'uri'", "elif", "isinstance", "(", "field", ",", "fields", ".", "EmailField", ")", ":", "field_schema", "[", "'format'", "]", "=", "'email'", "elif", "isinstance", "(", "field", ",", "fields", ".", "DateTimeField", ")", ":", "field_schema", "[", "'format'", "]", "=", "'date-time'", "elif", "isinstance", "(", "field", ",", "fields", ".", "DateField", ")", ":", "field_schema", "[", "'format'", "]", "=", "'date'", "elif", "isinstance", "(", "field", ",", "(", "fields", ".", "DecimalField", ",", "fields", ".", "FloatField", ")", ")", ":", "field_schema", "[", "'type'", "]", "=", "'number'", "elif", "isinstance", "(", "field", ",", "fields", ".", "IntegerField", ")", ":", "field_schema", "[", "'type'", "]", "=", "'integer'", "elif", "isinstance", "(", "field", ",", "fields", ".", "NullBooleanField", ")", ":", "field_schema", "[", "'type'", "]", "=", "'boolean'", "elif", "isinstance", "(", "field", ".", "widget", ",", "widgets", ".", "CheckboxInput", ")", ":", "field_schema", "[", "'type'", "]", "=", "'boolean'", "if", "getattr", "(", "field", ",", "'choices'", ",", "[", "]", ")", ":", "field_schema", "[", "'enum'", "]", "=", "sorted", "(", "[", "choice", "[", "0", "]", "for", "choice", "in", "field", ".", "choices", "]", ")", "# check for multiple values", "if", "isinstance", "(", "field", ".", "widget", ",", "(", "widgets", ".", "Select", ",", "widgets", ".", "ChoiceWidget", ")", ")", ":", "if", "field", ".", "widget", ".", "allow_multiple_selected", ":", "# promote to array of <type>, move details into the items field", "field_schema", "[", "'items'", "]", "=", "{", "'type'", ":", "field_schema", "[", "'type'", "]", ",", "}", "if", "'enum'", "in", "field_schema", ":", "field_schema", "[", "'items'", "]", "[", "'enum'", "]", "=", "field_schema", ".", "pop", "(", "'enum'", ")", "field_schema", "[", "'type'", "]", "=", "'array'", "return", "field_schema" ]
Returns a JSON Schema representation of a form field.
[ "Returns", "a", "JSON", "Schema", "representation", "of", "a", "form", "field", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/djangoutil.py#L84-L127
canonical-ols/acceptable
acceptable/djangoutil.py
get_form_schema
def get_form_schema(form): """Return a JSON Schema object for a Django Form.""" schema = { 'type': 'object', 'properties': {}, } for name, field in form.base_fields.items(): schema['properties'][name] = get_field_schema(name, field) if field.required: schema.setdefault('required', []).append(name) return schema
python
def get_form_schema(form): """Return a JSON Schema object for a Django Form.""" schema = { 'type': 'object', 'properties': {}, } for name, field in form.base_fields.items(): schema['properties'][name] = get_field_schema(name, field) if field.required: schema.setdefault('required', []).append(name) return schema
[ "def", "get_form_schema", "(", "form", ")", ":", "schema", "=", "{", "'type'", ":", "'object'", ",", "'properties'", ":", "{", "}", ",", "}", "for", "name", ",", "field", "in", "form", ".", "base_fields", ".", "items", "(", ")", ":", "schema", "[", "'properties'", "]", "[", "name", "]", "=", "get_field_schema", "(", "name", ",", "field", ")", "if", "field", ".", "required", ":", "schema", ".", "setdefault", "(", "'required'", ",", "[", "]", ")", ".", "append", "(", "name", ")", "return", "schema" ]
Return a JSON Schema object for a Django Form.
[ "Return", "a", "JSON", "Schema", "object", "for", "a", "Django", "Form", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/djangoutil.py#L130-L142
canonical-ols/acceptable
acceptable/djangoutil.py
DjangoAPI.handler
def handler(self, handler_class): """Link to an API handler class (e.g. piston or DRF).""" self.handler_class = handler_class # we take the docstring from the handler class, not the methods if self.docs is None and handler_class.__doc__: self.docs = clean_docstring(handler_class.__doc__) return handler_class
python
def handler(self, handler_class): """Link to an API handler class (e.g. piston or DRF).""" self.handler_class = handler_class # we take the docstring from the handler class, not the methods if self.docs is None and handler_class.__doc__: self.docs = clean_docstring(handler_class.__doc__) return handler_class
[ "def", "handler", "(", "self", ",", "handler_class", ")", ":", "self", ".", "handler_class", "=", "handler_class", "# we take the docstring from the handler class, not the methods", "if", "self", ".", "docs", "is", "None", "and", "handler_class", ".", "__doc__", ":", "self", ".", "docs", "=", "clean_docstring", "(", "handler_class", ".", "__doc__", ")", "return", "handler_class" ]
Link to an API handler class (e.g. piston or DRF).
[ "Link", "to", "an", "API", "handler", "class", "(", "e", ".", "g", ".", "piston", "or", "DRF", ")", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/djangoutil.py#L217-L223
elifesciences/elife-tools
elifetools/utils_html.py
xml_to_html
def xml_to_html(html_flag, xml_string, base_url=None): "For formatting json output into HTML friendly format" if not xml_string or not html_flag is True: return xml_string html_string = xml_string html_string = remove_comment_tags(html_string) # Escape unmatched angle brackets if '<' in html_string or '>' in html_string: html_string = escape_html(html_string) # Replace more tags html_string = replace_xref_tags(html_string) html_string = replace_ext_link_tags(html_string) html_string = replace_email_tags(html_string) html_string = replace_inline_graphic_tags(html_string, base_url) html_string = replace_named_content_tags(html_string) html_string = replace_mathml_tags(html_string) html_string = replace_table_style_author_callout(html_string) html_string = replace_simple_tags(html_string, 'italic', 'i') html_string = replace_simple_tags(html_string, 'bold', 'b') html_string = replace_simple_tags(html_string, 'underline', 'span', '<span class="underline">') html_string = replace_simple_tags(html_string, 'sc', 'span', '<span class="small-caps">') html_string = replace_simple_tags(html_string, 'monospace', 'span', '<span class="monospace">') html_string = replace_simple_tags(html_string, 'inline-formula', None) html_string = replace_simple_tags(html_string, 'break', 'br') return html_string
python
def xml_to_html(html_flag, xml_string, base_url=None): "For formatting json output into HTML friendly format" if not xml_string or not html_flag is True: return xml_string html_string = xml_string html_string = remove_comment_tags(html_string) # Escape unmatched angle brackets if '<' in html_string or '>' in html_string: html_string = escape_html(html_string) # Replace more tags html_string = replace_xref_tags(html_string) html_string = replace_ext_link_tags(html_string) html_string = replace_email_tags(html_string) html_string = replace_inline_graphic_tags(html_string, base_url) html_string = replace_named_content_tags(html_string) html_string = replace_mathml_tags(html_string) html_string = replace_table_style_author_callout(html_string) html_string = replace_simple_tags(html_string, 'italic', 'i') html_string = replace_simple_tags(html_string, 'bold', 'b') html_string = replace_simple_tags(html_string, 'underline', 'span', '<span class="underline">') html_string = replace_simple_tags(html_string, 'sc', 'span', '<span class="small-caps">') html_string = replace_simple_tags(html_string, 'monospace', 'span', '<span class="monospace">') html_string = replace_simple_tags(html_string, 'inline-formula', None) html_string = replace_simple_tags(html_string, 'break', 'br') return html_string
[ "def", "xml_to_html", "(", "html_flag", ",", "xml_string", ",", "base_url", "=", "None", ")", ":", "if", "not", "xml_string", "or", "not", "html_flag", "is", "True", ":", "return", "xml_string", "html_string", "=", "xml_string", "html_string", "=", "remove_comment_tags", "(", "html_string", ")", "# Escape unmatched angle brackets", "if", "'<'", "in", "html_string", "or", "'>'", "in", "html_string", ":", "html_string", "=", "escape_html", "(", "html_string", ")", "# Replace more tags", "html_string", "=", "replace_xref_tags", "(", "html_string", ")", "html_string", "=", "replace_ext_link_tags", "(", "html_string", ")", "html_string", "=", "replace_email_tags", "(", "html_string", ")", "html_string", "=", "replace_inline_graphic_tags", "(", "html_string", ",", "base_url", ")", "html_string", "=", "replace_named_content_tags", "(", "html_string", ")", "html_string", "=", "replace_mathml_tags", "(", "html_string", ")", "html_string", "=", "replace_table_style_author_callout", "(", "html_string", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'italic'", ",", "'i'", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'bold'", ",", "'b'", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'underline'", ",", "'span'", ",", "'<span class=\"underline\">'", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'sc'", ",", "'span'", ",", "'<span class=\"small-caps\">'", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'monospace'", ",", "'span'", ",", "'<span class=\"monospace\">'", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'inline-formula'", ",", "None", ")", "html_string", "=", "replace_simple_tags", "(", "html_string", ",", "'break'", ",", "'br'", ")", "return", "html_string" ]
For formatting json output into HTML friendly format
[ "For", "formatting", "json", "output", "into", "HTML", "friendly", "format" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/utils_html.py#L5-L29
elifesciences/elife-tools
elifetools/utils_html.py
replace_simple_tags
def replace_simple_tags(s, from_tag='italic', to_tag='i', to_open_tag=None): """ Replace tags such as <italic> to <i> This does not validate markup """ if to_open_tag: s = s.replace('<' + from_tag + '>', to_open_tag) elif to_tag: s = s.replace('<' + from_tag + '>', '<' + to_tag + '>') s = s.replace('<' + from_tag + '/>', '<' + to_tag + '/>') else: s = s.replace('<' + from_tag + '>', '') s = s.replace('<' + from_tag + '/>', '') if to_tag: s = s.replace('</' + from_tag + '>', '</' + to_tag + '>') else: s = s.replace('</' + from_tag + '>', '') return s
python
def replace_simple_tags(s, from_tag='italic', to_tag='i', to_open_tag=None): """ Replace tags such as <italic> to <i> This does not validate markup """ if to_open_tag: s = s.replace('<' + from_tag + '>', to_open_tag) elif to_tag: s = s.replace('<' + from_tag + '>', '<' + to_tag + '>') s = s.replace('<' + from_tag + '/>', '<' + to_tag + '/>') else: s = s.replace('<' + from_tag + '>', '') s = s.replace('<' + from_tag + '/>', '') if to_tag: s = s.replace('</' + from_tag + '>', '</' + to_tag + '>') else: s = s.replace('</' + from_tag + '>', '') return s
[ "def", "replace_simple_tags", "(", "s", ",", "from_tag", "=", "'italic'", ",", "to_tag", "=", "'i'", ",", "to_open_tag", "=", "None", ")", ":", "if", "to_open_tag", ":", "s", "=", "s", ".", "replace", "(", "'<'", "+", "from_tag", "+", "'>'", ",", "to_open_tag", ")", "elif", "to_tag", ":", "s", "=", "s", ".", "replace", "(", "'<'", "+", "from_tag", "+", "'>'", ",", "'<'", "+", "to_tag", "+", "'>'", ")", "s", "=", "s", ".", "replace", "(", "'<'", "+", "from_tag", "+", "'/>'", ",", "'<'", "+", "to_tag", "+", "'/>'", ")", "else", ":", "s", "=", "s", ".", "replace", "(", "'<'", "+", "from_tag", "+", "'>'", ",", "''", ")", "s", "=", "s", ".", "replace", "(", "'<'", "+", "from_tag", "+", "'/>'", ",", "''", ")", "if", "to_tag", ":", "s", "=", "s", ".", "replace", "(", "'</'", "+", "from_tag", "+", "'>'", ",", "'</'", "+", "to_tag", "+", "'>'", ")", "else", ":", "s", "=", "s", ".", "replace", "(", "'</'", "+", "from_tag", "+", "'>'", ",", "''", ")", "return", "s" ]
Replace tags such as <italic> to <i> This does not validate markup
[ "Replace", "tags", "such", "as", "<italic", ">", "to", "<i", ">", "This", "does", "not", "validate", "markup" ]
train
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/utils_html.py#L70-L89
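Two illustrative calls, showing the default italic mapping and an open-tag override:

def replace_simple_tags(s, from_tag='italic', to_tag='i', to_open_tag=None):
    # String-level tag swap, as above; no markup validation.
    if to_open_tag:
        s = s.replace('<' + from_tag + '>', to_open_tag)
    elif to_tag:
        s = s.replace('<' + from_tag + '>', '<' + to_tag + '>')
        s = s.replace('<' + from_tag + '/>', '<' + to_tag + '/>')
    else:
        s = s.replace('<' + from_tag + '>', '')
        s = s.replace('<' + from_tag + '/>', '')
    if to_tag:
        s = s.replace('</' + from_tag + '>', '</' + to_tag + '>')
    else:
        s = s.replace('</' + from_tag + '>', '')
    return s

print(replace_simple_tags('<italic>in vivo</italic>'))  # <i>in vivo</i>
print(replace_simple_tags('<sc>DNA</sc>', 'sc', 'span', '<span class="small-caps">'))
# <span class="small-caps">DNA</span>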
canonical-ols/acceptable
acceptable/_validation.py
validate_body
def validate_body(schema): """Validate the body of incoming requests for a flask view. An example usage might look like this:: from snapstore_schemas import validate_body @validate_body({ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'snap_id': {'type': 'string'}, 'series': {'type': 'string'}, 'name': {'type': 'string'}, 'title': {'type': 'string'}, 'keywords': { 'type': 'array', 'items': {'type': 'string'} }, 'summary': {'type': 'string'}, 'description': {'type': 'string'}, 'created_at': {'type': 'string'}, }, 'required': ['snap_id', 'series'], 'additionalProperties': False } }) def my_flask_view(): # view code here return "Hello World", 200 All incoming requests that have been routed to this view will be matched against the specified schema. If the request body does not match the schema an instance of `DataValidationError` will be raised. By default this will cause the flask application to return a 500 response, but this can be customised by telling flask how to handle these exceptions. The exception instance has an 'error_list' attribute that contains a list of all the errors encountered while processing the request body. """ location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_request(fn, schema) record_schemas( fn, wrapper, location, request_schema=sort_schema(schema)) return wrapper return decorator
python
def validate_body(schema): """Validate the body of incoming requests for a flask view. An example usage might look like this:: from snapstore_schemas import validate_body @validate_body({ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'snap_id': {'type': 'string'}, 'series': {'type': 'string'}, 'name': {'type': 'string'}, 'title': {'type': 'string'}, 'keywords': { 'type': 'array', 'items': {'type': 'string'} }, 'summary': {'type': 'string'}, 'description': {'type': 'string'}, 'created_at': {'type': 'string'}, }, 'required': ['snap_id', 'series'], 'additionalProperties': False } }) def my_flask_view(): # view code here return "Hello World", 200 All incoming requests that have been routed to this view will be matched against the specified schema. If the request body does not match the schema an instance of `DataValidationError` will be raised. By default this will cause the flask application to return a 500 response, but this can be customised by telling flask how to handle these exceptions. The exception instance has an 'error_list' attribute that contains a list of all the errors encountered while processing the request body. """ location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_request(fn, schema) record_schemas( fn, wrapper, location, request_schema=sort_schema(schema)) return wrapper return decorator
[ "def", "validate_body", "(", "schema", ")", ":", "location", "=", "get_callsite_location", "(", ")", "def", "decorator", "(", "fn", ")", ":", "validate_schema", "(", "schema", ")", "wrapper", "=", "wrap_request", "(", "fn", ",", "schema", ")", "record_schemas", "(", "fn", ",", "wrapper", ",", "location", ",", "request_schema", "=", "sort_schema", "(", "schema", ")", ")", "return", "wrapper", "return", "decorator" ]
Validate the body of incoming requests for a flask view. An example usage might look like this:: from snapstore_schemas import validate_body @validate_body({ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'snap_id': {'type': 'string'}, 'series': {'type': 'string'}, 'name': {'type': 'string'}, 'title': {'type': 'string'}, 'keywords': { 'type': 'array', 'items': {'type': 'string'} }, 'summary': {'type': 'string'}, 'description': {'type': 'string'}, 'created_at': {'type': 'string'}, }, 'required': ['snap_id', 'series'], 'additionalProperties': False } }) def my_flask_view(): # view code here return "Hello World", 200 All incoming requests that have been routed to this view will be matched against the specified schema. If the request body does not match the schema an instance of `DataValidationError` will be raised. By default this will cause the flask application to return a 500 response, but this can be customised by telling flask how to handle these exceptions. The exception instance has an 'error_list' attribute that contains a list of all the errors encountered while processing the request body.
[ "Validate", "the", "body", "of", "incoming", "requests", "for", "a", "flask", "view", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_validation.py#L30-L81
canonical-ols/acceptable
acceptable/_validation.py
record_schemas
def record_schemas( fn, wrapper, location, request_schema=None, response_schema=None): """Support extracting the schema from the decorated function.""" # have we already been decorated by an acceptable api call? has_acceptable = hasattr(fn, '_acceptable_metadata') if request_schema is not None: # preserve schema for later use wrapper._request_schema = request_schema wrapper._request_schema_location = location if has_acceptable: fn._acceptable_metadata._request_schema = request_schema fn._acceptable_metadata._request_schema_location = location if response_schema is not None: # preserve schema for later use wrapper._response_schema = response_schema wrapper._response_schema_location = location if has_acceptable: fn._acceptable_metadata._response_schema = response_schema fn._acceptable_metadata._response_schema_location = location
python
def record_schemas( fn, wrapper, location, request_schema=None, response_schema=None): """Support extracting the schema from the decorated function.""" # have we already been decorated by an acceptable api call? has_acceptable = hasattr(fn, '_acceptable_metadata') if request_schema is not None: # preserve schema for later use wrapper._request_schema = request_schema wrapper._request_schema_location = location if has_acceptable: fn._acceptable_metadata._request_schema = request_schema fn._acceptable_metadata._request_schema_location = location if response_schema is not None: # preserve schema for later use wrapper._response_schema = response_schema wrapper._response_schema_location = location if has_acceptable: fn._acceptable_metadata._response_schema = response_schema fn._acceptable_metadata._response_schema_location = location
[ "def", "record_schemas", "(", "fn", ",", "wrapper", ",", "location", ",", "request_schema", "=", "None", ",", "response_schema", "=", "None", ")", ":", "# have we already been decorated by an acceptable api call?", "has_acceptable", "=", "hasattr", "(", "fn", ",", "'_acceptable_metadata'", ")", "if", "request_schema", "is", "not", "None", ":", "# preserve schema for later use", "wrapper", ".", "_request_schema", "=", "wrapper", ".", "_request_schema", "=", "request_schema", "wrapper", ".", "_request_schema_location", "=", "location", "if", "has_acceptable", ":", "fn", ".", "_acceptable_metadata", ".", "_request_schema", "=", "request_schema", "fn", ".", "_acceptable_metadata", ".", "_request_schema_location", "=", "location", "if", "response_schema", "is", "not", "None", ":", "# preserve schema for later use", "wrapper", ".", "_response_schema", "=", "wrapper", ".", "_response_schema", "=", "response_schema", "wrapper", ".", "_response_schema_location", "=", "location", "if", "has_acceptable", ":", "fn", ".", "_acceptable_metadata", ".", "_response_schema", "=", "response_schema", "fn", ".", "_acceptable_metadata", ".", "_response_schema_location", "=", "location" ]
Support extracting the schema from the decorated function.
[ "Support", "extracting", "the", "schema", "from", "the", "decorated", "function", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_validation.py#L108-L128
canonical-ols/acceptable
acceptable/_validation.py
validate_output
def validate_output(schema): """Validate the body of a response from a flask view. Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python list or dictionary. For example:: from snapstore_schemas import validate_output @validate_output({ 'type': 'object', 'properties': { 'ok': {'type': 'boolean'}, }, 'required': ['ok'], 'additionalProperties': False } def my_flask_view(): # view code here return {'ok': True} Every view response will be evaluated against the schema. Any that do not comply with the schema will cause DataValidationError to be raised. """ location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_response(fn, schema) record_schemas( fn, wrapper, location, response_schema=sort_schema(schema)) return wrapper return decorator
python
def validate_output(schema): """Validate the body of a response from a flask view. Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python list or dictionary. For example:: from snapstore_schemas import validate_output @validate_output({ 'type': 'object', 'properties': { 'ok': {'type': 'boolean'}, }, 'required': ['ok'], 'additionalProperties': False } def my_flask_view(): # view code here return {'ok': True} Every view response will be evaluated against the schema. Any that do not comply with the schema will cause DataValidationError to be raised. """ location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_response(fn, schema) record_schemas( fn, wrapper, location, response_schema=sort_schema(schema)) return wrapper return decorator
[ "def", "validate_output", "(", "schema", ")", ":", "location", "=", "get_callsite_location", "(", ")", "def", "decorator", "(", "fn", ")", ":", "validate_schema", "(", "schema", ")", "wrapper", "=", "wrap_response", "(", "fn", ",", "schema", ")", "record_schemas", "(", "fn", ",", "wrapper", ",", "location", ",", "response_schema", "=", "sort_schema", "(", "schema", ")", ")", "return", "wrapper", "return", "decorator" ]
Validate the body of a response from a flask view. Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python list or dictionary. For example:: from snapstore_schemas import validate_output @validate_output({ 'type': 'object', 'properties': { 'ok': {'type': 'boolean'}, }, 'required': ['ok'], 'additionalProperties': False } def my_flask_view(): # view code here return {'ok': True} Every view response will be evaluated against the schema. Any that do not comply with the schema will cause DataValidationError to be raised.
[ "Validate", "the", "body", "of", "a", "response", "from", "a", "flask", "view", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_validation.py#L131-L167
canonical-ols/acceptable
acceptable/_validation.py
validate
def validate(payload, schema): """Validate `payload` against `schema`, returning an error list. jsonschema provides lots of information in its errors, but it can be a bit of work to extract all the information. """ v = jsonschema.Draft4Validator( schema, format_checker=jsonschema.FormatChecker()) error_list = [] for error in v.iter_errors(payload): message = error.message location = '/' + '/'.join([str(c) for c in error.absolute_path]) error_list.append(message + ' at ' + location) return error_list
python
def validate(payload, schema): """Validate `payload` against `schema`, returning an error list. jsonschema provides lots of information in its errors, but it can be a bit of work to extract all the information. """ v = jsonschema.Draft4Validator( schema, format_checker=jsonschema.FormatChecker()) error_list = [] for error in v.iter_errors(payload): message = error.message location = '/' + '/'.join([str(c) for c in error.absolute_path]) error_list.append(message + ' at ' + location) return error_list
[ "def", "validate", "(", "payload", ",", "schema", ")", ":", "v", "=", "jsonschema", ".", "Draft4Validator", "(", "schema", ",", "format_checker", "=", "jsonschema", ".", "FormatChecker", "(", ")", ")", "error_list", "=", "[", "]", "for", "error", "in", "v", ".", "iter_errors", "(", "payload", ")", ":", "message", "=", "error", ".", "message", "location", "=", "'/'", "+", "'/'", ".", "join", "(", "[", "str", "(", "c", ")", "for", "c", "in", "error", ".", "absolute_path", "]", ")", "error_list", ".", "append", "(", "message", "+", "' at '", "+", "location", ")", "return", "error_list" ]
Validate `payload` against `schema`, returning an error list.

jsonschema provides lots of information in its errors, but it can be a
bit of work to extract all the information.
[ "Validate", "payload", "against", "schema", "returning", "an", "error", "list", "." ]
train
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/_validation.py#L207-L220
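A short sketch of calling validate directly; the error message wording in the comment is an assumption about jsonschema's Draft 4 output format, and the import path is taken from the record's file path::

    from acceptable._validation import validate  # path per this record

    schema = {
        'type': 'object',
        'properties': {'ok': {'type': 'boolean'}},
        'required': ['ok'],
    }

    errors = validate({'ok': 'yes'}, schema)
    # errors is a list of strings such as:
    #   ["'yes' is not of type 'boolean' at /ok"]
    # An empty list means the payload conforms to the schema.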
lalinsky/python-phoenixdb
phoenixdb/__init__.py
connect
def connect(url, max_retries=None, **kwargs):
    """Connects to a Phoenix query server.

    :param url:
        URL to the Phoenix query server, e.g. ``http://localhost:8765/``

    :param autocommit:
        Switch the connection to autocommit mode.

    :param readonly:
        Switch the connection to readonly mode.

    :param max_retries:
        The maximum number of retries in case there is a connection error.

    :param cursor_factory:
        If specified, the connection's
        :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.

    :returns:
        :class:`~phoenixdb.connection.Connection` object.
    """
    client = AvaticaClient(url, max_retries=max_retries)
    client.connect()
    return Connection(client, **kwargs)
python
def connect(url, max_retries=None, **kwargs):
    """Connects to a Phoenix query server.

    :param url:
        URL to the Phoenix query server, e.g. ``http://localhost:8765/``

    :param autocommit:
        Switch the connection to autocommit mode.

    :param readonly:
        Switch the connection to readonly mode.

    :param max_retries:
        The maximum number of retries in case there is a connection error.

    :param cursor_factory:
        If specified, the connection's
        :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.

    :returns:
        :class:`~phoenixdb.connection.Connection` object.
    """
    client = AvaticaClient(url, max_retries=max_retries)
    client.connect()
    return Connection(client, **kwargs)
[ "def", "connect", "(", "url", ",", "max_retries", "=", "None", ",", "*", "*", "kwargs", ")", ":", "client", "=", "AvaticaClient", "(", "url", ",", "max_retries", "=", "max_retries", ")", "client", ".", "connect", "(", ")", "return", "Connection", "(", "client", ",", "*", "*", "kwargs", ")" ]
Connects to a Phoenix query server.

:param url:
    URL to the Phoenix query server, e.g. ``http://localhost:8765/``

:param autocommit:
    Switch the connection to autocommit mode.

:param readonly:
    Switch the connection to readonly mode.

:param max_retries:
    The maximum number of retries in case there is a connection error.

:param cursor_factory:
    If specified, the connection's
    :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.

:returns:
    :class:`~phoenixdb.connection.Connection` object.
[ "Connects", "to", "a", "Phoenix", "query", "server", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/__init__.py#L44-L67
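A typical DB API 2.0 session built on connect above; the server URL and table name are placeholders::

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)
    cursor = conn.cursor()
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS users "
        "(id INTEGER PRIMARY KEY, username VARCHAR)")
    cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
    cursor.execute("SELECT * FROM users")
    print(cursor.fetchall())
    conn.close()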
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.connect
def connect(self):
        """Opens an HTTP connection to the RPC server."""
        logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
        try:
            self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
            self.connection.connect()
        except (httplib.HTTPException, socket.error) as e:
            raise errors.InterfaceError('Unable to connect to the specified service', e)
python
def connect(self):
        """Opens an HTTP connection to the RPC server."""
        logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
        try:
            self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
            self.connection.connect()
        except (httplib.HTTPException, socket.error) as e:
            raise errors.InterfaceError('Unable to connect to the specified service', e)
[ "def", "connect", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Opening connection to %s:%s\"", ",", "self", ".", "url", ".", "hostname", ",", "self", ".", "url", ".", "port", ")", "try", ":", "self", ".", "connection", "=", "httplib", ".", "HTTPConnection", "(", "self", ".", "url", ".", "hostname", ",", "self", ".", "url", ".", "port", ")", "self", ".", "connection", ".", "connect", "(", ")", "except", "(", "httplib", ".", "HTTPException", ",", "socket", ".", "error", ")", "as", "e", ":", "raise", "errors", ".", "InterfaceError", "(", "'Unable to connect to the specified service'", ",", "e", ")" ]
Opens an HTTP connection to the RPC server.
[ "Opens", "a", "HTTP", "connection", "to", "the", "RPC", "server", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L151-L158
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.close
def close(self):
        """Closes the HTTP connection to the RPC server."""
        if self.connection is not None:
            logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
            try:
                self.connection.close()
            except httplib.HTTPException:
                logger.warning("Error while closing connection", exc_info=True)
            self.connection = None
python
def close(self):
        """Closes the HTTP connection to the RPC server."""
        if self.connection is not None:
            logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
            try:
                self.connection.close()
            except httplib.HTTPException:
                logger.warning("Error while closing connection", exc_info=True)
            self.connection = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "connection", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"Closing connection to %s:%s\"", ",", "self", ".", "url", ".", "hostname", ",", "self", ".", "url", ".", "port", ")", "try", ":", "self", ".", "connection", ".", "close", "(", ")", "except", "httplib", ".", "HTTPException", ":", "logger", ".", "warning", "(", "\"Error while closing connection\"", ",", "exc_info", "=", "True", ")", "self", ".", "connection", "=", "None" ]
Closes the HTTP connection to the RPC server.
[ "Closes", "the", "HTTP", "connection", "to", "the", "RPC", "server", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L160-L168
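A lifecycle sketch covering the connect and close methods in the two records above; the import path is inferred from func_path_in_repository and is an assumption::

    from phoenixdb.avatica.client import AvaticaClient

    client = AvaticaClient('http://localhost:8765/', max_retries=3)
    client.connect()  # opens the underlying HTTP connection
    try:
        pass  # issue Avatica RPC calls here
    finally:
        # close() first checks that a connection exists, so calling it
        # on an already-closed client is safe.
        client.close()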
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.connection_sync
def connection_sync(self, connection_id, connProps=None):
        """Synchronizes connection properties with the server.

        :param connection_id:
            ID of the current connection.

        :param connProps:
            Dictionary with the properties that should be changed.

        :returns:
            A ``common_pb2.ConnectionProperties`` object.
        """
        if connProps is None:
            connProps = {}

        request = requests_pb2.ConnectionSyncRequest()
        request.connection_id = connection_id
        request.conn_props.auto_commit = connProps.get('autoCommit', False)
        request.conn_props.has_auto_commit = True
        request.conn_props.read_only = connProps.get('readOnly', False)
        request.conn_props.has_read_only = True
        request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
        request.conn_props.catalog = connProps.get('catalog', '')
        request.conn_props.schema = connProps.get('schema', '')

        response_data = self._apply(request)
        response = responses_pb2.ConnectionSyncResponse()
        response.ParseFromString(response_data)
        return response.conn_props
python
def connection_sync(self, connection_id, connProps=None):
        """Synchronizes connection properties with the server.

        :param connection_id:
            ID of the current connection.

        :param connProps:
            Dictionary with the properties that should be changed.

        :returns:
            A ``common_pb2.ConnectionProperties`` object.
        """
        if connProps is None:
            connProps = {}

        request = requests_pb2.ConnectionSyncRequest()
        request.connection_id = connection_id
        request.conn_props.auto_commit = connProps.get('autoCommit', False)
        request.conn_props.has_auto_commit = True
        request.conn_props.read_only = connProps.get('readOnly', False)
        request.conn_props.has_read_only = True
        request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
        request.conn_props.catalog = connProps.get('catalog', '')
        request.conn_props.schema = connProps.get('schema', '')

        response_data = self._apply(request)
        response = responses_pb2.ConnectionSyncResponse()
        response.ParseFromString(response_data)
        return response.conn_props
[ "def", "connection_sync", "(", "self", ",", "connection_id", ",", "connProps", "=", "None", ")", ":", "if", "connProps", "is", "None", ":", "connProps", "=", "{", "}", "request", "=", "requests_pb2", ".", "ConnectionSyncRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "request", ".", "conn_props", ".", "auto_commit", "=", "connProps", ".", "get", "(", "'autoCommit'", ",", "False", ")", "request", ".", "conn_props", ".", "has_auto_commit", "=", "True", "request", ".", "conn_props", ".", "read_only", "=", "connProps", ".", "get", "(", "'readOnly'", ",", "False", ")", "request", ".", "conn_props", ".", "has_read_only", "=", "True", "request", ".", "conn_props", ".", "transaction_isolation", "=", "connProps", ".", "get", "(", "'transactionIsolation'", ",", "0", ")", "request", ".", "conn_props", ".", "catalog", "=", "connProps", ".", "get", "(", "'catalog'", ",", "''", ")", "request", ".", "conn_props", ".", "schema", "=", "connProps", ".", "get", "(", "'schema'", ",", "''", ")", "response_data", "=", "self", ".", "_apply", "(", "request", ")", "response", "=", "responses_pb2", ".", "ConnectionSyncResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")", "return", "response", ".", "conn_props" ]
Synchronizes connection properties with the server.

:param connection_id:
    ID of the current connection.

:param connProps:
    Dictionary with the properties that should be changed.

:returns:
    A ``common_pb2.ConnectionProperties`` object.
[ "Synchronizes", "connection", "properties", "with", "the", "server", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L286-L314
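A sketch of connection_sync in use, assuming a client and connection_id from an earlier open_connection call; the snake_case attribute names on the returned protobuf mirror the fields set on the request in the record above::

    props = client.connection_sync(connection_id, {
        'autoCommit': True,
        'readOnly': False,
    })
    # The server echoes back the effective connection properties.
    print(props.auto_commit, props.read_only)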
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.open_connection
def open_connection(self, connection_id, info=None):
        """Opens a new connection.

        :param connection_id:
            ID of the connection to open.
        """
        request = requests_pb2.OpenConnectionRequest()
        request.connection_id = connection_id
        if info is not None:
            # Info is a list of repeated pairs, setting a dict directly fails
            for k, v in info.items():
                request.info[k] = v

        response_data = self._apply(request)
        response = responses_pb2.OpenConnectionResponse()
        response.ParseFromString(response_data)
python
def open_connection(self, connection_id, info=None):
        """Opens a new connection.

        :param connection_id:
            ID of the connection to open.
        """
        request = requests_pb2.OpenConnectionRequest()
        request.connection_id = connection_id
        if info is not None:
            # Info is a list of repeated pairs, setting a dict directly fails
            for k, v in info.items():
                request.info[k] = v

        response_data = self._apply(request)
        response = responses_pb2.OpenConnectionResponse()
        response.ParseFromString(response_data)
[ "def", "open_connection", "(", "self", ",", "connection_id", ",", "info", "=", "None", ")", ":", "request", "=", "requests_pb2", ".", "OpenConnectionRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "if", "info", "is", "not", "None", ":", "# Info is a list of repeated pairs, setting a dict directly fails", "for", "k", ",", "v", "in", "info", ".", "items", "(", ")", ":", "request", ".", "info", "[", "k", "]", "=", "v", "response_data", "=", "self", ".", "_apply", "(", "request", ")", "response", "=", "responses_pb2", ".", "OpenConnectionResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")" ]
Opens a new connection.

:param connection_id:
    ID of the connection to open.
[ "Opens", "a", "new", "connection", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L316-L331
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.close_connection
def close_connection(self, connection_id):
        """Closes a connection.

        :param connection_id:
            ID of the connection to close.
        """
        request = requests_pb2.CloseConnectionRequest()
        request.connection_id = connection_id
        self._apply(request)
python
def close_connection(self, connection_id):
        """Closes a connection.

        :param connection_id:
            ID of the connection to close.
        """
        request = requests_pb2.CloseConnectionRequest()
        request.connection_id = connection_id
        self._apply(request)
[ "def", "close_connection", "(", "self", ",", "connection_id", ")", ":", "request", "=", "requests_pb2", ".", "CloseConnectionRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "self", ".", "_apply", "(", "request", ")" ]
Closes a connection.

:param connection_id:
    ID of the connection to close.
[ "Closes", "a", "connection", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L333-L341
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.create_statement
def create_statement(self, connection_id):
        """Creates a new statement.

        :param connection_id:
            ID of the current connection.

        :returns:
            New statement ID.
        """
        request = requests_pb2.CreateStatementRequest()
        request.connection_id = connection_id

        response_data = self._apply(request)
        response = responses_pb2.CreateStatementResponse()
        response.ParseFromString(response_data)
        return response.statement_id
python
def create_statement(self, connection_id):
        """Creates a new statement.

        :param connection_id:
            ID of the current connection.

        :returns:
            New statement ID.
        """
        request = requests_pb2.CreateStatementRequest()
        request.connection_id = connection_id

        response_data = self._apply(request)
        response = responses_pb2.CreateStatementResponse()
        response.ParseFromString(response_data)
        return response.statement_id
[ "def", "create_statement", "(", "self", ",", "connection_id", ")", ":", "request", "=", "requests_pb2", ".", "CreateStatementRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "response_data", "=", "self", ".", "_apply", "(", "request", ")", "response", "=", "responses_pb2", ".", "CreateStatementResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")", "return", "response", ".", "statement_id" ]
Creates a new statement.

:param connection_id:
    ID of the current connection.

:returns:
    New statement ID.
[ "Creates", "a", "new", "statement", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L343-L358
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.close_statement
def close_statement(self, connection_id, statement_id):
        """Closes a statement.

        :param connection_id:
            ID of the current connection.

        :param statement_id:
            ID of the statement to close.
        """
        request = requests_pb2.CloseStatementRequest()
        request.connection_id = connection_id
        request.statement_id = statement_id
        self._apply(request)
python
def close_statement(self, connection_id, statement_id):
        """Closes a statement.

        :param connection_id:
            ID of the current connection.

        :param statement_id:
            ID of the statement to close.
        """
        request = requests_pb2.CloseStatementRequest()
        request.connection_id = connection_id
        request.statement_id = statement_id
        self._apply(request)
[ "def", "close_statement", "(", "self", ",", "connection_id", ",", "statement_id", ")", ":", "request", "=", "requests_pb2", ".", "CloseStatementRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "request", ".", "statement_id", "=", "statement_id", "self", ".", "_apply", "(", "request", ")" ]
Closes a statement.

:param connection_id:
    ID of the current connection.

:param statement_id:
    ID of the statement to close.
[ "Closes", "a", "statement", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L360-L373
lalinsky/python-phoenixdb
phoenixdb/avatica/client.py
AvaticaClient.prepare_and_execute
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
        """Prepares and immediately executes a statement.

        :param connection_id:
            ID of the current connection.

        :param statement_id:
            ID of the statement to prepare.

        :param sql:
            SQL query.

        :param max_rows_total:
            The maximum number of rows that will be allowed for this query.

        :param first_frame_max_size:
            The maximum number of rows that will be returned in the
            first Frame returned for this query.

        :returns:
            Result set with the signature of the prepared statement and the
            first frame data.
        """
        request = requests_pb2.PrepareAndExecuteRequest()
        request.connection_id = connection_id
        request.statement_id = statement_id
        request.sql = sql
        if max_rows_total is not None:
            request.max_rows_total = max_rows_total
        if first_frame_max_size is not None:
            request.first_frame_max_size = first_frame_max_size

        response_data = self._apply(request, 'ExecuteResponse')
        response = responses_pb2.ExecuteResponse()
        response.ParseFromString(response_data)
        return response.results
python
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
        """Prepares and immediately executes a statement.

        :param connection_id:
            ID of the current connection.

        :param statement_id:
            ID of the statement to prepare.

        :param sql:
            SQL query.

        :param max_rows_total:
            The maximum number of rows that will be allowed for this query.

        :param first_frame_max_size:
            The maximum number of rows that will be returned in the
            first Frame returned for this query.

        :returns:
            Result set with the signature of the prepared statement and the
            first frame data.
        """
        request = requests_pb2.PrepareAndExecuteRequest()
        request.connection_id = connection_id
        request.statement_id = statement_id
        request.sql = sql
        if max_rows_total is not None:
            request.max_rows_total = max_rows_total
        if first_frame_max_size is not None:
            request.first_frame_max_size = first_frame_max_size

        response_data = self._apply(request, 'ExecuteResponse')
        response = responses_pb2.ExecuteResponse()
        response.ParseFromString(response_data)
        return response.results
[ "def", "prepare_and_execute", "(", "self", ",", "connection_id", ",", "statement_id", ",", "sql", ",", "max_rows_total", "=", "None", ",", "first_frame_max_size", "=", "None", ")", ":", "request", "=", "requests_pb2", ".", "PrepareAndExecuteRequest", "(", ")", "request", ".", "connection_id", "=", "connection_id", "request", ".", "statement_id", "=", "statement_id", "request", ".", "sql", "=", "sql", "if", "max_rows_total", "is", "not", "None", ":", "request", ".", "max_rows_total", "=", "max_rows_total", "if", "first_frame_max_size", "is", "not", "None", ":", "request", ".", "first_frame_max_size", "=", "first_frame_max_size", "response_data", "=", "self", ".", "_apply", "(", "request", ",", "'ExecuteResponse'", ")", "response", "=", "responses_pb2", ".", "ExecuteResponse", "(", ")", "response", ".", "ParseFromString", "(", "response_data", ")", "return", "response", ".", "results" ]
Prepares and immediately executes a statement.

:param connection_id:
    ID of the current connection.

:param statement_id:
    ID of the statement to prepare.

:param sql:
    SQL query.

:param max_rows_total:
    The maximum number of rows that will be allowed for this query.

:param first_frame_max_size:
    The maximum number of rows that will be returned in the
    first Frame returned for this query.

:returns:
    Result set with the signature of the prepared statement and the
    first frame data.
[ "Prepares", "and", "immediately", "executes", "a", "statement", "." ]
train
https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L375-L408
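A sketch tying together the low-level AvaticaClient methods from the records above, assuming the client built in the earlier lifecycle sketch; the signature and first_frame attribute names on each result are assumptions based on the Avatica protobuf definitions, and row decoding is elided::

    import uuid

    connection_id = str(uuid.uuid4())
    client.open_connection(connection_id)
    statement_id = client.create_statement(connection_id)
    try:
        results = client.prepare_and_execute(
            connection_id, statement_id,
            "SELECT * FROM users", max_rows_total=100)
        for result_set in results:
            # Each result set carries column metadata and the first
            # frame of row data.
            print(result_set.signature, result_set.first_frame)
    finally:
        client.close_statement(connection_id, statement_id)
        client.close_connection(connection_id)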