Dataset schema (per-record fields):

repo              string (7-55 chars)
path              string (4-127 chars)
func_name         string (1-88 chars)
original_string   string (75-19.8k chars)
language          string (1 distinct value)
code              string (75-19.8k chars)
code_tokens       list
docstring         string (3-17.3k chars)
docstring_tokens  list
sha               string (40 chars)
url               string (87-242 chars)
partition         string (1 distinct value)
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.pool_versions
python
def pool_versions(self, updater_id=None, updater_name=None, pool_id=None):
    """Get list of pool versions.

    Parameters:
        updater_id (int):
        updater_name (str):
        pool_id (int):
    """
    params = {
        'search[updater_id]': updater_id,
        'search[updater_name]': updater_name,
        'search[pool_id]': pool_id
    }
    return self._get('pool_versions.json', params)
[ "def", "pool_versions", "(", "self", ",", "updater_id", "=", "None", ",", "updater_name", "=", "None", ",", "pool_id", "=", "None", ")", ":", "params", "=", "{", "'search[updater_id]'", ":", "updater_id", ",", "'search[updater_name]'", ":", "updater_name", ",", "'search[pool_id]'", ":", "pool_id", "}", "return", "self", ".", "_get", "(", "'pool_versions.json'", ",", "params", ")" ]
Get list of pool versions. Parameters: updater_id (int): updater_name (str): pool_id (int):
[ "Get", "list", "of", "pool", "versions", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L975-L988
train
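The Danbooru mixin methods in this section all follow the same pattern: keyword arguments are mapped onto 'search[...]' query parameters and the request is delegated to self._get. A minimal usage sketch, assuming read access to the public Danbooru API (the Danbooru client class is pybooru's public entry point; the printed result fields are assumptions about Danbooru's JSON, not guaranteed by this code):

from pybooru import Danbooru

client = Danbooru('danbooru')  # read-only; no credentials needed here

# Keywords become 'search[...]' parameters; None-valued parameters are
# omitted from the request by the requests library.
versions = client.pool_versions(pool_id=1)
for version in versions[:3]:
    print(version['id'], version['updater_id'])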
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_aliases
python
def tag_aliases(self, name_matches=None, antecedent_name=None,
                tag_id=None):
    """Get tag aliases.

    Parameters:
        name_matches (str): Match antecedent or consequent name.
        antecedent_name (str): Match antecedent name (exact match).
        tag_id (int): The tag alias id.
    """
    params = {
        'search[name_matches]': name_matches,
        'search[antecedent_name]': antecedent_name,
        'search[id]': tag_id
    }
    return self._get('tag_aliases.json', params)
[ "def", "tag_aliases", "(", "self", ",", "name_matches", "=", "None", ",", "antecedent_name", "=", "None", ",", "tag_id", "=", "None", ")", ":", "params", "=", "{", "'search[name_matches]'", ":", "name_matches", ",", "'search[antecedent_name]'", ":", "antecedent_name", ",", "'search[id]'", ":", "tag_id", "}", "return", "self", ".", "_get", "(", "'tag_aliases.json'", ",", "params", ")" ]
Get tags aliases. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): The tag alias id.
[ "Get", "tags", "aliases", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1039-L1053
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_implications
python
def tag_implications(self, name_matches=None, antecedent_name=None,
                     tag_id=None):
    """Get tag implications.

    Parameters:
        name_matches (str): Match antecedent or consequent name.
        antecedent_name (str): Match antecedent name (exact match).
        tag_id (int): Tag implication id.
    """
    params = {
        'search[name_matches]': name_matches,
        'search[antecedent_name]': antecedent_name,
        'search[id]': tag_id
    }
    return self._get('tag_implications.json', params)
[ "def", "tag_implications", "(", "self", ",", "name_matches", "=", "None", ",", "antecedent_name", "=", "None", ",", "tag_id", "=", "None", ")", ":", "params", "=", "{", "'search[name_matches]'", ":", "name_matches", ",", "'search[antecedent_name]'", ":", "antecedent_name", ",", "'search[id]'", ":", "tag_id", "}", "return", "self", ".", "_get", "(", "'tag_implications.json'", ",", "params", ")" ]
Get tags implications. Parameters: name_matches (str): Match antecedent or consequent name. antecedent_name (str): Match antecedent name (exact match). tag_id (int): Tag implication id.
[ "Get", "tags", "implications", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1055-L1069
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.tag_related
python
def tag_related(self, query, category=None):
    """Get related tags.

    Parameters:
        query (str): The tag to find the related tags for.
        category (str): If specified, show only tags of a specific
            category. Can be: General 0, Artist 1, Copyright 3 and
            Character 4.
    """
    params = {'query': query, 'category': category}
    return self._get('related_tag.json', params)
[ "def", "tag_related", "(", "self", ",", "query", ",", "category", "=", "None", ")", ":", "params", "=", "{", "'query'", ":", "query", ",", "'category'", ":", "category", "}", "return", "self", ".", "_get", "(", "'related_tag.json'", ",", "params", ")" ]
Get related tags. Parameters: query (str): The tag to find the related tags for. category (str): If specified, show only tags of a specific category. Can be: General 0, Artist 1, Copyright 3 and Character 4.
[ "Get", "related", "tags", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1071-L1081
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.wiki_list
python
def wiki_list(self, title=None, creator_id=None, body_matches=None,
              other_names_match=None, creator_name=None, hide_deleted=None,
              other_names_present=None, order=None):
    """Retrieve a list of every wiki page.

    Parameters:
        title (str): Page title.
        creator_id (int): Creator id.
        body_matches (str): Page content.
        other_names_match (str): Other names.
        creator_name (str): Creator name.
        hide_deleted (str): Can be: yes, no.
        other_names_present (str): Can be: yes, no.
        order (str): Can be: date, title.
    """
    params = {
        'search[title]': title,
        'search[creator_id]': creator_id,
        'search[body_matches]': body_matches,
        'search[other_names_match]': other_names_match,
        'search[creator_name]': creator_name,
        'search[hide_deleted]': hide_deleted,
        'search[other_names_present]': other_names_present,
        'search[order]': order
    }
    return self._get('wiki_pages.json', params)
[ "def", "wiki_list", "(", "self", ",", "title", "=", "None", ",", "creator_id", "=", "None", ",", "body_matches", "=", "None", ",", "other_names_match", "=", "None", ",", "creator_name", "=", "None", ",", "hide_deleted", "=", "None", ",", "other_names_present", "=", "None", ",", "order", "=", "None", ")", ":", "params", "=", "{", "'search[title]'", ":", "title", ",", "'search[creator_id]'", ":", "creator_id", ",", "'search[body_matches]'", ":", "body_matches", ",", "'search[other_names_match]'", ":", "other_names_match", ",", "'search[creator_name]'", ":", "creator_name", ",", "'search[hide_deleted]'", ":", "hide_deleted", ",", "'search[other_names_present]'", ":", "other_names_present", ",", "'search[order]'", ":", "order", "}", "return", "self", ".", "_get", "(", "'wiki_pages.json'", ",", "params", ")" ]
Function to retrieves a list of every wiki page. Parameters: title (str): Page title. creator_id (int): Creator id. body_matches (str): Page content. other_names_match (str): Other names. creator_name (str): Creator name. hide_deleted (str): Can be: yes, no. other_names_present (str): Can be: yes, no. order (str): Can be: date, title.
[ "Function", "to", "retrieves", "a", "list", "of", "every", "wiki", "page", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1083-L1108
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.wiki_versions_list
python
def wiki_versions_list(self, page_id, updater_id):
    """Return a list of wiki page versions.

    Parameters:
        page_id (int):
        updater_id (int):
    """
    params = {
        'search[updater_id]': updater_id,
        'search[wiki_page_id]': page_id
    }
    return self._get('wiki_page_versions.json', params)
[ "def", "wiki_versions_list", "(", "self", ",", "page_id", ",", "updater_id", ")", ":", "params", "=", "{", "'earch[updater_id]'", ":", "updater_id", ",", "'search[wiki_page_id]'", ":", "page_id", "}", "return", "self", ".", "_get", "(", "'wiki_page_versions.json'", ",", "params", ")" ]
Return a list of wiki page version. Parameters: page_id (int): updater_id (int):
[ "Return", "a", "list", "of", "wiki", "page", "version", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1172-L1183
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.forum_topic_list
python
def forum_topic_list(self, title_matches=None, title=None,
                     category_id=None):
    """Get forum topics.

    Parameters:
        title_matches (str): Search body for the given terms.
        title (str): Exact title match.
        category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features
            respectively).
    """
    params = {
        'search[title_matches]': title_matches,
        'search[title]': title,
        'search[category_id]': category_id
    }
    return self._get('forum_topics.json', params)
[ "def", "forum_topic_list", "(", "self", ",", "title_matches", "=", "None", ",", "title", "=", "None", ",", "category_id", "=", "None", ")", ":", "params", "=", "{", "'search[title_matches]'", ":", "title_matches", ",", "'search[title]'", ":", "title", ",", "'search[category_id]'", ":", "category_id", "}", "return", "self", ".", "_get", "(", "'forum_topics.json'", ",", "params", ")" ]
Function to get forum topics. Parameters: title_matches (str): Search body for the given terms. title (str): Exact title match. category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively).
[ "Function", "to", "get", "forum", "topics", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1193-L1208
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.forum_post_list
python
def forum_post_list(self, creator_id=None, creator_name=None, topic_id=None,
                    topic_title_matches=None, topic_category_id=None,
                    body_matches=None):
    """Return a list of forum posts.

    Parameters:
        creator_id (int):
        creator_name (str):
        topic_id (int):
        topic_title_matches (str):
        topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
            Features respectively).
        body_matches (str): Can be part of the post content.
    """
    params = {
        'search[creator_id]': creator_id,
        'search[creator_name]': creator_name,
        'search[topic_id]': topic_id,
        'search[topic_title_matches]': topic_title_matches,
        'search[topic_category_id]': topic_category_id,
        'search[body_matches]': body_matches
    }
    return self._get('forum_posts.json', params)
[ "def", "forum_post_list", "(", "self", ",", "creator_id", "=", "None", ",", "creator_name", "=", "None", ",", "topic_id", "=", "None", ",", "topic_title_matches", "=", "None", ",", "topic_category_id", "=", "None", ",", "body_matches", "=", "None", ")", ":", "params", "=", "{", "'search[creator_id]'", ":", "creator_id", ",", "'search[creator_name]'", ":", "creator_name", ",", "'search[topic_id]'", ":", "topic_id", ",", "'search[topic_title_matches]'", ":", "topic_title_matches", ",", "'search[topic_category_id]'", ":", "topic_category_id", ",", "'search[body_matches]'", ":", "body_matches", "}", "return", "self", ".", "_get", "(", "'forum_posts.json'", ",", "params", ")" ]
Return a list of forum posts. Parameters: creator_id (int): creator_name (str): topic_id (int): topic_title_matches (str): topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs & Features respectively). body_matches (str): Can be part of the post content.
[ "Return", "a", "list", "of", "forum", "posts", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L1268-L1290
train
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru.site_name
python
def site_name(self, site_name):
    """Set and check the site name, and set the site URL.

    Parameters:
        site_name (str): The site name in 'SITE_LIST', default sites.

    Raises:
        PybooruError: When 'site_name' isn't valid.
    """
    if site_name in SITE_LIST:
        self.__site_name = site_name
        self.__site_url = SITE_LIST[site_name]['url']
    else:
        raise PybooruError(
            "The 'site_name' is not valid, specify a valid 'site_name'.")
[ "def", "site_name", "(", "self", ",", "site_name", ")", ":", "if", "site_name", "in", "SITE_LIST", ":", "self", ".", "__site_name", "=", "site_name", "self", ".", "__site_url", "=", "SITE_LIST", "[", "site_name", "]", "[", "'url'", "]", "else", ":", "raise", "PybooruError", "(", "\"The 'site_name' is not valid, specify a valid 'site_name'.\"", ")" ]
Function that sets and checks the site name and set url. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid.
[ "Function", "that", "sets", "and", "checks", "the", "site", "name", "and", "set", "url", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L79-L93
train
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru.site_url
python
def site_url(self, url):
    """URL setter and validator for the site_url property.

    Parameters:
        url (str): URL of a Moebooru/Danbooru based site.

    Raises:
        PybooruError: When the URL scheme or URL is invalid.
    """
    # Regular expression for URL validation
    regex = re.compile(
        r'^(?:http|https)://'  # Scheme: only HTTP/HTTPS
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'  # Domain
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or IPv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or IPv6
        r'(?::\d+)?'  # Port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    # Validate URL
    if re.match('^(?:http|https)://', url):
        if re.search(regex, url):
            self.__site_url = url
        else:
            raise PybooruError("Invalid URL: {0}".format(url))
    else:
        raise PybooruError(
            "Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
[ "def", "site_url", "(", "self", ",", "url", ")", ":", "# Regular expression to URL validate", "regex", "=", "re", ".", "compile", "(", "r'^(?:http|https)://'", "# Scheme only HTTP/HTTPS", "r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?| \\\n [A-Z0-9-]{2,}(?<!-)\\.?)|'", "# Domain", "r'localhost|'", "# localhost...", "r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}|'", "# or ipv4", "r'\\[?[A-F0-9]*:[A-F0-9:]+\\]?)'", "# or ipv6", "r'(?::\\d+)?'", "# Port", "r'(?:/?|[/?]\\S+)$'", ",", "re", ".", "IGNORECASE", ")", "# Validate URL", "if", "re", ".", "match", "(", "'^(?:http|https)://'", ",", "url", ")", ":", "if", "re", ".", "search", "(", "regex", ",", "url", ")", ":", "self", ".", "__site_url", "=", "url", "else", ":", "raise", "PybooruError", "(", "\"Invalid URL: {0}\"", ".", "format", "(", "url", ")", ")", "else", ":", "raise", "PybooruError", "(", "\"Invalid URL scheme, use HTTP or HTTPS: {0}\"", ".", "format", "(", "url", ")", ")" ]
URL setter and validator for site_url property. Parameters: url (str): URL of on Moebooru/Danbooru based sites. Raises: PybooruError: When URL scheme or URL are invalid.
[ "URL", "setter", "and", "validator", "for", "site_url", "property", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L106-L134
train
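The validator only accepts a URL when both checks pass: the scheme prefix and the full pattern. A standalone sketch of the same regex's behavior (pattern copied from the setter above):

import re

URL_RE = re.compile(
    r'^(?:http|https)://'
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'
    r'localhost|'
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'
    r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'
    r'(?::\d+)?'
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

for candidate in ('https://danbooru.donmai.us', 'http://localhost:3000',
                  'ftp://example.com', 'https://'):
    print(candidate, '->', bool(URL_RE.search(candidate)))
# -> True, True, False, False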
LuqueDaniel/pybooru
pybooru/pybooru.py
_Pybooru._request
python
def _request(self, url, api_call, request_args, method='GET'):
    """Make a request and return the JSON data.

    Parameters:
        url (str): Base url call.
        api_call (str): API function to be called.
        request_args (dict): All requests parameters.
        method (str): (Default: GET) HTTP method, 'GET' or 'POST'.

    Raises:
        PybooruHTTPError: HTTP Error.
        requests.exceptions.Timeout: When HTTP Timeout.
        ValueError: When the JSON response can't be decoded.
    """
    try:
        if method != 'GET':
            # Reset content-type for data encoded as a multipart form
            self.client.headers.update({'content-type': None})
        response = self.client.request(method, url, **request_args)

        self.last_call.update({
            'API': api_call,
            'url': response.url,
            'status_code': response.status_code,
            'status': self._get_status(response.status_code),
            'headers': response.headers
        })

        if response.status_code in (200, 201, 202, 204):
            return response.json()
        raise PybooruHTTPError("In _request", response.status_code,
                               response.url)
    except requests.exceptions.Timeout:
        # The request never completed, so report the requested url.
        raise PybooruError("Timeout! url: {0}".format(url))
    except ValueError as e:
        raise PybooruError("JSON Error: {0} in line {1} column {2}".format(
            e.msg, e.lineno, e.colno))
[ "def", "_request", "(", "self", ",", "url", ",", "api_call", ",", "request_args", ",", "method", "=", "'GET'", ")", ":", "try", ":", "if", "method", "!=", "'GET'", ":", "# Reset content-type for data encoded as a multipart form", "self", ".", "client", ".", "headers", ".", "update", "(", "{", "'content-type'", ":", "None", "}", ")", "response", "=", "self", ".", "client", ".", "request", "(", "method", ",", "url", ",", "*", "*", "request_args", ")", "self", ".", "last_call", ".", "update", "(", "{", "'API'", ":", "api_call", ",", "'url'", ":", "response", ".", "url", ",", "'status_code'", ":", "response", ".", "status_code", ",", "'status'", ":", "self", ".", "_get_status", "(", "response", ".", "status_code", ")", ",", "'headers'", ":", "response", ".", "headers", "}", ")", "if", "response", ".", "status_code", "in", "(", "200", ",", "201", ",", "202", ",", "204", ")", ":", "return", "response", ".", "json", "(", ")", "raise", "PybooruHTTPError", "(", "\"In _request\"", ",", "response", ".", "status_code", ",", "response", ".", "url", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "PybooruError", "(", "\"Timeout! url: {0}\"", ".", "format", "(", "response", ".", "url", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "PybooruError", "(", "\"JSON Error: {0} in line {1} column {2}\"", ".", "format", "(", "e", ".", "msg", ",", "e", ".", "lineno", ",", "e", ".", "colno", ")", ")" ]
Function to request and returning JSON data. Parameters: url (str): Base url call. api_call (str): API function to be called. request_args (dict): All requests parameters. method (str): (Defauld: GET) HTTP method 'GET' or 'POST' Raises: PybooruHTTPError: HTTP Error. requests.exceptions.Timeout: When HTTP Timeout. ValueError: When can't decode JSON response.
[ "Function", "to", "request", "and", "returning", "JSON", "data", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/pybooru.py#L149-L186
train
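Because every request updates last_call before the status code is checked, the most recent call stays inspectable even after a failure. A sketch assuming the public Danbooru client (post_list is one of the mixin's read-only methods):

from pybooru import Danbooru
from pybooru.exceptions import PybooruHTTPError

client = Danbooru('danbooru')
try:
    client.post_list(limit=1)
except PybooruHTTPError:
    pass  # the failed call is still recorded in last_call

print(client.last_call['status_code'], client.last_call['url'])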
LuqueDaniel/pybooru
pybooru/api_moebooru.py
MoebooruApi_Mixin.post_update
python
def post_update(self, post_id, tags=None, file_=None, rating=None,
                source=None, is_rating_locked=None, is_note_locked=None,
                parent_id=None):
    """Update a specific post.

    Only the 'post_id' parameter is required. Leave the other parameters
    blank if you don't want to change them (Requires login).

    Parameters:
        post_id (int): The id number of the post to update.
        tags (str): A space delimited list of tags. Specify previous tags.
        file_ (str): The file data ENCODED as a multipart form.
        rating (str): The rating for the post. Can be: safe, questionable,
            or explicit.
        source (str): If this is a URL, Moebooru will download the file.
        is_rating_locked (bool): Set to True to prevent others from
            changing the rating.
        is_note_locked (bool): Set to True to prevent others from adding
            notes.
        parent_id (int): The ID of the parent post.
    """
    params = {
        'id': post_id,
        'post[tags]': tags,
        'post[rating]': rating,
        'post[source]': source,
        'post[is_rating_locked]': is_rating_locked,
        'post[is_note_locked]': is_note_locked,
        'post[parent_id]': parent_id
    }
    if file_ is not None:
        # Close the file handle once the request has been sent.
        with open(file_, 'rb') as f:
            return self._get('post/update', params, 'PUT',
                             {'post[file]': f})
    return self._get('post/update', params, 'PUT')
[ "def", "post_update", "(", "self", ",", "post_id", ",", "tags", "=", "None", ",", "file_", "=", "None", ",", "rating", "=", "None", ",", "source", "=", "None", ",", "is_rating_locked", "=", "None", ",", "is_note_locked", "=", "None", ",", "parent_id", "=", "None", ")", ":", "params", "=", "{", "'id'", ":", "post_id", ",", "'post[tags]'", ":", "tags", ",", "'post[rating]'", ":", "rating", ",", "'post[source]'", ":", "source", ",", "'post[is_rating_locked]'", ":", "is_rating_locked", ",", "'post[is_note_locked]'", ":", "is_note_locked", ",", "'post[parent_id]'", ":", "parent_id", "}", "if", "file_", "is", "not", "None", ":", "file_", "=", "{", "'post[file]'", ":", "open", "(", "file_", ",", "'rb'", ")", "}", "return", "self", ".", "_get", "(", "'post/update'", ",", "params", ",", "'PUT'", ",", "file_", ")", "else", ":", "return", "self", ".", "_get", "(", "'post/update'", ",", "params", ",", "'PUT'", ")" ]
Update a specific post. Only the 'post_id' parameter is required. Leave the other parameters blank if you don't want to change them (Requires login). Parameters: post_id (int): The id number of the post to update. tags (str): A space delimited list of tags. Specify previous tags. file_ (str): The file data ENCODED as a multipart form. rating (str): The rating for the post. Can be: safe, questionable, or explicit. source (str): If this is a URL, Moebooru will download the file. rating_locked (bool): Set to True to prevent others from changing the rating. note_locked (bool): Set to True to prevent others from adding notes. parent_id (int): The ID of the parent post.
[ "Update", "a", "specific", "post", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L79-L113
train
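A usage sketch for the updater, assuming a valid Moebooru account (the site name, credentials and post id below are illustrative):

from pybooru import Moebooru

client = Moebooru('konachan', username='my_user', password='my_pass')

# 'tags' replaces the previous tag list, so include every tag to keep.
client.post_update(1234, tags='landscape sky', rating='safe',
                   is_rating_locked=True)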
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru.site_name
python
def site_name(self, site_name):
    """Set api_version and hash_string.

    Parameters:
        site_name (str): The site name in 'SITE_LIST', default sites.

    Raises:
        PybooruError: When 'site_name' isn't valid.
    """
    # Set the base class property site_name
    _Pybooru.site_name.fset(self, site_name)

    if ('api_version' in SITE_LIST[site_name]
            and 'hashed_string' in SITE_LIST[site_name]):
        self.api_version = SITE_LIST[site_name]['api_version']
        self.hash_string = SITE_LIST[site_name]['hashed_string']
[ "def", "site_name", "(", "self", ",", "site_name", ")", ":", "# Set base class property site_name", "_Pybooru", ".", "site_name", ".", "fset", "(", "self", ",", "site_name", ")", "if", "(", "'api_version'", "and", "'hashed_string'", ")", "in", "SITE_LIST", "[", "site_name", "]", ":", "self", ".", "api_version", "=", "SITE_LIST", "[", "site_name", "]", "[", "'api_version'", "]", "self", ".", "hash_string", "=", "SITE_LIST", "[", "site_name", "]", "[", "'hashed_string'", "]" ]
Sets api_version and hash_string. Parameters: site_name (str): The site name in 'SITE_LIST', default sites. Raises: PybooruError: When 'site_name' isn't valid.
[ "Sets", "api_version", "and", "hash_string", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L74-L88
train
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru._build_url
python
def _build_url(self, api_call):
    """Build request url.

    Parameters:
        api_call (str): Base API Call.

    Returns:
        Complete url (str).
    """
    if self.api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):
        if '/' not in api_call:
            return "{0}/{1}/index.json".format(self.site_url, api_call)
    return "{0}/{1}.json".format(self.site_url, api_call)
[ "def", "_build_url", "(", "self", ",", "api_call", ")", ":", "if", "self", ".", "api_version", "in", "(", "'1.13.0'", ",", "'1.13.0+update.1'", ",", "'1.13.0+update.2'", ")", ":", "if", "'/'", "not", "in", "api_call", ":", "return", "\"{0}/{1}/index.json\"", ".", "format", "(", "self", ".", "site_url", ",", "api_call", ")", "return", "\"{0}/{1}.json\"", ".", "format", "(", "self", ".", "site_url", ",", "api_call", ")" ]
Build request url. Parameters: api_call (str): Base API Call. Returns: Complete url (str).
[ "Build", "request", "url", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L90-L102
train
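The version check exists because Moebooru 1.13.0 still uses the legacy controller/index.json routes for bare controller names. A worked illustration of the two URL shapes (a stand-alone mirror of the method's logic; the site URL is assumed):

def build_url(site_url, api_call, api_version):
    # Mirror of Moebooru._build_url, for illustration only.
    if api_version in ('1.13.0', '1.13.0+update.1', '1.13.0+update.2'):
        if '/' not in api_call:
            return "{0}/{1}/index.json".format(site_url, api_call)
    return "{0}/{1}.json".format(site_url, api_call)

print(build_url('https://yande.re', 'post', '1.13.0'))
# https://yande.re/post/index.json
print(build_url('https://yande.re', 'pool/show', '1.13.0'))
# https://yande.re/pool/show.json
print(build_url('https://yande.re', 'post', '1.13.0.1'))
# https://yande.re/post.json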
LuqueDaniel/pybooru
pybooru/moebooru.py
Moebooru._build_hash_string
python
def _build_hash_string(self):
    """Build the password hash string.

    Raises:
        PybooruError: When no hash string is provided.
        PybooruError: When no username or password is provided.
        PybooruError: When Pybooru can't add the password to the hash
            string.
    """
    # Build the AUTHENTICATION hash_string
    # Check if hash_string exists
    if self.site_name in SITE_LIST or self.hash_string:
        if self.username and self.password:
            try:
                hash_string = self.hash_string.format(self.password)
            except TypeError:
                raise PybooruError("Pybooru can't add 'password' "
                                   "to 'hash_string'")
            # Hash the string with SHA1 and store the hex digest
            self.password_hash = hashlib.sha1(
                hash_string.encode('utf-8')).hexdigest()
        else:
            raise PybooruError("Specify the 'username' and 'password' "
                               "parameters of the Pybooru object, for "
                               "setting 'password_hash' attribute.")
    else:
        raise PybooruError(
            "Specify the 'hash_string' parameter of the Pybooru"
            " object, for the functions that require login.")
[ "def", "_build_hash_string", "(", "self", ")", ":", "# Build AUTENTICATION hash_string", "# Check if hash_string exists", "if", "self", ".", "site_name", "in", "SITE_LIST", "or", "self", ".", "hash_string", ":", "if", "self", ".", "username", "and", "self", ".", "password", ":", "try", ":", "hash_string", "=", "self", ".", "hash_string", ".", "format", "(", "self", ".", "password", ")", "except", "TypeError", ":", "raise", "PybooruError", "(", "\"Pybooru can't add 'password' \"", "\"to 'hash_string'\"", ")", "# encrypt hashed_string to SHA1 and return hexdigest string", "self", ".", "password_hash", "=", "hashlib", ".", "sha1", "(", "hash_string", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "else", ":", "raise", "PybooruError", "(", "\"Specify the 'username' and 'password' \"", "\"parameters of the Pybooru object, for \"", "\"setting 'password_hash' attribute.\"", ")", "else", ":", "raise", "PybooruError", "(", "\"Specify the 'hash_string' parameter of the Pybooru\"", "\" object, for the functions that requires login.\"", ")" ]
Function for build password hash string. Raises: PybooruError: When isn't provide hash string. PybooruError: When aren't provide username or password. PybooruError: When Pybooru can't add password to hash strring.
[ "Function", "for", "build", "password", "hash", "string", "." ]
60cd5254684d293b308f0b11b8f4ac2dce101479
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L104-L131
train
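Each supported site ships a hash_string template with a single slot for the password; the SHA1 hex digest of the filled-in template is what gets sent in place of the plain password. A minimal sketch with an illustrative (not a real site's) template:

import hashlib

hash_string = 'example-salt--{0}--'  # illustrative template, one {0} slot
password = 'hunter2'

password_hash = hashlib.sha1(
    hash_string.format(password).encode('utf-8')).hexdigest()
print(password_hash)  # 40-character hex digest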
bjodah/pyodesys
pyodesys/symbolic.py
_is_autonomous
python
def _is_autonomous(indep, exprs):
    """ Whether the expressions for the dependent variables are autonomous.

    Note that the system may still behave as an autonomous system on the
    interface of :meth:`integrate` due to use of pre-/post-processors.
    """
    if indep is None:
        return True
    for expr in exprs:
        try:
            in_there = indep in expr.free_symbols
        except Exception:
            in_there = expr.has(indep)
        if in_there:
            return False
    return True
[ "def", "_is_autonomous", "(", "indep", ",", "exprs", ")", ":", "if", "indep", "is", "None", ":", "return", "True", "for", "expr", "in", "exprs", ":", "try", ":", "in_there", "=", "indep", "in", "expr", ".", "free_symbols", "except", ":", "in_there", "=", "expr", ".", "has", "(", "indep", ")", "if", "in_there", ":", "return", "False", "return", "True" ]
Whether the expressions for the dependent variables are autonomous. Note that the system may still behave as an autonomous system on the interface of :meth:`integrate` due to use of pre-/post-processors.
[ "Whether", "the", "expressions", "for", "the", "dependent", "variables", "are", "autonomous", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L81-L96
train
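For sympy expressions the check reduces to membership of the independent symbol in each expression's free symbols (the expr.has fallback covers objects without free_symbols). A small sketch, importing the private helper directly (an implementation detail, not public API):

import sympy
from pyodesys.symbolic import _is_autonomous

t, y0, y1 = sympy.symbols('t y0 y1')

print(_is_autonomous(t, [-y0, y0 - y1]))         # True: no explicit t
print(_is_autonomous(t, [-y0 * sympy.exp(-t)]))  # False: t appears
print(_is_autonomous(None, [-y0]))               # True by convention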
bjodah/pyodesys
pyodesys/symbolic.py
symmetricsys
python
def symmetricsys(dep_tr=None, indep_tr=None, SuperClass=TransformedSys, **kwargs):
    """ A factory function for creating symmetrically transformed systems.

    Creates a new subclass which applies the same transformation for each
    dependent variable.

    Parameters
    ----------
    dep_tr : pair of callables (default: None)
        Forward and backward transformation callbacks to be applied to
        the dependent variables.
    indep_tr : pair of callables (default: None)
        Forward and backward transformation to be applied to the
        independent variable.
    SuperClass : class
    \*\*kwargs :
        Default keyword arguments for the TransformedSys subclass.

    Returns
    -------
    Subclass of SuperClass (by default :class:`TransformedSys`).

    Examples
    --------
    >>> import sympy
    >>> logexp = (sympy.log, sympy.exp)
    >>> def psimp(exprs):
    ...     return [sympy.powsimp(expr.expand(), force=True) for expr in exprs]
    ...
    >>> LogLogSys = symmetricsys(logexp, logexp, exprs_process_cb=psimp)
    >>> mysys = LogLogSys.from_callback(lambda x, y, p: [-y[0], y[0] - y[1]], 2, 0)
    >>> mysys.exprs
    (-exp(x_0), -exp(x_0) + exp(x_0 + y_0 - y_1))

    """
    if dep_tr is not None:
        if not callable(dep_tr[0]) or not callable(dep_tr[1]):
            raise ValueError("Expected dep_tr to be a pair of callables")
    if indep_tr is not None:
        if not callable(indep_tr[0]) or not callable(indep_tr[1]):
            raise ValueError("Expected indep_tr to be a pair of callables")

    class _SymmetricSys(SuperClass):
        def __init__(self, dep_exprs, indep=None, **inner_kwargs):
            new_kwargs = kwargs.copy()
            new_kwargs.update(inner_kwargs)
            dep, exprs = zip(*dep_exprs)
            super(_SymmetricSys, self).__init__(
                zip(dep, exprs), indep,
                dep_transf=list(zip(
                    list(map(dep_tr[0], dep)),
                    list(map(dep_tr[1], dep))
                )) if dep_tr is not None else None,
                indep_transf=((indep_tr[0](indep), indep_tr[1](indep))
                              if indep_tr is not None else None),
                **new_kwargs)

        @classmethod
        def from_callback(cls, cb, ny=None, nparams=None, **inner_kwargs):
            new_kwargs = kwargs.copy()
            new_kwargs.update(inner_kwargs)
            return SuperClass.from_callback(
                cb, ny, nparams,
                dep_transf_cbs=repeat(dep_tr) if dep_tr is not None else None,
                indep_transf_cbs=indep_tr,
                **new_kwargs)

    return _SymmetricSys
[ "def", "symmetricsys", "(", "dep_tr", "=", "None", ",", "indep_tr", "=", "None", ",", "SuperClass", "=", "TransformedSys", ",", "*", "*", "kwargs", ")", ":", "if", "dep_tr", "is", "not", "None", ":", "if", "not", "callable", "(", "dep_tr", "[", "0", "]", ")", "or", "not", "callable", "(", "dep_tr", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "\"Exceptected dep_tr to be a pair of callables\"", ")", "if", "indep_tr", "is", "not", "None", ":", "if", "not", "callable", "(", "indep_tr", "[", "0", "]", ")", "or", "not", "callable", "(", "indep_tr", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "\"Exceptected indep_tr to be a pair of callables\"", ")", "class", "_SymmetricSys", "(", "SuperClass", ")", ":", "def", "__init__", "(", "self", ",", "dep_exprs", ",", "indep", "=", "None", ",", "*", "*", "inner_kwargs", ")", ":", "new_kwargs", "=", "kwargs", ".", "copy", "(", ")", "new_kwargs", ".", "update", "(", "inner_kwargs", ")", "dep", ",", "exprs", "=", "zip", "(", "*", "dep_exprs", ")", "super", "(", "_SymmetricSys", ",", "self", ")", ".", "__init__", "(", "zip", "(", "dep", ",", "exprs", ")", ",", "indep", ",", "dep_transf", "=", "list", "(", "zip", "(", "list", "(", "map", "(", "dep_tr", "[", "0", "]", ",", "dep", ")", ")", ",", "list", "(", "map", "(", "dep_tr", "[", "1", "]", ",", "dep", ")", ")", ")", ")", "if", "dep_tr", "is", "not", "None", "else", "None", ",", "indep_transf", "=", "(", "(", "indep_tr", "[", "0", "]", "(", "indep", ")", ",", "indep_tr", "[", "1", "]", "(", "indep", ")", ")", "if", "indep_tr", "is", "not", "None", "else", "None", ")", ",", "*", "*", "new_kwargs", ")", "@", "classmethod", "def", "from_callback", "(", "cls", ",", "cb", ",", "ny", "=", "None", ",", "nparams", "=", "None", ",", "*", "*", "inner_kwargs", ")", ":", "new_kwargs", "=", "kwargs", ".", "copy", "(", ")", "new_kwargs", ".", "update", "(", "inner_kwargs", ")", "return", "SuperClass", ".", "from_callback", "(", "cb", ",", "ny", ",", "nparams", ",", "dep_transf_cbs", "=", "repeat", "(", "dep_tr", ")", "if", "dep_tr", "is", "not", "None", "else", "None", ",", "indep_transf_cbs", "=", "indep_tr", ",", "*", "*", "new_kwargs", ")", "return", "_SymmetricSys" ]
A factory function for creating symmetrically transformed systems. Creates a new subclass which applies the same transformation for each dependent variable. Parameters ---------- dep_tr : pair of callables (default: None) Forward and backward transformation callbacks to be applied to the dependent variables. indep_tr : pair of callables (default: None) Forward and backward transformation to be applied to the independent variable. SuperClass : class \*\*kwargs : Default keyword arguments for the TransformedSys subclass. Returns ------- Subclass of SuperClass (by default :class:`TransformedSys`). Examples -------- >>> import sympy >>> logexp = (sympy.log, sympy.exp) >>> def psimp(exprs): ... return [sympy.powsimp(expr.expand(), force=True) for expr in exprs] ... >>> LogLogSys = symmetricsys(logexp, logexp, exprs_process_cb=psimp) >>> mysys = LogLogSys.from_callback(lambda x, y, p: [-y[0], y[0] - y[1]], 2, 0) >>> mysys.exprs (-exp(x_0), -exp(x_0) + exp(x_0 + y_0 - y_1))
[ "A", "factory", "function", "for", "creating", "symmetrically", "transformed", "systems", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L961-L1027
train
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.from_other
python
def from_other(cls, ori, **kwargs):
    """ Creates a new instance with an existing one as a template.

    Parameters
    ----------
    ori : SymbolicSys instance
    \\*\\*kwargs:
        Keyword arguments used to create the new instance.

    Returns
    -------
    A new instance of the class.

    """
    for k in cls._attrs_to_copy + ('params', 'roots', 'init_indep', 'init_dep'):
        if k not in kwargs:
            val = getattr(ori, k)
            if val is not None:
                kwargs[k] = val

    if 'lower_bounds' not in kwargs and getattr(ori, 'lower_bounds') is not None:
        kwargs['lower_bounds'] = ori.lower_bounds

    if 'upper_bounds' not in kwargs and getattr(ori, 'upper_bounds') is not None:
        kwargs['upper_bounds'] = ori.upper_bounds

    if len(ori.pre_processors) > 0:
        if 'pre_processors' not in kwargs:
            kwargs['pre_processors'] = []
        kwargs['pre_processors'] = kwargs['pre_processors'] + ori.pre_processors

    if len(ori.post_processors) > 0:
        if 'post_processors' not in kwargs:
            kwargs['post_processors'] = []
        kwargs['post_processors'] = ori.post_processors + kwargs['post_processors']

    if 'dep_exprs' not in kwargs:
        kwargs['dep_exprs'] = zip(ori.dep, ori.exprs)
    if 'indep' not in kwargs:
        kwargs['indep'] = ori.indep

    instance = cls(**kwargs)
    for attr in ori._attrs_to_copy:
        if attr not in cls._attrs_to_copy:
            setattr(instance, attr, getattr(ori, attr))
    return instance
[ "def", "from_other", "(", "cls", ",", "ori", ",", "*", "*", "kwargs", ")", ":", "for", "k", "in", "cls", ".", "_attrs_to_copy", "+", "(", "'params'", ",", "'roots'", ",", "'init_indep'", ",", "'init_dep'", ")", ":", "if", "k", "not", "in", "kwargs", ":", "val", "=", "getattr", "(", "ori", ",", "k", ")", "if", "val", "is", "not", "None", ":", "kwargs", "[", "k", "]", "=", "val", "if", "'lower_bounds'", "not", "in", "kwargs", "and", "getattr", "(", "ori", ",", "'lower_bounds'", ")", "is", "not", "None", ":", "kwargs", "[", "'lower_bounds'", "]", "=", "ori", ".", "lower_bounds", "if", "'upper_bounds'", "not", "in", "kwargs", "and", "getattr", "(", "ori", ",", "'upper_bounds'", ")", "is", "not", "None", ":", "kwargs", "[", "'upper_bounds'", "]", "=", "ori", ".", "upper_bounds", "if", "len", "(", "ori", ".", "pre_processors", ")", ">", "0", ":", "if", "'pre_processors'", "not", "in", "kwargs", ":", "kwargs", "[", "'pre_processors'", "]", "=", "[", "]", "kwargs", "[", "'pre_processors'", "]", "=", "kwargs", "[", "'pre_processors'", "]", "+", "ori", ".", "pre_processors", "if", "len", "(", "ori", ".", "post_processors", ")", ">", "0", ":", "if", "'post_processors'", "not", "in", "kwargs", ":", "kwargs", "[", "'post_processors'", "]", "=", "[", "]", "kwargs", "[", "'post_processors'", "]", "=", "ori", ".", "post_processors", "+", "kwargs", "[", "'post_processors'", "]", "if", "'dep_exprs'", "not", "in", "kwargs", ":", "kwargs", "[", "'dep_exprs'", "]", "=", "zip", "(", "ori", ".", "dep", ",", "ori", ".", "exprs", ")", "if", "'indep'", "not", "in", "kwargs", ":", "kwargs", "[", "'indep'", "]", "=", "ori", ".", "indep", "instance", "=", "cls", "(", "*", "*", "kwargs", ")", "for", "attr", "in", "ori", ".", "_attrs_to_copy", ":", "if", "attr", "not", "in", "cls", ".", "_attrs_to_copy", ":", "setattr", "(", "instance", ",", "attr", ",", "getattr", "(", "ori", ",", "attr", ")", ")", "return", "instance" ]
Creates a new instance with an existing one as a template. Parameters ---------- ori : SymbolicSys instance \\*\\*kwargs: Keyword arguments used to create the new instance. Returns ------- A new instance of the class.
[ "Creates", "a", "new", "instance", "with", "an", "existing", "one", "as", "a", "template", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L466-L508
train
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jac
python
def get_jac(self):
    """ Derives the jacobian from ``self.exprs`` and ``self.dep``. """
    if self._jac is True:
        if self.sparse is True:
            self._jac, self._colptrs, self._rowvals = self.be.sparse_jacobian_csc(
                self.exprs, self.dep)
        elif self.band is not None:  # Banded
            self._jac = self.be.banded_jacobian(self.exprs, self.dep,
                                                *self.band)
        else:
            f = self.be.Matrix(1, self.ny, self.exprs)
            self._jac = f.jacobian(self.be.Matrix(1, self.ny, self.dep))
    elif self._jac is False:
        return False
    return self._jac
[ "def", "get_jac", "(", "self", ")", ":", "if", "self", ".", "_jac", "is", "True", ":", "if", "self", ".", "sparse", "is", "True", ":", "self", ".", "_jac", ",", "self", ".", "_colptrs", ",", "self", ".", "_rowvals", "=", "self", ".", "be", ".", "sparse_jacobian_csc", "(", "self", ".", "exprs", ",", "self", ".", "dep", ")", "elif", "self", ".", "band", "is", "not", "None", ":", "# Banded", "self", ".", "_jac", "=", "self", ".", "be", ".", "banded_jacobian", "(", "self", ".", "exprs", ",", "self", ".", "dep", ",", "*", "self", ".", "band", ")", "else", ":", "f", "=", "self", ".", "be", ".", "Matrix", "(", "1", ",", "self", ".", "ny", ",", "self", ".", "exprs", ")", "self", ".", "_jac", "=", "f", ".", "jacobian", "(", "self", ".", "be", ".", "Matrix", "(", "1", ",", "self", ".", "ny", ",", "self", ".", "dep", ")", ")", "elif", "self", ".", "_jac", "is", "False", ":", "return", "False", "return", "self", ".", "_jac" ]
Derives the jacobian from ``self.exprs`` and ``self.dep``.
[ "Derives", "the", "jacobian", "from", "self", ".", "exprs", "and", "self", ".", "dep", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L637-L650
train
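In the dense branch the work is done by sympy's ordinary Matrix.jacobian; a toy two-species example of the same computation in plain sympy:

import sympy

y0, y1, k = sympy.symbols('y0 y1 k')
f = sympy.Matrix(1, 2, [-k * y0, k * y0 - y1])  # dy0/dt, dy1/dt

jac = f.jacobian(sympy.Matrix(1, 2, [y0, y1]))
print(jac)  # Matrix([[-k, 0], [k, -1]])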
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jtimes
python
def get_jtimes(self):
    """ Derive the jacobian-vector product from ``self.exprs`` and ``self.dep``"""
    if self._jtimes is False:
        return False
    if self._jtimes is True:
        r = self.be.Dummy('r')
        v = tuple(self.be.Dummy('v_{0}'.format(i)) for i in range(self.ny))
        f = self.be.Matrix(1, self.ny, self.exprs)
        f = f.subs([(x_i, x_i + r * v_i) for x_i, v_i in zip(self.dep, v)])
        return v, self.be.flatten(f.diff(r).subs(r, 0))
    else:
        return tuple(zip(*self._jtimes))
[ "def", "get_jtimes", "(", "self", ")", ":", "if", "self", ".", "_jtimes", "is", "False", ":", "return", "False", "if", "self", ".", "_jtimes", "is", "True", ":", "r", "=", "self", ".", "be", ".", "Dummy", "(", "'r'", ")", "v", "=", "tuple", "(", "self", ".", "be", ".", "Dummy", "(", "'v_{0}'", ".", "format", "(", "i", ")", ")", "for", "i", "in", "range", "(", "self", ".", "ny", ")", ")", "f", "=", "self", ".", "be", ".", "Matrix", "(", "1", ",", "self", ".", "ny", ",", "self", ".", "exprs", ")", "f", "=", "f", ".", "subs", "(", "[", "(", "x_i", ",", "x_i", "+", "r", "*", "v_i", ")", "for", "x_i", ",", "v_i", "in", "zip", "(", "self", ".", "dep", ",", "v", ")", "]", ")", "return", "v", ",", "self", ".", "be", ".", "flatten", "(", "f", ".", "diff", "(", "r", ")", ".", "subs", "(", "r", ",", "0", ")", ")", "else", ":", "return", "tuple", "(", "zip", "(", "*", "self", ".", "_jtimes", ")", ")" ]
Derive the jacobian-vector product from ``self.exprs`` and ``self.dep``
[ "Derive", "the", "jacobian", "-", "vector", "product", "from", "self", ".", "exprs", "and", "self", ".", "dep" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L652-L664
train
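The dummy-parameter trick above yields the Jacobian-vector product J @ v without ever forming J: substitute y -> y + r*v, differentiate with respect to r, and evaluate at r = 0. The same idea in plain sympy:

import sympy

y0, y1, k, v0, v1 = sympy.symbols('y0 y1 k v0 v1')
r = sympy.Dummy('r')

f = sympy.Matrix([-k * y0, k * y0 - y1])
shifted = f.subs([(y0, y0 + r * v0), (y1, y1 + r * v1)])

jtimes = shifted.diff(r).subs(r, 0)
print(list(jtimes))  # [-k*v0, k*v0 - v1], i.e. J @ [v0, v1]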
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.jacobian_singular
python
def jacobian_singular(self):
    """ Returns True if the Jacobian is singular, else False. """
    cses, (jac_in_cses,) = self.be.cse(self.get_jac())
    return bool(jac_in_cses.nullspace())
[ "def", "jacobian_singular", "(", "self", ")", ":", "cses", ",", "(", "jac_in_cses", ",", ")", "=", "self", ".", "be", ".", "cse", "(", "self", ".", "get_jac", "(", ")", ")", "if", "jac_in_cses", ".", "nullspace", "(", ")", ":", "return", "True", "else", ":", "return", "False" ]
Returns True if Jacobian is singular, else False.
[ "Returns", "True", "if", "Jacobian", "is", "singular", "else", "False", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L666-L672
train
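The test relies on sympy's nullspace: a nonempty basis means the symbolic Jacobian is singular. For example, a system where the second species never feeds back (its column is zero) versus one that does:

import sympy

k = sympy.Symbol('k')

singular = sympy.Matrix([[-k, 0], [k, 0]])   # zero column -> singular
regular = sympy.Matrix([[-k, 0], [k, -1]])

print(bool(singular.nullspace()))  # True
print(bool(regular.nullspace()))   # False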
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_dfdx
python
def get_dfdx(self):
    """ Calculates the derivatives of ``self.exprs`` with respect to the
    independent variable. """
    if self._dfdx is True:
        if self.indep is None:
            zero = 0*self.be.Dummy()**0
            self._dfdx = self.be.Matrix(1, self.ny, [zero]*self.ny)
        else:
            self._dfdx = self.be.Matrix(1, self.ny, [expr.diff(self.indep)
                                                     for expr in self.exprs])
    elif self._dfdx is False:
        return False
    return self._dfdx
[ "def", "get_dfdx", "(", "self", ")", ":", "if", "self", ".", "_dfdx", "is", "True", ":", "if", "self", ".", "indep", "is", "None", ":", "zero", "=", "0", "*", "self", ".", "be", ".", "Dummy", "(", ")", "**", "0", "self", ".", "_dfdx", "=", "self", ".", "be", ".", "Matrix", "(", "1", ",", "self", ".", "ny", ",", "[", "zero", "]", "*", "self", ".", "ny", ")", "else", ":", "self", ".", "_dfdx", "=", "self", ".", "be", ".", "Matrix", "(", "1", ",", "self", ".", "ny", ",", "[", "expr", ".", "diff", "(", "self", ".", "indep", ")", "for", "expr", "in", "self", ".", "exprs", "]", ")", "elif", "self", ".", "_dfdx", "is", "False", ":", "return", "False", "return", "self", ".", "_dfdx" ]
Calculates 2nd derivatives of ``self.exprs``
[ "Calculates", "2nd", "derivatives", "of", "self", ".", "exprs" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L674-L684
train
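Put differently: for an autonomous system (``self.indep is None``) get_dfdx returns a row of zeros, otherwise each right-hand side is differentiated with respect to the independent variable. A plain SymPy sketch of the non-autonomous branch (invented expressions):

import sympy

t, y0 = sympy.symbols('t y0')
exprs = [sympy.exp(-t)*y0, y0**2]                    # the two right-hand sides
dfdx = sympy.Matrix(1, 2, [e.diff(t) for e in exprs])
print(dfdx)                                          # Matrix([[-y0*exp(-t), 0]])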
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_f_ty_callback
def get_f_ty_callback(self): """ Generates a callback for evaluating ``self.exprs``. """ cb = self._callback_factory(self.exprs) lb = self.lower_bounds ub = self.upper_bounds if lb is not None or ub is not None: def _bounds_wrapper(t, y, p=(), be=None): if lb is not None: if np.any(y < lb - 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y < lb] = lb[y < lb] if ub is not None: if np.any(y > ub + 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y > ub] = ub[y > ub] return cb(t, y, p, be) return _bounds_wrapper else: return cb
python
def get_f_ty_callback(self): """ Generates a callback for evaluating ``self.exprs``. """ cb = self._callback_factory(self.exprs) lb = self.lower_bounds ub = self.upper_bounds if lb is not None or ub is not None: def _bounds_wrapper(t, y, p=(), be=None): if lb is not None: if np.any(y < lb - 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y < lb] = lb[y < lb] if ub is not None: if np.any(y > ub + 10*self._current_integration_kwargs['atol']): raise RecoverableError y = np.array(y) y[y > ub] = ub[y > ub] return cb(t, y, p, be) return _bounds_wrapper else: return cb
[ "def", "get_f_ty_callback", "(", "self", ")", ":", "cb", "=", "self", ".", "_callback_factory", "(", "self", ".", "exprs", ")", "lb", "=", "self", ".", "lower_bounds", "ub", "=", "self", ".", "upper_bounds", "if", "lb", "is", "not", "None", "or", "ub", "is", "not", "None", ":", "def", "_bounds_wrapper", "(", "t", ",", "y", ",", "p", "=", "(", ")", ",", "be", "=", "None", ")", ":", "if", "lb", "is", "not", "None", ":", "if", "np", ".", "any", "(", "y", "<", "lb", "-", "10", "*", "self", ".", "_current_integration_kwargs", "[", "'atol'", "]", ")", ":", "raise", "RecoverableError", "y", "=", "np", ".", "array", "(", "y", ")", "y", "[", "y", "<", "lb", "]", "=", "lb", "[", "y", "<", "lb", "]", "if", "ub", "is", "not", "None", ":", "if", "np", ".", "any", "(", "y", ">", "ub", "+", "10", "*", "self", ".", "_current_integration_kwargs", "[", "'atol'", "]", ")", ":", "raise", "RecoverableError", "y", "=", "np", ".", "array", "(", "y", ")", "y", "[", "y", ">", "ub", "]", "=", "ub", "[", "y", ">", "ub", "]", "return", "cb", "(", "t", ",", "y", ",", "p", ",", "be", ")", "return", "_bounds_wrapper", "else", ":", "return", "cb" ]
Generates a callback for evaluating ``self.exprs``.
[ "Generates", "a", "callback", "for", "evaluating", "self", ".", "exprs", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L689-L709
train
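The bounds wrapper in get_f_ty_callback implements a two-tier policy: excursions more than 10*atol outside a bound raise a recoverable error (so the integrator can retry with a smaller step), while smaller violations are clamped before the RHS is evaluated. A self-contained NumPy sketch of that policy (``RecoverableError`` here is a stand-in for the integrator-specific exception):

import numpy as np

class RecoverableError(Exception):
    """Stand-in for the integrator's recoverable-failure signal."""

def clamp_or_reject(y, lb, ub, atol):
    y = np.array(y, dtype=float)
    if np.any(y < lb - 10*atol) or np.any(y > ub + 10*atol):
        raise RecoverableError          # far outside: let the solver retry
    return np.clip(y, lb, ub)           # small violation: clamp silently

print(clamp_or_reject([-1e-9, 0.5], lb=0.0, ub=1.0, atol=1e-8))  # [0.  0.5]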
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_j_ty_callback
def get_j_ty_callback(self): """ Generates a callback for evaluating the jacobian. """ j_exprs = self.get_jac() if j_exprs is False: return None cb = self._callback_factory(j_exprs) if self.sparse: from scipy.sparse import csc_matrix def sparse_cb(x, y, p=()): data = cb(x, y, p).flatten() return csc_matrix((data, self._rowvals, self._colptrs)) return sparse_cb else: return cb
python
def get_j_ty_callback(self): """ Generates a callback for evaluating the jacobian. """ j_exprs = self.get_jac() if j_exprs is False: return None cb = self._callback_factory(j_exprs) if self.sparse: from scipy.sparse import csc_matrix def sparse_cb(x, y, p=()): data = cb(x, y, p).flatten() return csc_matrix((data, self._rowvals, self._colptrs)) return sparse_cb else: return cb
[ "def", "get_j_ty_callback", "(", "self", ")", ":", "j_exprs", "=", "self", ".", "get_jac", "(", ")", "if", "j_exprs", "is", "False", ":", "return", "None", "cb", "=", "self", ".", "_callback_factory", "(", "j_exprs", ")", "if", "self", ".", "sparse", ":", "from", "scipy", ".", "sparse", "import", "csc_matrix", "def", "sparse_cb", "(", "x", ",", "y", ",", "p", "=", "(", ")", ")", ":", "data", "=", "cb", "(", "x", ",", "y", ",", "p", ")", ".", "flatten", "(", ")", "return", "csc_matrix", "(", "(", "data", ",", "self", ".", "_rowvals", ",", "self", ".", "_colptrs", ")", ")", "return", "sparse_cb", "else", ":", "return", "cb" ]
Generates a callback for evaluating the jacobian.
[ "Generates", "a", "callback", "for", "evaluating", "the", "jacobian", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L711-L726
train
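The sparse branch of get_j_ty_callback uses SciPy's three-argument CSC constructor: ``self._rowvals`` holds the row index of each stored entry and ``self._colptrs`` delimits which entries belong to each column. A small standalone example of that constructor form:

import numpy as np
from scipy.sparse import csc_matrix

data    = np.array([1., 2., 3.])   # stored entries, column-major order
rowvals = np.array([0, 1, 1])      # row index of each stored entry
colptrs = np.array([0, 2, 3])      # column j spans data[colptrs[j]:colptrs[j+1]]

J = csc_matrix((data, rowvals, colptrs), shape=(2, 2))
print(J.toarray())                 # [[1. 0.]
                                   #  [2. 3.]]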
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_dfdx_callback
def get_dfdx_callback(self): """ Generate a callback for evaluating derivative of ``self.exprs`` """ dfdx_exprs = self.get_dfdx() if dfdx_exprs is False: return None return self._callback_factory(dfdx_exprs)
python
def get_dfdx_callback(self): """ Generate a callback for evaluating derivative of ``self.exprs`` """ dfdx_exprs = self.get_dfdx() if dfdx_exprs is False: return None return self._callback_factory(dfdx_exprs)
[ "def", "get_dfdx_callback", "(", "self", ")", ":", "dfdx_exprs", "=", "self", ".", "get_dfdx", "(", ")", "if", "dfdx_exprs", "is", "False", ":", "return", "None", "return", "self", ".", "_callback_factory", "(", "dfdx_exprs", ")" ]
Generate a callback for evaluating derivative of ``self.exprs``
[ "Generate", "a", "callback", "for", "evaluating", "derivative", "of", "self", ".", "exprs" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L728-L733
train
bjodah/pyodesys
pyodesys/symbolic.py
SymbolicSys.get_jtimes_callback
def get_jtimes_callback(self): """ Generate a callback for evaluating the jacobian-vector product.""" jtimes = self.get_jtimes() if jtimes is False: return None v, jtimes_exprs = jtimes return _Callback(self.indep, tuple(self.dep) + tuple(v), self.params, jtimes_exprs, Lambdify=self.be.Lambdify)
python
def get_jtimes_callback(self): """ Generate a callback for evaluating the jacobian-vector product.""" jtimes = self.get_jtimes() if jtimes is False: return None v, jtimes_exprs = jtimes return _Callback(self.indep, tuple(self.dep) + tuple(v), self.params, jtimes_exprs, Lambdify=self.be.Lambdify)
[ "def", "get_jtimes_callback", "(", "self", ")", ":", "jtimes", "=", "self", ".", "get_jtimes", "(", ")", "if", "jtimes", "is", "False", ":", "return", "None", "v", ",", "jtimes_exprs", "=", "jtimes", "return", "_Callback", "(", "self", ".", "indep", ",", "tuple", "(", "self", ".", "dep", ")", "+", "tuple", "(", "v", ")", ",", "self", ".", "params", ",", "jtimes_exprs", ",", "Lambdify", "=", "self", ".", "be", ".", "Lambdify", ")" ]
Generate a callback for evaluating the jacobian-vector product.
[ "Generate", "a", "callback", "fro", "evaluating", "the", "jacobian", "-", "vector", "product", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L735-L742
train
bjodah/pyodesys
pyodesys/symbolic.py
PartiallySolvedSystem.from_linear_invariants
def from_linear_invariants(cls, ori_sys, preferred=None, **kwargs): """ Reformulates the ODE system in fewer variables. Given linear invariant equations one can always reduce the number of dependent variables in the system by the rank of the matrix describing this linear system. Parameters ---------- ori_sys : :class:`SymbolicSys` instance preferred : iterable of preferred dependent variables Due to numerical rounding it is preferable to choose the variables which are expected to be of the largest magnitude during integration. \*\*kwargs : Keyword arguments passed on to constructor. """ _be = ori_sys.be A = _be.Matrix(ori_sys.linear_invariants) rA, pivots = A.rref() if len(pivots) < A.shape[0]: # If the linear system contains rows which are linearly dependent, these could be removed. # The criterion for removal could be dictated by a user provided callback. # # An alternative would be to write the matrix in reduced row echelon form, however, # this would cause the invariants to become linear combinations of each other and # their intuitive meaning (original principles they were formulated from) will be lost. # Hence that is not the default behaviour. However, the user may choose to rewrite the # equations in reduced row echelon form if they choose to before calling this method. raise NotImplementedError("Linear invariants contain linear dependencies.") per_row_cols = [(ri, [ci for ci in range(A.cols) if A[ri, ci] != 0]) for ri in range(A.rows)] if preferred is None: preferred = ori_sys.names[:A.rows] if ori_sys.dep_by_name else list(range(A.rows)) targets = [ ori_sys.names.index(dep) if ori_sys.dep_by_name else ( dep if isinstance(dep, int) else ori_sys.dep.index(dep)) for dep in preferred] row_tgt = [] for ri, colids in sorted(per_row_cols, key=lambda k: len(k[1])): for tgt in targets: if tgt in colids: row_tgt.append((ri, tgt)) targets.remove(tgt) break if len(targets) == 0: break else: raise ValueError("Could not find a solution for: %s" % targets) def analytic_factory(x0, y0, p0, be): return { ori_sys.dep[tgt]: y0[ori_sys.dep[tgt] if ori_sys.dep_by_name else tgt] - sum( [A[ri, ci]*(ori_sys.dep[ci] - y0[ori_sys.dep[ci] if ori_sys.dep_by_name else ci]) for ci in range(A.cols) if ci != tgt])/A[ri, tgt] for ri, tgt in row_tgt } ori_li_nms = ori_sys.linear_invariant_names or () new_lin_invar = [[cell for ci, cell in enumerate(row) if ci not in list(zip(*row_tgt))[1]] for ri, row in enumerate(A.tolist()) if ri not in list(zip(*row_tgt))[0]] new_lin_i_nms = [nam for ri, nam in enumerate(ori_li_nms) if ri not in list(zip(*row_tgt))[0]] return cls(ori_sys, analytic_factory, linear_invariants=new_lin_invar, linear_invariant_names=new_lin_i_nms, **kwargs)
python
def from_linear_invariants(cls, ori_sys, preferred=None, **kwargs): """ Reformulates the ODE system in fewer variables. Given linear invariant equations one can always reduce the number of dependent variables in the system by the rank of the matrix describing this linear system. Parameters ---------- ori_sys : :class:`SymbolicSys` instance preferred : iterable of preferred dependent variables Due to numerical rounding it is preferable to choose the variables which are expected to be of the largest magnitude during integration. \*\*kwargs : Keyword arguments passed on to constructor. """ _be = ori_sys.be A = _be.Matrix(ori_sys.linear_invariants) rA, pivots = A.rref() if len(pivots) < A.shape[0]: # If the linear system contains rows which are linearly dependent, these could be removed. # The criterion for removal could be dictated by a user provided callback. # # An alternative would be to write the matrix in reduced row echelon form, however, # this would cause the invariants to become linear combinations of each other and # their intuitive meaning (original principles they were formulated from) will be lost. # Hence that is not the default behaviour. However, the user may choose to rewrite the # equations in reduced row echelon form if they choose to before calling this method. raise NotImplementedError("Linear invariants contain linear dependencies.") per_row_cols = [(ri, [ci for ci in range(A.cols) if A[ri, ci] != 0]) for ri in range(A.rows)] if preferred is None: preferred = ori_sys.names[:A.rows] if ori_sys.dep_by_name else list(range(A.rows)) targets = [ ori_sys.names.index(dep) if ori_sys.dep_by_name else ( dep if isinstance(dep, int) else ori_sys.dep.index(dep)) for dep in preferred] row_tgt = [] for ri, colids in sorted(per_row_cols, key=lambda k: len(k[1])): for tgt in targets: if tgt in colids: row_tgt.append((ri, tgt)) targets.remove(tgt) break if len(targets) == 0: break else: raise ValueError("Could not find a solution for: %s" % targets) def analytic_factory(x0, y0, p0, be): return { ori_sys.dep[tgt]: y0[ori_sys.dep[tgt] if ori_sys.dep_by_name else tgt] - sum( [A[ri, ci]*(ori_sys.dep[ci] - y0[ori_sys.dep[ci] if ori_sys.dep_by_name else ci]) for ci in range(A.cols) if ci != tgt])/A[ri, tgt] for ri, tgt in row_tgt } ori_li_nms = ori_sys.linear_invariant_names or () new_lin_invar = [[cell for ci, cell in enumerate(row) if ci not in list(zip(*row_tgt))[1]] for ri, row in enumerate(A.tolist()) if ri not in list(zip(*row_tgt))[0]] new_lin_i_nms = [nam for ri, nam in enumerate(ori_li_nms) if ri not in list(zip(*row_tgt))[0]] return cls(ori_sys, analytic_factory, linear_invariants=new_lin_invar, linear_invariant_names=new_lin_i_nms, **kwargs)
[ "def", "from_linear_invariants", "(", "cls", ",", "ori_sys", ",", "preferred", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_be", "=", "ori_sys", ".", "be", "A", "=", "_be", ".", "Matrix", "(", "ori_sys", ".", "linear_invariants", ")", "rA", ",", "pivots", "=", "A", ".", "rref", "(", ")", "if", "len", "(", "pivots", ")", "<", "A", ".", "shape", "[", "0", "]", ":", "# If the linear system contains rows which a linearly dependent these could be removed.", "# The criterion for removal could be dictated by a user provided callback.", "#", "# An alternative would be to write the matrix in reduced row echelon form, however,", "# this would cause the invariants to become linear combinations of each other and", "# their intuitive meaning (original principles they were formulated from) will be lost.", "# Hence that is not the default behaviour. However, the user may choose to rewrite the", "# equations in reduced row echelon form if they choose to before calling this method.", "raise", "NotImplementedError", "(", "\"Linear invariants contain linear dependencies.\"", ")", "per_row_cols", "=", "[", "(", "ri", ",", "[", "ci", "for", "ci", "in", "range", "(", "A", ".", "cols", ")", "if", "A", "[", "ri", ",", "ci", "]", "!=", "0", "]", ")", "for", "ri", "in", "range", "(", "A", ".", "rows", ")", "]", "if", "preferred", "is", "None", ":", "preferred", "=", "ori_sys", ".", "names", "[", ":", "A", ".", "rows", "]", "if", "ori_sys", ".", "dep_by_name", "else", "list", "(", "range", "(", "A", ".", "rows", ")", ")", "targets", "=", "[", "ori_sys", ".", "names", ".", "index", "(", "dep", ")", "if", "ori_sys", ".", "dep_by_name", "else", "(", "dep", "if", "isinstance", "(", "dep", ",", "int", ")", "else", "ori_sys", ".", "dep", ".", "index", "(", "dep", ")", ")", "for", "dep", "in", "preferred", "]", "row_tgt", "=", "[", "]", "for", "ri", ",", "colids", "in", "sorted", "(", "per_row_cols", ",", "key", "=", "lambda", "k", ":", "len", "(", "k", "[", "1", "]", ")", ")", ":", "for", "tgt", "in", "targets", ":", "if", "tgt", "in", "colids", ":", "row_tgt", ".", "append", "(", "(", "ri", ",", "tgt", ")", ")", "targets", ".", "remove", "(", "tgt", ")", "break", "if", "len", "(", "targets", ")", "==", "0", ":", "break", "else", ":", "raise", "ValueError", "(", "\"Could not find a solutions for: %s\"", "%", "targets", ")", "def", "analytic_factory", "(", "x0", ",", "y0", ",", "p0", ",", "be", ")", ":", "return", "{", "ori_sys", ".", "dep", "[", "tgt", "]", ":", "y0", "[", "ori_sys", ".", "dep", "[", "tgt", "]", "if", "ori_sys", ".", "dep_by_name", "else", "tgt", "]", "-", "sum", "(", "[", "A", "[", "ri", ",", "ci", "]", "*", "(", "ori_sys", ".", "dep", "[", "ci", "]", "-", "y0", "[", "ori_sys", ".", "dep", "[", "ci", "]", "if", "ori_sys", ".", "dep_by_name", "else", "ci", "]", ")", "for", "ci", "in", "range", "(", "A", ".", "cols", ")", "if", "ci", "!=", "tgt", "]", ")", "/", "A", "[", "ri", ",", "tgt", "]", "for", "ri", ",", "tgt", "in", "row_tgt", "}", "ori_li_nms", "=", "ori_sys", ".", "linear_invariant_names", "or", "(", ")", "new_lin_invar", "=", "[", "[", "cell", "for", "ci", ",", "cell", "in", "enumerate", "(", "row", ")", "if", "ci", "not", "in", "list", "(", "zip", "(", "*", "row_tgt", ")", ")", "[", "1", "]", "]", "for", "ri", ",", "row", "in", "enumerate", "(", "A", ".", "tolist", "(", ")", ")", "if", "ri", "not", "in", "list", "(", "zip", "(", "*", "row_tgt", ")", ")", "[", "0", "]", "]", "new_lin_i_nms", "=", "[", "nam", "for", "ri", ",", "nam", "in", "enumerate", "(", "ori_li_nms", ")", "if", "ri", 
"not", "in", "list", "(", "zip", "(", "*", "row_tgt", ")", ")", "[", "0", "]", "]", "return", "cls", "(", "ori_sys", ",", "analytic_factory", ",", "linear_invariants", "=", "new_lin_invar", ",", "linear_invariant_names", "=", "new_lin_i_nms", ",", "*", "*", "kwargs", ")" ]
Reformulates the ODE system in fewer variables. Given linear invariant equations one can always reduce the number of dependent variables in the system by the rank of the matrix describing this linear system. Parameters ---------- ori_sys : :class:`SymbolicSys` instance preferred : iterable of preferred dependent variables Due to numerical rounding it is preferable to choose the variables which are expected to be of the largest magnitude during integration. \*\*kwargs : Keyword arguments passed on to constructor.
[ "Reformulates", "the", "ODE", "system", "in", "fewer", "variables", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L1275-L1335
train
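As a usage sketch of from_linear_invariants (hedged: it assumes ``SymbolicSys.from_callback`` forwards a ``linear_invariants`` keyword to the constructor, which the attribute access above suggests but this excerpt does not show): for A -> B decay the conservation law y0 + y1 = const lets one dependent variable be eliminated.

from pyodesys.symbolic import SymbolicSys, PartiallySolvedSystem

odesys = SymbolicSys.from_callback(
    lambda t, y, p: [-p[0]*y[0], p[0]*y[0]],   # A -> B with rate p[0]
    2, 1,
    linear_invariants=[[1, 1]])                # 1*y0 + 1*y1 is conserved

reduced = PartiallySolvedSystem.from_linear_invariants(odesys)
print(reduced.ny)                              # expected: 1 (one variable fewer)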
bjodah/pyodesys
pyodesys/core.py
chained_parameter_variation
def chained_parameter_variation(subject, durations, y0, varied_params, default_params=None, integrate_kwargs=None, x0=None, npoints=1, numpy=None): """ Integrate an ODE-system for a series of durations with some parameters changed in-between Parameters ---------- subject : function or ODESys instance If a function: should have the signature of :meth:`pyodesys.ODESys.integrate` (and return a :class:`pyodesys.results.Result` object). If an ODESys instance: the ``integrate`` method will be used. durations : iterable of floats Spans of the independent variable. y0 : dict or array_like varied_params : dict mapping parameter name (or index) to array_like Each array_like needs to be of the same length as durations. default_params : dict or array_like Default values for the parameters of the ODE system. integrate_kwargs : dict Keyword arguments passed on to ``integrate``. x0 : float-like First value of independent variable. default: 0. npoints : int Number of points per sub-interval. Examples -------- >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]]) >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12) >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw) >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs) >>> mask1 = res.xout <= 2 >>> import numpy as np >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1])) True >>> mask2 = 2 <= res.xout >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0]))) True """ assert len(durations) > 0, 'need at least 1 duration (preferably many)' assert npoints > 0, 'need at least 1 point per duration' for k, v in varied_params.items(): if len(v) != len(durations): raise ValueError("Mismatched lengths of durations and varied_params") if isinstance(subject, ODESys): integrate = subject.integrate numpy = numpy or subject.numpy else: integrate = subject numpy = numpy or np default_params = default_params or {} integrate_kwargs = integrate_kwargs or {} def _get_idx(cont, idx): if isinstance(cont, dict): return {k: (v[idx] if hasattr(v, '__len__') and getattr(v, 'ndim', 1) > 0 else v) for k, v in cont.items()} else: return cont[idx] durations = numpy.cumsum(durations) for idx_dur in range(len(durations)): params = copy.copy(default_params) for k, v in varied_params.items(): params[k] = v[idx_dur] if idx_dur == 0: if x0 is None: x0 = durations[0]*0 out = integrate(numpy.linspace(x0, durations[0], npoints + 1), y0, params, **integrate_kwargs) else: if isinstance(out, Result): out.extend_by_integration(durations[idx_dur], params, npoints=npoints, **integrate_kwargs) else: for idx_res, r in enumerate(out): r.extend_by_integration(durations[idx_dur], _get_idx(params, idx_res), npoints=npoints, **integrate_kwargs) return out
python
def chained_parameter_variation(subject, durations, y0, varied_params, default_params=None, integrate_kwargs=None, x0=None, npoints=1, numpy=None): """ Integrate an ODE-system for a series of durations with some parameters changed in-between Parameters ---------- subject : function or ODESys instance If a function: should have the signature of :meth:`pyodesys.ODESys.integrate` (and return a :class:`pyodesys.results.Result` object). If an ODESys instance: the ``integrate`` method will be used. durations : iterable of floats Spans of the independent variable. y0 : dict or array_like varied_params : dict mapping parameter name (or index) to array_like Each array_like needs to be of the same length as durations. default_params : dict or array_like Default values for the parameters of the ODE system. integrate_kwargs : dict Keyword arguments passed on to ``integrate``. x0 : float-like First value of independent variable. default: 0. npoints : int Number of points per sub-interval. Examples -------- >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]]) >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12) >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw) >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs) >>> mask1 = res.xout <= 2 >>> import numpy as np >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1])) True >>> mask2 = 2 <= res.xout >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0]))) True """ assert len(durations) > 0, 'need at least 1 duration (preferably many)' assert npoints > 0, 'need at least 1 point per duration' for k, v in varied_params.items(): if len(v) != len(durations): raise ValueError("Mismatched lengths of durations and varied_params") if isinstance(subject, ODESys): integrate = subject.integrate numpy = numpy or subject.numpy else: integrate = subject numpy = numpy or np default_params = default_params or {} integrate_kwargs = integrate_kwargs or {} def _get_idx(cont, idx): if isinstance(cont, dict): return {k: (v[idx] if hasattr(v, '__len__') and getattr(v, 'ndim', 1) > 0 else v) for k, v in cont.items()} else: return cont[idx] durations = numpy.cumsum(durations) for idx_dur in range(len(durations)): params = copy.copy(default_params) for k, v in varied_params.items(): params[k] = v[idx_dur] if idx_dur == 0: if x0 is None: x0 = durations[0]*0 out = integrate(numpy.linspace(x0, durations[0], npoints + 1), y0, params, **integrate_kwargs) else: if isinstance(out, Result): out.extend_by_integration(durations[idx_dur], params, npoints=npoints, **integrate_kwargs) else: for idx_res, r in enumerate(out): r.extend_by_integration(durations[idx_dur], _get_idx(params, idx_res), npoints=npoints, **integrate_kwargs) return out
[ "def", "chained_parameter_variation", "(", "subject", ",", "durations", ",", "y0", ",", "varied_params", ",", "default_params", "=", "None", ",", "integrate_kwargs", "=", "None", ",", "x0", "=", "None", ",", "npoints", "=", "1", ",", "numpy", "=", "None", ")", ":", "assert", "len", "(", "durations", ")", ">", "0", ",", "'need at least 1 duration (preferably many)'", "assert", "npoints", ">", "0", ",", "'need at least 1 point per duration'", "for", "k", ",", "v", "in", "varied_params", ".", "items", "(", ")", ":", "if", "len", "(", "v", ")", "!=", "len", "(", "durations", ")", ":", "raise", "ValueError", "(", "\"Mismathced lengths of durations and varied_params\"", ")", "if", "isinstance", "(", "subject", ",", "ODESys", ")", ":", "integrate", "=", "subject", ".", "integrate", "numpy", "=", "numpy", "or", "subject", ".", "numpy", "else", ":", "integrate", "=", "subject", "numpy", "=", "numpy", "or", "np", "default_params", "=", "default_params", "or", "{", "}", "integrate_kwargs", "=", "integrate_kwargs", "or", "{", "}", "def", "_get_idx", "(", "cont", ",", "idx", ")", ":", "if", "isinstance", "(", "cont", ",", "dict", ")", ":", "return", "{", "k", ":", "(", "v", "[", "idx", "]", "if", "hasattr", "(", "v", ",", "'__len__'", ")", "and", "getattr", "(", "v", ",", "'ndim'", ",", "1", ")", ">", "0", "else", "v", ")", "for", "k", ",", "v", "in", "cont", ".", "items", "(", ")", "}", "else", ":", "return", "cont", "[", "idx", "]", "durations", "=", "numpy", ".", "cumsum", "(", "durations", ")", "for", "idx_dur", "in", "range", "(", "len", "(", "durations", ")", ")", ":", "params", "=", "copy", ".", "copy", "(", "default_params", ")", "for", "k", ",", "v", "in", "varied_params", ".", "items", "(", ")", ":", "params", "[", "k", "]", "=", "v", "[", "idx_dur", "]", "if", "idx_dur", "==", "0", ":", "if", "x0", "is", "None", ":", "x0", "=", "durations", "[", "0", "]", "*", "0", "out", "=", "integrate", "(", "numpy", ".", "linspace", "(", "x0", ",", "durations", "[", "0", "]", ",", "npoints", "+", "1", ")", ",", "y0", ",", "params", ",", "*", "*", "integrate_kwargs", ")", "else", ":", "if", "isinstance", "(", "out", ",", "Result", ")", ":", "out", ".", "extend_by_integration", "(", "durations", "[", "idx_dur", "]", ",", "params", ",", "npoints", "=", "npoints", ",", "*", "*", "integrate_kwargs", ")", "else", ":", "for", "idx_res", ",", "r", "in", "enumerate", "(", "out", ")", ":", "r", ".", "extend_by_integration", "(", "durations", "[", "idx_dur", "]", ",", "_get_idx", "(", "params", ",", "idx_res", ")", ",", "npoints", "=", "npoints", ",", "*", "*", "integrate_kwargs", ")", "return", "out" ]
Integrate an ODE-system for a series of durations with some parameters changed in-between Parameters ---------- subject : function or ODESys instance If a function: should have the signature of :meth:`pyodesys.ODESys.integrate` (and return a :class:`pyodesys.results.Result` object). If an ODESys instance: the ``integrate`` method will be used. durations : iterable of floats Spans of the independent variable. y0 : dict or array_like varied_params : dict mapping parameter name (or index) to array_like Each array_like needs to be of the same length as durations. default_params : dict or array_like Default values for the parameters of the ODE system. integrate_kwargs : dict Keyword arguments passed on to ``integrate``. x0 : float-like First value of independent variable. default: 0. npoints : int Number of points per sub-interval. Examples -------- >>> odesys = ODESys(lambda t, y, p: [-p[0]*y[0]]) >>> int_kw = dict(integrator='cvode', method='adams', atol=1e-12, rtol=1e-12) >>> kwargs = dict(default_params=[0], integrate_kwargs=int_kw) >>> res = chained_parameter_variation(odesys, [2, 3], [42], {0: [.7, .1]}, **kwargs) >>> mask1 = res.xout <= 2 >>> import numpy as np >>> np.allclose(res.yout[mask1, 0], 42*np.exp(-.7*res.xout[mask1])) True >>> mask2 = 2 <= res.xout >>> np.allclose(res.yout[mask2, 0], res.yout[mask2, 0][0]*np.exp(-.1*(res.xout[mask2] - res.xout[mask2][0]))) True
[ "Integrate", "an", "ODE", "-", "system", "for", "a", "serie", "of", "durations", "with", "some", "parameters", "changed", "in", "-", "between" ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L917-L996
train
bjodah/pyodesys
pyodesys/core.py
ODESys.pre_process
def pre_process(self, xout, y0, params=()): """ Transforms input to internal values, used internally. """ for pre_processor in self.pre_processors: xout, y0, params = pre_processor(xout, y0, params) return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)]
python
def pre_process(self, xout, y0, params=()): """ Transforms input to internal values, used internally. """ for pre_processor in self.pre_processors: xout, y0, params = pre_processor(xout, y0, params) return [self.numpy.atleast_1d(arr) for arr in (xout, y0, params)]
[ "def", "pre_process", "(", "self", ",", "xout", ",", "y0", ",", "params", "=", "(", ")", ")", ":", "for", "pre_processor", "in", "self", ".", "pre_processors", ":", "xout", ",", "y0", ",", "params", "=", "pre_processor", "(", "xout", ",", "y0", ",", "params", ")", "return", "[", "self", ".", "numpy", ".", "atleast_1d", "(", "arr", ")", "for", "arr", "in", "(", "xout", ",", "y0", ",", "params", ")", "]" ]
Transforms input to internal values, used internally.
[ "Transforms", "input", "to", "internal", "values", "used", "internally", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L286-L290
train
bjodah/pyodesys
pyodesys/core.py
ODESys.post_process
def post_process(self, xout, yout, params): """ Transforms internal values to output, used internally. """ for post_processor in self.post_processors: xout, yout, params = post_processor(xout, yout, params) return xout, yout, params
python
def post_process(self, xout, yout, params): """ Transforms internal values to output, used internally. """ for post_processor in self.post_processors: xout, yout, params = post_processor(xout, yout, params) return xout, yout, params
[ "def", "post_process", "(", "self", ",", "xout", ",", "yout", ",", "params", ")", ":", "for", "post_processor", "in", "self", ".", "post_processors", ":", "xout", ",", "yout", ",", "params", "=", "post_processor", "(", "xout", ",", "yout", ",", "params", ")", "return", "xout", ",", "yout", ",", "params" ]
Transforms internal values to output, used internally.
[ "Transforms", "internal", "values", "to", "output", "used", "internally", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L292-L296
train
bjodah/pyodesys
pyodesys/core.py
ODESys.adaptive
def adaptive(self, y0, x0, xend, params=(), **kwargs): """ Integrate with integrator-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. x0 : float Initial value of the independent variable. xend : float Final value of the independent variable. params : array_like See :meth:`integrate`. \*\*kwargs : See :meth:`integrate`. Returns ------- Same as :meth:`integrate` """ return self.integrate((x0, xend), y0, params=params, **kwargs)
python
def adaptive(self, y0, x0, xend, params=(), **kwargs): """ Integrate with integrator-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. x0 : float Initial value of the independent variable. xend : float Final value of the independent variable. params : array_like See :meth:`integrate`. \*\*kwargs : See :meth:`integrate`. Returns ------- Same as :meth:`integrate` """ return self.integrate((x0, xend), y0, params=params, **kwargs)
[ "def", "adaptive", "(", "self", ",", "y0", ",", "x0", ",", "xend", ",", "params", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "integrate", "(", "(", "x0", ",", "xend", ")", ",", "y0", ",", "params", "=", "params", ",", "*", "*", "kwargs", ")" ]
Integrate with integrator-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. x0 : float Initial value of the independent variable. xend : float Final value of the independent variable. params : array_like See :meth:`integrate`. \*\*kwargs : See :meth:`integrate`. Returns ------- Same as :meth:`integrate`
[ "Integrate", "with", "integrator", "chosen", "output", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L298-L321
train
bjodah/pyodesys
pyodesys/core.py
ODESys.predefined
def predefined(self, y0, xout, params=(), **kwargs): """ Integrate with user-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. xout : array_like params : array_like See :meth:`integrate`. \*\*kwargs: See :meth:`integrate` Returns ------- Length 2 tuple : (yout, info) See :meth:`integrate`. """ xout, yout, info = self.integrate(xout, y0, params=params, force_predefined=True, **kwargs) return yout, info
python
def predefined(self, y0, xout, params=(), **kwargs): """ Integrate with user-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. xout : array_like params : array_like See :meth:`integrate`. \*\*kwargs: See :meth:`integrate` Returns ------- Length 2 tuple : (yout, info) See :meth:`integrate`. """ xout, yout, info = self.integrate(xout, y0, params=params, force_predefined=True, **kwargs) return yout, info
[ "def", "predefined", "(", "self", ",", "y0", ",", "xout", ",", "params", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "xout", ",", "yout", ",", "info", "=", "self", ".", "integrate", "(", "xout", ",", "y0", ",", "params", "=", "params", ",", "force_predefined", "=", "True", ",", "*", "*", "kwargs", ")", "return", "yout", ",", "info" ]
Integrate with user-chosen output. Parameters ---------- integrator : str See :meth:`integrate`. y0 : array_like See :meth:`integrate`. xout : array_like params : array_like See :meth:`integrate`. \*\*kwargs: See :meth:`integrate` Returns ------- Length 2 tuple : (yout, info) See :meth:`integrate`.
[ "Integrate", "with", "user", "chosen", "output", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L323-L345
train
bjodah/pyodesys
pyodesys/core.py
ODESys.integrate
def integrate(self, x, y0, params=(), atol=1e-8, rtol=1e-8, **kwargs): """ Integrate the system of ordinary differential equations. Solves the initial value problem (IVP). Parameters ---------- x : array_like or pair (start and final time) or float if float: make it a pair: (0, x) if pair or length-2 array: initial and final value of the independent variable if array_like: values of the independent variable to report at y0 : array_like Initial values at x[0] for the dependent variables. params : array_like (default: tuple()) Value of parameters passed to user-supplied callbacks. integrator : str or None Name of integrator, one of: - 'scipy': :meth:`_integrate_scipy` - 'gsl': :meth:`_integrate_gsl` - 'odeint': :meth:`_integrate_odeint` - 'cvode': :meth:`_integrate_cvode` See respective method for more information. If ``None``: ``os.environ.get('PYODESYS_INTEGRATOR', 'scipy')`` atol : float Absolute tolerance rtol : float Relative tolerance with_jacobian : bool or None (default) Whether to use the jacobian. When ``None`` the choice is done automatically (only used when required). This matters when jacobian is derived at runtime (high computational cost). with_jtimes : bool (default: False) Whether to use the jacobian-vector product. This is only supported by ``cvode`` and only when ``linear_solver`` is one of: 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'. See the documentation for ``pycvodes`` for more information. force_predefined : bool (default: False) override behaviour of ``len(x) == 2`` => :meth:`adaptive` \\*\\*kwargs : Additional keyword arguments for ``_integrate_$(integrator)``. Returns ------- Length 3 tuple: (x, yout, info) x : array of values of the independent variable yout : array of the dependent variable(s) for the different values of x. info : dict ('nfev' is guaranteed to be a key) """ arrs = self.to_arrays(x, y0, params) _x, _y, _p = _arrs = self.pre_process(*arrs) ndims = [a.ndim for a in _arrs] if ndims == [1, 1, 1]: twodim = False elif ndims == [2, 2, 2]: twodim = True else: raise ValueError("Pre-processor made ndims inconsistent?") if self.append_iv: _p = self.numpy.concatenate((_p, _y), axis=-1) if hasattr(self, 'ny'): if _y.shape[-1] != self.ny: raise ValueError("Incorrect shape of intern_y0") if isinstance(atol, dict): kwargs['atol'] = [atol[k] for k in self.names] else: kwargs['atol'] = atol kwargs['rtol'] = rtol integrator = kwargs.pop('integrator', None) if integrator is None: integrator = os.environ.get('PYODESYS_INTEGRATOR', 'scipy') args = tuple(map(self.numpy.atleast_2d, (_x, _y, _p))) self._current_integration_kwargs = kwargs if isinstance(integrator, str): nfo = getattr(self, '_integrate_' + integrator)(*args, **kwargs) else: kwargs['with_jacobian'] = getattr(integrator, 'with_jacobian', None) nfo = self._integrate(integrator.integrate_adaptive, integrator.integrate_predefined, *args, **kwargs) if twodim: _xout = [d['internal_xout'] for d in nfo] _yout = [d['internal_yout'] for d in nfo] _params = [d['internal_params'] for d in nfo] res = [Result(*(self.post_process(_xout[i], _yout[i], _params[i]) + (nfo[i], self))) for i in range(len(nfo))] else: _xout = nfo[0]['internal_xout'] _yout = nfo[0]['internal_yout'] self._internal = _xout.copy(), _yout.copy(), _p.copy() nfo = nfo[0] res = Result(*(self.post_process(_xout, _yout, _p) + (nfo, self))) return res
python
def integrate(self, x, y0, params=(), atol=1e-8, rtol=1e-8, **kwargs): """ Integrate the system of ordinary differential equations. Solves the initial value problem (IVP). Parameters ---------- x : array_like or pair (start and final time) or float if float: make it a pair: (0, x) if pair or length-2 array: initial and final value of the independent variable if array_like: values of the independent variable to report at y0 : array_like Initial values at x[0] for the dependent variables. params : array_like (default: tuple()) Value of parameters passed to user-supplied callbacks. integrator : str or None Name of integrator, one of: - 'scipy': :meth:`_integrate_scipy` - 'gsl': :meth:`_integrate_gsl` - 'odeint': :meth:`_integrate_odeint` - 'cvode': :meth:`_integrate_cvode` See respective method for more information. If ``None``: ``os.environ.get('PYODESYS_INTEGRATOR', 'scipy')`` atol : float Absolute tolerance rtol : float Relative tolerance with_jacobian : bool or None (default) Whether to use the jacobian. When ``None`` the choice is done automatically (only used when required). This matters when jacobian is derived at runtime (high computational cost). with_jtimes : bool (default: False) Whether to use the jacobian-vector product. This is only supported by ``cvode`` and only when ``linear_solver`` is one of: 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'. See the documentation for ``pycvodes`` for more information. force_predefined : bool (default: False) override behaviour of ``len(x) == 2`` => :meth:`adaptive` \\*\\*kwargs : Additional keyword arguments for ``_integrate_$(integrator)``. Returns ------- Length 3 tuple: (x, yout, info) x : array of values of the independent variable yout : array of the dependent variable(s) for the different values of x. info : dict ('nfev' is guaranteed to be a key) """ arrs = self.to_arrays(x, y0, params) _x, _y, _p = _arrs = self.pre_process(*arrs) ndims = [a.ndim for a in _arrs] if ndims == [1, 1, 1]: twodim = False elif ndims == [2, 2, 2]: twodim = True else: raise ValueError("Pre-processor made ndims inconsistent?") if self.append_iv: _p = self.numpy.concatenate((_p, _y), axis=-1) if hasattr(self, 'ny'): if _y.shape[-1] != self.ny: raise ValueError("Incorrect shape of intern_y0") if isinstance(atol, dict): kwargs['atol'] = [atol[k] for k in self.names] else: kwargs['atol'] = atol kwargs['rtol'] = rtol integrator = kwargs.pop('integrator', None) if integrator is None: integrator = os.environ.get('PYODESYS_INTEGRATOR', 'scipy') args = tuple(map(self.numpy.atleast_2d, (_x, _y, _p))) self._current_integration_kwargs = kwargs if isinstance(integrator, str): nfo = getattr(self, '_integrate_' + integrator)(*args, **kwargs) else: kwargs['with_jacobian'] = getattr(integrator, 'with_jacobian', None) nfo = self._integrate(integrator.integrate_adaptive, integrator.integrate_predefined, *args, **kwargs) if twodim: _xout = [d['internal_xout'] for d in nfo] _yout = [d['internal_yout'] for d in nfo] _params = [d['internal_params'] for d in nfo] res = [Result(*(self.post_process(_xout[i], _yout[i], _params[i]) + (nfo[i], self))) for i in range(len(nfo))] else: _xout = nfo[0]['internal_xout'] _yout = nfo[0]['internal_yout'] self._internal = _xout.copy(), _yout.copy(), _p.copy() nfo = nfo[0] res = Result(*(self.post_process(_xout, _yout, _p) + (nfo, self))) return res
[ "def", "integrate", "(", "self", ",", "x", ",", "y0", ",", "params", "=", "(", ")", ",", "atol", "=", "1e-8", ",", "rtol", "=", "1e-8", ",", "*", "*", "kwargs", ")", ":", "arrs", "=", "self", ".", "to_arrays", "(", "x", ",", "y0", ",", "params", ")", "_x", ",", "_y", ",", "_p", "=", "_arrs", "=", "self", ".", "pre_process", "(", "*", "arrs", ")", "ndims", "=", "[", "a", ".", "ndim", "for", "a", "in", "_arrs", "]", "if", "ndims", "==", "[", "1", ",", "1", ",", "1", "]", ":", "twodim", "=", "False", "elif", "ndims", "==", "[", "2", ",", "2", ",", "2", "]", ":", "twodim", "=", "True", "else", ":", "raise", "ValueError", "(", "\"Pre-processor made ndims inconsistent?\"", ")", "if", "self", ".", "append_iv", ":", "_p", "=", "self", ".", "numpy", ".", "concatenate", "(", "(", "_p", ",", "_y", ")", ",", "axis", "=", "-", "1", ")", "if", "hasattr", "(", "self", ",", "'ny'", ")", ":", "if", "_y", ".", "shape", "[", "-", "1", "]", "!=", "self", ".", "ny", ":", "raise", "ValueError", "(", "\"Incorrect shape of intern_y0\"", ")", "if", "isinstance", "(", "atol", ",", "dict", ")", ":", "kwargs", "[", "'atol'", "]", "=", "[", "atol", "[", "k", "]", "for", "k", "in", "self", ".", "names", "]", "else", ":", "kwargs", "[", "'atol'", "]", "=", "atol", "kwargs", "[", "'rtol'", "]", "=", "rtol", "integrator", "=", "kwargs", ".", "pop", "(", "'integrator'", ",", "None", ")", "if", "integrator", "is", "None", ":", "integrator", "=", "os", ".", "environ", ".", "get", "(", "'PYODESYS_INTEGRATOR'", ",", "'scipy'", ")", "args", "=", "tuple", "(", "map", "(", "self", ".", "numpy", ".", "atleast_2d", ",", "(", "_x", ",", "_y", ",", "_p", ")", ")", ")", "self", ".", "_current_integration_kwargs", "=", "kwargs", "if", "isinstance", "(", "integrator", ",", "str", ")", ":", "nfo", "=", "getattr", "(", "self", ",", "'_integrate_'", "+", "integrator", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "kwargs", "[", "'with_jacobian'", "]", "=", "getattr", "(", "integrator", ",", "'with_jacobian'", ",", "None", ")", "nfo", "=", "self", ".", "_integrate", "(", "integrator", ".", "integrate_adaptive", ",", "integrator", ".", "integrate_predefined", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "twodim", ":", "_xout", "=", "[", "d", "[", "'internal_xout'", "]", "for", "d", "in", "nfo", "]", "_yout", "=", "[", "d", "[", "'internal_yout'", "]", "for", "d", "in", "nfo", "]", "_params", "=", "[", "d", "[", "'internal_params'", "]", "for", "d", "in", "nfo", "]", "res", "=", "[", "Result", "(", "*", "(", "self", ".", "post_process", "(", "_xout", "[", "i", "]", ",", "_yout", "[", "i", "]", ",", "_params", "[", "i", "]", ")", "+", "(", "nfo", "[", "i", "]", ",", "self", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "nfo", ")", ")", "]", "else", ":", "_xout", "=", "nfo", "[", "0", "]", "[", "'internal_xout'", "]", "_yout", "=", "nfo", "[", "0", "]", "[", "'internal_yout'", "]", "self", ".", "_internal", "=", "_xout", ".", "copy", "(", ")", ",", "_yout", ".", "copy", "(", ")", ",", "_p", ".", "copy", "(", ")", "nfo", "=", "nfo", "[", "0", "]", "res", "=", "Result", "(", "*", "(", "self", ".", "post_process", "(", "_xout", ",", "_yout", ",", "_p", ")", "+", "(", "nfo", ",", "self", ")", ")", ")", "return", "res" ]
Integrate the system of ordinary differential equations. Solves the initial value problem (IVP). Parameters ---------- x : array_like or pair (start and final time) or float if float: make it a pair: (0, x) if pair or length-2 array: initial and final value of the independent variable if array_like: values of the independent variable to report at y0 : array_like Initial values at x[0] for the dependent variables. params : array_like (default: tuple()) Value of parameters passed to user-supplied callbacks. integrator : str or None Name of integrator, one of: - 'scipy': :meth:`_integrate_scipy` - 'gsl': :meth:`_integrate_gsl` - 'odeint': :meth:`_integrate_odeint` - 'cvode': :meth:`_integrate_cvode` See respective method for more information. If ``None``: ``os.environ.get('PYODESYS_INTEGRATOR', 'scipy')`` atol : float Absolute tolerance rtol : float Relative tolerance with_jacobian : bool or None (default) Whether to use the jacobian. When ``None`` the choice is done automatically (only used when required). This matters when jacobian is derived at runtime (high computational cost). with_jtimes : bool (default: False) Whether to use the jacobian-vector product. This is only supported by ``cvode`` and only when ``linear_solver`` is one of: 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'. See the documentation for ``pycvodes`` for more information. force_predefined : bool (default: False) override behaviour of ``len(x) == 2`` => :meth:`adaptive` \\*\\*kwargs : Additional keyword arguments for ``_integrate_$(integrator)``. Returns ------- Length 3 tuple: (x, yout, info) x : array of values of the independent variable yout : array of the dependent variable(s) for the different values of x. info : dict ('nfev' is guaranteed to be a key)
[ "Integrate", "the", "system", "of", "ordinary", "differential", "equations", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L347-L449
train
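A minimal end-to-end call of integrate, mirroring the decay system from the ``chained_parameter_variation`` doctest above (a float ``x`` is expanded to the pair ``(0, x)``):

import numpy as np
from pyodesys.core import ODESys

odesys = ODESys(lambda t, y, p: [-p[0]*y[0]])            # dy/dt = -p0*y
res = odesys.integrate(1.0, [42.0], [0.7], integrator='scipy')
assert np.allclose(res.yout[:, 0], 42*np.exp(-0.7*res.xout))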
bjodah/pyodesys
pyodesys/core.py
ODESys.plot_phase_plane
def plot_phase_plane(self, indices=None, **kwargs): """ Plots a phase portrait from last integration. This method will be deprecated. Please use :meth:`Result.plot_phase_plane`. See :func:`pyodesys.plotting.plot_phase_plane` """ return self._plot(plot_phase_plane, indices=indices, **kwargs)
python
def plot_phase_plane(self, indices=None, **kwargs): """ Plots a phase portrait from last integration. This method will be deprecated. Please use :meth:`Result.plot_phase_plane`. See :func:`pyodesys.plotting.plot_phase_plane` """ return self._plot(plot_phase_plane, indices=indices, **kwargs)
[ "def", "plot_phase_plane", "(", "self", ",", "indices", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_plot", "(", "plot_phase_plane", ",", "indices", "=", "indices", ",", "*", "*", "kwargs", ")" ]
Plots a phase portrait from last integration. This method will be deprecated. Please use :meth:`Result.plot_phase_plane`. See :func:`pyodesys.plotting.plot_phase_plane`
[ "Plots", "a", "phase", "portrait", "from", "last", "integration", "." ]
0034a6165b550d8d9808baef58678dca5a493ab7
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/core.py#L756-L762
train
neon-jungle/wagtailnews
wagtailnews/permissions.py
user_can_edit_news
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
python
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
[ "def", "user_can_edit_news", "(", "user", ")", ":", "newsitem_models", "=", "[", "model", ".", "get_newsitem_model", "(", ")", "for", "model", "in", "NEWSINDEX_MODEL_CLASSES", "]", "if", "user", ".", "is_active", "and", "user", ".", "is_superuser", ":", "# admin can edit news iff any news types exist", "return", "bool", "(", "newsitem_models", ")", "for", "NewsItem", "in", "newsitem_models", ":", "for", "perm", "in", "format_perms", "(", "NewsItem", ",", "[", "'add'", ",", "'change'", ",", "'delete'", "]", ")", ":", "if", "user", ".", "has_perm", "(", "perm", ")", ":", "return", "True", "return", "False" ]
Check if the user has permission to edit any of the registered NewsItem types.
[ "Check", "if", "the", "user", "has", "permission", "to", "edit", "any", "of", "the", "registered", "NewsItem", "types", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/permissions.py#L21-L38
train
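``format_perms`` is imported from elsewhere in the package and not shown in this excerpt. A plausible reconstruction under Django's default permission codenames (hypothetical, for illustration only):

def format_perms(model, actions):
    # Hypothetical sketch: build "<app_label>.<action>_<model_name>" strings,
    # matching Django's default permission naming convention.
    opts = model._meta
    return ['{}.{}_{}'.format(opts.app_label, action, opts.model_name)
            for action in actions]

# e.g. format_perms(NewsItem, ['add', 'change', 'delete']) would then give
# ['news.add_newsitem', 'news.change_newsitem', 'news.delete_newsitem']
# for a NewsItem model living in an app labelled 'news'.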
neon-jungle/wagtailnews
wagtailnews/permissions.py
user_can_edit_newsitem
def user_can_edit_newsitem(user, NewsItem): """ Check if the user has permission to edit a particular NewsItem type. """ for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
python
def user_can_edit_newsitem(user, NewsItem): """ Check if the user has permission to edit a particular NewsItem type. """ for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
[ "def", "user_can_edit_newsitem", "(", "user", ",", "NewsItem", ")", ":", "for", "perm", "in", "format_perms", "(", "NewsItem", ",", "[", "'add'", ",", "'change'", ",", "'delete'", "]", ")", ":", "if", "user", ".", "has_perm", "(", "perm", ")", ":", "return", "True", "return", "False" ]
Check if the user has permission to edit a particular NewsItem type.
[ "Check", "if", "the", "user", "has", "permission", "to", "edit", "a", "particular", "NewsItem", "type", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/permissions.py#L41-L49
train
neon-jungle/wagtailnews
wagtailnews/models.py
get_date_or_404
def get_date_or_404(year, month, day): """Try to make a date from the given inputs, raising Http404 on error""" try: return datetime.date(int(year), int(month), int(day)) except ValueError: raise Http404
python
def get_date_or_404(year, month, day): """Try to make a date from the given inputs, raising Http404 on error""" try: return datetime.date(int(year), int(month), int(day)) except ValueError: raise Http404
[ "def", "get_date_or_404", "(", "year", ",", "month", ",", "day", ")", ":", "try", ":", "return", "datetime", ".", "date", "(", "int", "(", "year", ")", ",", "int", "(", "month", ")", ",", "int", "(", "day", ")", ")", "except", "ValueError", ":", "raise", "Http404" ]
Try to make a date from the given inputs, raising Http404 on error
[ "Try", "to", "make", "a", "date", "from", "the", "given", "inputs", "raising", "Http404", "on", "error" ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/models.py#L29-L34
train
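Both the int() conversions and the datetime.date construction can raise ValueError, and both are caught by the same handler. For example (assuming the function above is importable and Django is configured):

from django.http import Http404

print(get_date_or_404('2020', '2', '29'))   # datetime.date(2020, 2, 29)
try:
    get_date_or_404('2020', '2', '30')      # no February 30th
except Http404:
    print('raises Http404, as does non-numeric input like ("20x0", "2", "1")')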
neon-jungle/wagtailnews
wagtailnews/models.py
NewsIndexMixin.respond
def respond(self, request, view, newsitems, extra_context={}): """A helper that takes some news items and returns an HttpResponse""" context = self.get_context(request, view=view) context.update(self.paginate_newsitems(request, newsitems)) context.update(extra_context) template = self.get_template(request, view=view) return TemplateResponse(request, template, context)
python
def respond(self, request, view, newsitems, extra_context={}): """A helper that takes some news items and returns an HttpResponse""" context = self.get_context(request, view=view) context.update(self.paginate_newsitems(request, newsitems)) context.update(extra_context) template = self.get_template(request, view=view) return TemplateResponse(request, template, context)
[ "def", "respond", "(", "self", ",", "request", ",", "view", ",", "newsitems", ",", "extra_context", "=", "{", "}", ")", ":", "context", "=", "self", ".", "get_context", "(", "request", ",", "view", "=", "view", ")", "context", ".", "update", "(", "self", ".", "paginate_newsitems", "(", "request", ",", "newsitems", ")", ")", "context", ".", "update", "(", "extra_context", ")", "template", "=", "self", ".", "get_template", "(", "request", ",", "view", "=", "view", ")", "return", "TemplateResponse", "(", "request", ",", "template", ",", "context", ")" ]
A helper that takes some news items and returns an HttpResponse
[ "A", "helper", "that", "takes", "some", "news", "items", "and", "returns", "an", "HttpResponse" ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/models.py#L80-L86
train
neon-jungle/wagtailnews
wagtailnews/views/chooser.py
get_newsitem_model
def get_newsitem_model(model_string): """ Get the NewsItem model from a model string. Raises ValueError if the model string is invalid, or references a model that is not a NewsItem. """ try: NewsItem = apps.get_model(model_string) assert issubclass(NewsItem, AbstractNewsItem) except (ValueError, LookupError, AssertionError): raise ValueError('Invalid news item model string: {}'.format(model_string)) return NewsItem
python
def get_newsitem_model(model_string): """ Get the NewsItem model from a model string. Raises ValueError if the model string is invalid, or references a model that is not a NewsItem. """ try: NewsItem = apps.get_model(model_string) assert issubclass(NewsItem, AbstractNewsItem) except (ValueError, LookupError, AssertionError): raise ValueError('Invalid news item model string: {}'.format(model_string)) return NewsItem
[ "def", "get_newsitem_model", "(", "model_string", ")", ":", "try", ":", "NewsItem", "=", "apps", ".", "get_model", "(", "model_string", ")", "assert", "issubclass", "(", "NewsItem", ",", "AbstractNewsItem", ")", "except", "(", "ValueError", ",", "LookupError", ",", "AssertionError", ")", ":", "raise", "ValueError", "(", "'Invalid news item model string'", ".", "format", "(", "model_string", ")", ")", "return", "NewsItem" ]
Get the NewsItem model from a model string. Raises ValueError if the model string is invalid, or references a model that is not a NewsItem.
[ "Get", "the", "NewsItem", "model", "from", "a", "model", "string", ".", "Raises", "ValueError", "if", "the", "model", "string", "is", "invalid", "or", "references", "a", "model", "that", "is", "not", "a", "NewsItem", "." ]
4cdec7013cca276dcfc658d3c986444ba6a42a84
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/views/chooser.py#L119-L129
train
geometalab/pyGeoTile
pygeotile/tile.py
Tile.from_quad_tree
def from_quad_tree(cls, quad_tree): """Creates a tile from a Microsoft QuadTree""" assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consist of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
python
def from_quad_tree(cls, quad_tree): """Creates a tile from a Microsoft QuadTree""" assert bool(re.match('^[0-3]*$', quad_tree)), 'QuadTree value can only consist of the digits 0, 1, 2 and 3.' zoom = len(str(quad_tree)) offset = int(math.pow(2, zoom)) - 1 google_x, google_y = [reduce(lambda result, bit: (result << 1) | bit, bits, 0) for bits in zip(*(reversed(divmod(digit, 2)) for digit in (int(c) for c in str(quad_tree))))] return cls(tms_x=google_x, tms_y=(offset - google_y), zoom=zoom)
[ "def", "from_quad_tree", "(", "cls", ",", "quad_tree", ")", ":", "assert", "bool", "(", "re", ".", "match", "(", "'^[0-3]*$'", ",", "quad_tree", ")", ")", ",", "'QuadTree value can only consists of the digits 0, 1, 2 and 3.'", "zoom", "=", "len", "(", "str", "(", "quad_tree", ")", ")", "offset", "=", "int", "(", "math", ".", "pow", "(", "2", ",", "zoom", ")", ")", "-", "1", "google_x", ",", "google_y", "=", "[", "reduce", "(", "lambda", "result", ",", "bit", ":", "(", "result", "<<", "1", ")", "|", "bit", ",", "bits", ",", "0", ")", "for", "bits", "in", "zip", "(", "*", "(", "reversed", "(", "divmod", "(", "digit", ",", "2", ")", ")", "for", "digit", "in", "(", "int", "(", "c", ")", "for", "c", "in", "str", "(", "quad_tree", ")", ")", ")", ")", "]", "return", "cls", "(", "tms_x", "=", "google_x", ",", "tms_y", "=", "(", "offset", "-", "google_y", ")", ",", "zoom", "=", "zoom", ")" ]
Creates a tile from a Microsoft QuadTree
[ "Creates", "a", "tile", "from", "a", "Microsoft", "QuadTree" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L16-L24
train
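Each quadkey digit packs one (x, y) bit pair as digit = 2*y_bit + x_bit, most significant zoom level first; the reduce/zip pipeline in from_quad_tree just splits those bits back out. An equivalent plain-Python decoder for comparison:

def quadkey_to_google(quad_tree):
    x = y = 0
    for digit in (int(c) for c in quad_tree):
        y_bit, x_bit = divmod(digit, 2)   # digit = 2*y_bit + x_bit
        x = (x << 1) | x_bit
        y = (y << 1) | y_bit
    return x, y

print(quadkey_to_google('13'))   # (3, 1): Google tile x=3, y=1 at zoom 2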
geometalab/pyGeoTile
pygeotile/tile.py
Tile.from_google
def from_google(cls, google_x, google_y, zoom): """Creates a tile from Google format X Y and zoom""" max_tile = (2 ** zoom) - 1 assert 0 <= google_x <= max_tile, 'Google X needs to be a value between 0 and (2^zoom) -1.' assert 0 <= google_y <= max_tile, 'Google Y needs to be a value between 0 and (2^zoom) -1.' return cls(tms_x=google_x, tms_y=(2 ** zoom - 1) - google_y, zoom=zoom)
python
def from_google(cls, google_x, google_y, zoom): """Creates a tile from Google format X Y and zoom""" max_tile = (2 ** zoom) - 1 assert 0 <= google_x <= max_tile, 'Google X needs to be a value between 0 and (2^zoom) -1.' assert 0 <= google_y <= max_tile, 'Google Y needs to be a value between 0 and (2^zoom) -1.' return cls(tms_x=google_x, tms_y=(2 ** zoom - 1) - google_y, zoom=zoom)
[ "def", "from_google", "(", "cls", ",", "google_x", ",", "google_y", ",", "zoom", ")", ":", "max_tile", "=", "(", "2", "**", "zoom", ")", "-", "1", "assert", "0", "<=", "google_x", "<=", "max_tile", ",", "'Google X needs to be a value between 0 and (2^zoom) -1.'", "assert", "0", "<=", "google_y", "<=", "max_tile", ",", "'Google Y needs to be a value between 0 and (2^zoom) -1.'", "return", "cls", "(", "tms_x", "=", "google_x", ",", "tms_y", "=", "(", "2", "**", "zoom", "-", "1", ")", "-", "google_y", ",", "zoom", "=", "zoom", ")" ]
Creates a tile from Google format X Y and zoom
[ "Creates", "a", "tile", "from", "Google", "format", "X", "Y", "and", "zoom" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L35-L40
train
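A short sketch of the Google/TMS relationship encoded above; the coordinates are example values.

```python
from pygeotile.tile import Tile

tile = Tile.from_google(google_x=67, google_y=44, zoom=7)
print(tile.google)  # (67, 44) -- y counted from the top of the map
print(tile.tms)     # (67, 83) -- tms_y = (2**7 - 1) - 44
```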
geometalab/pyGeoTile
pygeotile/tile.py
Tile.for_point
def for_point(cls, point, zoom): """Creates a tile for given point""" latitude, longitude = point.latitude_longitude return cls.for_latitude_longitude(latitude=latitude, longitude=longitude, zoom=zoom)
python
def for_point(cls, point, zoom): """Creates a tile for given point""" latitude, longitude = point.latitude_longitude return cls.for_latitude_longitude(latitude=latitude, longitude=longitude, zoom=zoom)
[ "def", "for_point", "(", "cls", ",", "point", ",", "zoom", ")", ":", "latitude", ",", "longitude", "=", "point", ".", "latitude_longitude", "return", "cls", ".", "for_latitude_longitude", "(", "latitude", "=", "latitude", ",", "longitude", "=", "longitude", ",", "zoom", "=", "zoom", ")" ]
Creates a tile for given point
[ "Creates", "a", "tile", "for", "given", "point" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L43-L46
train
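A sketch of the Point-based constructor, assuming Point.from_latitude_longitude from the same package; the coordinate is arbitrary.

```python
from pygeotile.point import Point
from pygeotile.tile import Tile

point = Point.from_latitude_longitude(latitude=47.0, longitude=8.0)
tile = Tile.for_point(point, zoom=10)
print(tile.google)  # the tile containing the coordinate at zoom 10
```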
geometalab/pyGeoTile
pygeotile/tile.py
Tile.quad_tree
def quad_tree(self): """Gets the tile in the Microsoft QuadTree format, converted from TMS""" value = '' tms_x, tms_y = self.tms tms_y = (2 ** self.zoom - 1) - tms_y for i in range(self.zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tms_x & mask) != 0: digit += 1 if (tms_y & mask) != 0: digit += 2 value += str(digit) return value
python
def quad_tree(self): """Gets the tile in the Microsoft QuadTree format, converted from TMS""" value = '' tms_x, tms_y = self.tms tms_y = (2 ** self.zoom - 1) - tms_y for i in range(self.zoom, 0, -1): digit = 0 mask = 1 << (i - 1) if (tms_x & mask) != 0: digit += 1 if (tms_y & mask) != 0: digit += 2 value += str(digit) return value
[ "def", "quad_tree", "(", "self", ")", ":", "value", "=", "''", "tms_x", ",", "tms_y", "=", "self", ".", "tms", "tms_y", "=", "(", "2", "**", "self", ".", "zoom", "-", "1", ")", "-", "tms_y", "for", "i", "in", "range", "(", "self", ".", "zoom", ",", "0", ",", "-", "1", ")", ":", "digit", "=", "0", "mask", "=", "1", "<<", "(", "i", "-", "1", ")", "if", "(", "tms_x", "&", "mask", ")", "!=", "0", ":", "digit", "+=", "1", "if", "(", "tms_y", "&", "mask", ")", "!=", "0", ":", "digit", "+=", "2", "value", "+=", "str", "(", "digit", ")", "return", "value" ]
Gets the tile in the Microsoft QuadTree format, converted from TMS
[ "Gets", "the", "tile", "in", "the", "Microsoft", "QuadTree", "format", "converted", "from", "TMS" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L75-L88
train
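A hand-checked example of the digit rule above: at each level, the x bit contributes 1 and the (top-origin) y bit contributes 2.

```python
from pygeotile.tile import Tile

# google x=3 (binary 011), y=5 (binary 101), zoom=3:
#   mask 4: x bit 0, y bit 1 -> digit 2
#   mask 2: x bit 1, y bit 0 -> digit 1
#   mask 1: x bit 1, y bit 1 -> digit 3
tile = Tile.from_google(google_x=3, google_y=5, zoom=3)
print(tile.quad_tree)  # '213'
```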
geometalab/pyGeoTile
pygeotile/tile.py
Tile.google
def google(self): """Gets the tile in the Google format, converted from TMS""" tms_x, tms_y = self.tms return tms_x, (2 ** self.zoom - 1) - tms_y
python
def google(self): """Gets the tile in the Google format, converted from TMS""" tms_x, tms_y = self.tms return tms_x, (2 ** self.zoom - 1) - tms_y
[ "def", "google", "(", "self", ")", ":", "tms_x", ",", "tms_y", "=", "self", ".", "tms", "return", "tms_x", ",", "(", "2", "**", "self", ".", "zoom", "-", "1", ")", "-", "tms_y" ]
Gets the tile in the Google format, converted from TMS
[ "Gets", "the", "tile", "in", "the", "Google", "format", "converted", "from", "TMS" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L91-L94
train
geometalab/pyGeoTile
pygeotile/tile.py
Tile.bounds
def bounds(self): """Gets the bounds of a tile represented as the most west and south point and the most east and north point""" google_x, google_y = self.google pixel_x_west, pixel_y_north = google_x * TILE_SIZE, google_y * TILE_SIZE pixel_x_east, pixel_y_south = (google_x + 1) * TILE_SIZE, (google_y + 1) * TILE_SIZE point_min = Point.from_pixel(pixel_x=pixel_x_west, pixel_y=pixel_y_south, zoom=self.zoom) point_max = Point.from_pixel(pixel_x=pixel_x_east, pixel_y=pixel_y_north, zoom=self.zoom) return point_min, point_max
python
def bounds(self): """Gets the bounds of a tile represented as the most west and south point and the most east and north point""" google_x, google_y = self.google pixel_x_west, pixel_y_north = google_x * TILE_SIZE, google_y * TILE_SIZE pixel_x_east, pixel_y_south = (google_x + 1) * TILE_SIZE, (google_y + 1) * TILE_SIZE point_min = Point.from_pixel(pixel_x=pixel_x_west, pixel_y=pixel_y_south, zoom=self.zoom) point_max = Point.from_pixel(pixel_x=pixel_x_east, pixel_y=pixel_y_north, zoom=self.zoom) return point_min, point_max
[ "def", "bounds", "(", "self", ")", ":", "google_x", ",", "google_y", "=", "self", ".", "google", "pixel_x_west", ",", "pixel_y_north", "=", "google_x", "*", "TILE_SIZE", ",", "google_y", "*", "TILE_SIZE", "pixel_x_east", ",", "pixel_y_south", "=", "(", "google_x", "+", "1", ")", "*", "TILE_SIZE", ",", "(", "google_y", "+", "1", ")", "*", "TILE_SIZE", "point_min", "=", "Point", ".", "from_pixel", "(", "pixel_x", "=", "pixel_x_west", ",", "pixel_y", "=", "pixel_y_south", ",", "zoom", "=", "self", ".", "zoom", ")", "point_max", "=", "Point", ".", "from_pixel", "(", "pixel_x", "=", "pixel_x_east", ",", "pixel_y", "=", "pixel_y_north", ",", "zoom", "=", "self", ".", "zoom", ")", "return", "point_min", ",", "point_max" ]
Gets the bounds of a tile represented as the most west and south point and the most east and north point
[ "Gets", "the", "bounds", "of", "a", "tile", "represented", "as", "the", "most", "west", "and", "south", "point", "and", "the", "most", "east", "and", "north", "point" ]
b1f44271698f5fc4d18c2add935797ed43254aa6
https://github.com/geometalab/pyGeoTile/blob/b1f44271698f5fc4d18c2add935797ed43254aa6/pygeotile/tile.py#L97-L105
train
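A sketch of reading the corner Points returned by the property above; the tile is arbitrary.

```python
from pygeotile.tile import Tile

tile = Tile.from_google(google_x=0, google_y=0, zoom=1)
point_min, point_max = tile.bounds
print(point_min.latitude_longitude)  # south-west corner (lat, lon)
print(point_max.latitude_longitude)  # north-east corner (lat, lon)
```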
IAMconsortium/pyam
pyam/read_ixmp.py
read_ix
def read_ix(ix, **kwargs): """Read timeseries data from an ixmp object Parameters ---------- ix: ixmp.TimeSeries or ixmp.Scenario this option requires the ixmp package as a dependency kwargs: arguments passed to ixmp.TimeSeries.timeseries() """ if not isinstance(ix, ixmp.TimeSeries): error = 'not recognized as valid ixmp class: {}'.format(ix) raise ValueError(error) df = ix.timeseries(iamc=False, **kwargs) df['model'] = ix.model df['scenario'] = ix.scenario return df, 'year', []
python
def read_ix(ix, **kwargs): """Read timeseries data from an ixmp object Parameters ---------- ix: ixmp.TimeSeries or ixmp.Scenario this option requires the ixmp package as a dependency kwargs: arguments passed to ixmp.TimeSeries.timeseries() """ if not isinstance(ix, ixmp.TimeSeries): error = 'not recognized as valid ixmp class: {}'.format(ix) raise ValueError(error) df = ix.timeseries(iamc=False, **kwargs) df['model'] = ix.model df['scenario'] = ix.scenario return df, 'year', []
[ "def", "read_ix", "(", "ix", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "ix", ",", "ixmp", ".", "TimeSeries", ")", ":", "error", "=", "'not recognized as valid ixmp class: {}'", ".", "format", "(", "ix", ")", "raise", "ValueError", "(", "error", ")", "df", "=", "ix", ".", "timeseries", "(", "iamc", "=", "False", ",", "*", "*", "kwargs", ")", "df", "[", "'model'", "]", "=", "ix", ".", "model", "df", "[", "'scenario'", "]", "=", "ix", ".", "scenario", "return", "df", ",", "'year'", ",", "[", "]" ]
Read timeseries data from an ixmp object Parameters ---------- ix: ixmp.TimeSeries or ixmp.Scenario this option requires the ixmp package as a dependency kwargs: arguments passed to ixmp.TimeSeries.timeseries()
[ "Read", "timeseries", "data", "from", "an", "ixmp", "object" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/read_ixmp.py#L8-L24
train
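A hedged sketch of calling read_ix, assuming a local ixmp platform; the model and scenario names are hypothetical.

```python
import ixmp

mp = ixmp.Platform()  # connects to the default local database
scen = ixmp.Scenario(mp, model='MESSAGE', scenario='baseline')

# returns the long-format data plus the time column name and extra columns
df, time_col, extra_cols = read_ix(scen)
print(time_col)  # 'year'
```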
IAMconsortium/pyam
pyam/utils.py
requires_package
def requires_package(pkg, msg, error_type=ImportError): """Decorator when a function requires an optional dependency Parameters ---------- pkg : imported package object msg : string Message to show to user with error_type error_type : python error class """ def _requires_package(func): def wrapper(*args, **kwargs): if pkg is None: raise error_type(msg) return func(*args, **kwargs) return wrapper return _requires_package
python
def requires_package(pkg, msg, error_type=ImportError): """Decorator when a function requires an optional dependency Parameters ---------- pkg : imported package object msg : string Message to show to user with error_type error_type : python error class """ def _requires_package(func): def wrapper(*args, **kwargs): if pkg is None: raise error_type(msg) return func(*args, **kwargs) return wrapper return _requires_package
[ "def", "requires_package", "(", "pkg", ",", "msg", ",", "error_type", "=", "ImportError", ")", ":", "def", "_requires_package", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "pkg", "is", "None", ":", "raise", "error_type", "(", "msg", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "_requires_package" ]
Decorator when a function requires an optional dependency Parameters ---------- pkg : imported package object msg : string Message to show to user with error_type error_type : python error class
[ "Decorator", "when", "a", "function", "requires", "an", "optional", "dependency" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L36-L52
train
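A minimal sketch of guarding a function on an optional dependency; styled_export is a made-up example function.

```python
try:
    import xlsxwriter
except ImportError:
    xlsxwriter = None

@requires_package(xlsxwriter, 'xlsxwriter is required for styled Excel output')
def styled_export(path):
    # only reachable when the optional dependency imported successfully
    print('exporting to', path)

styled_export('out.xlsx')  # raises ImportError if xlsxwriter is missing
```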
IAMconsortium/pyam
pyam/utils.py
write_sheet
def write_sheet(writer, name, df, index=False): """Write a pandas DataFrame to an ExcelWriter, auto-formatting column width depending on max width of data and column header Parameters ---------- writer: pandas.ExcelWriter an instance of a pandas ExcelWriter name: string name of the sheet to be written df: pandas.DataFrame a pandas DataFrame to be written to the sheet index: boolean, default False flag whether index should be written to the sheet """ if index: df = df.reset_index() df.to_excel(writer, name, index=False) worksheet = writer.sheets[name] for i, col in enumerate(df.columns): if df.dtypes[col].name.startswith(('float', 'int')): width = len(str(col)) + 2 else: width = max([df[col].map(lambda x: len(str(x or 'None'))).max(), len(col)]) + 2 xls_col = '{c}:{c}'.format(c=NUMERIC_TO_STR[i]) worksheet.set_column(xls_col, width)
python
def write_sheet(writer, name, df, index=False): """Write a pandas DataFrame to an ExcelWriter, auto-formatting column width depending on max width of data and column header Parameters ---------- writer: pandas.ExcelWriter an instance of a pandas ExcelWriter name: string name of the sheet to be written df: pandas.DataFrame a pandas DataFrame to be written to the sheet index: boolean, default False flag whether index should be written to the sheet """ if index: df = df.reset_index() df.to_excel(writer, name, index=False) worksheet = writer.sheets[name] for i, col in enumerate(df.columns): if df.dtypes[col].name.startswith(('float', 'int')): width = len(str(col)) + 2 else: width = max([df[col].map(lambda x: len(str(x or 'None'))).max(), len(col)]) + 2 xls_col = '{c}:{c}'.format(c=NUMERIC_TO_STR[i]) worksheet.set_column(xls_col, width)
[ "def", "write_sheet", "(", "writer", ",", "name", ",", "df", ",", "index", "=", "False", ")", ":", "if", "index", ":", "df", "=", "df", ".", "reset_index", "(", ")", "df", ".", "to_excel", "(", "writer", ",", "name", ",", "index", "=", "False", ")", "worksheet", "=", "writer", ".", "sheets", "[", "name", "]", "for", "i", ",", "col", "in", "enumerate", "(", "df", ".", "columns", ")", ":", "if", "df", ".", "dtypes", "[", "col", "]", ".", "name", ".", "startswith", "(", "(", "'float'", ",", "'int'", ")", ")", ":", "width", "=", "len", "(", "str", "(", "col", ")", ")", "+", "2", "else", ":", "width", "=", "max", "(", "[", "df", "[", "col", "]", ".", "map", "(", "lambda", "x", ":", "len", "(", "str", "(", "x", "or", "'None'", ")", ")", ")", ".", "max", "(", ")", ",", "len", "(", "col", ")", "]", ")", "+", "2", "xls_col", "=", "'{c}:{c}'", ".", "format", "(", "c", "=", "NUMERIC_TO_STR", "[", "i", "]", ")", "worksheet", ".", "set_column", "(", "xls_col", ",", "width", ")" ]
Write a pandas DataFrame to an ExcelWriter, auto-formatting column width depending on max width of data and column header Parameters ---------- writer: pandas.ExcelWriter an instance of a pandas ExcelWriter name: string name of the sheet to be written df: pandas.DataFrame a pandas DataFrame to be written to the sheet index: boolean, default False flag whether index should be written to the sheet
[ "Write", "a", "pandas", "DataFrame", "to", "an", "ExcelWriter", "auto", "-", "formatting", "column", "width", "depending", "on", "max", "width", "of", "data", "and", "column", "header" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L70-L96
train
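A sketch of writing one auto-sized sheet, assuming write_sheet and its NUMERIC_TO_STR column-letter mapping are importable from pyam.utils; the file and sheet names are arbitrary, and the mapping bounds how many columns can be addressed.

```python
import pandas as pd

df = pd.DataFrame({'model': ['MESSAGE'], 'scenario': ['baseline'],
                   'value': [1.5]})

# set_column() is xlsxwriter-specific, so that engine is required
with pd.ExcelWriter('demo.xlsx', engine='xlsxwriter') as writer:
    write_sheet(writer, 'data', df)
```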
IAMconsortium/pyam
pyam/utils.py
read_pandas
def read_pandas(fname, *args, **kwargs): """Read a file and return a pd.DataFrame""" if not os.path.exists(fname): raise ValueError('no data file `{}` found!'.format(fname)) if fname.endswith('csv'): df = pd.read_csv(fname, *args, **kwargs) else: xl = pd.ExcelFile(fname) if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs: kwargs['sheet_name'] = 'data' df = pd.read_excel(fname, *args, **kwargs) return df
python
def read_pandas(fname, *args, **kwargs): """Read a file and return a pd.DataFrame""" if not os.path.exists(fname): raise ValueError('no data file `{}` found!'.format(fname)) if fname.endswith('csv'): df = pd.read_csv(fname, *args, **kwargs) else: xl = pd.ExcelFile(fname) if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs: kwargs['sheet_name'] = 'data' df = pd.read_excel(fname, *args, **kwargs) return df
[ "def", "read_pandas", "(", "fname", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "raise", "ValueError", "(", "'no data file `{}` found!'", ".", "format", "(", "fname", ")", ")", "if", "fname", ".", "endswith", "(", "'csv'", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "fname", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "xl", "=", "pd", ".", "ExcelFile", "(", "fname", ")", "if", "len", "(", "xl", ".", "sheet_names", ")", ">", "1", "and", "'sheet_name'", "not", "in", "kwargs", ":", "kwargs", "[", "'sheet_name'", "]", "=", "'data'", "df", "=", "pd", ".", "read_excel", "(", "fname", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "df" ]
Read a file and return a pd.DataFrame
[ "Read", "a", "file", "and", "return", "a", "pd", ".", "DataFrame" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L99-L110
train
IAMconsortium/pyam
pyam/utils.py
sort_data
def sort_data(data, cols): """Sort `data` rows and order columns""" return data.sort_values(cols)[cols + ['value']].reset_index(drop=True)
python
def sort_data(data, cols): """Sort `data` rows and order columns""" return data.sort_values(cols)[cols + ['value']].reset_index(drop=True)
[ "def", "sort_data", "(", "data", ",", "cols", ")", ":", "return", "data", ".", "sort_values", "(", "cols", ")", "[", "cols", "+", "[", "'value'", "]", "]", ".", "reset_index", "(", "drop", "=", "True", ")" ]
Sort `data` rows and order columns
[ "Sort", "data", "rows", "and", "order", "columns" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L258-L260
train
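A small worked example of the helper above.

```python
import pandas as pd

data = pd.DataFrame({'year': [2010, 2005], 'model': ['B', 'A'],
                     'value': [2.0, 1.0]})
print(sort_data(data, ['model', 'year']))
# rows sorted by model then year; columns ordered model, year, value
```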
IAMconsortium/pyam
pyam/utils.py
_escape_regexp
def _escape_regexp(s): """escape characters with specific regexp use""" return ( str(s) .replace('|', '\\|') .replace('.', '\.') # `.` has to be replaced before `*` .replace('*', '.*') .replace('+', '\+') .replace('(', '\(') .replace(')', '\)') .replace('$', '\\$') )
python
def _escape_regexp(s): """escape characters with specific regexp use""" return ( str(s) .replace('|', '\\|') .replace('.', '\.') # `.` has to be replaced before `*` .replace('*', '.*') .replace('+', '\+') .replace('(', '\(') .replace(')', '\)') .replace('$', '\\$') )
[ "def", "_escape_regexp", "(", "s", ")", ":", "return", "(", "str", "(", "s", ")", ".", "replace", "(", "'|'", ",", "'\\\\|'", ")", ".", "replace", "(", "'.'", ",", "'\\.'", ")", "# `.` has to be replaced before `*`", ".", "replace", "(", "'*'", ",", "'.*'", ")", ".", "replace", "(", "'+'", ",", "'\\+'", ")", ".", "replace", "(", "'('", ",", "'\\('", ")", ".", "replace", "(", "')'", ",", "'\\)'", ")", ".", "replace", "(", "'$'", ",", "'\\\\$'", ")", ")" ]
escape characters with specific regexp use
[ "escape", "characters", "with", "specific", "regexp", "use" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L332-L343
train
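A worked example of the pseudo-regexp semantics above: literal regexp characters are escaped while `*` is kept as a wildcard.

```python
print(_escape_regexp('Emissions|CO2'))     # Emissions\|CO2
print(_escape_regexp('Primary Energy|*'))  # Primary Energy\|.*
```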
IAMconsortium/pyam
pyam/utils.py
years_match
def years_match(data, years): """ matching of year columns for data filtering """ years = [years] if isinstance(years, int) else years dt = datetime.datetime if isinstance(years, dt) or isinstance(years[0], dt): error_msg = "`year` can only be filtered with ints or lists of ints" raise TypeError(error_msg) return data.isin(years)
python
def years_match(data, years): """ matching of year columns for data filtering """ years = [years] if isinstance(years, int) else years dt = datetime.datetime if isinstance(years, dt) or isinstance(years[0], dt): error_msg = "`year` can only be filtered with ints or lists of ints" raise TypeError(error_msg) return data.isin(years)
[ "def", "years_match", "(", "data", ",", "years", ")", ":", "years", "=", "[", "years", "]", "if", "isinstance", "(", "years", ",", "int", ")", "else", "years", "dt", "=", "datetime", ".", "datetime", "if", "isinstance", "(", "years", ",", "dt", ")", "or", "isinstance", "(", "years", "[", "0", "]", ",", "dt", ")", ":", "error_msg", "=", "\"`year` can only be filtered with ints or lists of ints\"", "raise", "TypeError", "(", "error_msg", ")", "return", "data", ".", "isin", "(", "years", ")" ]
matching of year columns for data filtering
[ "matching", "of", "year", "columns", "for", "data", "filtering" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L346-L355
train
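A small worked example of the year filter.

```python
import pandas as pd

s = pd.Series([2005, 2010, 2030])
print(years_match(s, 2010).tolist())          # [False, True, False]
print(years_match(s, [2005, 2030]).tolist())  # [True, False, True]
```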
IAMconsortium/pyam
pyam/utils.py
hour_match
def hour_match(data, hours): """ matching of hours in time columns for data filtering """ hours = [hours] if isinstance(hours, int) else hours return data.isin(hours)
python
def hour_match(data, hours): """ matching of hours in time columns for data filtering """ hours = [hours] if isinstance(hours, int) else hours return data.isin(hours)
[ "def", "hour_match", "(", "data", ",", "hours", ")", ":", "hours", "=", "[", "hours", "]", "if", "isinstance", "(", "hours", ",", "int", ")", "else", "hours", "return", "data", ".", "isin", "(", "hours", ")" ]
matching of hours in time columns for data filtering
[ "matching", "of", "hours", "in", "time", "columns", "for", "data", "filtering" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L372-L377
train
IAMconsortium/pyam
pyam/utils.py
datetime_match
def datetime_match(data, dts): """ matching of datetimes in time columns for data filtering """ dts = dts if islistable(dts) else [dts] if any([not isinstance(i, datetime.datetime) for i in dts]): error_msg = ( "`time` can only be filtered by datetimes" ) raise TypeError(error_msg) return data.isin(dts)
python
def datetime_match(data, dts): """ matching of datetimes in time columns for data filtering """ dts = dts if islistable(dts) else [dts] if any([not isinstance(i, datetime.datetime) for i in dts]): error_msg = ( "`time` can only be filtered by datetimes" ) raise TypeError(error_msg) return data.isin(dts)
[ "def", "datetime_match", "(", "data", ",", "dts", ")", ":", "dts", "=", "dts", "if", "islistable", "(", "dts", ")", "else", "[", "dts", "]", "if", "any", "(", "[", "not", "isinstance", "(", "i", ",", "datetime", ".", "datetime", ")", "for", "i", "in", "dts", "]", ")", ":", "error_msg", "=", "(", "\"`time` can only be filtered by datetimes\"", ")", "raise", "TypeError", "(", "error_msg", ")", "return", "data", ".", "isin", "(", "dts", ")" ]
matching of datetimes in time columns for data filtering
[ "matching", "of", "datetimes", "in", "time", "columns", "for", "data", "filtering" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L422-L432
train
IAMconsortium/pyam
pyam/utils.py
to_int
def to_int(x, index=False): """Formatting series or timeseries columns to int and checking validity. If `index=False`, the function works on the `pd.Series x`; else, the function casts the index of `x` to int and returns x with a new index. """ _x = x.index if index else x cols = list(map(int, _x)) error = _x[cols != _x] if not error.empty: raise ValueError('invalid values `{}`'.format(list(error))) if index: x.index = cols return x else: return _x
python
def to_int(x, index=False): """Formatting series or timeseries columns to int and checking validity. If `index=False`, the function works on the `pd.Series x`; else, the function casts the index of `x` to int and returns x with a new index. """ _x = x.index if index else x cols = list(map(int, _x)) error = _x[cols != _x] if not error.empty: raise ValueError('invalid values `{}`'.format(list(error))) if index: x.index = cols return x else: return _x
[ "def", "to_int", "(", "x", ",", "index", "=", "False", ")", ":", "_x", "=", "x", ".", "index", "if", "index", "else", "x", "cols", "=", "list", "(", "map", "(", "int", ",", "_x", ")", ")", "error", "=", "_x", "[", "cols", "!=", "_x", "]", "if", "not", "error", ".", "empty", ":", "raise", "ValueError", "(", "'invalid values `{}`'", ".", "format", "(", "list", "(", "error", ")", ")", ")", "if", "index", ":", "x", ".", "index", "=", "cols", "return", "x", "else", ":", "return", "_x" ]
Formatting series or timeseries columns to int and checking validity. If `index=False`, the function works on the `pd.Series x`; else, the function casts the index of `x` to int and returns x with a new index.
[ "Formatting", "series", "or", "timeseries", "columns", "to", "int", "and", "checking", "validity", ".", "If", "index", "=", "False", "the", "function", "works", "on", "the", "pd", ".", "Series", "x", ";", "else", "the", "function", "casts", "the", "index", "of", "x", "to", "int", "and", "returns", "x", "with", "a", "new", "index", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L435-L449
train
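A worked example of the validation semantics above: with index=False the series is returned unchanged once every value casts to int losslessly.

```python
import pandas as pd

print(to_int(pd.Series([2005.0, 2010.0])).tolist())  # [2005.0, 2010.0]
to_int(pd.Series([2005.5]))  # ValueError: invalid values `[2005.5]`
```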
IAMconsortium/pyam
pyam/utils.py
concat_with_pipe
def concat_with_pipe(x, cols=None): """Concatenate a `pd.Series` separated by `|`, drop `None` or `np.nan`""" cols = cols or x.index return '|'.join([x[i] for i in cols if x[i] not in [None, np.nan]])
python
def concat_with_pipe(x, cols=None): """Concatenate a `pd.Series` separated by `|`, drop `None` or `np.nan`""" cols = cols or x.index return '|'.join([x[i] for i in cols if x[i] not in [None, np.nan]])
[ "def", "concat_with_pipe", "(", "x", ",", "cols", "=", "None", ")", ":", "cols", "=", "cols", "or", "x", ".", "index", "return", "'|'", ".", "join", "(", "[", "x", "[", "i", "]", "for", "i", "in", "cols", "if", "x", "[", "i", "]", "not", "in", "[", "None", ",", "np", ".", "nan", "]", "]", ")" ]
Concatenate a `pd.Series` separated by `|`, drop `None` or `np.nan`
[ "Concatenate", "a", "pd", ".", "Series", "separated", "by", "|", "drop", "None", "or", "np", ".", "nan" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/utils.py#L452-L455
train
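A small example of building an IAMC-style variable string while dropping missing entries.

```python
import numpy as np
import pandas as pd

row = pd.Series({'variable': 'Emissions', 'gas': 'CO2', 'sector': np.nan})
print(concat_with_pipe(row))                       # Emissions|CO2
print(concat_with_pipe(row, ['variable', 'gas']))  # Emissions|CO2
```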
IAMconsortium/pyam
pyam/core.py
_make_index
def _make_index(df, cols=META_IDX): """Create an index from the columns of a dataframe""" return pd.MultiIndex.from_tuples( pd.unique(list(zip(*[df[col] for col in cols]))), names=tuple(cols))
python
def _make_index(df, cols=META_IDX): """Create an index from the columns of a dataframe""" return pd.MultiIndex.from_tuples( pd.unique(list(zip(*[df[col] for col in cols]))), names=tuple(cols))
[ "def", "_make_index", "(", "df", ",", "cols", "=", "META_IDX", ")", ":", "return", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "pd", ".", "unique", "(", "list", "(", "zip", "(", "*", "[", "df", "[", "col", "]", "for", "col", "in", "cols", "]", ")", ")", ")", ",", "names", "=", "tuple", "(", "cols", ")", ")" ]
Create an index from the columns of a dataframe
[ "Create", "an", "index", "from", "the", "columns", "of", "a", "dataframe" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1400-L1403
train
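A sketch of the index helper; META_IDX is ['model', 'scenario'] in pyam, so the default collapses duplicate model/scenario pairs.

```python
import pandas as pd

df = pd.DataFrame({'model': ['A', 'A', 'B'],
                   'scenario': ['s1', 's1', 's2'],
                   'value': [1, 2, 3]})
print(_make_index(df).tolist())  # [('A', 's1'), ('B', 's2')]
```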
IAMconsortium/pyam
pyam/core.py
check_aggregate
def check_aggregate(df, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs): """Check whether the timeseries values match the aggregation of sub-categories Parameters ---------- df: IamDataFrame instance args: see IamDataFrame.check_aggregate() for details kwargs: passed to `df.filter()` """ fdf = df.filter(**kwargs) if len(fdf.data) > 0: vdf = fdf.check_aggregate(variable=variable, components=components, exclude_on_fail=exclude_on_fail, multiplier=multiplier) df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded return vdf
python
def check_aggregate(df, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs): """Check whether the timeseries values match the aggregation of sub-categories Parameters ---------- df: IamDataFrame instance args: see IamDataFrame.check_aggregate() for details kwargs: passed to `df.filter()` """ fdf = df.filter(**kwargs) if len(fdf.data) > 0: vdf = fdf.check_aggregate(variable=variable, components=components, exclude_on_fail=exclude_on_fail, multiplier=multiplier) df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded return vdf
[ "def", "check_aggregate", "(", "df", ",", "variable", ",", "components", "=", "None", ",", "exclude_on_fail", "=", "False", ",", "multiplier", "=", "1", ",", "*", "*", "kwargs", ")", ":", "fdf", "=", "df", ".", "filter", "(", "*", "*", "kwargs", ")", "if", "len", "(", "fdf", ".", "data", ")", ">", "0", ":", "vdf", "=", "fdf", ".", "check_aggregate", "(", "variable", "=", "variable", ",", "components", "=", "components", ",", "exclude_on_fail", "=", "exclude_on_fail", ",", "multiplier", "=", "multiplier", ")", "df", ".", "meta", "[", "'exclude'", "]", "|=", "fdf", ".", "meta", "[", "'exclude'", "]", "# update if any excluded", "return", "vdf" ]
Check whether the timeseries values match the aggregation of sub-categories Parameters ---------- df: IamDataFrame instance args: see IamDataFrame.check_aggregate() for details kwargs: passed to `df.filter()`
[ "Check", "whether", "the", "timeseries", "values", "match", "the", "aggregation", "of", "sub", "-", "categories" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1462-L1479
train
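A hedged sketch of the convenience wrapper; the variable and region names are illustrative, and the kwargs narrow the check via df.filter().

```python
# returns a dataframe of failing rows, or None if everything matches
failures = check_aggregate(df, variable='Primary Energy', region='World')
if failures is not None:
    print(failures.head())
```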
IAMconsortium/pyam
pyam/core.py
filter_by_meta
def filter_by_meta(data, df, join_meta=False, **kwargs): """Filter by and join meta columns from an IamDataFrame to a pd.DataFrame Parameters ---------- data: pd.DataFrame instance DataFrame to which meta columns are to be joined, index or columns must include `['model', 'scenario']` df: IamDataFrame instance IamDataFrame from which meta columns are filtered and joined (optional) join_meta: bool, default False join selected columns from `df.meta` on `data` kwargs: meta columns to be filtered/joined, where `col=...` applies filters by the given arguments (using `utils.pattern_match()`) and `col=None` joins the column without filtering (setting col to `np.nan` if `(model, scenario) not in df.meta.index`) """ if not set(META_IDX).issubset(data.index.names + list(data.columns)): raise ValueError('missing required index dimensions or columns!') meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy()) # filter meta by columns keep = np.array([True] * len(meta)) apply_filter = False for col, values in kwargs.items(): if col in META_IDX and values is not None: _col = meta.index.get_level_values(0 if col == 'model' else 1) keep &= pattern_match(_col, values, has_nan=False) apply_filter = True elif values is not None: keep &= pattern_match(meta[col], values) apply_filter |= values is not None meta = meta[keep] # set the data index to META_IDX and apply filtered meta index data = data.copy() idx = list(data.index.names) if not data.index.names == [None] else None data = data.reset_index().set_index(META_IDX) meta = meta.loc[meta.index.intersection(data.index)] meta.index.names = META_IDX if apply_filter: data = data.loc[meta.index] data.index.names = META_IDX # join meta (optional), reset index to format as input arg data = data.join(meta) if join_meta else data data = data.reset_index().set_index(idx or 'index') if idx is None: data.index.name = None return data
python
def filter_by_meta(data, df, join_meta=False, **kwargs): """Filter by and join meta columns from an IamDataFrame to a pd.DataFrame Parameters ---------- data: pd.DataFrame instance DataFrame to which meta columns are to be joined, index or columns must include `['model', 'scenario']` df: IamDataFrame instance IamDataFrame from which meta columns are filtered and joined (optional) join_meta: bool, default False join selected columns from `df.meta` on `data` kwargs: meta columns to be filtered/joined, where `col=...` applies filters by the given arguments (using `utils.pattern_match()`) and `col=None` joins the column without filtering (setting col to `np.nan` if `(model, scenario) not in df.meta.index`) """ if not set(META_IDX).issubset(data.index.names + list(data.columns)): raise ValueError('missing required index dimensions or columns!') meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy()) # filter meta by columns keep = np.array([True] * len(meta)) apply_filter = False for col, values in kwargs.items(): if col in META_IDX and values is not None: _col = meta.index.get_level_values(0 if col == 'model' else 1) keep &= pattern_match(_col, values, has_nan=False) apply_filter = True elif values is not None: keep &= pattern_match(meta[col], values) apply_filter |= values is not None meta = meta[keep] # set the data index to META_IDX and apply filtered meta index data = data.copy() idx = list(data.index.names) if not data.index.names == [None] else None data = data.reset_index().set_index(META_IDX) meta = meta.loc[meta.index.intersection(data.index)] meta.index.names = META_IDX if apply_filter: data = data.loc[meta.index] data.index.names = META_IDX # join meta (optional), reset index to format as input arg data = data.join(meta) if join_meta else data data = data.reset_index().set_index(idx or 'index') if idx is None: data.index.name = None return data
[ "def", "filter_by_meta", "(", "data", ",", "df", ",", "join_meta", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "set", "(", "META_IDX", ")", ".", "issubset", "(", "data", ".", "index", ".", "names", "+", "list", "(", "data", ".", "columns", ")", ")", ":", "raise", "ValueError", "(", "'missing required index dimensions or columns!'", ")", "meta", "=", "pd", ".", "DataFrame", "(", "df", ".", "meta", "[", "list", "(", "set", "(", "kwargs", ")", "-", "set", "(", "META_IDX", ")", ")", "]", ".", "copy", "(", ")", ")", "# filter meta by columns", "keep", "=", "np", ".", "array", "(", "[", "True", "]", "*", "len", "(", "meta", ")", ")", "apply_filter", "=", "False", "for", "col", ",", "values", "in", "kwargs", ".", "items", "(", ")", ":", "if", "col", "in", "META_IDX", "and", "values", "is", "not", "None", ":", "_col", "=", "meta", ".", "index", ".", "get_level_values", "(", "0", "if", "col", "==", "'model'", "else", "1", ")", "keep", "&=", "pattern_match", "(", "_col", ",", "values", ",", "has_nan", "=", "False", ")", "apply_filter", "=", "True", "elif", "values", "is", "not", "None", ":", "keep", "&=", "pattern_match", "(", "meta", "[", "col", "]", ",", "values", ")", "apply_filter", "|=", "values", "is", "not", "None", "meta", "=", "meta", "[", "keep", "]", "# set the data index to META_IDX and apply filtered meta index", "data", "=", "data", ".", "copy", "(", ")", "idx", "=", "list", "(", "data", ".", "index", ".", "names", ")", "if", "not", "data", ".", "index", ".", "names", "==", "[", "None", "]", "else", "None", "data", "=", "data", ".", "reset_index", "(", ")", ".", "set_index", "(", "META_IDX", ")", "meta", "=", "meta", ".", "loc", "[", "meta", ".", "index", ".", "intersection", "(", "data", ".", "index", ")", "]", "meta", ".", "index", ".", "names", "=", "META_IDX", "if", "apply_filter", ":", "data", "=", "data", ".", "loc", "[", "meta", ".", "index", "]", "data", ".", "index", ".", "names", "=", "META_IDX", "# join meta (optional), reset index to format as input arg", "data", "=", "data", ".", "join", "(", "meta", ")", "if", "join_meta", "else", "data", "data", "=", "data", ".", "reset_index", "(", ")", ".", "set_index", "(", "idx", "or", "'index'", ")", "if", "idx", "is", "None", ":", "data", ".", "index", ".", "name", "=", "None", "return", "data" ]
Filter by and join meta columns from an IamDataFrame to a pd.DataFrame Parameters ---------- data: pd.DataFrame instance DataFrame to which meta columns are to be joined, index or columns must include `['model', 'scenario']` df: IamDataFrame instance IamDataFrame from which meta columns are filtered and joined (optional) join_meta: bool, default False join selected columns from `df.meta` on `data` kwargs: meta columns to be filtered/joined, where `col=...` applies filters by the given arguments (using `utils.pattern_match()`) and `col=None` joins the column without filtering (setting col to `np.nan` if `(model, scenario) not in df.meta.index`)
[ "Filter", "by", "and", "join", "meta", "columns", "from", "an", "IamDataFrame", "to", "a", "pd", ".", "DataFrame" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1482-L1534
train
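A hedged sketch of the filter-and-join pattern; timeseries_df and the `category` meta column are hypothetical names.

```python
# keep only scenarios whose meta `category` matches, joining that column on
filtered = filter_by_meta(timeseries_df, df, join_meta=True,
                          category='Below 2C')
```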
IAMconsortium/pyam
pyam/core.py
compare
def compare(left, right, left_label='left', right_label='right', drop_close=True, **kwargs): """Compare the data in two IamDataFrames and return a pd.DataFrame Parameters ---------- left, right: IamDataFrames the IamDataFrames to be compared left_label, right_label: str, default `left`, `right` column names of the returned dataframe drop_close: bool, default True remove all data where `left` and `right` are close kwargs: passed to `np.isclose()` """ ret = pd.concat({right_label: right.data.set_index(right._LONG_IDX), left_label: left.data.set_index(left._LONG_IDX)}, axis=1) ret.columns = ret.columns.droplevel(1) if drop_close: ret = ret[~np.isclose(ret[left_label], ret[right_label], **kwargs)] return ret[[right_label, left_label]]
python
def compare(left, right, left_label='left', right_label='right', drop_close=True, **kwargs): """Compare the data in two IamDataFrames and return a pd.DataFrame Parameters ---------- left, right: IamDataFrames the IamDataFrames to be compared left_label, right_label: str, default `left`, `right` column names of the returned dataframe drop_close: bool, default True remove all data where `left` and `right` are close kwargs: passed to `np.isclose()` """ ret = pd.concat({right_label: right.data.set_index(right._LONG_IDX), left_label: left.data.set_index(left._LONG_IDX)}, axis=1) ret.columns = ret.columns.droplevel(1) if drop_close: ret = ret[~np.isclose(ret[left_label], ret[right_label], **kwargs)] return ret[[right_label, left_label]]
[ "def", "compare", "(", "left", ",", "right", ",", "left_label", "=", "'left'", ",", "right_label", "=", "'right'", ",", "drop_close", "=", "True", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "pd", ".", "concat", "(", "{", "right_label", ":", "right", ".", "data", ".", "set_index", "(", "right", ".", "_LONG_IDX", ")", ",", "left_label", ":", "left", ".", "data", ".", "set_index", "(", "left", ".", "_LONG_IDX", ")", "}", ",", "axis", "=", "1", ")", "ret", ".", "columns", "=", "ret", ".", "columns", ".", "droplevel", "(", "1", ")", "if", "drop_close", ":", "ret", "=", "ret", "[", "~", "np", ".", "isclose", "(", "ret", "[", "left_label", "]", ",", "ret", "[", "right_label", "]", ",", "*", "*", "kwargs", ")", "]", "return", "ret", "[", "[", "right_label", ",", "left_label", "]", "]" ]
Compare the data in two IamDataFrames and return a pd.DataFrame Parameters ---------- left, right: IamDataFrames the IamDataFrames to be compared left_label, right_label: str, default `left`, `right` column names of the returned dataframe drop_close: bool, default True remove all data where `left` and `right` are close kwargs: passed to `np.isclose()`
[ "Compare", "the", "data", "in", "two", "IamDataFrames", "and", "return", "a", "pd", ".", "DataFrame" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1537-L1556
train
IAMconsortium/pyam
pyam/core.py
concat
def concat(dfs): """Concatenate a series of `pyam.IamDataFrame`-like objects together""" if isstr(dfs) or not hasattr(dfs, '__iter__'): msg = 'Argument must be a non-string iterable (e.g., list or tuple)' raise TypeError(msg) _df = None for df in dfs: df = df if isinstance(df, IamDataFrame) else IamDataFrame(df) if _df is None: _df = copy.deepcopy(df) else: _df.append(df, inplace=True) return _df
python
def concat(dfs): """Concatenate a series of `pyam.IamDataFrame`-like objects together""" if isstr(dfs) or not hasattr(dfs, '__iter__'): msg = 'Argument must be a non-string iterable (e.g., list or tuple)' raise TypeError(msg) _df = None for df in dfs: df = df if isinstance(df, IamDataFrame) else IamDataFrame(df) if _df is None: _df = copy.deepcopy(df) else: _df.append(df, inplace=True) return _df
[ "def", "concat", "(", "dfs", ")", ":", "if", "isstr", "(", "dfs", ")", "or", "not", "hasattr", "(", "dfs", ",", "'__iter__'", ")", ":", "msg", "=", "'Argument must be a non-string iterable (e.g., list or tuple)'", "raise", "TypeError", "(", "msg", ")", "_df", "=", "None", "for", "df", "in", "dfs", ":", "df", "=", "df", "if", "isinstance", "(", "df", ",", "IamDataFrame", ")", "else", "IamDataFrame", "(", "df", ")", "if", "_df", "is", "None", ":", "_df", "=", "copy", ".", "deepcopy", "(", "df", ")", "else", ":", "_df", ".", "append", "(", "df", ",", "inplace", "=", "True", ")", "return", "_df" ]
Concatenate a series of `pyam.IamDataFrame`-like objects together
[ "Concatenate", "a", "series", "of", "pyam", ".", "IamDataFrame", "-", "like", "objects", "together" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1559-L1572
train
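A short sketch; df_a and df_b stand for any IamDataFrame-castable objects, such as plain IAMC-format pd.DataFrames.

```python
combined = concat([df_a, df_b])  # each element is cast to IamDataFrame
concat(df_a)  # passing a single frame raises TypeError (non-string iterable)
```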
IAMconsortium/pyam
pyam/core.py
IamDataFrame.variables
def variables(self, include_units=False): """Get a list of variables Parameters ---------- include_units: boolean, default False include the units """ if include_units: return self.data[['variable', 'unit']].drop_duplicates()\ .reset_index(drop=True).sort_values('variable') else: return pd.Series(self.data.variable.unique(), name='variable')
python
def variables(self, include_units=False): """Get a list of variables Parameters ---------- include_units: boolean, default False include the units """ if include_units: return self.data[['variable', 'unit']].drop_duplicates()\ .reset_index(drop=True).sort_values('variable') else: return pd.Series(self.data.variable.unique(), name='variable')
[ "def", "variables", "(", "self", ",", "include_units", "=", "False", ")", ":", "if", "include_units", ":", "return", "self", ".", "data", "[", "[", "'variable'", ",", "'unit'", "]", "]", ".", "drop_duplicates", "(", ")", ".", "reset_index", "(", "drop", "=", "True", ")", ".", "sort_values", "(", "'variable'", ")", "else", ":", "return", "pd", ".", "Series", "(", "self", ".", "data", ".", "variable", ".", "unique", "(", ")", ",", "name", "=", "'variable'", ")" ]
Get a list of variables Parameters ---------- include_units: boolean, default False include the units
[ "Get", "a", "list", "of", "variables" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L161-L173
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.append
def append(self, other, ignore_meta_conflict=False, inplace=False, **kwargs): """Append any castable object to this IamDataFrame. Columns in `other.meta` that are not in `self.meta` are always merged, duplicate region-variable-unit-year rows raise a ValueError. Parameters ---------- other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file An IamDataFrame, TimeSeries or Scenario (requires `ixmp`), pandas.DataFrame or data file with IAMC-format data columns ignore_meta_conflict : bool, default False If False and `other` is an IamDataFrame, raise an error if any meta columns present in `self` and `other` are not identical. inplace : bool, default False If True, do operation inplace and return None kwargs are passed through to `IamDataFrame(other, **kwargs)` """ if not isinstance(other, IamDataFrame): other = IamDataFrame(other, **kwargs) ignore_meta_conflict = True if self.time_col != other.time_col: raise ValueError('incompatible time format (years vs. datetime)!') ret = copy.deepcopy(self) if not inplace else self diff = other.meta.index.difference(ret.meta.index) intersect = other.meta.index.intersection(ret.meta.index) # merge other.meta columns not in self.meta for existing scenarios if not intersect.empty: # if not ignored, check that overlapping meta dataframes are equal if not ignore_meta_conflict: cols = [i for i in other.meta.columns if i in ret.meta.columns] if not ret.meta.loc[intersect, cols].equals( other.meta.loc[intersect, cols]): conflict_idx = ( pd.concat([ret.meta.loc[intersect, cols], other.meta.loc[intersect, cols]] ).drop_duplicates() .index.drop_duplicates() ) msg = 'conflict in `meta` for scenarios {}'.format( [i for i in pd.DataFrame(index=conflict_idx).index]) raise ValueError(msg) cols = [i for i in other.meta.columns if i not in ret.meta.columns] _meta = other.meta.loc[intersect, cols] ret.meta = ret.meta.merge(_meta, how='outer', left_index=True, right_index=True) # join other.meta for new scenarios if not diff.empty: # sorting not supported by `pd.append()` prior to version 23 sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \ else dict(sort=False) ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg) # append other.data (verify integrity for no duplicates) _data = ret.data.set_index(ret._LONG_IDX).append( other.data.set_index(other._LONG_IDX), verify_integrity=True) # merge extra columns in `data` and set `LONG_IDX` ret.extra_cols += [i for i in other.extra_cols if i not in ret.extra_cols] ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols ret.data = sort_data(_data.reset_index(), ret._LONG_IDX) if not inplace: return ret
python
def append(self, other, ignore_meta_conflict=False, inplace=False, **kwargs): """Append any castable object to this IamDataFrame. Columns in `other.meta` that are not in `self.meta` are always merged, duplicate region-variable-unit-year rows raise a ValueError. Parameters ---------- other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file An IamDataFrame, TimeSeries or Scenario (requires `ixmp`), pandas.DataFrame or data file with IAMC-format data columns ignore_meta_conflict : bool, default False If False and `other` is an IamDataFrame, raise an error if any meta columns present in `self` and `other` are not identical. inplace : bool, default False If True, do operation inplace and return None kwargs are passed through to `IamDataFrame(other, **kwargs)` """ if not isinstance(other, IamDataFrame): other = IamDataFrame(other, **kwargs) ignore_meta_conflict = True if self.time_col != other.time_col: raise ValueError('incompatible time format (years vs. datetime)!') ret = copy.deepcopy(self) if not inplace else self diff = other.meta.index.difference(ret.meta.index) intersect = other.meta.index.intersection(ret.meta.index) # merge other.meta columns not in self.meta for existing scenarios if not intersect.empty: # if not ignored, check that overlapping meta dataframes are equal if not ignore_meta_conflict: cols = [i for i in other.meta.columns if i in ret.meta.columns] if not ret.meta.loc[intersect, cols].equals( other.meta.loc[intersect, cols]): conflict_idx = ( pd.concat([ret.meta.loc[intersect, cols], other.meta.loc[intersect, cols]] ).drop_duplicates() .index.drop_duplicates() ) msg = 'conflict in `meta` for scenarios {}'.format( [i for i in pd.DataFrame(index=conflict_idx).index]) raise ValueError(msg) cols = [i for i in other.meta.columns if i not in ret.meta.columns] _meta = other.meta.loc[intersect, cols] ret.meta = ret.meta.merge(_meta, how='outer', left_index=True, right_index=True) # join other.meta for new scenarios if not diff.empty: # sorting not supported by `pd.append()` prior to version 23 sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \ else dict(sort=False) ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg) # append other.data (verify integrity for no duplicates) _data = ret.data.set_index(ret._LONG_IDX).append( other.data.set_index(other._LONG_IDX), verify_integrity=True) # merge extra columns in `data` and set `LONG_IDX` ret.extra_cols += [i for i in other.extra_cols if i not in ret.extra_cols] ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols ret.data = sort_data(_data.reset_index(), ret._LONG_IDX) if not inplace: return ret
[ "def", "append", "(", "self", ",", "other", ",", "ignore_meta_conflict", "=", "False", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "other", ",", "IamDataFrame", ")", ":", "other", "=", "IamDataFrame", "(", "other", ",", "*", "*", "kwargs", ")", "ignore_meta_conflict", "=", "True", "if", "self", ".", "time_col", "!=", "other", ".", "time_col", ":", "raise", "ValueError", "(", "'incompatible time format (years vs. datetime)!'", ")", "ret", "=", "copy", ".", "deepcopy", "(", "self", ")", "if", "not", "inplace", "else", "self", "diff", "=", "other", ".", "meta", ".", "index", ".", "difference", "(", "ret", ".", "meta", ".", "index", ")", "intersect", "=", "other", ".", "meta", ".", "index", ".", "intersection", "(", "ret", ".", "meta", ".", "index", ")", "# merge other.meta columns not in self.meta for existing scenarios", "if", "not", "intersect", ".", "empty", ":", "# if not ignored, check that overlapping meta dataframes are equal", "if", "not", "ignore_meta_conflict", ":", "cols", "=", "[", "i", "for", "i", "in", "other", ".", "meta", ".", "columns", "if", "i", "in", "ret", ".", "meta", ".", "columns", "]", "if", "not", "ret", ".", "meta", ".", "loc", "[", "intersect", ",", "cols", "]", ".", "equals", "(", "other", ".", "meta", ".", "loc", "[", "intersect", ",", "cols", "]", ")", ":", "conflict_idx", "=", "(", "pd", ".", "concat", "(", "[", "ret", ".", "meta", ".", "loc", "[", "intersect", ",", "cols", "]", ",", "other", ".", "meta", ".", "loc", "[", "intersect", ",", "cols", "]", "]", ")", ".", "drop_duplicates", "(", ")", ".", "index", ".", "drop_duplicates", "(", ")", ")", "msg", "=", "'conflict in `meta` for scenarios {}'", ".", "format", "(", "[", "i", "for", "i", "in", "pd", ".", "DataFrame", "(", "index", "=", "conflict_idx", ")", ".", "index", "]", ")", "raise", "ValueError", "(", "msg", ")", "cols", "=", "[", "i", "for", "i", "in", "other", ".", "meta", ".", "columns", "if", "i", "not", "in", "ret", ".", "meta", ".", "columns", "]", "_meta", "=", "other", ".", "meta", ".", "loc", "[", "intersect", ",", "cols", "]", "ret", ".", "meta", "=", "ret", ".", "meta", ".", "merge", "(", "_meta", ",", "how", "=", "'outer'", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "# join other.meta for new scenarios", "if", "not", "diff", ".", "empty", ":", "# sorting not supported by `pd.append()` prior to version 23", "sort_kwarg", "=", "{", "}", "if", "int", "(", "pd", ".", "__version__", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "<", "23", "else", "dict", "(", "sort", "=", "False", ")", "ret", ".", "meta", "=", "ret", ".", "meta", ".", "append", "(", "other", ".", "meta", ".", "loc", "[", "diff", ",", ":", "]", ",", "*", "*", "sort_kwarg", ")", "# append other.data (verify integrity for no duplicates)", "_data", "=", "ret", ".", "data", ".", "set_index", "(", "ret", ".", "_LONG_IDX", ")", ".", "append", "(", "other", ".", "data", ".", "set_index", "(", "other", ".", "_LONG_IDX", ")", ",", "verify_integrity", "=", "True", ")", "# merge extra columns in `data` and set `LONG_IDX`", "ret", ".", "extra_cols", "+=", "[", "i", "for", "i", "in", "other", ".", "extra_cols", "if", "i", "not", "in", "ret", ".", "extra_cols", "]", "ret", ".", "_LONG_IDX", "=", "IAMC_IDX", "+", "[", "ret", ".", "time_col", "]", "+", "ret", ".", "extra_cols", "ret", ".", "data", "=", "sort_data", "(", "_data", ".", "reset_index", "(", ")", ",", "ret", ".", "_LONG_IDX", ")", "if", "not", "inplace", ":", "return", "ret" ]
Append any castable object to this IamDataFrame. Columns in `other.meta` that are not in `self.meta` are always merged, duplicate region-variable-unit-year rows raise a ValueError. Parameters ---------- other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file An IamDataFrame, TimeSeries or Scenario (requires `ixmp`), pandas.DataFrame or data file with IAMC-format data columns ignore_meta_conflict : bool, default False If False and `other` is an IamDataFrame, raise an error if any meta columns present in `self` and `other` are not identical. inplace : bool, default False If True, do operation inplace and return None kwargs are passed through to `IamDataFrame(other, **kwargs)`
[ "Append", "any", "castable", "object", "to", "this", "IamDataFrame", ".", "Columns", "in", "other", ".", "meta", "that", "are", "not", "in", "self", ".", "meta", "are", "always", "merged", "duplicate", "region", "-", "variable", "-", "unit", "-", "year", "rows", "raise", "a", "ValueError", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L175-L246
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.pivot_table
def pivot_table(self, index, columns, values='value', aggfunc='count', fill_value=None, style=None): """Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap' """ index = [index] if isstr(index) else index columns = [columns] if isstr(columns) else columns df = self.data # allow 'aggfunc' to be passed as string for easier user interface if isstr(aggfunc): if aggfunc == 'count': df = self.data.groupby(index + columns, as_index=False).count() fill_value = 0 elif aggfunc == 'mean': df = self.data.groupby(index + columns, as_index=False).mean()\ .round(2) aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" elif aggfunc == 'sum': aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" df = df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value) return df
python
def pivot_table(self, index, columns, values='value', aggfunc='count', fill_value=None, style=None): """Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap' """ index = [index] if isstr(index) else index columns = [columns] if isstr(columns) else columns df = self.data # allow 'aggfunc' to be passed as string for easier user interface if isstr(aggfunc): if aggfunc == 'count': df = self.data.groupby(index + columns, as_index=False).count() fill_value = 0 elif aggfunc == 'mean': df = self.data.groupby(index + columns, as_index=False).mean()\ .round(2) aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" elif aggfunc == 'sum': aggfunc = np.sum fill_value = 0 if style == 'heatmap' else "" df = df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value) return df
[ "def", "pivot_table", "(", "self", ",", "index", ",", "columns", ",", "values", "=", "'value'", ",", "aggfunc", "=", "'count'", ",", "fill_value", "=", "None", ",", "style", "=", "None", ")", ":", "index", "=", "[", "index", "]", "if", "isstr", "(", "index", ")", "else", "index", "columns", "=", "[", "columns", "]", "if", "isstr", "(", "columns", ")", "else", "columns", "df", "=", "self", ".", "data", "# allow 'aggfunc' to be passed as string for easier user interface", "if", "isstr", "(", "aggfunc", ")", ":", "if", "aggfunc", "==", "'count'", ":", "df", "=", "self", ".", "data", ".", "groupby", "(", "index", "+", "columns", ",", "as_index", "=", "False", ")", ".", "count", "(", ")", "fill_value", "=", "0", "elif", "aggfunc", "==", "'mean'", ":", "df", "=", "self", ".", "data", ".", "groupby", "(", "index", "+", "columns", ",", "as_index", "=", "False", ")", ".", "mean", "(", ")", ".", "round", "(", "2", ")", "aggfunc", "=", "np", ".", "sum", "fill_value", "=", "0", "if", "style", "==", "'heatmap'", "else", "\"\"", "elif", "aggfunc", "==", "'sum'", ":", "aggfunc", "=", "np", ".", "sum", "fill_value", "=", "0", "if", "style", "==", "'heatmap'", "else", "\"\"", "df", "=", "df", ".", "pivot_table", "(", "values", "=", "values", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "aggfunc", "=", "aggfunc", ",", "fill_value", "=", "fill_value", ")", "return", "df" ]
Returns a pivot table Parameters ---------- index: str or list of strings rows for Pivot table columns: str or list of strings columns for Pivot table values: str, default 'value' dataframe column to aggregate or count aggfunc: str or function, default 'count' function used for aggregation, accepts 'count', 'mean', and 'sum' fill_value: scalar, default None value to replace missing values with style: str, default None output style for pivot table formatting accepts 'highlight_not_max', 'heatmap'
[ "Returns", "a", "pivot", "table" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L248-L290
train
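A sketch of the two string aggfunc modes; `df` is an IamDataFrame with IAMC-format data.

```python
counts = df.pivot_table(index='region', columns='scenario')  # row counts
totals = df.pivot_table(index='region', columns='scenario',
                        values='value', aggfunc='sum', fill_value=0)
```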
IAMconsortium/pyam
pyam/core.py
IamDataFrame.as_pandas
def as_pandas(self, with_metadata=False): """Return this as a pd.DataFrame Parameters ---------- with_metadata : bool, default False or dict if True, join data with all meta columns; if a dict, discover meaningful meta columns from values (in key-value) """ if with_metadata: cols = self._discover_meta_cols(**with_metadata) \ if isinstance(with_metadata, dict) else self.meta.columns return ( self.data .set_index(META_IDX) .join(self.meta[cols]) .reset_index() ) else: return self.data.copy()
python
def as_pandas(self, with_metadata=False):
    """Return this as a pd.DataFrame

    Parameters
    ----------
    with_metadata : bool or dict, default False
        if True, join data with all meta columns; if a dict, discover
        meaningful meta columns from values (in key-value)
    """
    if with_metadata:
        cols = self._discover_meta_cols(**with_metadata) \
            if isinstance(with_metadata, dict) else self.meta.columns
        return (
            self.data
            .set_index(META_IDX)
            .join(self.meta[cols])
            .reset_index()
        )
    else:
        return self.data.copy()
[ "def", "as_pandas", "(", "self", ",", "with_metadata", "=", "False", ")", ":", "if", "with_metadata", ":", "cols", "=", "self", ".", "_discover_meta_cols", "(", "*", "*", "with_metadata", ")", "if", "isinstance", "(", "with_metadata", ",", "dict", ")", "else", "self", ".", "meta", ".", "columns", "return", "(", "self", ".", "data", ".", "set_index", "(", "META_IDX", ")", ".", "join", "(", "self", ".", "meta", "[", "cols", "]", ")", ".", "reset_index", "(", ")", ")", "else", ":", "return", "self", ".", "data", ".", "copy", "(", ")" ]
Return this as a pd.DataFrame

Parameters
----------
with_metadata : bool or dict, default False
    if True, join data with all meta columns; if a dict, discover
    meaningful meta columns from values (in key-value)
[ "Return", "this", "as", "a", "pd", ".", "DataFrame" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L312-L331
train
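A short sketch of `as_pandas` under the same assumptions (pyam and pandas installed, invented fixture); `exclude` is the one meta column every IamDataFrame carries by default:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

data = df.as_pandas()                    # long-format data only
both = df.as_pandas(with_metadata=True)  # joins all meta columns
print(both.columns)                      # includes 'exclude' from meta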
IAMconsortium/pyam
pyam/core.py
IamDataFrame._new_meta_column
def _new_meta_column(self, name): """Add a column to meta if it doesn't exist, set to value `np.nan`""" if name is None: raise ValueError('cannot add a meta column `{}`'.format(name)) if name not in self.meta: self.meta[name] = np.nan
python
def _new_meta_column(self, name): """Add a column to meta if it doesn't exist, set to value `np.nan`""" if name is None: raise ValueError('cannot add a meta column `{}`'.format(name)) if name not in self.meta: self.meta[name] = np.nan
[ "def", "_new_meta_column", "(", "self", ",", "name", ")", ":", "if", "name", "is", "None", ":", "raise", "ValueError", "(", "'cannot add a meta column `{}`'", ".", "format", "(", "name", ")", ")", "if", "name", "not", "in", "self", ".", "meta", ":", "self", ".", "meta", "[", "name", "]", "=", "np", ".", "nan" ]
Add a column to meta if it doesn't exist, set to value `np.nan`
[ "Add", "a", "column", "to", "meta", "if", "it", "doesn", "t", "exist", "set", "to", "value", "np", ".", "nan" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L474-L479
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.convert_unit
def convert_unit(self, conversion_mapping, inplace=False): """Converts units based on provided unit conversion factors Parameters ---------- conversion_mapping: dict for each unit for which a conversion should be carried out, provide current unit and target unit and conversion factor {<current unit>: [<target unit>, <conversion factor>]} inplace: bool, default False if True, do operation inplace and return None """ ret = copy.deepcopy(self) if not inplace else self for current_unit, (new_unit, factor) in conversion_mapping.items(): factor = pd.to_numeric(factor) where = ret.data['unit'] == current_unit ret.data.loc[where, 'value'] *= factor ret.data.loc[where, 'unit'] = new_unit if not inplace: return ret
python
def convert_unit(self, conversion_mapping, inplace=False): """Converts units based on provided unit conversion factors Parameters ---------- conversion_mapping: dict for each unit for which a conversion should be carried out, provide current unit and target unit and conversion factor {<current unit>: [<target unit>, <conversion factor>]} inplace: bool, default False if True, do operation inplace and return None """ ret = copy.deepcopy(self) if not inplace else self for current_unit, (new_unit, factor) in conversion_mapping.items(): factor = pd.to_numeric(factor) where = ret.data['unit'] == current_unit ret.data.loc[where, 'value'] *= factor ret.data.loc[where, 'unit'] = new_unit if not inplace: return ret
[ "def", "convert_unit", "(", "self", ",", "conversion_mapping", ",", "inplace", "=", "False", ")", ":", "ret", "=", "copy", ".", "deepcopy", "(", "self", ")", "if", "not", "inplace", "else", "self", "for", "current_unit", ",", "(", "new_unit", ",", "factor", ")", "in", "conversion_mapping", ".", "items", "(", ")", ":", "factor", "=", "pd", ".", "to_numeric", "(", "factor", ")", "where", "=", "ret", ".", "data", "[", "'unit'", "]", "==", "current_unit", "ret", ".", "data", ".", "loc", "[", "where", ",", "'value'", "]", "*=", "factor", "ret", ".", "data", ".", "loc", "[", "where", ",", "'unit'", "]", "=", "new_unit", "if", "not", "inplace", ":", "return", "ret" ]
Converts units based on provided unit conversion factors Parameters ---------- conversion_mapping: dict for each unit for which a conversion should be carried out, provide current unit and target unit and conversion factor {<current unit>: [<target unit>, <conversion factor>]} inplace: bool, default False if True, do operation inplace and return None
[ "Converts", "units", "based", "on", "provided", "unit", "conversion", "factors" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L635-L654
train
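The mapping format is `{<current unit>: [<target unit>, <conversion factor>]}`, as the docstring states. A sketch with an invented fixture; the EJ-to-TWh factor shown is approximate:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

# 1 EJ is roughly 277.778 TWh; modifies `df` in place and returns None
df.convert_unit({'EJ/y': ['TWh/y', 277.778]}, inplace=True)
print(df.data['unit'].unique())  # -> ['TWh/y']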
IAMconsortium/pyam
pyam/core.py
IamDataFrame.normalize
def normalize(self, inplace=False, **kwargs): """Normalize data to a given value. Currently only supports normalizing to a specific time. Parameters ---------- inplace: bool, default False if True, do operation inplace and return None kwargs: the values on which to normalize (e.g., `year=2005`) """ if len(kwargs) > 1 or self.time_col not in kwargs: raise ValueError('Only time(year)-based normalization supported') ret = copy.deepcopy(self) if not inplace else self df = ret.data # change all below if supporting more in the future cols = self.time_col value = kwargs[self.time_col] x = df.set_index(IAMC_IDX) x['value'] /= x[x[cols] == value]['value'] ret.data = x.reset_index() if not inplace: return ret
python
def normalize(self, inplace=False, **kwargs): """Normalize data to a given value. Currently only supports normalizing to a specific time. Parameters ---------- inplace: bool, default False if True, do operation inplace and return None kwargs: the values on which to normalize (e.g., `year=2005`) """ if len(kwargs) > 1 or self.time_col not in kwargs: raise ValueError('Only time(year)-based normalization supported') ret = copy.deepcopy(self) if not inplace else self df = ret.data # change all below if supporting more in the future cols = self.time_col value = kwargs[self.time_col] x = df.set_index(IAMC_IDX) x['value'] /= x[x[cols] == value]['value'] ret.data = x.reset_index() if not inplace: return ret
[ "def", "normalize", "(", "self", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "kwargs", ")", ">", "1", "or", "self", ".", "time_col", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "'Only time(year)-based normalization supported'", ")", "ret", "=", "copy", ".", "deepcopy", "(", "self", ")", "if", "not", "inplace", "else", "self", "df", "=", "ret", ".", "data", "# change all below if supporting more in the future", "cols", "=", "self", ".", "time_col", "value", "=", "kwargs", "[", "self", ".", "time_col", "]", "x", "=", "df", ".", "set_index", "(", "IAMC_IDX", ")", "x", "[", "'value'", "]", "/=", "x", "[", "x", "[", "cols", "]", "==", "value", "]", "[", "'value'", "]", "ret", ".", "data", "=", "x", ".", "reset_index", "(", ")", "if", "not", "inplace", ":", "return", "ret" ]
Normalize data to a given value. Currently only supports normalizing to a specific time. Parameters ---------- inplace: bool, default False if True, do operation inplace and return None kwargs: the values on which to normalize (e.g., `year=2005`)
[ "Normalize", "data", "to", "a", "given", "value", ".", "Currently", "only", "supports", "normalizing", "to", "a", "specific", "time", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L656-L677
train
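`normalize` divides every timeseries by its own value in the given year, so the reference year becomes 1.0. A sketch with an invented, year-based fixture (per the ValueError above, only time/year-based normalization is supported):

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

norm = df.normalize(year=2005)  # returns a new IamDataFrame
print(norm.data)                # 2005 -> 1.0, 2010 -> 6.0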
IAMconsortium/pyam
pyam/core.py
IamDataFrame.aggregate
def aggregate(self, variable, components=None, append=False): """Compute the aggregate of timeseries components or sub-categories Parameters ---------- variable: str variable for which the aggregate should be computed components: list of str, default None list of variables, defaults to all sub-categories of `variable` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries """ # default components to all variables one level below `variable` components = components or self._variable_components(variable) if not len(components): msg = 'cannot aggregate variable `{}` because it has no components' logger().info(msg.format(variable)) return rows = self._apply_filters(variable=components) _data = _aggregate(self.data[rows], 'variable') if append is True: self.append(_data, variable=variable, inplace=True) else: return _data
python
def aggregate(self, variable, components=None, append=False): """Compute the aggregate of timeseries components or sub-categories Parameters ---------- variable: str variable for which the aggregate should be computed components: list of str, default None list of variables, defaults to all sub-categories of `variable` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries """ # default components to all variables one level below `variable` components = components or self._variable_components(variable) if not len(components): msg = 'cannot aggregate variable `{}` because it has no components' logger().info(msg.format(variable)) return rows = self._apply_filters(variable=components) _data = _aggregate(self.data[rows], 'variable') if append is True: self.append(_data, variable=variable, inplace=True) else: return _data
[ "def", "aggregate", "(", "self", ",", "variable", ",", "components", "=", "None", ",", "append", "=", "False", ")", ":", "# default components to all variables one level below `variable`", "components", "=", "components", "or", "self", ".", "_variable_components", "(", "variable", ")", "if", "not", "len", "(", "components", ")", ":", "msg", "=", "'cannot aggregate variable `{}` because it has no components'", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "variable", ")", ")", "return", "rows", "=", "self", ".", "_apply_filters", "(", "variable", "=", "components", ")", "_data", "=", "_aggregate", "(", "self", ".", "data", "[", "rows", "]", ",", "'variable'", ")", "if", "append", "is", "True", ":", "self", ".", "append", "(", "_data", ",", "variable", "=", "variable", ",", "inplace", "=", "True", ")", "else", ":", "return", "_data" ]
Compute the aggregate of timeseries components or sub-categories Parameters ---------- variable: str variable for which the aggregate should be computed components: list of str, default None list of variables, defaults to all sub-categories of `variable` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries
[ "Compute", "the", "aggregate", "of", "timeseries", "components", "or", "sub", "-", "categories" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L679-L707
train
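A sketch of `aggregate` with an invented fixture that contains only sub-categories; the total is computed from all variables one level below `variable` and, with `append=True`, written back into `data`:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy|Coal', 'EJ/y', 0.5, 3.0],
     ['model_a', 'scen_a', 'World', 'Primary Energy|Wind', 'EJ/y', 0.5, 3.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

df.aggregate('Primary Energy', append=True)
print(df.data[df.data['variable'] == 'Primary Energy'])  # 1.0 and 6.0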
IAMconsortium/pyam
pyam/core.py
IamDataFrame.check_aggregate
def check_aggregate(self, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs): """Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()` """ # compute aggregate from components, return None if no components df_components = self.aggregate(variable, components) if df_components is None: return # filter and groupby data, use `pd.Series.align` for matching index rows = self._apply_filters(variable=variable) df_variable, df_components = ( _aggregate(self.data[rows], 'variable').align(df_components) ) # use `np.isclose` for checking match diff = df_variable[~np.isclose(df_variable, multiplier * df_components, **kwargs)] if len(diff): msg = '`{}` - {} of {} rows are not aggregates of components' logger().info(msg.format(variable, len(diff), len(df_variable))) if exclude_on_fail: self._exclude_on_fail(diff.index.droplevel([2, 3, 4])) return IamDataFrame(diff, variable=variable).timeseries()
python
def check_aggregate(self, variable, components=None, exclude_on_fail=False, multiplier=1, **kwargs): """Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()` """ # compute aggregate from components, return None if no components df_components = self.aggregate(variable, components) if df_components is None: return # filter and groupby data, use `pd.Series.align` for matching index rows = self._apply_filters(variable=variable) df_variable, df_components = ( _aggregate(self.data[rows], 'variable').align(df_components) ) # use `np.isclose` for checking match diff = df_variable[~np.isclose(df_variable, multiplier * df_components, **kwargs)] if len(diff): msg = '`{}` - {} of {} rows are not aggregates of components' logger().info(msg.format(variable, len(diff), len(df_variable))) if exclude_on_fail: self._exclude_on_fail(diff.index.droplevel([2, 3, 4])) return IamDataFrame(diff, variable=variable).timeseries()
[ "def", "check_aggregate", "(", "self", ",", "variable", ",", "components", "=", "None", ",", "exclude_on_fail", "=", "False", ",", "multiplier", "=", "1", ",", "*", "*", "kwargs", ")", ":", "# compute aggregate from components, return None if no components", "df_components", "=", "self", ".", "aggregate", "(", "variable", ",", "components", ")", "if", "df_components", "is", "None", ":", "return", "# filter and groupby data, use `pd.Series.align` for matching index", "rows", "=", "self", ".", "_apply_filters", "(", "variable", "=", "variable", ")", "df_variable", ",", "df_components", "=", "(", "_aggregate", "(", "self", ".", "data", "[", "rows", "]", ",", "'variable'", ")", ".", "align", "(", "df_components", ")", ")", "# use `np.isclose` for checking match", "diff", "=", "df_variable", "[", "~", "np", ".", "isclose", "(", "df_variable", ",", "multiplier", "*", "df_components", ",", "*", "*", "kwargs", ")", "]", "if", "len", "(", "diff", ")", ":", "msg", "=", "'`{}` - {} of {} rows are not aggregates of components'", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "variable", ",", "len", "(", "diff", ")", ",", "len", "(", "df_variable", ")", ")", ")", "if", "exclude_on_fail", ":", "self", ".", "_exclude_on_fail", "(", "diff", ".", "index", ".", "droplevel", "(", "[", "2", ",", "3", ",", "4", "]", ")", ")", "return", "IamDataFrame", "(", "diff", ",", "variable", "=", "variable", ")", ".", "timeseries", "(", ")" ]
Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()`
[ "Check", "whether", "a", "timeseries", "matches", "the", "aggregation", "of", "its", "components" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L709-L747
train
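`check_aggregate` returns None when the reported total matches the sum of its components, and a timeseries of the failing rows otherwise; extra kwargs such as `rtol` or `atol` are forwarded to `np.isclose`. A sketch with a fixture that is consistent by construction:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0],
     ['model_a', 'scen_a', 'World', 'Primary Energy|Coal', 'EJ/y', 0.5, 3.0],
     ['model_a', 'scen_a', 'World', 'Primary Energy|Wind', 'EJ/y', 0.5, 3.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

diff = df.check_aggregate('Primary Energy', rtol=1e-5)
print(diff)  # None, because 1.0 == 0.5 + 0.5 and 6.0 == 3.0 + 3.0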
IAMconsortium/pyam
pyam/core.py
IamDataFrame.aggregate_region
def aggregate_region(self, variable, region='World', subregions=None, components=None, append=False): """Compute the aggregate of timeseries over a number of regions including variable components only defined at the `region` level Parameters ---------- variable: str variable for which the aggregate should be computed region: str, default 'World' dimension subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries """ # default subregions to all regions other than `region` if subregions is None: rows = self._apply_filters(variable=variable) subregions = set(self.data[rows].region) - set([region]) if not len(subregions): msg = 'cannot aggregate variable `{}` to `{}` because it does not'\ ' exist in any subregion' logger().info(msg.format(variable, region)) return # compute aggregate over all subregions subregion_df = self.filter(region=subregions) cols = ['region', 'variable'] _data = _aggregate(subregion_df.filter(variable=variable).data, cols) # add components at the `region` level, defaults to all variables one # level below `variable` that are only present in `region` region_df = self.filter(region=region) components = components or ( set(region_df._variable_components(variable)).difference( subregion_df._variable_components(variable))) if len(components): rows = region_df._apply_filters(variable=components) _data = _data.add(_aggregate(region_df.data[rows], cols), fill_value=0) if append is True: self.append(_data, region=region, variable=variable, inplace=True) else: return _data
python
def aggregate_region(self, variable, region='World', subregions=None, components=None, append=False): """Compute the aggregate of timeseries over a number of regions including variable components only defined at the `region` level Parameters ---------- variable: str variable for which the aggregate should be computed region: str, default 'World' dimension subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries """ # default subregions to all regions other than `region` if subregions is None: rows = self._apply_filters(variable=variable) subregions = set(self.data[rows].region) - set([region]) if not len(subregions): msg = 'cannot aggregate variable `{}` to `{}` because it does not'\ ' exist in any subregion' logger().info(msg.format(variable, region)) return # compute aggregate over all subregions subregion_df = self.filter(region=subregions) cols = ['region', 'variable'] _data = _aggregate(subregion_df.filter(variable=variable).data, cols) # add components at the `region` level, defaults to all variables one # level below `variable` that are only present in `region` region_df = self.filter(region=region) components = components or ( set(region_df._variable_components(variable)).difference( subregion_df._variable_components(variable))) if len(components): rows = region_df._apply_filters(variable=components) _data = _data.add(_aggregate(region_df.data[rows], cols), fill_value=0) if append is True: self.append(_data, region=region, variable=variable, inplace=True) else: return _data
[ "def", "aggregate_region", "(", "self", ",", "variable", ",", "region", "=", "'World'", ",", "subregions", "=", "None", ",", "components", "=", "None", ",", "append", "=", "False", ")", ":", "# default subregions to all regions other than `region`", "if", "subregions", "is", "None", ":", "rows", "=", "self", ".", "_apply_filters", "(", "variable", "=", "variable", ")", "subregions", "=", "set", "(", "self", ".", "data", "[", "rows", "]", ".", "region", ")", "-", "set", "(", "[", "region", "]", ")", "if", "not", "len", "(", "subregions", ")", ":", "msg", "=", "'cannot aggregate variable `{}` to `{}` because it does not'", "' exist in any subregion'", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "variable", ",", "region", ")", ")", "return", "# compute aggregate over all subregions", "subregion_df", "=", "self", ".", "filter", "(", "region", "=", "subregions", ")", "cols", "=", "[", "'region'", ",", "'variable'", "]", "_data", "=", "_aggregate", "(", "subregion_df", ".", "filter", "(", "variable", "=", "variable", ")", ".", "data", ",", "cols", ")", "# add components at the `region` level, defaults to all variables one", "# level below `variable` that are only present in `region`", "region_df", "=", "self", ".", "filter", "(", "region", "=", "region", ")", "components", "=", "components", "or", "(", "set", "(", "region_df", ".", "_variable_components", "(", "variable", ")", ")", ".", "difference", "(", "subregion_df", ".", "_variable_components", "(", "variable", ")", ")", ")", "if", "len", "(", "components", ")", ":", "rows", "=", "region_df", ".", "_apply_filters", "(", "variable", "=", "components", ")", "_data", "=", "_data", ".", "add", "(", "_aggregate", "(", "region_df", ".", "data", "[", "rows", "]", ",", "cols", ")", ",", "fill_value", "=", "0", ")", "if", "append", "is", "True", ":", "self", ".", "append", "(", "_data", ",", "region", "=", "region", ",", "variable", "=", "variable", ",", "inplace", "=", "True", ")", "else", ":", "return", "_data" ]
Compute the aggregate of timeseries over a number of regions including variable components only defined at the `region` level Parameters ---------- variable: str variable for which the aggregate should be computed region: str, default 'World' dimension subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` append: bool, default False append the aggregate timeseries to `data` and return None, else return aggregate timeseries
[ "Compute", "the", "aggregate", "of", "timeseries", "over", "a", "number", "of", "regions", "including", "variable", "components", "only", "defined", "at", "the", "region", "level" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L749-L801
train
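A sketch of `aggregate_region` with an invented two-region fixture; with the defaults, all regions other than 'World' are summed and, with `append=True`, written back as the 'World' total:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'reg_a', 'Primary Energy', 'EJ/y', 0.75, 4.0],
     ['model_a', 'scen_a', 'reg_b', 'Primary Energy', 'EJ/y', 0.25, 2.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

df.aggregate_region('Primary Energy', region='World', append=True)
print(df.data[df.data['region'] == 'World'])  # 1.0 and 6.0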
IAMconsortium/pyam
pyam/core.py
IamDataFrame.check_aggregate_region
def check_aggregate_region(self, variable, region='World', subregions=None, components=None, exclude_on_fail=False, **kwargs): """Check whether the region timeseries data match the aggregation of components Parameters ---------- variable: str variable to be checked for matching aggregation of subregions region: str, default 'World' region to be checked for matching aggregation of subregions subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str, default None list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` kwargs: passed to `np.isclose()` """ # compute aggregate from subregions, return None if no subregions df_subregions = self.aggregate_region(variable, region, subregions, components) if df_subregions is None: return # filter and groupby data, use `pd.Series.align` for matching index rows = self._apply_filters(region=region, variable=variable) df_region, df_subregions = ( _aggregate(self.data[rows], ['region', 'variable']) .align(df_subregions) ) # use `np.isclose` for checking match diff = df_region[~np.isclose(df_region, df_subregions, **kwargs)] if len(diff): msg = ( '`{}` - {} of {} rows are not aggregates of subregions' ) logger().info(msg.format(variable, len(diff), len(df_region))) if exclude_on_fail: self._exclude_on_fail(diff.index.droplevel([2, 3])) col_args = dict(region=region, variable=variable) return IamDataFrame(diff, **col_args).timeseries()
python
def check_aggregate_region(self, variable, region='World', subregions=None, components=None, exclude_on_fail=False, **kwargs): """Check whether the region timeseries data match the aggregation of components Parameters ---------- variable: str variable to be checked for matching aggregation of subregions region: str, default 'World' region to be checked for matching aggregation of subregions subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str, default None list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` kwargs: passed to `np.isclose()` """ # compute aggregate from subregions, return None if no subregions df_subregions = self.aggregate_region(variable, region, subregions, components) if df_subregions is None: return # filter and groupby data, use `pd.Series.align` for matching index rows = self._apply_filters(region=region, variable=variable) df_region, df_subregions = ( _aggregate(self.data[rows], ['region', 'variable']) .align(df_subregions) ) # use `np.isclose` for checking match diff = df_region[~np.isclose(df_region, df_subregions, **kwargs)] if len(diff): msg = ( '`{}` - {} of {} rows are not aggregates of subregions' ) logger().info(msg.format(variable, len(diff), len(df_region))) if exclude_on_fail: self._exclude_on_fail(diff.index.droplevel([2, 3])) col_args = dict(region=region, variable=variable) return IamDataFrame(diff, **col_args).timeseries()
[ "def", "check_aggregate_region", "(", "self", ",", "variable", ",", "region", "=", "'World'", ",", "subregions", "=", "None", ",", "components", "=", "None", ",", "exclude_on_fail", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# compute aggregate from subregions, return None if no subregions", "df_subregions", "=", "self", ".", "aggregate_region", "(", "variable", ",", "region", ",", "subregions", ",", "components", ")", "if", "df_subregions", "is", "None", ":", "return", "# filter and groupby data, use `pd.Series.align` for matching index", "rows", "=", "self", ".", "_apply_filters", "(", "region", "=", "region", ",", "variable", "=", "variable", ")", "df_region", ",", "df_subregions", "=", "(", "_aggregate", "(", "self", ".", "data", "[", "rows", "]", ",", "[", "'region'", ",", "'variable'", "]", ")", ".", "align", "(", "df_subregions", ")", ")", "# use `np.isclose` for checking match", "diff", "=", "df_region", "[", "~", "np", ".", "isclose", "(", "df_region", ",", "df_subregions", ",", "*", "*", "kwargs", ")", "]", "if", "len", "(", "diff", ")", ":", "msg", "=", "(", "'`{}` - {} of {} rows are not aggregates of subregions'", ")", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "variable", ",", "len", "(", "diff", ")", ",", "len", "(", "df_region", ")", ")", ")", "if", "exclude_on_fail", ":", "self", ".", "_exclude_on_fail", "(", "diff", ".", "index", ".", "droplevel", "(", "[", "2", ",", "3", "]", ")", ")", "col_args", "=", "dict", "(", "region", "=", "region", ",", "variable", "=", "variable", ")", "return", "IamDataFrame", "(", "diff", ",", "*", "*", "col_args", ")", ".", "timeseries", "(", ")" ]
Check whether the region timeseries data match the aggregation of components Parameters ---------- variable: str variable to be checked for matching aggregation of subregions region: str, default 'World' region to be checked for matching aggregation of subregions subregions: list of str list of subregions, defaults to all regions other than `region` components: list of str, default None list of variables, defaults to all sub-categories of `variable` included in `region` but not in any of `subregions` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` kwargs: passed to `np.isclose()`
[ "Check", "whether", "the", "region", "timeseries", "data", "match", "the", "aggregation", "of", "components" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L803-L850
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.check_internal_consistency
def check_internal_consistency(self, **kwargs): """Check whether the database is internally consistent We check that all variables are equal to the sum of their sectoral components and that all the regions add up to the World total. If the check is passed, None is returned, otherwise a dictionary of inconsistent variables is returned. Note: at the moment, this method's regional checking is limited to checking that all the regions sum to the World region. We cannot make this more automatic unless we start to store how the regions relate, see [this issue](https://github.com/IAMconsortium/pyam/issues/106). Parameters ---------- kwargs: passed to `np.isclose()` """ inconsistent_vars = {} for variable in self.variables(): diff_agg = self.check_aggregate(variable, **kwargs) if diff_agg is not None: inconsistent_vars[variable + "-aggregate"] = diff_agg diff_regional = self.check_aggregate_region(variable, **kwargs) if diff_regional is not None: inconsistent_vars[variable + "-regional"] = diff_regional return inconsistent_vars if inconsistent_vars else None
python
def check_internal_consistency(self, **kwargs): """Check whether the database is internally consistent We check that all variables are equal to the sum of their sectoral components and that all the regions add up to the World total. If the check is passed, None is returned, otherwise a dictionary of inconsistent variables is returned. Note: at the moment, this method's regional checking is limited to checking that all the regions sum to the World region. We cannot make this more automatic unless we start to store how the regions relate, see [this issue](https://github.com/IAMconsortium/pyam/issues/106). Parameters ---------- kwargs: passed to `np.isclose()` """ inconsistent_vars = {} for variable in self.variables(): diff_agg = self.check_aggregate(variable, **kwargs) if diff_agg is not None: inconsistent_vars[variable + "-aggregate"] = diff_agg diff_regional = self.check_aggregate_region(variable, **kwargs) if diff_regional is not None: inconsistent_vars[variable + "-regional"] = diff_regional return inconsistent_vars if inconsistent_vars else None
[ "def", "check_internal_consistency", "(", "self", ",", "*", "*", "kwargs", ")", ":", "inconsistent_vars", "=", "{", "}", "for", "variable", "in", "self", ".", "variables", "(", ")", ":", "diff_agg", "=", "self", ".", "check_aggregate", "(", "variable", ",", "*", "*", "kwargs", ")", "if", "diff_agg", "is", "not", "None", ":", "inconsistent_vars", "[", "variable", "+", "\"-aggregate\"", "]", "=", "diff_agg", "diff_regional", "=", "self", ".", "check_aggregate_region", "(", "variable", ",", "*", "*", "kwargs", ")", "if", "diff_regional", "is", "not", "None", ":", "inconsistent_vars", "[", "variable", "+", "\"-regional\"", "]", "=", "diff_regional", "return", "inconsistent_vars", "if", "inconsistent_vars", "else", "None" ]
Check whether the database is internally consistent We check that all variables are equal to the sum of their sectoral components and that all the regions add up to the World total. If the check is passed, None is returned, otherwise a dictionary of inconsistent variables is returned. Note: at the moment, this method's regional checking is limited to checking that all the regions sum to the World region. We cannot make this more automatic unless we start to store how the regions relate, see [this issue](https://github.com/IAMconsortium/pyam/issues/106). Parameters ---------- kwargs: passed to `np.isclose()`
[ "Check", "whether", "the", "database", "is", "internally", "consistent" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L860-L888
train
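A sketch of `check_internal_consistency` with an invented fixture that is consistent by construction ('World' equals the sum of the two subregions); kwargs are forwarded to `np.isclose` via both underlying checks:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0],
     ['model_a', 'scen_a', 'reg_a', 'Primary Energy', 'EJ/y', 0.75, 4.0],
     ['model_a', 'scen_a', 'reg_b', 'Primary Energy', 'EJ/y', 0.25, 2.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

issues = df.check_internal_consistency(atol=1e-8)
print(issues)  # None here; otherwise a dict of {variable-check: diff}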
IAMconsortium/pyam
pyam/core.py
IamDataFrame._apply_filters
def _apply_filters(self, **filters):
    """Determine rows to keep in data for given set of filters

    Parameters
    ----------
    filters: dict
        dictionary of filters ({col: values}); uses a pseudo-regexp syntax
        by default, but accepts `regexp: True` to use regexp directly
    """
    regexp = filters.pop('regexp', False)
    keep = np.array([True] * len(self.data))

    # filter by columns and list of values
    for col, values in filters.items():
        # treat `_apply_filters(col=None)` as no filter applied
        if values is None:
            continue

        if col in self.meta.columns:
            matches = pattern_match(self.meta[col], values, regexp=regexp)
            cat_idx = self.meta[matches].index
            keep_col = (self.data[META_IDX].set_index(META_IDX)
                        .index.isin(cat_idx))

        elif col == 'variable':
            level = filters['level'] if 'level' in filters else None
            keep_col = pattern_match(self.data[col], values, level, regexp)

        elif col == 'year':
            _data = self.data[col] if self.time_col != 'time' \
                else self.data['time'].apply(lambda x: x.year)
            keep_col = years_match(_data, values)

        elif col == 'month' and self.time_col == 'time':
            keep_col = month_match(self.data['time']
                                   .apply(lambda x: x.month), values)

        elif col == 'day' and self.time_col == 'time':
            if isinstance(values, str):
                wday = True
            elif isinstance(values, list) and isinstance(values[0], str):
                wday = True
            else:
                wday = False

            if wday:
                days = self.data['time'].apply(lambda x: x.weekday())
            else:  # ints or list of ints
                days = self.data['time'].apply(lambda x: x.day)

            keep_col = day_match(days, values)

        elif col == 'hour' and self.time_col == 'time':
            keep_col = hour_match(self.data['time']
                                  .apply(lambda x: x.hour), values)

        elif col == 'time' and self.time_col == 'time':
            keep_col = datetime_match(self.data[col], values)

        elif col == 'level':
            if 'variable' not in filters.keys():
                keep_col = find_depth(self.data['variable'], level=values)
            else:
                continue

        elif col in self.data.columns:
            keep_col = pattern_match(self.data[col], values, regexp=regexp)

        else:
            _raise_filter_error(col)

        keep &= keep_col

    return keep
python
def _apply_filters(self, **filters):
    """Determine rows to keep in data for given set of filters

    Parameters
    ----------
    filters: dict
        dictionary of filters ({col: values}); uses a pseudo-regexp syntax
        by default, but accepts `regexp: True` to use regexp directly
    """
    regexp = filters.pop('regexp', False)
    keep = np.array([True] * len(self.data))

    # filter by columns and list of values
    for col, values in filters.items():
        # treat `_apply_filters(col=None)` as no filter applied
        if values is None:
            continue

        if col in self.meta.columns:
            matches = pattern_match(self.meta[col], values, regexp=regexp)
            cat_idx = self.meta[matches].index
            keep_col = (self.data[META_IDX].set_index(META_IDX)
                        .index.isin(cat_idx))

        elif col == 'variable':
            level = filters['level'] if 'level' in filters else None
            keep_col = pattern_match(self.data[col], values, level, regexp)

        elif col == 'year':
            _data = self.data[col] if self.time_col != 'time' \
                else self.data['time'].apply(lambda x: x.year)
            keep_col = years_match(_data, values)

        elif col == 'month' and self.time_col == 'time':
            keep_col = month_match(self.data['time']
                                   .apply(lambda x: x.month), values)

        elif col == 'day' and self.time_col == 'time':
            if isinstance(values, str):
                wday = True
            elif isinstance(values, list) and isinstance(values[0], str):
                wday = True
            else:
                wday = False

            if wday:
                days = self.data['time'].apply(lambda x: x.weekday())
            else:  # ints or list of ints
                days = self.data['time'].apply(lambda x: x.day)

            keep_col = day_match(days, values)

        elif col == 'hour' and self.time_col == 'time':
            keep_col = hour_match(self.data['time']
                                  .apply(lambda x: x.hour), values)

        elif col == 'time' and self.time_col == 'time':
            keep_col = datetime_match(self.data[col], values)

        elif col == 'level':
            if 'variable' not in filters.keys():
                keep_col = find_depth(self.data['variable'], level=values)
            else:
                continue

        elif col in self.data.columns:
            keep_col = pattern_match(self.data[col], values, regexp=regexp)

        else:
            _raise_filter_error(col)

        keep &= keep_col

    return keep
[ "def", "_apply_filters", "(", "self", ",", "*", "*", "filters", ")", ":", "regexp", "=", "filters", ".", "pop", "(", "'regexp'", ",", "False", ")", "keep", "=", "np", ".", "array", "(", "[", "True", "]", "*", "len", "(", "self", ".", "data", ")", ")", "# filter by columns and list of values", "for", "col", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "# treat `_apply_filters(col=None)` as no filter applied", "if", "values", "is", "None", ":", "continue", "if", "col", "in", "self", ".", "meta", ".", "columns", ":", "matches", "=", "pattern_match", "(", "self", ".", "meta", "[", "col", "]", ",", "values", ",", "regexp", "=", "regexp", ")", "cat_idx", "=", "self", ".", "meta", "[", "matches", "]", ".", "index", "keep_col", "=", "(", "self", ".", "data", "[", "META_IDX", "]", ".", "set_index", "(", "META_IDX", ")", ".", "index", ".", "isin", "(", "cat_idx", ")", ")", "elif", "col", "==", "'variable'", ":", "level", "=", "filters", "[", "'level'", "]", "if", "'level'", "in", "filters", "else", "None", "keep_col", "=", "pattern_match", "(", "self", ".", "data", "[", "col", "]", ",", "values", ",", "level", ",", "regexp", ")", "elif", "col", "==", "'year'", ":", "_data", "=", "self", ".", "data", "[", "col", "]", "if", "self", ".", "time_col", "is", "not", "'time'", "else", "self", ".", "data", "[", "'time'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "year", ")", "keep_col", "=", "years_match", "(", "_data", ",", "values", ")", "elif", "col", "==", "'month'", "and", "self", ".", "time_col", "is", "'time'", ":", "keep_col", "=", "month_match", "(", "self", ".", "data", "[", "'time'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "month", ")", ",", "values", ")", "elif", "col", "==", "'day'", "and", "self", ".", "time_col", "is", "'time'", ":", "if", "isinstance", "(", "values", ",", "str", ")", ":", "wday", "=", "True", "elif", "isinstance", "(", "values", ",", "list", ")", "and", "isinstance", "(", "values", "[", "0", "]", ",", "str", ")", ":", "wday", "=", "True", "else", ":", "wday", "=", "False", "if", "wday", ":", "days", "=", "self", ".", "data", "[", "'time'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "weekday", "(", ")", ")", "else", ":", "# ints or list of ints", "days", "=", "self", ".", "data", "[", "'time'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "day", ")", "keep_col", "=", "day_match", "(", "days", ",", "values", ")", "elif", "col", "==", "'hour'", "and", "self", ".", "time_col", "is", "'time'", ":", "keep_col", "=", "hour_match", "(", "self", ".", "data", "[", "'time'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "hour", ")", ",", "values", ")", "elif", "col", "==", "'time'", "and", "self", ".", "time_col", "is", "'time'", ":", "keep_col", "=", "datetime_match", "(", "self", ".", "data", "[", "col", "]", ",", "values", ")", "elif", "col", "==", "'level'", ":", "if", "'variable'", "not", "in", "filters", ".", "keys", "(", ")", ":", "keep_col", "=", "find_depth", "(", "self", ".", "data", "[", "'variable'", "]", ",", "level", "=", "values", ")", "else", ":", "continue", "elif", "col", "in", "self", ".", "data", ".", "columns", ":", "keep_col", "=", "pattern_match", "(", "self", ".", "data", "[", "col", "]", ",", "values", ",", "regexp", "=", "regexp", ")", "else", ":", "_raise_filter_error", "(", "col", ")", "keep", "&=", "keep_col", "return", "keep" ]
Determine rows to keep in data for given set of filters

Parameters
----------
filters: dict
    dictionary of filters ({col: values}); uses a pseudo-regexp syntax
    by default, but accepts `regexp: True` to use regexp directly
[ "Determine", "rows", "to", "keep", "in", "data", "for", "given", "set", "of", "filters" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L938-L1013
train
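`_apply_filters` is private, but it backs the public `filter()` method of the same class (called, for example, inside `aggregate_region` above). A sketch of the pseudo-regexp syntax it implements, with an invented fixture; `*` is the wildcard, and `regexp=True` would switch to plain regular expressions:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0],
     ['model_a', 'scen_a', 'World', 'Primary Energy|Coal', 'EJ/y', 0.5, 3.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

sub = df.filter(variable='Primary Energy|*')  # pseudo-regexp wildcard
y2010 = df.filter(year=2010)                  # handled by years_match
print(sub.data['variable'].unique(), len(y2010.data))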
IAMconsortium/pyam
pyam/core.py
IamDataFrame.col_apply
def col_apply(self, col, func, *args, **kwargs):
    """Apply a function to a column

    Parameters
    ----------
    col: string
        column in either data or metadata
    func: function
        function to apply
    """
    if col in self.data:
        self.data[col] = self.data[col].apply(func, *args, **kwargs)
    else:
        self.meta[col] = self.meta[col].apply(func, *args, **kwargs)
python
def col_apply(self, col, func, *args, **kwargs):
    """Apply a function to a column

    Parameters
    ----------
    col: string
        column in either data or metadata
    func: function
        function to apply
    """
    if col in self.data:
        self.data[col] = self.data[col].apply(func, *args, **kwargs)
    else:
        self.meta[col] = self.meta[col].apply(func, *args, **kwargs)
[ "def", "col_apply", "(", "self", ",", "col", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "col", "in", "self", ".", "data", ":", "self", ".", "data", "[", "col", "]", "=", "self", ".", "data", "[", "col", "]", ".", "apply", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "meta", "[", "col", "]", "=", "self", ".", "meta", "[", "col", "]", ".", "apply", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Apply a function to a column

Parameters
----------
col: string
    column in either data or metadata
func: function
    function to apply
[ "Apply", "a", "function", "to", "a", "column" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1015-L1028
train
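A sketch of `col_apply` on the invented fixture; the column is looked up in `data` first, then in `meta`:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

df.col_apply('unit', str.lower)  # 'unit' is a data column
print(df.data['unit'].unique())  # -> ['ej/y']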
IAMconsortium/pyam
pyam/core.py
IamDataFrame._to_file_format
def _to_file_format(self, iamc_index): """Return a dataframe suitable for writing to a file""" df = self.timeseries(iamc_index=iamc_index).reset_index() df = df.rename(columns={c: str(c).title() for c in df.columns}) return df
python
def _to_file_format(self, iamc_index): """Return a dataframe suitable for writing to a file""" df = self.timeseries(iamc_index=iamc_index).reset_index() df = df.rename(columns={c: str(c).title() for c in df.columns}) return df
[ "def", "_to_file_format", "(", "self", ",", "iamc_index", ")", ":", "df", "=", "self", ".", "timeseries", "(", "iamc_index", "=", "iamc_index", ")", ".", "reset_index", "(", ")", "df", "=", "df", ".", "rename", "(", "columns", "=", "{", "c", ":", "str", "(", "c", ")", ".", "title", "(", ")", "for", "c", "in", "df", ".", "columns", "}", ")", "return", "df" ]
Return a dataframe suitable for writing to a file
[ "Return", "a", "dataframe", "suitable", "for", "writing", "to", "a", "file" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1030-L1034
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.to_csv
def to_csv(self, path, iamc_index=False, **kwargs): """Write timeseries data to a csv file Parameters ---------- path: string file path iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns """ self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs)
python
def to_csv(self, path, iamc_index=False, **kwargs): """Write timeseries data to a csv file Parameters ---------- path: string file path iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns """ self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs)
[ "def", "to_csv", "(", "self", ",", "path", ",", "iamc_index", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_to_file_format", "(", "iamc_index", ")", ".", "to_csv", "(", "path", ",", "index", "=", "False", ",", "*", "*", "kwargs", ")" ]
Write timeseries data to a csv file Parameters ---------- path: string file path iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns
[ "Write", "timeseries", "data", "to", "a", "csv", "file" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1036-L1047
train
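A sketch of `to_csv` (invented fixture and file names); given `_to_file_format` above, the output is the wide `timeseries()` layout with title-cased headers:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

df.to_csv('tmp_data.csv')                   # all data columns
df.to_csv('tmp_iamc.csv', iamc_index=True)  # IAMC index columns only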
IAMconsortium/pyam
pyam/core.py
IamDataFrame.to_excel
def to_excel(self, excel_writer, sheet_name='data', iamc_index=False,
             **kwargs):
    """Write timeseries data to Excel format

    Parameters
    ----------
    excel_writer: string or ExcelWriter object
        file path or existing ExcelWriter
    sheet_name: string, default 'data'
        name of sheet which will contain `IamDataFrame.timeseries()` data
    iamc_index: bool, default False
        if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
        else, use all `data` columns
    """
    # close the writer only if it was created here
    close = False
    if not isinstance(excel_writer, pd.ExcelWriter):
        close = True
        excel_writer = pd.ExcelWriter(excel_writer)
    self._to_file_format(iamc_index)\
        .to_excel(excel_writer, sheet_name=sheet_name, index=False, **kwargs)
    if close:
        excel_writer.close()
python
def to_excel(self, excel_writer, sheet_name='data', iamc_index=False,
             **kwargs):
    """Write timeseries data to Excel format

    Parameters
    ----------
    excel_writer: string or ExcelWriter object
        file path or existing ExcelWriter
    sheet_name: string, default 'data'
        name of sheet which will contain `IamDataFrame.timeseries()` data
    iamc_index: bool, default False
        if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
        else, use all `data` columns
    """
    # close the writer only if it was created here
    close = False
    if not isinstance(excel_writer, pd.ExcelWriter):
        close = True
        excel_writer = pd.ExcelWriter(excel_writer)
    self._to_file_format(iamc_index)\
        .to_excel(excel_writer, sheet_name=sheet_name, index=False, **kwargs)
    if close:
        excel_writer.close()
[ "def", "to_excel", "(", "self", ",", "excel_writer", ",", "sheet_name", "=", "'data'", ",", "iamc_index", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "excel_writer", ",", "pd", ".", "ExcelWriter", ")", ":", "close", "=", "True", "excel_writer", "=", "pd", ".", "ExcelWriter", "(", "excel_writer", ")", "self", ".", "_to_file_format", "(", "iamc_index", ")", ".", "to_excel", "(", "excel_writer", ",", "sheet_name", "=", "sheet_name", ",", "index", "=", "False", ",", "*", "*", "kwargs", ")", "if", "close", ":", "excel_writer", ".", "close", "(", ")" ]
Write timeseries data to Excel format Parameters ---------- excel_writer: string or ExcelWriter object file path or existing ExcelWriter sheet_name: string, default 'data' name of sheet which will contain `IamDataFrame.timeseries()` data iamc_index: bool, default False if True, use `['model', 'scenario', 'region', 'variable', 'unit']`; else, use all `data` columns
[ "Write", "timeseries", "data", "to", "Excel", "format" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1049-L1070
train
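A sketch of `to_excel` (invented fixture and file names; an Excel engine such as openpyxl is assumed installed). The second call relies on the `close` flag being initialised to False, as in the corrected record above, so a caller-owned writer is not closed by the method:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

# writer created and closed internally
df.to_excel('tmp_data.xlsx')

# caller-owned writer, reused for several sheets and closed by the caller
with pd.ExcelWriter('tmp_multi.xlsx') as writer:
    df.to_excel(writer, sheet_name='data')
    df.to_excel(writer, sheet_name='data_iamc', iamc_index=True)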
IAMconsortium/pyam
pyam/core.py
IamDataFrame.export_metadata
def export_metadata(self, path): """Export metadata to Excel Parameters ---------- path: string path/filename for xlsx file of metadata export """ writer = pd.ExcelWriter(path) write_sheet(writer, 'meta', self.meta, index=True) writer.save()
python
def export_metadata(self, path): """Export metadata to Excel Parameters ---------- path: string path/filename for xlsx file of metadata export """ writer = pd.ExcelWriter(path) write_sheet(writer, 'meta', self.meta, index=True) writer.save()
[ "def", "export_metadata", "(", "self", ",", "path", ")", ":", "writer", "=", "pd", ".", "ExcelWriter", "(", "path", ")", "write_sheet", "(", "writer", ",", "'meta'", ",", "self", ".", "meta", ",", "index", "=", "True", ")", "writer", ".", "save", "(", ")" ]
Export metadata to Excel Parameters ---------- path: string path/filename for xlsx file of metadata export
[ "Export", "metadata", "to", "Excel" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1072-L1082
train
IAMconsortium/pyam
pyam/core.py
IamDataFrame.load_metadata
def load_metadata(self, path, *args, **kwargs): """Load metadata exported from `pyam.IamDataFrame` instance Parameters ---------- path: string xlsx file with metadata exported from `pyam.IamDataFrame` instance """ if not os.path.exists(path): raise ValueError("no metadata file '" + path + "' found!") if path.endswith('csv'): df = pd.read_csv(path, *args, **kwargs) else: xl = pd.ExcelFile(path) if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs: kwargs['sheet_name'] = 'meta' df = pd.read_excel(path, *args, **kwargs) req_cols = ['model', 'scenario', 'exclude'] if not set(req_cols).issubset(set(df.columns)): e = 'File `{}` does not have required columns ({})!' raise ValueError(e.format(path, req_cols)) # set index, filter to relevant scenarios from imported metadata file df.set_index(META_IDX, inplace=True) idx = self.meta.index.intersection(df.index) n_invalid = len(df) - len(idx) if n_invalid > 0: msg = 'Ignoring {} scenario{} from imported metadata' logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else '')) if idx.empty: raise ValueError('No valid scenarios in imported metadata file!') df = df.loc[idx] # Merge in imported metadata msg = 'Importing metadata for {} scenario{} (for total of {})' logger().info(msg.format(len(df), 's' if len(df) > 1 else '', len(self.meta))) for col in df.columns: self._new_meta_column(col) self.meta[col] = df[col].combine_first(self.meta[col]) # set column `exclude` to bool self.meta.exclude = self.meta.exclude.astype('bool')
python
def load_metadata(self, path, *args, **kwargs): """Load metadata exported from `pyam.IamDataFrame` instance Parameters ---------- path: string xlsx file with metadata exported from `pyam.IamDataFrame` instance """ if not os.path.exists(path): raise ValueError("no metadata file '" + path + "' found!") if path.endswith('csv'): df = pd.read_csv(path, *args, **kwargs) else: xl = pd.ExcelFile(path) if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs: kwargs['sheet_name'] = 'meta' df = pd.read_excel(path, *args, **kwargs) req_cols = ['model', 'scenario', 'exclude'] if not set(req_cols).issubset(set(df.columns)): e = 'File `{}` does not have required columns ({})!' raise ValueError(e.format(path, req_cols)) # set index, filter to relevant scenarios from imported metadata file df.set_index(META_IDX, inplace=True) idx = self.meta.index.intersection(df.index) n_invalid = len(df) - len(idx) if n_invalid > 0: msg = 'Ignoring {} scenario{} from imported metadata' logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else '')) if idx.empty: raise ValueError('No valid scenarios in imported metadata file!') df = df.loc[idx] # Merge in imported metadata msg = 'Importing metadata for {} scenario{} (for total of {})' logger().info(msg.format(len(df), 's' if len(df) > 1 else '', len(self.meta))) for col in df.columns: self._new_meta_column(col) self.meta[col] = df[col].combine_first(self.meta[col]) # set column `exclude` to bool self.meta.exclude = self.meta.exclude.astype('bool')
[ "def", "load_metadata", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "ValueError", "(", "\"no metadata file '\"", "+", "path", "+", "\"' found!\"", ")", "if", "path", ".", "endswith", "(", "'csv'", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "xl", "=", "pd", ".", "ExcelFile", "(", "path", ")", "if", "len", "(", "xl", ".", "sheet_names", ")", ">", "1", "and", "'sheet_name'", "not", "in", "kwargs", ":", "kwargs", "[", "'sheet_name'", "]", "=", "'meta'", "df", "=", "pd", ".", "read_excel", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "req_cols", "=", "[", "'model'", ",", "'scenario'", ",", "'exclude'", "]", "if", "not", "set", "(", "req_cols", ")", ".", "issubset", "(", "set", "(", "df", ".", "columns", ")", ")", ":", "e", "=", "'File `{}` does not have required columns ({})!'", "raise", "ValueError", "(", "e", ".", "format", "(", "path", ",", "req_cols", ")", ")", "# set index, filter to relevant scenarios from imported metadata file", "df", ".", "set_index", "(", "META_IDX", ",", "inplace", "=", "True", ")", "idx", "=", "self", ".", "meta", ".", "index", ".", "intersection", "(", "df", ".", "index", ")", "n_invalid", "=", "len", "(", "df", ")", "-", "len", "(", "idx", ")", "if", "n_invalid", ">", "0", ":", "msg", "=", "'Ignoring {} scenario{} from imported metadata'", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "n_invalid", ",", "'s'", "if", "n_invalid", ">", "1", "else", "''", ")", ")", "if", "idx", ".", "empty", ":", "raise", "ValueError", "(", "'No valid scenarios in imported metadata file!'", ")", "df", "=", "df", ".", "loc", "[", "idx", "]", "# Merge in imported metadata", "msg", "=", "'Importing metadata for {} scenario{} (for total of {})'", "logger", "(", ")", ".", "info", "(", "msg", ".", "format", "(", "len", "(", "df", ")", ",", "'s'", "if", "len", "(", "df", ")", ">", "1", "else", "''", ",", "len", "(", "self", ".", "meta", ")", ")", ")", "for", "col", "in", "df", ".", "columns", ":", "self", ".", "_new_meta_column", "(", "col", ")", "self", ".", "meta", "[", "col", "]", "=", "df", "[", "col", "]", ".", "combine_first", "(", "self", ".", "meta", "[", "col", "]", ")", "# set column `exclude` to bool", "self", ".", "meta", ".", "exclude", "=", "self", ".", "meta", ".", "exclude", ".", "astype", "(", "'bool'", ")" ]
Load metadata exported from `pyam.IamDataFrame` instance Parameters ---------- path: string xlsx file with metadata exported from `pyam.IamDataFrame` instance
[ "Load", "metadata", "exported", "from", "pyam", ".", "IamDataFrame", "instance" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1084-L1131
train
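A round-trip sketch combining `export_metadata` and `load_metadata` (invented fixture, meta column, and file name; Excel engine assumed). The exported sheet keeps the required `model`, `scenario` and `exclude` columns, so it can be merged back in:

import pandas as pd
import pyam

df = pyam.IamDataFrame(pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/y', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
))

df.meta['category'] = 'demo'         # hypothetical meta column
df.export_metadata('tmp_meta.xlsx')

df.load_metadata('tmp_meta.xlsx')    # merges the stored metadata back in
print(df.meta.columns)               # includes 'exclude' and 'category'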
IAMconsortium/pyam
pyam/core.py
IamDataFrame.line_plot
def line_plot(self, x='year', y='value', **kwargs):
    """Plot timeseries lines of existing data

    see pyam.plotting.line_plot() for all available options
    """
    df = self.as_pandas(with_metadata=kwargs)

    # pivot data if asked for explicit variable name
    variables = df['variable'].unique()
    if x in variables or y in variables:
        keep_vars = set([x, y]) & set(variables)
        df = df[df['variable'].isin(keep_vars)]
        idx = list(set(df.columns) - set(['value']))
        df = (df
              .reset_index()
              .set_index(idx)
              .value  # df -> series
              .unstack(level='variable')  # keep_vars are columns
              .rename_axis(None, axis=1)  # rm column index name
              .reset_index()
              .set_index(META_IDX)
              )
        if x != 'year' and y != 'year':
            df = df.drop('year', axis=1)  # years cause NaNs

    ax, handles, labels = plotting.line_plot(
        df.dropna(), x=x, y=y, **kwargs)
    return ax
python
def line_plot(self, x='year', y='value', **kwargs):
    """Plot timeseries lines of existing data

    see pyam.plotting.line_plot() for all available options
    """
    df = self.as_pandas(with_metadata=kwargs)

    # pivot data if asked for explicit variable name
    variables = df['variable'].unique()
    if x in variables or y in variables:
        keep_vars = set([x, y]) & set(variables)
        df = df[df['variable'].isin(keep_vars)]
        idx = list(set(df.columns) - set(['value']))
        df = (df
              .reset_index()
              .set_index(idx)
              .value  # df -> series
              .unstack(level='variable')  # keep_vars are columns
              .rename_axis(None, axis=1)  # rm column index name
              .reset_index()
              .set_index(META_IDX)
              )
        if x != 'year' and y != 'year':
            df = df.drop('year', axis=1)  # years cause NaNs

    ax, handles, labels = plotting.line_plot(
        df.dropna(), x=x, y=y, **kwargs)
    return ax
[ "def", "line_plot", "(", "self", ",", "x", "=", "'year'", ",", "y", "=", "'value'", ",", "*", "*", "kwargs", ")", ":", "df", "=", "self", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", "# pivot data if asked for explicit variable name", "variables", "=", "df", "[", "'variable'", "]", ".", "unique", "(", ")", "if", "x", "in", "variables", "or", "y", "in", "variables", ":", "keep_vars", "=", "set", "(", "[", "x", ",", "y", "]", ")", "&", "set", "(", "variables", ")", "df", "=", "df", "[", "df", "[", "'variable'", "]", ".", "isin", "(", "keep_vars", ")", "]", "idx", "=", "list", "(", "set", "(", "df", ".", "columns", ")", "-", "set", "(", "[", "'value'", "]", ")", ")", "df", "=", "(", "df", ".", "reset_index", "(", ")", ".", "set_index", "(", "idx", ")", ".", "value", "# df -> series", ".", "unstack", "(", "level", "=", "'variable'", ")", "# keep_vars are columns", ".", "rename_axis", "(", "None", ",", "axis", "=", "1", ")", "# rm column index name", ".", "reset_index", "(", ")", ".", "set_index", "(", "META_IDX", ")", ")", "if", "x", "!=", "'year'", "and", "y", "!=", "'year'", ":", "df", "=", "df", ".", "drop", "(", "'year'", ",", "axis", "=", "1", ")", "# years causes NaNs", "ax", ",", "handles", ",", "labels", "=", "plotting", ".", "line_plot", "(", "df", ".", "dropna", "(", ")", ",", "x", "=", "x", ",", "y", "=", "y", ",", "*", "*", "kwargs", ")", "return", "ax" ]
Plot timeseries lines of existing data see pyam.plotting.line_plot() for all available options
[ "Plot", "timeseries", "lines", "of", "existing", "data" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1133-L1160
train
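A short sketch for the `line_plot` record; the input file and variable are placeholders, and extra keywords such as `color` are forwarded to `pyam.plotting.line_plot`.

```
import matplotlib.pyplot as plt
import pyam

df = pyam.IamDataFrame('scenarios.xlsx')  # hypothetical input file
ax = df.filter(variable='Primary Energy').line_plot(color='scenario')
plt.show()
```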
IAMconsortium/pyam
pyam/core.py
IamDataFrame.stack_plot
def stack_plot(self, *args, **kwargs): """Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options """ df = self.as_pandas(with_metadata=True) ax = plotting.stack_plot(df, *args, **kwargs) return ax
python
def stack_plot(self, *args, **kwargs): """Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options """ df = self.as_pandas(with_metadata=True) ax = plotting.stack_plot(df, *args, **kwargs) return ax
[ "def", "stack_plot", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "df", "=", "self", ".", "as_pandas", "(", "with_metadata", "=", "True", ")", "ax", "=", "plotting", ".", "stack_plot", "(", "df", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ax" ]
Plot timeseries stacks of existing data see pyam.plotting.stack_plot() for all available options
[ "Plot", "timeseries", "stacks", "of", "existing", "data" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1162-L1169
train
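`stack_plot` is a thin wrapper around `pyam.plotting.stack_plot`; a one-line sketch, assuming `df` is the `IamDataFrame` from the sketch above and the variable pattern exists in it.

```
ax = df.filter(variable='Emissions|CO2|*', region='World').stack_plot()
```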
IAMconsortium/pyam
pyam/core.py
IamDataFrame.scatter
def scatter(self, x, y, **kwargs): """Plot a scatter chart using metadata columns see pyam.plotting.scatter() for all available options """ variables = self.data['variable'].unique() xisvar = x in variables yisvar = y in variables if not xisvar and not yisvar: cols = [x, y] + self._discover_meta_cols(**kwargs) df = self.meta[cols].reset_index() elif xisvar and yisvar: # filter pivot both and rename dfx = ( self .filter(variable=x) .as_pandas(with_metadata=kwargs) .rename(columns={'value': x, 'unit': 'xunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) dfy = ( self .filter(variable=y) .as_pandas(with_metadata=kwargs) .rename(columns={'value': y, 'unit': 'yunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) df = dfx.join(dfy, lsuffix='_left', rsuffix='').reset_index() else: # filter, merge with meta, and rename value column to match var var = x if xisvar else y df = ( self .filter(variable=var) .as_pandas(with_metadata=kwargs) .rename(columns={'value': var}) ) ax = plotting.scatter(df.dropna(), x, y, **kwargs) return ax
python
def scatter(self, x, y, **kwargs): """Plot a scatter chart using metadata columns see pyam.plotting.scatter() for all available options """ variables = self.data['variable'].unique() xisvar = x in variables yisvar = y in variables if not xisvar and not yisvar: cols = [x, y] + self._discover_meta_cols(**kwargs) df = self.meta[cols].reset_index() elif xisvar and yisvar: # filter pivot both and rename dfx = ( self .filter(variable=x) .as_pandas(with_metadata=kwargs) .rename(columns={'value': x, 'unit': 'xunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) dfy = ( self .filter(variable=y) .as_pandas(with_metadata=kwargs) .rename(columns={'value': y, 'unit': 'yunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) df = dfx.join(dfy, lsuffix='_left', rsuffix='').reset_index() else: # filter, merge with meta, and rename value column to match var var = x if xisvar else y df = ( self .filter(variable=var) .as_pandas(with_metadata=kwargs) .rename(columns={'value': var}) ) ax = plotting.scatter(df.dropna(), x, y, **kwargs) return ax
[ "def", "scatter", "(", "self", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "variables", "=", "self", ".", "data", "[", "'variable'", "]", ".", "unique", "(", ")", "xisvar", "=", "x", "in", "variables", "yisvar", "=", "y", "in", "variables", "if", "not", "xisvar", "and", "not", "yisvar", ":", "cols", "=", "[", "x", ",", "y", "]", "+", "self", ".", "_discover_meta_cols", "(", "*", "*", "kwargs", ")", "df", "=", "self", ".", "meta", "[", "cols", "]", ".", "reset_index", "(", ")", "elif", "xisvar", "and", "yisvar", ":", "# filter pivot both and rename", "dfx", "=", "(", "self", ".", "filter", "(", "variable", "=", "x", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "x", ",", "'unit'", ":", "'xunit'", "}", ")", ".", "set_index", "(", "YEAR_IDX", ")", ".", "drop", "(", "'variable'", ",", "axis", "=", "1", ")", ")", "dfy", "=", "(", "self", ".", "filter", "(", "variable", "=", "y", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "y", ",", "'unit'", ":", "'yunit'", "}", ")", ".", "set_index", "(", "YEAR_IDX", ")", ".", "drop", "(", "'variable'", ",", "axis", "=", "1", ")", ")", "df", "=", "dfx", ".", "join", "(", "dfy", ",", "lsuffix", "=", "'_left'", ",", "rsuffix", "=", "''", ")", ".", "reset_index", "(", ")", "else", ":", "# filter, merge with meta, and rename value column to match var", "var", "=", "x", "if", "xisvar", "else", "y", "df", "=", "(", "self", ".", "filter", "(", "variable", "=", "var", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "var", "}", ")", ")", "ax", "=", "plotting", ".", "scatter", "(", "df", ".", "dropna", "(", ")", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")", "return", "ax" ]
Plot a scatter chart using metadata columns see pyam.plotting.scatter() for all available options
[ "Plot", "a", "scatter", "chart", "using", "metadata", "columns" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1189-L1229
train
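A sketch of the two-variable branch of the `scatter` record, where both axes name timeseries variables assumed to exist in `df`; each is filtered, pivoted, and joined before plotting.

```
ax = df.scatter(x='Emissions|CO2', y='Primary Energy', color='scenario')
```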
IAMconsortium/pyam
pyam/run_control.py
RunControl.update
def update(self, rc): """Add additional run control parameters Parameters ---------- rc : string, file, dictionary, optional a path to a YAML file, a file handle for a YAML file, or a dictionary describing run control configuration """ rc = self._load_yaml(rc) self.store = _recursive_update(self.store, rc)
python
def update(self, rc): """Add additional run control parameters Parameters ---------- rc : string, file, dictionary, optional a path to a YAML file, a file handle for a YAML file, or a dictionary describing run control configuration """ rc = self._load_yaml(rc) self.store = _recursive_update(self.store, rc)
[ "def", "update", "(", "self", ",", "rc", ")", ":", "rc", "=", "self", ".", "_load_yaml", "(", "rc", ")", "self", ".", "store", "=", "_recursive_update", "(", "self", ".", "store", ",", "rc", ")" ]
Add additional run control parameters Parameters ---------- rc : string, file, dictionary, optional a path to a YAML file, a file handle for a YAML file, or a dictionary describing run control configuration
[ "Add", "additional", "run", "control", "parameters" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/run_control.py#L75-L85
train
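A sketch of `update` with an inline dictionary; the color mapping is made up, and `run_control()` is assumed to return the module-level `RunControl` instance as elsewhere in pyam.

```
from pyam.run_control import run_control

rc = run_control()
rc.update({'color': {'scenario': {'My Scenario': 'blue'}}})
```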
IAMconsortium/pyam
pyam/run_control.py
RunControl.recursive_update
def recursive_update(self, k, d): """Recursively update a top-level option in the run control Parameters ---------- k : string the top-level key d : dictionary or similar the dictionary to use for updating """ u = self.__getitem__(k) self.store[k] = _recursive_update(u, d)
python
def recursive_update(self, k, d): """Recursively update a top-level option in the run control Parameters ---------- k : string the top-level key d : dictionary or similar the dictionary to use for updating """ u = self.__getitem__(k) self.store[k] = _recursive_update(u, d)
[ "def", "recursive_update", "(", "self", ",", "k", ",", "d", ")", ":", "u", "=", "self", ".", "__getitem__", "(", "k", ")", "self", ".", "store", "[", "k", "]", "=", "_recursive_update", "(", "u", ",", "d", ")" ]
Recursively update a top-level option in the run control Parameters ---------- k : string the top-level key d : dictionary or similar the dictionary to use for updating
[ "Recursively", "update", "a", "top", "-", "level", "option", "in", "the", "run", "control" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/run_control.py#L125-L136
train
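`recursive_update` merges into a single top-level key rather than the whole store; continuing the sketch above with an illustrative mapping:

```
rc.recursive_update('color', {'scenario': {'Another Scenario': 'green'}})
```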
IAMconsortium/pyam
pyam/iiasa.py
Connection.available_metadata
def available_metadata(self): """ List all scenario metadata indicators available in the connected data source """ url = self.base_url + 'metadata/types' headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) return pd.read_json(r.content, orient='records')['name']
python
def available_metadata(self): """ List all scenario metadata indicators available in the connected data source """ url = self.base_url + 'metadata/types' headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) return pd.read_json(r.content, orient='records')['name']
[ "def", "available_metadata", "(", "self", ")", ":", "url", "=", "self", ".", "base_url", "+", "'metadata/types'", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "auth", "(", ")", ")", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "return", "pd", ".", "read_json", "(", "r", ".", "content", ",", "orient", "=", "'records'", ")", "[", "'name'", "]" ]
List all scenario metadata indicators available in the connected data source
[ "List", "all", "scenario", "metadata", "indicators", "available", "in", "the", "connected", "data", "source" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/iiasa.py#L87-L95
train
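A connection sketch for the IIASA records that follow; the database name is a placeholder and working credentials are assumed.

```
from pyam.iiasa import Connection

conn = Connection('iamc15')  # hypothetical database name
print(conn.available_metadata())
```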
IAMconsortium/pyam
pyam/iiasa.py
Connection.metadata
def metadata(self, default=True): """ Metadata of scenarios in the connected data source Parameters ---------- default : bool, optional, default True Return *only* the default version of each Scenario. Any (`model`, `scenario`) without a default version is omitted. If :obj:`False`, return all versions. """ # at present this reads in all data for all scenarios, it could be sped # up in the future to try to query a subset default = 'true' if default else 'false' add_url = 'runs?getOnlyDefaultRuns={}&includeMetadata=true' url = self.base_url + add_url.format(default) headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) df = pd.read_json(r.content, orient='records') def extract(row): return ( pd.concat([row[['model', 'scenario']], pd.Series(row.metadata)]) .to_frame() .T .set_index(['model', 'scenario']) ) return pd.concat([extract(row) for idx, row in df.iterrows()], sort=False).reset_index()
python
def metadata(self, default=True): """ Metadata of scenarios in the connected data source Parameters ---------- default : bool, optional, default True Return *only* the default version of each Scenario. Any (`model`, `scenario`) without a default version is omitted. If :obj:`False`, return all versions. """ # at present this reads in all data for all scenarios, it could be sped # up in the future to try to query a subset default = 'true' if default else 'false' add_url = 'runs?getOnlyDefaultRuns={}&includeMetadata=true' url = self.base_url + add_url.format(default) headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) df = pd.read_json(r.content, orient='records') def extract(row): return ( pd.concat([row[['model', 'scenario']], pd.Series(row.metadata)]) .to_frame() .T .set_index(['model', 'scenario']) ) return pd.concat([extract(row) for idx, row in df.iterrows()], sort=False).reset_index()
[ "def", "metadata", "(", "self", ",", "default", "=", "True", ")", ":", "# at present this reads in all data for all scenarios, it could be sped", "# up in the future to try to query a subset", "default", "=", "'true'", "if", "default", "else", "'false'", "add_url", "=", "'runs?getOnlyDefaultRuns={}&includeMetadata=true'", "url", "=", "self", ".", "base_url", "+", "add_url", ".", "format", "(", "default", ")", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "auth", "(", ")", ")", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "df", "=", "pd", ".", "read_json", "(", "r", ".", "content", ",", "orient", "=", "'records'", ")", "def", "extract", "(", "row", ")", ":", "return", "(", "pd", ".", "concat", "(", "[", "row", "[", "[", "'model'", ",", "'scenario'", "]", "]", ",", "pd", ".", "Series", "(", "row", ".", "metadata", ")", "]", ")", ".", "to_frame", "(", ")", ".", "T", ".", "set_index", "(", "[", "'model'", ",", "'scenario'", "]", ")", ")", "return", "pd", ".", "concat", "(", "[", "extract", "(", "row", ")", "for", "idx", ",", "row", "in", "df", ".", "iterrows", "(", ")", "]", ",", "sort", "=", "False", ")", ".", "reset_index", "(", ")" ]
Metadata of scenarios in the connected data source Parameters ---------- default : bool, optional, default True Return *only* the default version of each Scenario. Any (`model`, `scenario`) without a default version is omitted. If :obj:`False`, return all versions.
[ "Metadata", "of", "scenarios", "in", "the", "connected", "data", "source" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/iiasa.py#L98-L128
train
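Fetching the indicator table via the `metadata` record, reusing `conn` from the sketch above; per the comment in the record this pulls all runs, so it can be slow.

```
meta = conn.metadata(default=True)        # only default scenario versions
all_versions = conn.metadata(default=False)
```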
IAMconsortium/pyam
pyam/iiasa.py
Connection.variables
def variables(self): """All variables in the connected data source""" url = self.base_url + 'ts' headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) df = pd.read_json(r.content, orient='records') return pd.Series(df['variable'].unique(), name='variable')
python
def variables(self): """All variables in the connected data source""" url = self.base_url + 'ts' headers = {'Authorization': 'Bearer {}'.format(self.auth())} r = requests.get(url, headers=headers) df = pd.read_json(r.content, orient='records') return pd.Series(df['variable'].unique(), name='variable')
[ "def", "variables", "(", "self", ")", ":", "url", "=", "self", ".", "base_url", "+", "'ts'", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "auth", "(", ")", ")", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "df", "=", "pd", ".", "read_json", "(", "r", ".", "content", ",", "orient", "=", "'records'", ")", "return", "pd", ".", "Series", "(", "df", "[", "'variable'", "]", ".", "unique", "(", ")", ",", "name", "=", "'variable'", ")" ]
All variables in the connected data source
[ "All", "variables", "in", "the", "connected", "data", "source" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/iiasa.py#L141-L147
train
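A one-liner for the `variables` record, again reusing `conn`:

```
print(conn.variables().head())
```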
IAMconsortium/pyam
pyam/iiasa.py
Connection.query
def query(self, **kwargs): """ Query the data source, subselecting data. Available keyword arguments include - model - scenario - region - variable Example ------- ``` Connection.query(model='MESSAGE', scenario='SSP2*', variable=['Emissions|CO2', 'Primary Energy']) ``` """ headers = { 'Authorization': 'Bearer {}'.format(self.auth()), 'Content-Type': 'application/json', } data = json.dumps(self._query_post_data(**kwargs)) url = self.base_url + 'runs/bulk/ts' r = requests.post(url, headers=headers, data=data) # refactor returned json object to be castable to an IamDataFrame df = ( pd.read_json(r.content, orient='records') .drop(columns='runId') .rename(columns={'time': 'subannual'}) ) # check if returned dataframe has subannual disaggregation, drop if not if pd.Series([i in [-1, 'year'] for i in df.subannual]).all(): df.drop(columns='subannual', inplace=True) # check if there are multiple version for any model/scenario lst = ( df[META_IDX + ['version']].drop_duplicates() .groupby(META_IDX).count().version ) if max(lst) > 1: raise ValueError('multiple versions for {}'.format( lst[lst > 1].index.to_list())) df.drop(columns='version', inplace=True) return df
python
def query(self, **kwargs): """ Query the data source, subselecting data. Available keyword arguments include - model - scenario - region - variable Example ------- ``` Connection.query(model='MESSAGE', scenario='SSP2*', variable=['Emissions|CO2', 'Primary Energy']) ``` """ headers = { 'Authorization': 'Bearer {}'.format(self.auth()), 'Content-Type': 'application/json', } data = json.dumps(self._query_post_data(**kwargs)) url = self.base_url + 'runs/bulk/ts' r = requests.post(url, headers=headers, data=data) # refactor returned json object to be castable to an IamDataFrame df = ( pd.read_json(r.content, orient='records') .drop(columns='runId') .rename(columns={'time': 'subannual'}) ) # check if returned dataframe has subannual disaggregation, drop if not if pd.Series([i in [-1, 'year'] for i in df.subannual]).all(): df.drop(columns='subannual', inplace=True) # check if there are multiple version for any model/scenario lst = ( df[META_IDX + ['version']].drop_duplicates() .groupby(META_IDX).count().version ) if max(lst) > 1: raise ValueError('multiple versions for {}'.format( lst[lst > 1].index.to_list())) df.drop(columns='version', inplace=True) return df
[ "def", "query", "(", "self", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "'Authorization'", ":", "'Bearer {}'", ".", "format", "(", "self", ".", "auth", "(", ")", ")", ",", "'Content-Type'", ":", "'application/json'", ",", "}", "data", "=", "json", ".", "dumps", "(", "self", ".", "_query_post_data", "(", "*", "*", "kwargs", ")", ")", "url", "=", "self", ".", "base_url", "+", "'runs/bulk/ts'", "r", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "# refactor returned json object to be castable to an IamDataFrame", "df", "=", "(", "pd", ".", "read_json", "(", "r", ".", "content", ",", "orient", "=", "'records'", ")", ".", "drop", "(", "columns", "=", "'runId'", ")", ".", "rename", "(", "columns", "=", "{", "'time'", ":", "'subannual'", "}", ")", ")", "# check if returned dataframe has subannual disaggregation, drop if not", "if", "pd", ".", "Series", "(", "[", "i", "in", "[", "-", "1", ",", "'year'", "]", "for", "i", "in", "df", ".", "subannual", "]", ")", ".", "all", "(", ")", ":", "df", ".", "drop", "(", "columns", "=", "'subannual'", ",", "inplace", "=", "True", ")", "# check if there are multiple version for any model/scenario", "lst", "=", "(", "df", "[", "META_IDX", "+", "[", "'version'", "]", "]", ".", "drop_duplicates", "(", ")", ".", "groupby", "(", "META_IDX", ")", ".", "count", "(", ")", ".", "version", ")", "if", "max", "(", "lst", ")", ">", "1", ":", "raise", "ValueError", "(", "'multiple versions for {}'", ".", "format", "(", "lst", "[", "lst", ">", "1", "]", ".", "index", ".", "to_list", "(", ")", ")", ")", "df", ".", "drop", "(", "columns", "=", "'version'", ",", "inplace", "=", "True", ")", "return", "df" ]
Query the data source, subselecting data. Available keyword arguments include - model - scenario - region - variable Example ------- ``` Connection.query(model='MESSAGE', scenario='SSP2*', variable=['Emissions|CO2', 'Primary Energy']) ```
[ "Query", "the", "data", "source", "subselecting", "data", ".", "Available", "keyword", "arguments", "include" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/iiasa.py#L209-L253
train
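A query sketch mirroring the docstring example; the filters are illustrative, and the returned long-form frame can be wrapped into an `IamDataFrame`.

```
import pyam

data = conn.query(model='MESSAGE', scenario='SSP2*',
                  variable=['Emissions|CO2', 'Primary Energy'])
df = pyam.IamDataFrame(data)
```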
IAMconsortium/pyam
pyam/statistics.py
Statistics.reindex
def reindex(self, copy=True): """Reindex the summary statistics dataframe""" ret = deepcopy(self) if copy else self ret.stats = ret.stats.reindex(index=ret._idx, level=0) if ret.idx_depth == 2: ret.stats = ret.stats.reindex(index=ret._sub_idx, level=1) if ret.rows is not None: ret.stats = ret.stats.reindex(index=ret.rows, level=ret.idx_depth) ret.stats = ret.stats.reindex(columns=ret._headers, level=0) ret.stats = ret.stats.reindex(columns=ret._subheaders, level=1) ret.stats = ret.stats.reindex(columns=ret._describe_cols, level=2) if copy: return ret
python
def reindex(self, copy=True): """Reindex the summary statistics dataframe""" ret = deepcopy(self) if copy else self ret.stats = ret.stats.reindex(index=ret._idx, level=0) if ret.idx_depth == 2: ret.stats = ret.stats.reindex(index=ret._sub_idx, level=1) if ret.rows is not None: ret.stats = ret.stats.reindex(index=ret.rows, level=ret.idx_depth) ret.stats = ret.stats.reindex(columns=ret._headers, level=0) ret.stats = ret.stats.reindex(columns=ret._subheaders, level=1) ret.stats = ret.stats.reindex(columns=ret._describe_cols, level=2) if copy: return ret
[ "def", "reindex", "(", "self", ",", "copy", "=", "True", ")", ":", "ret", "=", "deepcopy", "(", "self", ")", "if", "copy", "else", "self", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "index", "=", "ret", ".", "_idx", ",", "level", "=", "0", ")", "if", "ret", ".", "idx_depth", "==", "2", ":", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "index", "=", "ret", ".", "_sub_idx", ",", "level", "=", "1", ")", "if", "ret", ".", "rows", "is", "not", "None", ":", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "index", "=", "ret", ".", "rows", ",", "level", "=", "ret", ".", "idx_depth", ")", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "columns", "=", "ret", ".", "_headers", ",", "level", "=", "0", ")", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "columns", "=", "ret", ".", "_subheaders", ",", "level", "=", "1", ")", "ret", ".", "stats", "=", "ret", ".", "stats", ".", "reindex", "(", "columns", "=", "ret", ".", "_describe_cols", ",", "level", "=", "2", ")", "if", "copy", ":", "return", "ret" ]
Reindex the summary statistics dataframe
[ "Reindex", "the", "summary", "statistics", "dataframe" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/statistics.py#L198-L213
train
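A sketch around the `reindex` record; the `Statistics` constructor and `add` call follow the pyam tutorial pattern and are assumptions here, not taken from the record.

```
import pyam

stats = pyam.Statistics(df=df, groupby={'category': ['low', 'high']})  # assumed ctor
stats.add(df.filter(variable='Primary Energy').timeseries()[2050],
          header='Primary Energy', subheader='2050')
ordered = stats.reindex()      # copy=True (default) returns a reordered copy
stats.reindex(copy=False)      # or reorder the stored table in place
```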
IAMconsortium/pyam
pyam/statistics.py
Statistics.summarize
def summarize(self, center='mean', fullrange=None, interquartile=None, custom_format='{:.2f}'): """Format the compiled statistics to a concise string output Parameters ---------- center : str, default `mean` what to return as 'center' of the summary: `mean`, `50%`, `median` fullrange : bool, default None return the full range of data if True, or if both `fullrange` and `interquartile` are None interquartile : bool, default None return interquartile range if True custom_format : formatting specifications """ # call `reindex()` to reorder index and columns self.reindex(copy=False) center = 'median' if center == '50%' else center if fullrange is None and interquartile is None: fullrange = True return self.stats.apply(format_rows, center=center, fullrange=fullrange, interquartile=interquartile, custom_format=custom_format, axis=1, raw=False)
python
def summarize(self, center='mean', fullrange=None, interquartile=None, custom_format='{:.2f}'): """Format the compiled statistics to a concise string output Parameters ---------- center : str, default `mean` what to return as 'center' of the summary: `mean`, `50%`, `median` fullrange : bool, default None return the full range of data if True, or if both `fullrange` and `interquartile` are None interquartile : bool, default None return interquartile range if True custom_format : formatting specifications """ # call `reindex()` to reorder index and columns self.reindex(copy=False) center = 'median' if center == '50%' else center if fullrange is None and interquartile is None: fullrange = True return self.stats.apply(format_rows, center=center, fullrange=fullrange, interquartile=interquartile, custom_format=custom_format, axis=1, raw=False)
[ "def", "summarize", "(", "self", ",", "center", "=", "'mean'", ",", "fullrange", "=", "None", ",", "interquartile", "=", "None", ",", "custom_format", "=", "'{:.2f}'", ")", ":", "# call `reindex()` to reorder index and columns", "self", ".", "reindex", "(", "copy", "=", "False", ")", "center", "=", "'median'", "if", "center", "==", "'50%'", "else", "center", "if", "fullrange", "is", "None", "and", "interquartile", "is", "None", ":", "fullrange", "=", "True", "return", "self", ".", "stats", ".", "apply", "(", "format_rows", ",", "center", "=", "center", ",", "fullrange", "=", "fullrange", ",", "interquartile", "=", "interquartile", ",", "custom_format", "=", "custom_format", ",", "axis", "=", "1", ",", "raw", "=", "False", ")" ]
Format the compiled statistics to a concise string output Parameters ---------- center : str, default `mean` what to return as 'center' of the summary: `mean`, `50%`, `median` fullrange : bool, default None return the full range of data if True, or if both `fullrange` and `interquartile` are None interquartile : bool, default None return interquartile range if True custom_format : formatting specifications
[ "Format", "the", "compiled", "statistics", "to", "a", "concise", "string", "output" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/statistics.py#L215-L240
train
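Formatting the compiled table per the `summarize` record, continuing the sketch above; `center='50%'` would be mapped to the median internally.

```
summary = stats.summarize(center='median', interquartile=True,
                          custom_format='{:.1f}')
```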
IAMconsortium/pyam
pyam/plotting.py
reset_default_props
def reset_default_props(**kwargs): """Reset properties to initial cycle point""" global _DEFAULT_PROPS pcycle = plt.rcParams['axes.prop_cycle'] _DEFAULT_PROPS = { 'color': itertools.cycle(_get_standard_colors(**kwargs)) if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]), 'marker': itertools.cycle(['o', 'x', '.', '+', '*']), 'linestyle': itertools.cycle(['-', '--', '-.', ':']), }
python
def reset_default_props(**kwargs): """Reset properties to initial cycle point""" global _DEFAULT_PROPS pcycle = plt.rcParams['axes.prop_cycle'] _DEFAULT_PROPS = { 'color': itertools.cycle(_get_standard_colors(**kwargs)) if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]), 'marker': itertools.cycle(['o', 'x', '.', '+', '*']), 'linestyle': itertools.cycle(['-', '--', '-.', ':']), }
[ "def", "reset_default_props", "(", "*", "*", "kwargs", ")", ":", "global", "_DEFAULT_PROPS", "pcycle", "=", "plt", ".", "rcParams", "[", "'axes.prop_cycle'", "]", "_DEFAULT_PROPS", "=", "{", "'color'", ":", "itertools", ".", "cycle", "(", "_get_standard_colors", "(", "*", "*", "kwargs", ")", ")", "if", "len", "(", "kwargs", ")", ">", "0", "else", "itertools", ".", "cycle", "(", "[", "x", "[", "'color'", "]", "for", "x", "in", "pcycle", "]", ")", ",", "'marker'", ":", "itertools", ".", "cycle", "(", "[", "'o'", ",", "'x'", ",", "'.'", ",", "'+'", ",", "'*'", "]", ")", ",", "'linestyle'", ":", "itertools", ".", "cycle", "(", "[", "'-'", ",", "'--'", ",", "'-.'", ",", "':'", "]", ")", ",", "}" ]
Reset properties to initial cycle point
[ "Reset", "properties", "to", "initial", "cycle", "point" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L75-L84
train
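A sketch of resetting the shared style cycles; forwarding `num_colors` and `colormap` to pandas' color helper is an assumption about what `**kwargs` accepts here.

```
import pyam.plotting as plotting

plotting.reset_default_props(num_colors=8, colormap='viridis')
```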
IAMconsortium/pyam
pyam/plotting.py
default_props
def default_props(reset=False, **kwargs): """Return current default properties Parameters ---------- reset : bool if True, reset properties and return default: False """ global _DEFAULT_PROPS if _DEFAULT_PROPS is None or reset: reset_default_props(**kwargs) return _DEFAULT_PROPS
python
def default_props(reset=False, **kwargs): """Return current default properties Parameters ---------- reset : bool if True, reset properties and return default: False """ global _DEFAULT_PROPS if _DEFAULT_PROPS is None or reset: reset_default_props(**kwargs) return _DEFAULT_PROPS
[ "def", "default_props", "(", "reset", "=", "False", ",", "*", "*", "kwargs", ")", ":", "global", "_DEFAULT_PROPS", "if", "_DEFAULT_PROPS", "is", "None", "or", "reset", ":", "reset_default_props", "(", "*", "*", "kwargs", ")", "return", "_DEFAULT_PROPS" ]
Return current default properties Parameters ---------- reset : bool if True, reset properties and return default: False
[ "Return", "current", "default", "properties" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L87-L99
train
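The cycles returned by `default_props` are consumed with `next`, just as the plotting code does:

```
props = plotting.default_props(reset=True, num_colors=5)
first_color = next(props['color'])
first_marker = next(props['marker'])   # 'o' on a freshly reset cycle
```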
IAMconsortium/pyam
pyam/plotting.py
assign_style_props
def assign_style_props(df, color=None, marker=None, linestyle=None, cmap=None): """Assign the style properties for a plot Parameters ---------- df : pd.DataFrame data to be used for style properties """ if color is None and cmap is not None: raise ValueError('`cmap` must be provided with the `color` argument') # determine color, marker, and linestyle for each line n = len(df[color].unique()) if color in df.columns else \ len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates()) defaults = default_props(reset=True, num_colors=n, colormap=cmap) props = {} rc = run_control() kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)] for kind, var in kinds: rc_has_kind = kind in rc if var in df.columns: rc_has_var = rc_has_kind and var in rc[kind] props_for_kind = {} for val in df[var].unique(): if rc_has_var and val in rc[kind][var]: props_for_kind[val] = rc[kind][var][val] # cycle any way to keep defaults the same next(defaults[kind]) else: props_for_kind[val] = next(defaults[kind]) props[kind] = props_for_kind # update for special properties only if they exist in props if 'color' in props: d = props['color'] values = list(d.values()) # find if any colors in our properties corresponds with special colors # we know about overlap_idx = np.in1d(values, list(PYAM_COLORS.keys())) if overlap_idx.any(): # some exist in our special set keys = np.array(list(d.keys()))[overlap_idx] values = np.array(values)[overlap_idx] # translate each from pyam name, like AR6-SSP2-45 to proper color # designation for k, v in zip(keys, values): d[k] = PYAM_COLORS[v] # replace props with updated dict without special colors props['color'] = d return props
python
def assign_style_props(df, color=None, marker=None, linestyle=None, cmap=None): """Assign the style properties for a plot Parameters ---------- df : pd.DataFrame data to be used for style properties """ if color is None and cmap is not None: raise ValueError('`cmap` must be provided with the `color` argument') # determine color, marker, and linestyle for each line n = len(df[color].unique()) if color in df.columns else \ len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates()) defaults = default_props(reset=True, num_colors=n, colormap=cmap) props = {} rc = run_control() kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)] for kind, var in kinds: rc_has_kind = kind in rc if var in df.columns: rc_has_var = rc_has_kind and var in rc[kind] props_for_kind = {} for val in df[var].unique(): if rc_has_var and val in rc[kind][var]: props_for_kind[val] = rc[kind][var][val] # cycle any way to keep defaults the same next(defaults[kind]) else: props_for_kind[val] = next(defaults[kind]) props[kind] = props_for_kind # update for special properties only if they exist in props if 'color' in props: d = props['color'] values = list(d.values()) # find if any colors in our properties corresponds with special colors # we know about overlap_idx = np.in1d(values, list(PYAM_COLORS.keys())) if overlap_idx.any(): # some exist in our special set keys = np.array(list(d.keys()))[overlap_idx] values = np.array(values)[overlap_idx] # translate each from pyam name, like AR6-SSP2-45 to proper color # designation for k, v in zip(keys, values): d[k] = PYAM_COLORS[v] # replace props with updated dict without special colors props['color'] = d return props
[ "def", "assign_style_props", "(", "df", ",", "color", "=", "None", ",", "marker", "=", "None", ",", "linestyle", "=", "None", ",", "cmap", "=", "None", ")", ":", "if", "color", "is", "None", "and", "cmap", "is", "not", "None", ":", "raise", "ValueError", "(", "'`cmap` must be provided with the `color` argument'", ")", "# determine color, marker, and linestyle for each line", "n", "=", "len", "(", "df", "[", "color", "]", ".", "unique", "(", ")", ")", "if", "color", "in", "df", ".", "columns", "else", "len", "(", "df", "[", "list", "(", "set", "(", "df", ".", "columns", ")", "&", "set", "(", "IAMC_IDX", ")", ")", "]", ".", "drop_duplicates", "(", ")", ")", "defaults", "=", "default_props", "(", "reset", "=", "True", ",", "num_colors", "=", "n", ",", "colormap", "=", "cmap", ")", "props", "=", "{", "}", "rc", "=", "run_control", "(", ")", "kinds", "=", "[", "(", "'color'", ",", "color", ")", ",", "(", "'marker'", ",", "marker", ")", ",", "(", "'linestyle'", ",", "linestyle", ")", "]", "for", "kind", ",", "var", "in", "kinds", ":", "rc_has_kind", "=", "kind", "in", "rc", "if", "var", "in", "df", ".", "columns", ":", "rc_has_var", "=", "rc_has_kind", "and", "var", "in", "rc", "[", "kind", "]", "props_for_kind", "=", "{", "}", "for", "val", "in", "df", "[", "var", "]", ".", "unique", "(", ")", ":", "if", "rc_has_var", "and", "val", "in", "rc", "[", "kind", "]", "[", "var", "]", ":", "props_for_kind", "[", "val", "]", "=", "rc", "[", "kind", "]", "[", "var", "]", "[", "val", "]", "# cycle any way to keep defaults the same", "next", "(", "defaults", "[", "kind", "]", ")", "else", ":", "props_for_kind", "[", "val", "]", "=", "next", "(", "defaults", "[", "kind", "]", ")", "props", "[", "kind", "]", "=", "props_for_kind", "# update for special properties only if they exist in props", "if", "'color'", "in", "props", ":", "d", "=", "props", "[", "'color'", "]", "values", "=", "list", "(", "d", ".", "values", "(", ")", ")", "# find if any colors in our properties corresponds with special colors", "# we know about", "overlap_idx", "=", "np", ".", "in1d", "(", "values", ",", "list", "(", "PYAM_COLORS", ".", "keys", "(", ")", ")", ")", "if", "overlap_idx", ".", "any", "(", ")", ":", "# some exist in our special set", "keys", "=", "np", ".", "array", "(", "list", "(", "d", ".", "keys", "(", ")", ")", ")", "[", "overlap_idx", "]", "values", "=", "np", ".", "array", "(", "values", ")", "[", "overlap_idx", "]", "# translate each from pyam name, like AR6-SSP2-45 to proper color", "# designation", "for", "k", ",", "v", "in", "zip", "(", "keys", ",", "values", ")", ":", "d", "[", "k", "]", "=", "PYAM_COLORS", "[", "v", "]", "# replace props with updated dict without special colors", "props", "[", "'color'", "]", "=", "d", "return", "props" ]
Assign the style properties for a plot Parameters ---------- df : pd.DataFrame data to be used for style properties
[ "Assign", "the", "style", "properties", "for", "a", "plot" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L102-L155
train
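A sketch calling `assign_style_props` directly on long-form data; inside pyam it is normally invoked from the plotting functions, and the column choices below are placeholders.

```
data = df.as_pandas()                  # long-form frame with IAMC columns
props = plotting.assign_style_props(data, color='scenario', marker='model')
# props['color'] maps each scenario name to a matplotlib color
```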
IAMconsortium/pyam
pyam/plotting.py
reshape_line_plot
def reshape_line_plot(df, x, y): """Reshape data from long form to "line plot form". Line plot form has x value as the index with one column for each line. Each column has data points as values and all metadata as column headers. """ idx = list(df.columns.drop(y)) if df.duplicated(idx).any(): warnings.warn('Duplicated index found.') df = df.drop_duplicates(idx, keep='last') df = df.set_index(idx)[y].unstack(x).T return df
python
def reshape_line_plot(df, x, y): """Reshape data from long form to "line plot form". Line plot form has x value as the index with one column for each line. Each column has data points as values and all metadata as column headers. """ idx = list(df.columns.drop(y)) if df.duplicated(idx).any(): warnings.warn('Duplicated index found.') df = df.drop_duplicates(idx, keep='last') df = df.set_index(idx)[y].unstack(x).T return df
[ "def", "reshape_line_plot", "(", "df", ",", "x", ",", "y", ")", ":", "idx", "=", "list", "(", "df", ".", "columns", ".", "drop", "(", "y", ")", ")", "if", "df", ".", "duplicated", "(", "idx", ")", ".", "any", "(", ")", ":", "warnings", ".", "warn", "(", "'Duplicated index found.'", ")", "df", "=", "df", ".", "drop_duplicates", "(", "idx", ",", "keep", "=", "'last'", ")", "df", "=", "df", ".", "set_index", "(", "idx", ")", "[", "y", "]", ".", "unstack", "(", "x", ")", ".", "T", "return", "df" ]
Reshape data from long form to "line plot form". Line plot form has x value as the index with one column for each line. Each column has data points as values and all metadata as column headers.
[ "Reshape", "data", "from", "long", "form", "to", "line", "plot", "form", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L158-L169
train
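A self-contained illustration of the reshape; the toy frame below is made up.

```
import pandas as pd
from pyam.plotting import reshape_line_plot

toy = pd.DataFrame({'model': ['m'] * 4,
                    'scenario': ['s1', 's1', 's2', 's2'],
                    'year': [2020, 2030, 2020, 2030],
                    'value': [1.0, 2.0, 3.0, 4.0]})
wide = reshape_line_plot(toy, x='year', y='value')
# index: year; one column per (model, scenario) line
```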
IAMconsortium/pyam
pyam/plotting.py
reshape_bar_plot
def reshape_bar_plot(df, x, y, bars): """Reshape data from long form to "bar plot form". Bar plot form has x value as the index with one column for bar grouping. Table values come from y values. """ idx = [bars, x] if df.duplicated(idx).any(): warnings.warn('Duplicated index found.') df = df.drop_duplicates(idx, keep='last') df = df.set_index(idx)[y].unstack(x).T return df
python
def reshape_bar_plot(df, x, y, bars): """Reshape data from long form to "bar plot form". Bar plot form has x value as the index with one column for bar grouping. Table values come from y values. """ idx = [bars, x] if df.duplicated(idx).any(): warnings.warn('Duplicated index found.') df = df.drop_duplicates(idx, keep='last') df = df.set_index(idx)[y].unstack(x).T return df
[ "def", "reshape_bar_plot", "(", "df", ",", "x", ",", "y", ",", "bars", ")", ":", "idx", "=", "[", "bars", ",", "x", "]", "if", "df", ".", "duplicated", "(", "idx", ")", ".", "any", "(", ")", ":", "warnings", ".", "warn", "(", "'Duplicated index found.'", ")", "df", "=", "df", ".", "drop_duplicates", "(", "idx", ",", "keep", "=", "'last'", ")", "df", "=", "df", ".", "set_index", "(", "idx", ")", "[", "y", "]", ".", "unstack", "(", "x", ")", ".", "T", "return", "df" ]
Reshape data from long form to "bar plot form". Bar plot form has x value as the index with one column for bar grouping. Table values come from y values.
[ "Reshape", "data", "from", "long", "form", "to", "bar", "plot", "form", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L172-L183
train
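The bar variant groups on a single column; reusing the toy frame from the previous sketch:

```
from pyam.plotting import reshape_bar_plot

bars = reshape_bar_plot(toy, x='year', y='value', bars='scenario')
# index: year; one column per scenario, values taken from 'value'
```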
IAMconsortium/pyam
pyam/plotting.py
read_shapefile
def read_shapefile(fname, region_col=None, **kwargs): """Read a shapefile for use in regional plots. Shapefiles must have a column denoted as "region". Parameters ---------- fname : string path to shapefile to be read by geopandas region_col : string, default None if provided, rename a column in the shapefile to "region" """ gdf = gpd.read_file(fname, **kwargs) if region_col is not None: gdf = gdf.rename(columns={region_col: 'region'}) if 'region' not in gdf.columns: raise IOError('Must provide a region column') gdf['region'] = gdf['region'].str.upper() return gdf
python
def read_shapefile(fname, region_col=None, **kwargs): """Read a shapefile for use in regional plots. Shapefiles must have a column denoted as "region". Parameters ---------- fname : string path to shapefile to be read by geopandas region_col : string, default None if provided, rename a column in the shapefile to "region" """ gdf = gpd.read_file(fname, **kwargs) if region_col is not None: gdf = gdf.rename(columns={region_col: 'region'}) if 'region' not in gdf.columns: raise IOError('Must provide a region column') gdf['region'] = gdf['region'].str.upper() return gdf
[ "def", "read_shapefile", "(", "fname", ",", "region_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "gdf", "=", "gpd", ".", "read_file", "(", "fname", ",", "*", "*", "kwargs", ")", "if", "region_col", "is", "not", "None", ":", "gdf", "=", "gdf", ".", "rename", "(", "columns", "=", "{", "region_col", ":", "'region'", "}", ")", "if", "'region'", "not", "in", "gdf", ".", "columns", ":", "raise", "IOError", "(", "'Must provide a region column'", ")", "gdf", "[", "'region'", "]", "=", "gdf", "[", "'region'", "]", ".", "str", ".", "upper", "(", ")", "return", "gdf" ]
Read a shapefile for use in regional plots. Shapefiles must have a column denoted as "region". Parameters ---------- fname : string path to shapefile to be read by geopandas region_col : string, default None if provided, rename a column in the shapefile to "region"
[ "Read", "a", "shapefile", "for", "use", "in", "regional", "plots", ".", "Shapefiles", "must", "have", "a", "column", "denoted", "as", "region", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L188-L205
train
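A sketch of `read_shapefile`; the file path and source column name are placeholders, and geopandas must be installed.

```
from pyam.plotting import read_shapefile

gdf = read_shapefile('regions.shp', region_col='ISO_A3')  # hypothetical inputs
# region names are upper-cased so they join cleanly against IAMC data
```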
IAMconsortium/pyam
pyam/plotting.py
add_net_values_to_bar_plot
def add_net_values_to_bar_plot(axs, color='k'): """Add net values next to an existing vertical stacked bar chart Parameters ---------- axs : matplotlib.Axes or list thereof color : str, optional, default: black the color of the bars to add """ axs = axs if isinstance(axs, Iterable) else [axs] for ax in axs: box_args = _get_boxes(ax) for x, args in box_args.items(): rect = mpatches.Rectangle(*args, color=color) ax.add_patch(rect)
python
def add_net_values_to_bar_plot(axs, color='k'): """Add net values next to an existing vertical stacked bar chart Parameters ---------- axs : matplotlib.Axes or list thereof color : str, optional, default: black the color of the bars to add """ axs = axs if isinstance(axs, Iterable) else [axs] for ax in axs: box_args = _get_boxes(ax) for x, args in box_args.items(): rect = mpatches.Rectangle(*args, color=color) ax.add_patch(rect)
[ "def", "add_net_values_to_bar_plot", "(", "axs", ",", "color", "=", "'k'", ")", ":", "axs", "=", "axs", "if", "isinstance", "(", "axs", ",", "Iterable", ")", "else", "[", "axs", "]", "for", "ax", "in", "axs", ":", "box_args", "=", "_get_boxes", "(", "ax", ")", "for", "x", ",", "args", "in", "box_args", ".", "items", "(", ")", ":", "rect", "=", "mpatches", ".", "Rectangle", "(", "*", "args", ",", "color", "=", "color", ")", "ax", ".", "add_patch", "(", "rect", ")" ]
Add net values next to an existing vertical stacked bar chart Parameters ---------- axs : matplotlib.Axes or list thereof color : str, optional, default: black the color of the bars to add
[ "Add", "net", "values", "next", "to", "an", "existing", "vertical", "stacked", "bar", "chart" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L650-L664
train
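A sketch annotating an existing stacked bar chart; the `bar_plot` call stands in for whatever produced the stacked axes, so its arguments are assumptions.

```
ax = df.bar_plot(bars='variable', stacked=True)   # assumed stacked bar axes
plotting.add_net_values_to_bar_plot(ax, color='r')
```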
IAMconsortium/pyam
pyam/plotting.py
scatter
def scatter(df, x, y, ax=None, legend=None, title=None, color=None, marker='o', linestyle=None, cmap=None, groupby=['model', 'scenario'], with_lines=False, **kwargs): """Plot data as a scatter chart. Parameters ---------- df : pd.DataFrame Data to plot as a long-form data frame x : str column to be plotted on the x-axis y : str column to be plotted on the y-axis ax : matplotlib.Axes, optional legend : bool, optional Include a legend (`None` displays legend only if less than 13 entries) default: None title : bool or string, optional Display a custom title. color : string, optional A valid matplotlib color or column name. If a column name, common values will be provided the same color. default: None marker : string A valid matplotlib marker or column name. If a column name, common values will be provided the same marker. default: 'o' linestyle : string, optional A valid matplotlib linestyle or column name. If a column name, common values will be provided the same linestyle. default: None cmap : string, optional A colormap to use. default: None groupby : list-like, optional Data grouping for plotting. default: ['model', 'scenario'] with_lines : bool, optional Make the scatter plot with lines connecting common data. default: False kwargs : Additional arguments to pass to the pd.DataFrame.plot() function """ if ax is None: fig, ax = plt.subplots() # assign styling properties props = assign_style_props(df, color=color, marker=marker, linestyle=linestyle, cmap=cmap) # group data groups = df.groupby(groupby) # loop over grouped dataframe, plot data legend_data = [] for name, group in groups: pargs = {} labels = [] for key, kind, var in [('c', 'color', color), ('marker', 'marker', marker), ('linestyle', 'linestyle', linestyle)]: if kind in props: label = group[var].values[0] pargs[key] = props[kind][group[var].values[0]] labels.append(repr(label).lstrip("u'").strip("'")) else: pargs[key] = var if len(labels) > 0: legend_data.append(' '.join(labels)) else: legend_data.append(' '.join(name)) kwargs.update(pargs) if with_lines: ax.plot(group[x], group[y], **kwargs) else: kwargs.pop('linestyle') # scatter() can't take a linestyle ax.scatter(group[x], group[y], **kwargs) # build legend handles and labels handles, labels = ax.get_legend_handles_labels() if legend_data != [''] * len(legend_data): labels = sorted(list(set(tuple(legend_data)))) idxs = [legend_data.index(d) for d in labels] handles = [handles[i] for i in idxs] if legend is None and len(labels) < 13 or legend is not False: _add_legend(ax, handles, labels, legend) # add labels and title ax.set_xlabel(x) ax.set_ylabel(y) if title: ax.set_title(title) return ax
python
def scatter(df, x, y, ax=None, legend=None, title=None, color=None, marker='o', linestyle=None, cmap=None, groupby=['model', 'scenario'], with_lines=False, **kwargs): """Plot data as a scatter chart. Parameters ---------- df : pd.DataFrame Data to plot as a long-form data frame x : str column to be plotted on the x-axis y : str column to be plotted on the y-axis ax : matplotlib.Axes, optional legend : bool, optional Include a legend (`None` displays legend only if less than 13 entries) default: None title : bool or string, optional Display a custom title. color : string, optional A valid matplotlib color or column name. If a column name, common values will be provided the same color. default: None marker : string A valid matplotlib marker or column name. If a column name, common values will be provided the same marker. default: 'o' linestyle : string, optional A valid matplotlib linestyle or column name. If a column name, common values will be provided the same linestyle. default: None cmap : string, optional A colormap to use. default: None groupby : list-like, optional Data grouping for plotting. default: ['model', 'scenario'] with_lines : bool, optional Make the scatter plot with lines connecting common data. default: False kwargs : Additional arguments to pass to the pd.DataFrame.plot() function """ if ax is None: fig, ax = plt.subplots() # assign styling properties props = assign_style_props(df, color=color, marker=marker, linestyle=linestyle, cmap=cmap) # group data groups = df.groupby(groupby) # loop over grouped dataframe, plot data legend_data = [] for name, group in groups: pargs = {} labels = [] for key, kind, var in [('c', 'color', color), ('marker', 'marker', marker), ('linestyle', 'linestyle', linestyle)]: if kind in props: label = group[var].values[0] pargs[key] = props[kind][group[var].values[0]] labels.append(repr(label).lstrip("u'").strip("'")) else: pargs[key] = var if len(labels) > 0: legend_data.append(' '.join(labels)) else: legend_data.append(' '.join(name)) kwargs.update(pargs) if with_lines: ax.plot(group[x], group[y], **kwargs) else: kwargs.pop('linestyle') # scatter() can't take a linestyle ax.scatter(group[x], group[y], **kwargs) # build legend handles and labels handles, labels = ax.get_legend_handles_labels() if legend_data != [''] * len(legend_data): labels = sorted(list(set(tuple(legend_data)))) idxs = [legend_data.index(d) for d in labels] handles = [handles[i] for i in idxs] if legend is None and len(labels) < 13 or legend is not False: _add_legend(ax, handles, labels, legend) # add labels and title ax.set_xlabel(x) ax.set_ylabel(y) if title: ax.set_title(title) return ax
[ "def", "scatter", "(", "df", ",", "x", ",", "y", ",", "ax", "=", "None", ",", "legend", "=", "None", ",", "title", "=", "None", ",", "color", "=", "None", ",", "marker", "=", "'o'", ",", "linestyle", "=", "None", ",", "cmap", "=", "None", ",", "groupby", "=", "[", "'model'", ",", "'scenario'", "]", ",", "with_lines", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "# assign styling properties", "props", "=", "assign_style_props", "(", "df", ",", "color", "=", "color", ",", "marker", "=", "marker", ",", "linestyle", "=", "linestyle", ",", "cmap", "=", "cmap", ")", "# group data", "groups", "=", "df", ".", "groupby", "(", "groupby", ")", "# loop over grouped dataframe, plot data", "legend_data", "=", "[", "]", "for", "name", ",", "group", "in", "groups", ":", "pargs", "=", "{", "}", "labels", "=", "[", "]", "for", "key", ",", "kind", ",", "var", "in", "[", "(", "'c'", ",", "'color'", ",", "color", ")", ",", "(", "'marker'", ",", "'marker'", ",", "marker", ")", ",", "(", "'linestyle'", ",", "'linestyle'", ",", "linestyle", ")", "]", ":", "if", "kind", "in", "props", ":", "label", "=", "group", "[", "var", "]", ".", "values", "[", "0", "]", "pargs", "[", "key", "]", "=", "props", "[", "kind", "]", "[", "group", "[", "var", "]", ".", "values", "[", "0", "]", "]", "labels", ".", "append", "(", "repr", "(", "label", ")", ".", "lstrip", "(", "\"u'\"", ")", ".", "strip", "(", "\"'\"", ")", ")", "else", ":", "pargs", "[", "key", "]", "=", "var", "if", "len", "(", "labels", ")", ">", "0", ":", "legend_data", ".", "append", "(", "' '", ".", "join", "(", "labels", ")", ")", "else", ":", "legend_data", ".", "append", "(", "' '", ".", "join", "(", "name", ")", ")", "kwargs", ".", "update", "(", "pargs", ")", "if", "with_lines", ":", "ax", ".", "plot", "(", "group", "[", "x", "]", ",", "group", "[", "y", "]", ",", "*", "*", "kwargs", ")", "else", ":", "kwargs", ".", "pop", "(", "'linestyle'", ")", "# scatter() can't take a linestyle", "ax", ".", "scatter", "(", "group", "[", "x", "]", ",", "group", "[", "y", "]", ",", "*", "*", "kwargs", ")", "# build legend handles and labels", "handles", ",", "labels", "=", "ax", ".", "get_legend_handles_labels", "(", ")", "if", "legend_data", "!=", "[", "''", "]", "*", "len", "(", "legend_data", ")", ":", "labels", "=", "sorted", "(", "list", "(", "set", "(", "tuple", "(", "legend_data", ")", ")", ")", ")", "idxs", "=", "[", "legend_data", ".", "index", "(", "d", ")", "for", "d", "in", "labels", "]", "handles", "=", "[", "handles", "[", "i", "]", "for", "i", "in", "idxs", "]", "if", "legend", "is", "None", "and", "len", "(", "labels", ")", "<", "13", "or", "legend", "is", "not", "False", ":", "_add_legend", "(", "ax", ",", "handles", ",", "labels", ",", "legend", ")", "# add labels and title", "ax", ".", "set_xlabel", "(", "x", ")", "ax", ".", "set_ylabel", "(", "y", ")", "if", "title", ":", "ax", ".", "set_title", "(", "title", ")", "return", "ax" ]
Plot data as a scatter chart. Parameters ---------- df : pd.DataFrame Data to plot as a long-form data frame x : str column to be plotted on the x-axis y : str column to be plotted on the y-axis ax : matplotlib.Axes, optional legend : bool, optional Include a legend (`None` displays legend only if less than 13 entries) default: None title : bool or string, optional Display a custom title. color : string, optional A valid matplotlib color or column name. If a column name, common values will be provided the same color. default: None marker : string A valid matplotlib marker or column name. If a column name, common values will be provided the same marker. default: 'o' linestyle : string, optional A valid matplotlib linestyle or column name. If a column name, common values will be provided the same linestyle. default: None cmap : string, optional A colormap to use. default: None groupby : list-like, optional Data grouping for plotting. default: ['model', 'scenario'] with_lines : bool, optional Make the scatter plot with lines connecting common data. default: False kwargs : Additional arguments to pass to the pd.DataFrame.plot() function
[ "Plot", "data", "as", "a", "scatter", "chart", "." ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L667-L760
train
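Calling the module-level `scatter` directly; `wide_df` is assumed to hold one column per plotted variable plus the grouping columns, matching the pivoted output of `IamDataFrame.scatter`.

```
ax = plotting.scatter(wide_df, x='Emissions|CO2', y='Primary Energy',
                      color='scenario', with_lines=True)
```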
IAMconsortium/pyam
pyam/logger.py
logger
def logger(): """Access global logger""" global _LOGGER if _LOGGER is None: logging.basicConfig() _LOGGER = logging.getLogger() _LOGGER.setLevel('INFO') return _LOGGER
python
def logger(): """Access global logger""" global _LOGGER if _LOGGER is None: logging.basicConfig() _LOGGER = logging.getLogger() _LOGGER.setLevel('INFO') return _LOGGER
[ "def", "logger", "(", ")", ":", "global", "_LOGGER", "if", "_LOGGER", "is", "None", ":", "logging", ".", "basicConfig", "(", ")", "_LOGGER", "=", "logging", ".", "getLogger", "(", ")", "_LOGGER", ".", "setLevel", "(", "'INFO'", ")", "return", "_LOGGER" ]
Access global logger
[ "Access", "global", "logger" ]
4077929ca6e7be63a0e3ecf882c5f1da97b287bf
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/logger.py#L7-L14
train
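The `logger` accessor is used throughout pyam; a direct call:

```
from pyam.logger import logger

logger().info('message routed through the shared pyam logger')
```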