repository_name (string, 5-67 chars) | func_path_in_repository (string, 4-234 chars) | func_name (string, 0-314 chars) | whole_func_string (string, 52-3.87M chars) | language (6 classes) | func_code_string (string, 52-3.87M chars) | func_documentation_string (string, 1-47.2k chars) | func_code_url (string, 85-339 chars) |
---|---|---|---|---|---|---|---|
gawel/irc3 | irc3/base.py | IrcObject.from_config | def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context | python | def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context | return an instance configured with the ``cfg`` dict | https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/base.py#L373-L392 |
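An illustrative usage sketch for `from_config` (not part of the dataset row above): it assumes `irc3.IrcBot` as the concrete `IrcObject` subclass, and the config keys shown (`nick`, `host`, `port`, `ssl`, `includes`) come from irc3's documentation rather than from this row.

```python
import irc3

# Hedged sketch: from_config() merges the dict with any extra kwargs and
# instantiates the class, so a plain dict is enough to boot a bot.
config = dict(
    nick='mybot',
    host='irc.libera.chat',
    port=6697,
    ssl=True,
    includes=['irc3.plugins.core'],
)
bot = irc3.IrcBot.from_config(config)
bot.run(forever=True)
```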
Chetic/robotframework-async | AsyncLibrary/robot_async.py | AsyncLibrary.async_run | def async_run(self, keyword, *args, **kwargs):
''' Executes the provided Robot Framework keyword in a separate thread and immediately returns a handle to be used with async_get '''
handle = self._last_thread_handle
thread = self._threaded(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle | python | def async_run(self, keyword, *args, **kwargs):
''' Executes the provided Robot Framework keyword in a separate thread and immediately returns a handle to be used with async_get '''
handle = self._last_thread_handle
thread = self._threaded(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle | Executes the provided Robot Framework keyword in a separate thread and immediately returns a handle to be used with async_get | https://github.com/Chetic/robotframework-async/blob/1a2b19d1927f6cd3f62947a9b2b8a166a24278d1/AsyncLibrary/robot_async.py#L14-L21 |
Chetic/robotframework-async | AsyncLibrary/robot_async.py | AsyncLibrary.async_get | def async_get(self, handle):
''' Blocks until the thread created by async_run returns '''
assert handle in self._thread_pool, 'Invalid async call handle'
result = self._thread_pool[handle].result_queue.get()
del self._thread_pool[handle]
return result | python | def async_get(self, handle):
''' Blocks until the thread created by async_run returns '''
assert handle in self._thread_pool, 'Invalid async call handle'
result = self._thread_pool[handle].result_queue.get()
del self._thread_pool[handle]
return result | Blocks until the thread created by async_run returns | https://github.com/Chetic/robotframework-async/blob/1a2b19d1927f6cd3f62947a9b2b8a166a24278d1/AsyncLibrary/robot_async.py#L23-L28 |
Chetic/robotframework-async | AsyncLibrary/robot_async.py | AsyncLibrary._get_handler_from_keyword | def _get_handler_from_keyword(self, keyword):
''' Gets the Robot Framework handler associated with the given keyword '''
if EXECUTION_CONTEXTS.current is None:
raise RobotNotRunningError('Cannot access execution context')
return EXECUTION_CONTEXTS.current.get_handler(keyword) | python | def _get_handler_from_keyword(self, keyword):
''' Gets the Robot Framework handler associated with the given keyword '''
if EXECUTION_CONTEXTS.current is None:
raise RobotNotRunningError('Cannot access execution context')
return EXECUTION_CONTEXTS.current.get_handler(keyword) | Gets the Robot Framework handler associated with the given keyword | https://github.com/Chetic/robotframework-async/blob/1a2b19d1927f6cd3f62947a9b2b8a166a24278d1/AsyncLibrary/robot_async.py#L30-L34 |
themartorana/python-postmark | postmark/core.py | PMMail._set_custom_headers | def _set_custom_headers(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__custom_headers', {})
elif isinstance(value, dict):
setattr(self, '_PMMail__custom_headers', value)
else:
raise TypeError('Custom headers must be a dictionary of key-value pairs') | python | def _set_custom_headers(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__custom_headers', {})
elif isinstance(value, dict):
setattr(self, '_PMMail__custom_headers', value)
else:
raise TypeError('Custom headers must be a dictionary of key-value pairs') | A special set function to ensure
we're setting with a dictionary | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L146-L156 |
themartorana/python-postmark | postmark/core.py | PMMail._set_metadata | def _set_metadata(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__metadata', {})
elif isinstance(value, dict):
for k, v in value.items():
if (not isinstance(k, str) and not isinstance(k, int)) \
or (not isinstance(v, str) and not isinstance(v, int)):
raise TypeError('Metadata keys and values can only be strings or integers')
setattr(self, '_PMMail__metadata', value)
else:
raise TypeError('Metadata must be a dictionary of key-value pairs') | python | def _set_metadata(self, value):
'''
A special set function to ensure
we're setting with a dictionary
'''
if value is None:
setattr(self, '_PMMail__metadata', {})
elif isinstance(value, dict):
for k, v in value.items():
if (not isinstance(k, str) and not isinstance(k, int)) \
or (not isinstance(v, str) and not isinstance(v, int)):
raise TypeError('Metadata keys and values can only be strings or integers')
setattr(self, '_PMMail__metadata', value)
else:
raise TypeError('Metadata must be a dictionary of key-value pairs') | A special set function to ensure
we're setting with a dictionary | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L158-L172 |
themartorana/python-postmark | postmark/core.py | PMMail._set_attachments | def _set_attachments(self, value):
'''
A special set function to ensure
we're setting with a list
'''
if value is None:
setattr(self, '_PMMail__attachments', [])
elif isinstance(value, list):
setattr(self, '_PMMail__attachments', value)
else:
raise TypeError('Attachments must be a list') | python | def _set_attachments(self, value):
'''
A special set function to ensure
we're setting with a list
'''
if value is None:
setattr(self, '_PMMail__attachments', [])
elif isinstance(value, list):
setattr(self, '_PMMail__attachments', value)
else:
raise TypeError('Attachments must be a list') | A special set function to ensure
we're setting with a list | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L174-L184 |
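A hedged sketch of how the three setters above are typically exercised, assuming they back `custom_headers`, `metadata`, and `attachments` properties on `PMMail` (the property names are inferred, not shown in these rows); the attachment tuple shape matches the one built by the Django backend later in this table.

```python
from postmark import PMMail

msg = PMMail(api_key='POSTMARK_API_TEST', sender='a@example.com',
             to='b@example.com', subject='Hi', text_body='Hello')
msg.custom_headers = {'X-Campaign': 'spring-launch'}           # must be a dict
msg.metadata = {'user_id': 42}                                 # str/int keys and values only
msg.attachments = [('report.txt', 'aGVsbG8=', 'text/plain')]   # (name, base64 content, mimetype)
# Any other type raises TypeError, as the setters above show.
```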
themartorana/python-postmark | postmark/core.py | PMMail._check_values | def _check_values(self):
'''
Make sure all values are of the appropriate
type and are not missing.
'''
if not self.__api_key:
raise PMMailMissingValueException('Cannot send an e-mail without a Postmark API Key')
elif not self.__sender:
raise PMMailMissingValueException('Cannot send an e-mail without a sender (.sender field)')
elif not self.__to:
raise PMMailMissingValueException('Cannot send an e-mail without at least one recipient (.to field)')
elif (self.__template_id or self.__template_model) and not all([self.__template_id, self.__template_model]):
raise PMMailMissingValueException(
'Cannot send a template e-mail without both template_id and template_model set')
elif not any([self.__template_id, self.__template_model, self.__subject]):
raise PMMailMissingValueException('Cannot send an e-mail without a subject')
elif not self.__html_body and not self.__text_body and not self.__template_id:
raise PMMailMissingValueException('Cannot send an e-mail without either an HTML or text version of your e-mail body')
if self.__track_opens and not self.__html_body:
print('WARNING: .track_opens set to True with no .html_body set. Tracking opens will not work; message will still send.') | python | def _check_values(self):
'''
Make sure all values are of the appropriate
type and are not missing.
'''
if not self.__api_key:
raise PMMailMissingValueException('Cannot send an e-mail without a Postmark API Key')
elif not self.__sender:
raise PMMailMissingValueException('Cannot send an e-mail without a sender (.sender field)')
elif not self.__to:
raise PMMailMissingValueException('Cannot send an e-mail without at least one recipient (.to field)')
elif (self.__template_id or self.__template_model) and not all([self.__template_id, self.__template_model]):
raise PMMailMissingValueException(
'Cannot send a template e-mail without both template_id and template_model set')
elif not any([self.__template_id, self.__template_model, self.__subject]):
raise PMMailMissingValueException('Cannot send an e-mail without a subject')
elif not self.__html_body and not self.__text_body and not self.__template_id:
raise PMMailMissingValueException('Cannot send an e-mail without either an HTML or text version of your e-mail body')
if self.__track_opens and not self.__html_body:
print('WARNING: .track_opens set to True with no .html_body set. Tracking opens will not work; message will still send.') | Make sure all values are of the appropriate
type and are not missing. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L371-L390 |
themartorana/python-postmark | postmark/core.py | PMMail.send | def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err) | python | def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err) | Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L472-L552 |
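A minimal, hedged example of `send()` in test mode; the constructor keywords mirror those used by `_build_message` further down this table, and `test=True` just prints the JSON payload as the docstring describes.

```python
from postmark import PMMail

message = PMMail(
    api_key='POSTMARK_API_TEST',      # placeholder server token
    sender='sender@example.com',
    to='recipient@example.com',
    subject='Hello from Postmark',
    text_body='Plain-text body',
)
message.send(test=True)               # prints the JSON message instead of POSTing it
```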
themartorana/python-postmark | postmark/core.py | PMBatchMail.remove_message | def remove_message(self, message):
'''
Remove a message from the batch
'''
if message in self.__messages:
self.__messages.remove(message) | python | def remove_message(self, message):
'''
Remove a message from the batch
'''
if message in self.__messages:
self.__messages.remove(message) | Remove a message from the batch | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L615-L620 |
themartorana/python-postmark | postmark/core.py | PMBounceManager.delivery_stats | def delivery_stats(self):
'''
Returns a summary of inactive emails and bounces by type.
'''
self._check_values()
req = Request(
__POSTMARK_URL__ + 'deliverystats',
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err | python | def delivery_stats(self):
'''
Returns a summary of inactive emails and bounces by type.
'''
self._check_values()
req = Request(
__POSTMARK_URL__ + 'deliverystats',
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err | Returns a summary of inactive emails and bounces by type. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L765-L793 |
themartorana/python-postmark | postmark/core.py | PMBounceManager.get_all | def get_all(self, inactive='', email_filter='', tag='', count=25, offset=0):
'''
Fetches a portion of bounces according to the specified input criteria. The count and offset
parameters are mandatory. You should never retrieve all bounces as that could be excessively
slow for your application. To know how many bounces you have, you need to request a portion
first, usually the first page, and the service will return the count in the TotalCount property
of the response.
'''
self._check_values()
params = '?inactive=' + inactive + '&emailFilter=' + email_filter +'&tag=' + tag
params += '&count=' + str(count) + '&offset=' + str(offset)
req = Request(
__POSTMARK_URL__ + 'bounces' + params,
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err | python | def get_all(self, inactive='', email_filter='', tag='', count=25, offset=0):
'''
Fetches a portion of bounces according to the specified input criteria. The count and offset
parameters are mandatory. You should never retrieve all bounces as that could be excessively
slow for your application. To know how many bounces you have, you need to request a portion
first, usually the first page, and the service will return the count in the TotalCount property
of the response.
'''
self._check_values()
params = '?inactive=' + inactive + '&emailFilter=' + email_filter +'&tag=' + tag
params += '&count=' + str(count) + '&offset=' + str(offset)
req = Request(
__POSTMARK_URL__ + 'bounces' + params,
None,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark:'
result = urlopen(req)
with closing(result):
if result.code == 200:
return json.loads(result.read())
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
return err | Fetches a portion of bounces according to the specified input criteria. The count and offset
parameters are mandatory. You should never retrieve all bounces as that could be excessively
slow for your application. To know how many bounces you have, you need to request a portion
first, usually the first page, and the service will return the count in the TotalCount property
of the response. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L795-L831 |
themartorana/python-postmark | postmark/core.py | PMBounceManager.activate | def activate(self, bounce_id):
'''
Activates a deactivated bounce.
'''
self._check_values()
req_url = '/bounces/' + str(bounce_id) + '/activate'
# print req_url
h1 = HTTPConnection('api.postmarkapp.com')
dta = urlencode({"data": "blank"}).encode('utf8')
req = h1.request(
'PUT',
req_url,
dta,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
r = h1.getresponse()
return json.loads(r.read()) | python | def activate(self, bounce_id):
'''
Activates a deactivated bounce.
'''
self._check_values()
req_url = '/bounces/' + str(bounce_id) + '/activate'
# print req_url
h1 = HTTPConnection('api.postmarkapp.com')
dta = urlencode({"data": "blank"}).encode('utf8')
req = h1.request(
'PUT',
req_url,
dta,
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
r = h1.getresponse()
return json.loads(r.read()) | Activates a deactivated bounce. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L928-L949 |
themartorana/python-postmark | postmark/django_backend.py | EmailBackend.send_messages | def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
sent = self._send(email_messages)
if sent:
return len(email_messages)
return 0 | python | def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
sent = self._send(email_messages)
if sent:
return len(email_messages)
return 0 | Sends one or more EmailMessage objects and returns the number of email
messages sent. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/django_backend.py#L57-L67 |
themartorana/python-postmark | postmark/django_backend.py | EmailBackend._build_message | def _build_message(self, message):
"""A helper method to convert a PMEmailMessage to a PMMail"""
if not message.recipients():
return False
recipients = ','.join(message.to)
recipients_cc = ','.join(message.cc)
recipients_bcc = ','.join(message.bcc)
text_body = message.body
html_body = None
if isinstance(message, EmailMultiAlternatives):
for alt in message.alternatives:
if alt[1] == "text/html":
html_body = alt[0]
break
elif getattr(message, 'content_subtype', None) == 'html':
# Don't send html content as plain text
text_body = None
html_body = message.body
reply_to = ','.join(message.reply_to)
custom_headers = {}
if message.extra_headers and isinstance(message.extra_headers, dict):
if 'Reply-To' in message.extra_headers:
reply_to = message.extra_headers.pop('Reply-To')
if len(message.extra_headers):
custom_headers = message.extra_headers
attachments = []
if message.attachments and isinstance(message.attachments, list):
if len(message.attachments):
for item in message.attachments:
if isinstance(item, tuple):
(f, content, m) = item
content = base64.b64encode(content)
# b64encode returns bytes on Python 3. PMMail needs a
# str (for JSON serialization). Convert on Python 3
# only to avoid a useless performance hit on Python 2.
if not isinstance(content, str):
content = content.decode()
attachments.append((f, content, m))
else:
attachments.append(item)
postmark_message = PMMail(api_key=self.api_key,
subject=message.subject,
sender=message.from_email,
to=recipients,
cc=recipients_cc,
bcc=recipients_bcc,
text_body=text_body,
html_body=html_body,
reply_to=reply_to,
custom_headers=custom_headers,
attachments=attachments)
postmark_message.tag = getattr(message, 'tag', None)
postmark_message.track_opens = getattr(message, 'track_opens', False)
return postmark_message | python | def _build_message(self, message):
"""A helper method to convert a PMEmailMessage to a PMMail"""
if not message.recipients():
return False
recipients = ','.join(message.to)
recipients_cc = ','.join(message.cc)
recipients_bcc = ','.join(message.bcc)
text_body = message.body
html_body = None
if isinstance(message, EmailMultiAlternatives):
for alt in message.alternatives:
if alt[1] == "text/html":
html_body = alt[0]
break
elif getattr(message, 'content_subtype', None) == 'html':
# Don't send html content as plain text
text_body = None
html_body = message.body
reply_to = ','.join(message.reply_to)
custom_headers = {}
if message.extra_headers and isinstance(message.extra_headers, dict):
if 'Reply-To' in message.extra_headers:
reply_to = message.extra_headers.pop('Reply-To')
if len(message.extra_headers):
custom_headers = message.extra_headers
attachments = []
if message.attachments and isinstance(message.attachments, list):
if len(message.attachments):
for item in message.attachments:
if isinstance(item, tuple):
(f, content, m) = item
content = base64.b64encode(content)
# b64encode returns bytes on Python 3. PMMail needs a
# str (for JSON serialization). Convert on Python 3
# only to avoid a useless performance hit on Python 2.
if not isinstance(content, str):
content = content.decode()
attachments.append((f, content, m))
else:
attachments.append(item)
postmark_message = PMMail(api_key=self.api_key,
subject=message.subject,
sender=message.from_email,
to=recipients,
cc=recipients_cc,
bcc=recipients_bcc,
text_body=text_body,
html_body=html_body,
reply_to=reply_to,
custom_headers=custom_headers,
attachments=attachments)
postmark_message.tag = getattr(message, 'tag', None)
postmark_message.track_opens = getattr(message, 'track_opens', False)
return postmark_message | A helper method to convert a PMEmailMessage to a PMMail | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/django_backend.py#L69-L128 |
themartorana/python-postmark | postmark/django_backend.py | EmailBackend._send | def _send(self, messages):
"""A helper method that does the actual sending."""
if len(messages) == 1:
to_send = self._build_message(messages[0])
if to_send is False:
# The message was missing recipients.
# Bail.
return False
else:
pm_messages = list(map(self._build_message, messages))
pm_messages = [m for m in pm_messages if m]
if len(pm_messages) == 0:
# If after filtering, there aren't any messages
# to send, bail.
return False
to_send = PMBatchMail(messages=pm_messages)
try:
to_send.send(test=self.test_mode)
except:
if self.fail_silently:
return False
raise
return True | python | def _send(self, messages):
"""A helper method that does the actual sending."""
if len(messages) == 1:
to_send = self._build_message(messages[0])
if to_send is False:
# The message was missing recipients.
# Bail.
return False
else:
pm_messages = list(map(self._build_message, messages))
pm_messages = [m for m in pm_messages if m]
if len(pm_messages) == 0:
# If after filtering, there aren't any messages
# to send, bail.
return False
to_send = PMBatchMail(messages=pm_messages)
try:
to_send.send(test=self.test_mode)
except:
if self.fail_silently:
return False
raise
return True | A helper method that does the actual sending. | https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/django_backend.py#L130-L152 |
olivettigroup/article-downloader | articledownloader/scrapers.py | Nature.handle_starttag | def handle_starttag(self, tag, attrs):
'''
PDF link handler; never gets explicitly called by user
'''
if tag == 'a' and ( ('class', 'download-pdf') in attrs or ('id', 'download-pdf') in attrs ):
for attr in attrs:
if attr[0] == 'href':
self.download_link = 'http://www.nature.com' + attr[1] | python | def handle_starttag(self, tag, attrs):
'''
PDF link handler; never gets explicitly called by user
'''
if tag == 'a' and ( ('class', 'download-pdf') in attrs or ('id', 'download-pdf') in attrs ):
for attr in attrs:
if attr[0] == 'href':
self.download_link = 'http://www.nature.com' + attr[1] | PDF link handler; never gets explicitly called by user | https://github.com/olivettigroup/article-downloader/blob/2f56e498413072e2e3a9644d3703804c3cf297ad/articledownloader/scrapers.py#L49-L56 |
marcwebbie/passpie | passpie/utils.py | genpass | def genpass(pattern=r'[\w]{32}'):
"""generates a password with random chararcters
"""
try:
return rstr.xeger(pattern)
except re.error as e:
raise ValueError(str(e)) | python | def genpass(pattern=r'[\w]{32}'):
"""generates a password with random chararcters
"""
try:
return rstr.xeger(pattern)
except re.error as e:
raise ValueError(str(e)) | generates a password with random characters | https://github.com/marcwebbie/passpie/blob/421c40a57ad5f55e3f14b323c929a2c41dfb5527/passpie/utils.py#L18-L24 |
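A small usage sketch for `genpass()`; the behaviour follows directly from the code above (default pattern r'[\w]{32}', ValueError on an invalid regex).

```python
from passpie.utils import genpass

password = genpass()            # 32 word-characters by default (pattern r'[\w]{32}')
pin = genpass(r'[0-9]{6}')      # custom pattern: a 6-digit PIN
# genpass(r'[') would raise ValueError, since the bad pattern surfaces as re.error
```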
andreax79/python-cstruct | cstruct/__init__.py | CStruct.unpack | def unpack(self, string):
"""
Unpack the string containing packed C structure data
"""
if string is None:
string = CHAR_ZERO * self.__size__
data = struct.unpack(self.__fmt__, string)
i = 0
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
setattr(self, field, data[i])
i = i + 1
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
setattr(self, field, sub_struct)
i = i + sub_struct.size
else: # multiple struct
sub_structs = []
for j in range(0, num):
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
i = i + sub_struct.size
sub_structs.append(sub_struct)
setattr(self, field, sub_structs)
elif vlen == 1:
setattr(self, field, data[i])
i = i + vlen
else:
setattr(self, field, list(data[i:i+vlen]))
i = i + vlen | python | def unpack(self, string):
"""
Unpack the string containing packed C structure data
"""
if string is None:
string = CHAR_ZERO * self.__size__
data = struct.unpack(self.__fmt__, string)
i = 0
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
setattr(self, field, data[i])
i = i + 1
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
setattr(self, field, sub_struct)
i = i + sub_struct.size
else: # multiple struct
sub_structs = []
for j in range(0, num):
sub_struct = vtype()
sub_struct.unpack(EMPTY_BYTES_STRING.join(data[i:i+sub_struct.size]))
i = i + sub_struct.size
sub_structs.append(sub_struct)
setattr(self, field, sub_structs)
elif vlen == 1:
setattr(self, field, data[i])
i = i + vlen
else:
setattr(self, field, list(data[i:i+vlen]))
i = i + vlen | Unpack the string containing packed C structure data | https://github.com/andreax79/python-cstruct/blob/9fba4f10de9045c34a605e187cd24520962450bf/cstruct/__init__.py#L318-L351 |
andreax79/python-cstruct | cstruct/__init__.py | CStruct.pack | def pack(self):
"""
Pack the structure data into a string
"""
data = []
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
data.append(getattr(self, field))
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
v = getattr(self, field, vtype())
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
else: # multiple struct
values = getattr(self, field, [])
for j in range(0, num):
try:
v = values[j]
except:
v = vtype()
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
elif vlen == 1:
data.append(getattr(self, field))
else:
v = getattr(self, field)
v = v[:vlen] + [0] * (vlen - len(v))
data.extend(v)
return struct.pack(self.__fmt__, *data) | python | def pack(self):
"""
Pack the structure data into a string
"""
data = []
for field in self.__fields__:
(vtype, vlen) = self.__fields_types__[field]
if vtype == 'char': # string
data.append(getattr(self, field))
elif isinstance(vtype, CStructMeta):
num = int(vlen / vtype.size)
if num == 1: # single struct
v = getattr(self, field, vtype())
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
else: # multiple struct
values = getattr(self, field, [])
for j in range(0, num):
try:
v = values[j]
except:
v = vtype()
v = v.pack()
if sys.version_info >= (3, 0):
v = ([bytes([x]) for x in v])
data.extend(v)
elif vlen == 1:
data.append(getattr(self, field))
else:
v = getattr(self, field)
v = v[:vlen] + [0] * (vlen - len(v))
data.extend(v)
return struct.pack(self.__fmt__, *data) | Pack the structure data into a string | https://github.com/andreax79/python-cstruct/blob/9fba4f10de9045c34a605e187cd24520962450bf/cstruct/__init__.py#L353-L387 |
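A hedged round-trip sketch for `pack()`/`unpack()`, following the project's documented style of declaring structures with `__byte_order__` and `__struct__`; the layout and field names here are purely illustrative.

```python
import cstruct

class Position(cstruct.CStruct):
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
        unsigned char head;
        unsigned char sector;
        unsigned char cyl;
    """

pos = Position()                 # fields start zeroed (unpack(None) above)
pos.head, pos.sector, pos.cyl = 254, 63, 134
buf = pos.pack()                 # bytes in the declared layout

other = Position()
other.unpack(buf)                # repopulates the fields from the packed buffer
assert other.head == 254
```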
andreax79/python-cstruct | cstruct/examples/who.py | Utmp.print_info | def print_info(self):
"andreax + pts/0 2013-08-21 08:58 . 32341 (l26.box)"
" pts/34 2013-06-12 15:04 26396 id=s/34 term=0 exit=0"
# if self.ut_type not in [6,7]:
# return
print("%-10s %-12s %15s %15s %-8s" % (
str_from_c(self.ut_user),
str_from_c(self.ut_line),
time.strftime("%Y-%m-%d %H:%M", time.gmtime(self.ut_tv.tv_sec)),
self.ut_pid,
str_from_c(self.ut_host) and "(%s)" % str_from_c(self.ut_host) or str_from_c(self.ut_id) and "id=%s" % str_from_c(self.ut_id) or "")) | python | def print_info(self):
"andreax + pts/0 2013-08-21 08:58 . 32341 (l26.box)"
" pts/34 2013-06-12 15:04 26396 id=s/34 term=0 exit=0"
# if self.ut_type not in [6,7]:
# return
print("%-10s %-12s %15s %15s %-8s" % (
str_from_c(self.ut_user),
str_from_c(self.ut_line),
time.strftime("%Y-%m-%d %H:%M", time.gmtime(self.ut_tv.tv_sec)),
self.ut_pid,
str_from_c(self.ut_host) and "(%s)" % str_from_c(self.ut_host) or str_from_c(self.ut_id) and "id=%s" % str_from_c(self.ut_id) or "")) | andreax + pts/0 2013-08-21 08:58 . 32341 (l26.box) | https://github.com/andreax79/python-cstruct/blob/9fba4f10de9045c34a605e187cd24520962450bf/cstruct/examples/who.py#L89-L99 |
marcwebbie/passpie | passpie/importers/__init__.py | get_all | def get_all():
"""Get all subclasses of BaseImporter from module and return and generator
"""
_import_all_importer_files()
for module in (value for key, value in globals().items()
if key in __all__):
for klass_name, klass in inspect.getmembers(module, inspect.isclass):
if klass is not BaseImporter and issubclass(klass, BaseImporter):
yield klass
for klass in _get_importers_from_entry_points():
yield klass | python | def get_all():
"""Get all subclasses of BaseImporter from module and return and generator
"""
_import_all_importer_files()
for module in (value for key, value in globals().items()
if key in __all__):
for klass_name, klass in inspect.getmembers(module, inspect.isclass):
if klass is not BaseImporter and issubclass(klass, BaseImporter):
yield klass
for klass in _get_importers_from_entry_points():
yield klass | Get all subclasses of BaseImporter from module and return a generator | https://github.com/marcwebbie/passpie/blob/421c40a57ad5f55e3f14b323c929a2c41dfb5527/passpie/importers/__init__.py#L48-L61 |
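A minimal consumption sketch for `get_all()`; it only iterates the discovered importer classes, since the matching/handling API of `BaseImporter` is not shown in this row.

```python
from passpie.importers import get_all

for importer_class in get_all():
    print(importer_class.__name__)   # each is a BaseImporter subclass
```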
marcwebbie/passpie | passpie/cli.py | list_database | def list_database(db):
"""Print credential as a table"""
credentials = db.credentials()
if credentials:
table = Table(
db.config['headers'],
table_format=db.config['table_format'],
colors=db.config['colors'],
hidden=db.config['hidden'],
hidden_string=db.config['hidden_string'],
)
click.echo(table.render(credentials)) | python | def list_database(db):
"""Print credential as a table"""
credentials = db.credentials()
if credentials:
table = Table(
db.config['headers'],
table_format=db.config['table_format'],
colors=db.config['colors'],
hidden=db.config['hidden'],
hidden_string=db.config['hidden_string'],
)
click.echo(table.render(credentials)) | Print credential as a table | https://github.com/marcwebbie/passpie/blob/421c40a57ad5f55e3f14b323c929a2c41dfb5527/passpie/cli.py#L127-L138 |
marcwebbie/passpie | passpie/cli.py | check_config | def check_config(db, level):
"""Show current configuration for shell"""
if level == 'global':
configuration = config.read(config.HOMEDIR, '.passpierc')
elif level == 'local':
configuration = config.read(os.path.join(db.path))
elif level == 'current':
configuration = db.config
if configuration:
click.echo(yaml.safe_dump(configuration, default_flow_style=False)) | python | def check_config(db, level):
"""Show current configuration for shell"""
if level == 'global':
configuration = config.read(config.HOMEDIR, '.passpierc')
elif level == 'local':
configuration = config.read(os.path.join(db.path))
elif level == 'current':
configuration = db.config
if configuration:
click.echo(yaml.safe_dump(configuration, default_flow_style=False)) | Show current configuration for shell | https://github.com/marcwebbie/passpie/blob/421c40a57ad5f55e3f14b323c929a2c41dfb5527/passpie/cli.py#L146-L156 |
mikeywaites/flask-arrested | arrested/handlers.py | Handler.process | def process(self, data=None, **kwargs):
"""Process the provided data and invoke :meth:`Handler.handle` method for this
Handler class.
:param data: The data being processed.
:returns: self
:rtype: :class:`Handler`
.. code-block:: python
def post(self, *args, **kwargs):
self.request = self.get_request_handler()
self.request.process(self.get_data())
return self.get_create_response()
.. seealso:
:meth:`Handler.process`
"""
self.data = self.handle(data, **kwargs)
return self | python | def process(self, data=None, **kwargs):
"""Process the provided data and invoke :meth:`Handler.handle` method for this
Handler class.
:param data: The data being processed.
:returns: self
:rtype: :class:`Handler`
.. code-block:: python
def post(self, *args, **kwargs):
self.request = self.get_request_handler()
self.request.process(self.get_data())
return self.get_create_response()
.. seealso:
:meth:`Handler.process`
"""
self.data = self.handle(data, **kwargs)
return self | Process the provided data and invoke :meth:`Handler.handle` method for this
Handler class.
:param data: The data being processed.
:returns: self
:rtype: :class:`Handler`
.. code-block:: python
def post(self, *args, **kwargs):
self.request = self.get_request_handler()
self.request.process(self.get_data())
return self.get_create_response()
.. seealso:
:meth:`Handler.process` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/handlers.py#L54-L73 |
mikeywaites/flask-arrested | arrested/handlers.py | RequestHandler.process | def process(self, data=None):
"""Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON.
"""
return super(RequestHandler, self).process(data=data or self.get_request_data()) | python | def process(self, data=None):
"""Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON.
"""
return super(RequestHandler, self).process(data=data or self.get_request_data()) | Fetch incoming data from the Flask request object when no data is supplied
to the process method. By default, the RequestHandler expects the
incoming data to be sent as JSON. | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/handlers.py#L116-L122 |
mikeywaites/flask-arrested | arrested/contrib/sql_alchemy.py | DBMixin.save | def save(self, obj):
"""Add ``obj`` to the SQLAlchemy session and commit the changes back to
the database.
:param obj: SQLAlchemy object being saved
:returns: The saved object
"""
session = self.get_db_session()
session.add(obj)
session.commit()
return obj | python | def save(self, obj):
"""Add ``obj`` to the SQLAlchemy session and commit the changes back to
the database.
:param obj: SQLAlchemy object being saved
:returns: The saved object
"""
session = self.get_db_session()
session.add(obj)
session.commit()
return obj | Add ``obj`` to the SQLAlchemy session and commit the changes back to
the database.
:param obj: SQLAlchemy object being saved
:returns: The saved object | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/sql_alchemy.py#L52-L63 |
mikeywaites/flask-arrested | arrested/contrib/sql_alchemy.py | DBObjectMixin.filter_by_id | def filter_by_id(self, query):
"""Apply the primary key filter to query to filter the results for a specific
instance by id.
The default filter applied by this method can be controlled using the
url_id_param
:param query: SQLAlchemy Query
:returns: A SQLAlchemy Query object
"""
if self.model is None:
raise ArrestedException('DBObjectMixin requires a model to be set.')
idfield = getattr(self.model, self.model_id_param, None)
if not idfield:
raise ArrestedException('DBObjectMixin could not find a valid Model.id.')
return query.filter(idfield == self.kwargs[self.url_id_param]) | python | def filter_by_id(self, query):
"""Apply the primary key filter to query to filter the results for a specific
instance by id.
The default filter applied by this method can be controlled using the
url_id_param
:param query: SQLAlchemy Query
:returns: A SQLAlchemy Query object
"""
if self.model is None:
raise ArrestedException('DBObjectMixin requires a model to be set.')
idfield = getattr(self.model, self.model_id_param, None)
if not idfield:
raise ArrestedException('DBObjectMixin could not find a valid Model.id.')
return query.filter(idfield == self.kwargs[self.url_id_param]) | Apply the primary key filter to query to filter the results for a specific
instance by id.
The filter applied by the this method by default can be controlled using the
url_id_param
:param query: SQLAlchemy Query
:returns: A SQLAlchemy Query object | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/sql_alchemy.py#L177-L194 |
mikeywaites/flask-arrested | arrested/contrib/sql_alchemy.py | DBObjectMixin.get_object | def get_object(self):
"""Implements the GetObjectMixin interface and calls
:meth:`DBObjectMixin.get_query`. Using this mixin requires usage of
a response handler capable of serializing SQLAlchemy query result objects.
:returns: Typically a SQLALchemy Query result.
:rtype: mixed
.. seealso::
:meth:`DBObjectMixin.get_query`
:meth:`DBObjectMixin.filter_by_id`
:meth:`DBObjectMixin.get_result`
"""
query = self.get_query()
query = self.filter_by_id(query)
return self.get_result(query) | python | def get_object(self):
"""Implements the GetObjectMixin interface and calls
:meth:`DBObjectMixin.get_query`. Using this mixin requires usage of
a response handler capable of serializing SQLAlchemy query result objects.
:returns: Typically a SQLALchemy Query result.
:rtype: mixed
.. seealso::
:meth:`DBObjectMixin.get_query`
:meth:`DBObjectMixin.filter_by_id`
:meth:`DBObjectMixin.get_result`
"""
query = self.get_query()
query = self.filter_by_id(query)
return self.get_result(query) | Implements the GetObjectMixin interface and calls
:meth:`DBObjectMixin.get_query`. Using this mixin requires usage of
a response handler capable of serializing SQLAlchemy query result objects.
:returns: Typically a SQLALchemy Query result.
:rtype: mixed
.. seealso::
:meth:`DBObjectMixin.get_query`
:meth:`DBObjectMixin.filter_by_id`
:meth:`DBObjectMixin.get_result` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/sql_alchemy.py#L196-L212 |
mikeywaites/flask-arrested | arrested/contrib/sql_alchemy.py | DBObjectMixin.delete_object | def delete_object(self, obj):
"""Deletes an object from the session by calling session.delete and then commits
the changes to the database.
:param obj: The SQLAlchemy instance being deleted
:returns: None
"""
session = self.get_db_session()
session.delete(obj)
session.commit() | python | def delete_object(self, obj):
"""Deletes an object from the session by calling session.delete and then commits
the changes to the database.
:param obj: The SQLAlchemy instance being deleted
:returns: None
"""
session = self.get_db_session()
session.delete(obj)
session.commit() | Deletes an object from the session by calling session.delete and then commits
the changes to the database.
:param obj: The SQLAlchemy instance being deleted
:returns: None | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/sql_alchemy.py#L226-L235 |
mikeywaites/flask-arrested | arrested/api.py | ArrestedAPI.init_app | def init_app(self, app):
"""Initialise the ArrestedAPI object by storing a pointer to a Flask app object.
This method is typically used when initialisation is deferred.
:param app: Flask application object
Usage::
app = Flask(__name__)
api_v1 = ArrestedAPI()
api_v1.init_app(app)
"""
self.app = app
if self.deferred:
self.register_all(self.deferred) | python | def init_app(self, app):
"""Initialise the ArrestedAPI object by storing a pointer to a Flask app object.
This method is typically used when initialisation is deferred.
:param app: Flask application object
Usage::
app = Flask(__name__)
api_v1 = ArrestedAPI()
api_v1.init_app(app)
"""
self.app = app
if self.deferred:
self.register_all(self.deferred) | Initialise the ArrestedAPI object by storing a pointer to a Flask app object.
This method is typically used when initialisation is deferred.
:param app: Flask application object
Usage::
app = Flask(__name__)
api_v1 = ArrestedAPI()
api_v1.init_app(app) | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/api.py#L37-L52 |
mikeywaites/flask-arrested | arrested/api.py | ArrestedAPI.register_resource | def register_resource(self, resource, defer=False):
"""Register a :class:`.Resource` blueprint object against the Flask app object.
:param resource: :class:`.Resource` or :class:`flask.Blueprint`
object.
:param defer: Optionally specify that registering this resource should be
deferred. This option is useful when users are creating their
Flask app instance via a factory.
**Deferred resource registration**
Resources can optionally be registered in a deferred manner. Simply pass
`defer=True` to :meth:`.ArrestedAPI.register_resource` to attach the resource to
the API without calling register_blueprint.
This is useful when you're using the factory pattern for creating your Flask app
object as demonstrated below. Deferred resource will not be registered until the
ArrestedAPI instance is initialised with the Flask app object.
Usage::
api_v1 = ArrestedAPI(prefix='/v1')
characters_resource = Resource(
'characters', __name__, url_prefix='/characters'
)
api_v1.register_resource(characters_resource, defer=True)
def create_app():
app = Flask(__name__)
api_v1.init_app(app) # deferred resources are now registered.
"""
if defer:
self.deferred.append(resource)
else:
resource.init_api(self)
self.app.register_blueprint(resource, url_prefix=self.url_prefix) | python | def register_resource(self, resource, defer=False):
"""Register a :class:`.Resource` blueprint object against the Flask app object.
:param resource: :class:`.Resource` or :class:`flask.Blueprint`
object.
:param defer: Optionally specify that registering this resource should be
deferred. This option is useful when users are creating their
Flask app instance via a factory.
**Deferred resource registration**
Resources can optionally be registered in a deferred manner. Simply pass
`defer=True` to :meth:`.ArrestedAPI.register_resource` to attach the resource to
the API without calling register_blueprint.
This is useful when you're using the factory pattern for creating your Flask app
object as demonstrated below. Deferred resource will not be registered until the
ArrestedAPI instance is initialised with the Flask app object.
Usage::
api_v1 = ArrestedAPI(prefix='/v1')
characters_resource = Resource(
'characters', __name__, url_prefix='/characters'
)
api_v1.register_resource(characters_resource, defer=True)
def create_app():
app = Flask(__name__)
api_v1.init_app(app) # deferred resources are now registered.
"""
if defer:
self.deferred.append(resource)
else:
resource.init_api(self)
self.app.register_blueprint(resource, url_prefix=self.url_prefix) | Register a :class:`.Resource` blueprint object against the Flask app object.
:param resource: :class:`.Resource` or :class:`flask.Blueprint`
object.
:param defer: Optionally specify that registering this resource should be
deferred. This option is useful when users are creating their
Flask app instance via a factory.
**Deferred resource registration**
Resources can optionally be registered in a deferred manner. Simply pass
`defer=True` to :meth:`.ArrestedAPI.register_resource` to attach the resource to
the API without calling register_blueprint.
This is useful when you're using the factory pattern for creating your Flask app
object as demonstrated below. Deferred resource will not be registered until the
ArrestedAPI instance is initialised with the Flask app object.
Usage::
api_v1 = ArrestedAPI(prefix='/v1')
characters_resource = Resource(
'characters', __name__, url_prefix='/characters'
)
api_v1.register_resource(characters_resource, defer=True)
def create_app():
app = Flask(__name__)
api_v1.init_app(app) # deferred resources are now registered. | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/api.py#L54-L90 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.process_before_request_hooks | def process_before_request_hooks(self):
"""Process the list of before_{method}_hooks and the before_all_hooks. The hooks
will be processed in the following order
1 - any before_all_hooks defined on the :class:`arrested.ArrestedAPI` object
2 - any before_all_hooks defined on the :class:`arrested.Resource` object
3 - any before_all_hooks defined on the :class:`arrested.Endpoint` object
4 - any before_{method}_hooks defined on the :class:`arrested.Endpoint` object
"""
hooks = []
if self.resource:
hooks.extend(self.resource.api.before_all_hooks)
hooks.extend(self.resource.before_all_hooks)
hooks.extend(self.before_all_hooks)
hooks.extend(
getattr(
self,
'before_{method}_hooks'.format(method=self.meth),
[]
)
)
for hook in chain(hooks):
hook(self) | python | def process_before_request_hooks(self):
"""Process the list of before_{method}_hooks and the before_all_hooks. The hooks
will be processed in the following order
1 - any before_all_hooks defined on the :class:`arrested.ArrestedAPI` object
2 - any before_all_hooks defined on the :class:`arrested.Resource` object
3 - any before_all_hooks defined on the :class:`arrested.Endpoint` object
4 - any before_{method}_hooks defined on the :class:`arrested.Endpoint` object
"""
hooks = []
if self.resource:
hooks.extend(self.resource.api.before_all_hooks)
hooks.extend(self.resource.before_all_hooks)
hooks.extend(self.before_all_hooks)
hooks.extend(
getattr(
self,
'before_{method}_hooks'.format(method=self.meth),
[]
)
)
for hook in chain(hooks):
hook(self) | Process the list of before_{method}_hooks and the before_all_hooks. The hooks
will be processed in the following order
1 - any before_all_hooks defined on the :class:`arrested.ArrestedAPI` object
2 - any before_all_hooks defined on the :class:`arrested.Resource` object
3 - any before_all_hooks defined on the :class:`arrested.Endpoint` object
4 - any before_{method}_hooks defined on the :class:`arrested.Endpoint` object | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L69-L95 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.process_after_request_hooks | def process_after_request_hooks(self, resp):
"""Process the list of before_{method}_hooks and the before_all_hooks. The hooks
will be processed in the following order
1 - any after_{method}_hooks defined on the :class:`arrested.Endpoint` object
2 - any after_all_hooks defined on the :class:`arrested.Endpoint` object
3 - any after_all_hooks defined on the :class:`arrested.Resource` object
4 - any after_all_hooks defined on the :class:`arrested.ArrestedAPI` object
"""
hooks = []
meth_hooks = getattr(
self,
'after_{method}_hooks'.format(method=self.meth),
[]
)
hooks.extend(meth_hooks)
hooks.extend(self.after_all_hooks)
if self.resource:
hooks.extend(self.resource.after_all_hooks)
hooks.extend(self.resource.api.after_all_hooks)
for hook in chain(hooks):
resp = hook(self, resp)
return resp | python | def process_after_request_hooks(self, resp):
"""Process the list of before_{method}_hooks and the before_all_hooks. The hooks
will be processed in the following order
1 - any after_{method}_hooks defined on the :class:`arrested.Endpoint` object
2 - any after_all_hooks defined on the :class:`arrested.Endpoint` object
3 - any after_all_hooks defined on the :class:`arrested.Resource` object
4 - any after_all_hooks defined on the :class:`arrested.ArrestedAPI` object
"""
hooks = []
meth_hooks = getattr(
self,
'after_{method}_hooks'.format(method=self.meth),
[]
)
hooks.extend(meth_hooks)
hooks.extend(self.after_all_hooks)
if self.resource:
hooks.extend(self.resource.after_all_hooks)
hooks.extend(self.resource.api.after_all_hooks)
for hook in chain(hooks):
resp = hook(self, resp)
return resp | Process the list of after_{method}_hooks and the after_all_hooks. The hooks
will be processed in the following order
1 - any after_{method}_hooks defined on the :class:`arrested.Endpoint` object
2 - any after_all_hooks defined on the :class:`arrested.Endpoint` object
3 - any after_all_hooks defined on the :class:`arrested.Resource` object
4 - any after_all_hooks defined on the :class:`arrested.ArrestedAPI` object | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L97-L124 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.dispatch_request | def dispatch_request(self, *args, **kwargs):
"""Dispatch the incoming HTTP request to the appropriate handler.
"""
self.args = args
self.kwargs = kwargs
self.meth = request.method.lower()
self.resource = current_app.blueprints.get(request.blueprint, None)
if not any([self.meth in self.methods, self.meth.upper() in self.methods]):
return self.return_error(405)
self.process_before_request_hooks()
resp = super(Endpoint, self).dispatch_request(*args, **kwargs)
resp = self.make_response(resp)
resp = self.process_after_request_hooks(resp)
return resp | python | def dispatch_request(self, *args, **kwargs):
"""Dispatch the incoming HTTP request to the appropriate handler.
"""
self.args = args
self.kwargs = kwargs
self.meth = request.method.lower()
self.resource = current_app.blueprints.get(request.blueprint, None)
if not any([self.meth in self.methods, self.meth.upper() in self.methods]):
return self.return_error(405)
self.process_before_request_hooks()
resp = super(Endpoint, self).dispatch_request(*args, **kwargs)
resp = self.make_response(resp)
resp = self.process_after_request_hooks(resp)
return resp | Dispatch the incoming HTTP request to the appropriate handler. | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L126-L144 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.make_response | def make_response(self, rv, status=200, headers=None, mime='application/json'):
"""Create a response object using the :class:`flask.Response` class.
:param rv: Response value. If the value is not an instance
of :class:`werkzeug.wrappers.Response` it will be converted
into a Response object.
:param status: specify the HTTP status code for this response.
:param mime: Specify the mimetype for this request.
:param headers: Specify dict of headers for the response.
"""
if not isinstance(rv, Response):
resp = Response(
response=rv,
headers=headers,
mimetype=mime,
status=status
)
else:
resp = rv
return resp | python | def make_response(self, rv, status=200, headers=None, mime='application/json'):
"""Create a response object using the :class:`flask.Response` class.
:param rv: Response value. If the value is not an instance
of :class:`werkzeug.wrappers.Response` it will be converted
into a Response object.
:param status: specify the HTTP status code for this response.
:param mime: Specify the mimetype for this request.
:param headers: Specify dict of headers for the response.
"""
if not isinstance(rv, Response):
resp = Response(
response=rv,
headers=headers,
mimetype=mime,
status=status
)
else:
resp = rv
return resp | Create a response object using the :class:`flask.Response` class.
:param rv: Response value. If the value is not an instance
of :class:`werkzeug.wrappers.Response` it will be converted
into a Response object.
:param status: specify the HTTP status code for this response.
:param mime: Specify the mimetype for this request.
:param headers: Specify dict of headers for the response. | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L156-L177 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.get_response_handler | def get_response_handler(self):
"""Return the Endpoints defined :attr:`Endpoint.response_handler`.
:returns: A instance of the Endpoint specified :class:`ResonseHandler`.
:rtype: :class:`ResponseHandler`
"""
assert self.response_handler is not None, \
'Please define a response_handler ' \
' for Endpoint: %s' % self.__class__.__name__
return self.response_handler(self, **self.get_response_handler_params()) | python | def get_response_handler(self):
"""Return the Endpoints defined :attr:`Endpoint.response_handler`.
:returns: A instance of the Endpoint specified :class:`ResonseHandler`.
:rtype: :class:`ResponseHandler`
"""
assert self.response_handler is not None, \
'Please define a response_handler ' \
' for Endpoint: %s' % self.__class__.__name__
return self.response_handler(self, **self.get_response_handler_params()) | Return the Endpoint's defined :attr:`Endpoint.response_handler`.
:returns: An instance of the Endpoint's specified :class:`ResponseHandler`.
:rtype: :class:`ResponseHandler` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L188-L198 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.get_request_handler | def get_request_handler(self):
"""Return the Endpoints defined :attr:`Endpoint.request_handler`.
:returns: A instance of the Endpoint specified :class:`RequestHandler`.
:rtype: :class:`RequestHandler`
"""
assert self.request_handler is not None, \
'Please define a request_handler ' \
' for Endpoint: %s' % self.__class__.__name__
return self.request_handler(self, **self.get_request_handler_params()) | python | def get_request_handler(self):
"""Return the Endpoints defined :attr:`Endpoint.request_handler`.
:returns: A instance of the Endpoint specified :class:`RequestHandler`.
:rtype: :class:`RequestHandler`
"""
assert self.request_handler is not None, \
'Please define a request_handler ' \
' for Endpoint: %s' % self.__class__.__name__
return self.request_handler(self, **self.get_request_handler_params()) | Return the Endpoint's defined :attr:`Endpoint.request_handler`.
:returns: An instance of the Endpoint's specified :class:`RequestHandler`.
:rtype: :class:`RequestHandler` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L209-L219 |
mikeywaites/flask-arrested | arrested/endpoint.py | Endpoint.return_error | def return_error(self, status, payload=None):
"""Error handler called by request handlers when an error occurs and the request
should be aborted.
Usage::
def handle_post_request(self, *args, **kwargs):
self.request_handler = self.get_request_handler()
try:
self.request_handler.process(self.get_data())
except SomeException as e:
self.return_error(400, payload=self.request_handler.errors)
return self.return_create_response()
"""
resp = None
if payload is not None:
payload = json.dumps(payload)
resp = self.make_response(payload, status=status)
if status in [405]:
abort(status)
else:
abort(status, response=resp) | python | def return_error(self, status, payload=None):
"""Error handler called by request handlers when an error occurs and the request
should be aborted.
Usage::
def handle_post_request(self, *args, **kwargs):
self.request_handler = self.get_request_handler()
try:
self.request_handler.process(self.get_data())
except SomeException as e:
self.return_error(400, payload=self.request_handler.errors)
return self.return_create_response()
"""
resp = None
if payload is not None:
payload = json.dumps(payload)
resp = self.make_response(payload, status=status)
if status in [405]:
abort(status)
else:
abort(status, response=resp) | Error handler called by request handlers when an error occurs and the request
should be aborted.
Usage::
def handle_post_request(self, *args, **kwargs):
self.request_handler = self.get_request_handler()
try:
self.request_handler.process(self.get_data())
except SomeException as e:
self.return_error(400, payload=self.request_handler.errors)
return self.return_create_response() | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/endpoint.py#L221-L245 |
mikeywaites/flask-arrested | arrested/contrib/kim_arrested.py | KimResponseHandler.handle | def handle(self, data, **kwargs):
"""Run serialization for the specified mapper_class.
Supports both .serialize and .many().serialize Kim interfaces.
:param data: Objects to be serialized.
:returns: Serialized data according to mapper configuration
"""
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).serialize(
data, role=self.role
)
else:
return self.mapper(obj=data, raw=self.raw, **self.mapper_kwargs).serialize(
role=self.role
) | python | def handle(self, data, **kwargs):
"""Run serialization for the specified mapper_class.
Supports both .serialize and .many().serialize Kim interfaces.
:param data: Objects to be serialized.
:returns: Serialized data according to mapper configuration
"""
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).serialize(
data, role=self.role
)
else:
return self.mapper(obj=data, raw=self.raw, **self.mapper_kwargs).serialize(
role=self.role
) | Run serialization for the specified mapper_class.
Supports both .serialize and .many().serialize Kim interfaces.
:param data: Objects to be serialized.
:returns: Serialized data according to mapper configuration | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L81-L96 |
mikeywaites/flask-arrested | arrested/contrib/kim_arrested.py | KimRequestHandler.handle_error | def handle_error(self, exp):
"""Called if a Mapper returns MappingInvalid. Should handle the error
and return it in the appropriate format, can be overridden in order
to change the error format.
:param exp: MappingInvalid exception raised
"""
payload = {
"message": "Invalid or incomplete data provided.",
"errors": exp.errors
}
self.endpoint.return_error(self.error_status, payload=payload) | python | def handle_error(self, exp):
"""Called if a Mapper returns MappingInvalid. Should handle the error
and return it in the appropriate format, can be overridden in order
to change the error format.
:param exp: MappingInvalid exception raised
"""
payload = {
"message": "Invalid or incomplete data provided.",
"errors": exp.errors
}
self.endpoint.return_error(self.error_status, payload=payload) | Called if a Mapper raises MappingInvalid. Should handle the error
and return it in the appropriate format, can be overridden in order
to change the error format.
:param exp: MappingInvalid exception raised | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L126-L137 |
mikeywaites/flask-arrested | arrested/contrib/kim_arrested.py | KimRequestHandler.handle | def handle(self, data, **kwargs):
"""Run marshalling for the specified mapper_class.
Supports both .marshal and .many().marshal Kim interfaces. Handles errors raised
during marshalling and automatically returns a HTTP error response.
:param data: Data to be marshaled.
:returns: Marshaled object according to mapper configuration
:raises: :class:`werkzeug.exceptions.UnprocessableEntity`
"""
try:
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).marshal(
data, role=self.role
)
else:
return self.mapper(
data=data,
obj=self.obj,
partial=self.partial,
**self.mapper_kwargs
).marshal(role=self.role)
except MappingInvalid as e:
self.handle_error(e) | python | def handle(self, data, **kwargs):
"""Run marshalling for the specified mapper_class.
Supports both .marshal and .many().marshal Kim interfaces. Handles errors raised
during marshalling and automatically returns a HTTP error response.
:param data: Data to be marshaled.
:returns: Marshaled object according to mapper configuration
:raises: :class:`werkzeug.exceptions.UnprocessableEntity`
"""
try:
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).marshal(
data, role=self.role
)
else:
return self.mapper(
data=data,
obj=self.obj,
partial=self.partial,
**self.mapper_kwargs
).marshal(role=self.role)
except MappingInvalid as e:
self.handle_error(e) | Run marshalling for the specified mapper_class.
Supports both .marshal and .many().marshal Kim interfaces. Handles errors raised
during marshalling and automatically returns a HTTP error response.
:param data: Data to be marshaled.
:returns: Marshaled object according to mapper configuration
:raises: :class:`werkzeug.exceptions.UnprocessableEntity` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L139-L162 |
mikeywaites/flask-arrested | arrested/contrib/kim_arrested.py | KimEndpoint.get_response_handler_params | def get_response_handler_params(self, **params):
"""Return a config object that will be used to configure the KimResponseHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_response_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.serialize_role
# After a successful attempt to marshal an object has been made, a response
# is generated using the ResponseHandler. Rather than taking the class level
# setting for many by default, pull it from the request handler params config to
# ensure Marshaling and Serializing are run the same way.
if self._is_marshal_request():
req_params = self.get_request_handler_params()
params['many'] = req_params.get('many', self.many)
else:
params['many'] = self.many
return params | python | def get_response_handler_params(self, **params):
"""Return a config object that will be used to configure the KimResponseHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_response_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.serialize_role
# After a successful attempt to marshal an object has been made, a response
# is generated using the ResponseHandler. Rather than taking the class level
# setting for many by default, pull it from the request handler params config to
# ensure Marshaling and Serializing are run the same way.
if self._is_marshal_request():
req_params = self.get_request_handler_params()
params['many'] = req_params.get('many', self.many)
else:
params['many'] = self.many
return params | Return a config object that will be used to configure the KimResponseHandler
:returns: a dictionary of config options
:rtype: dict | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L187-L208 |
mikeywaites/flask-arrested | arrested/contrib/kim_arrested.py | KimEndpoint.get_request_handler_params | def get_request_handler_params(self, **params):
"""Return a config object that will be used to configure the KimRequestHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_request_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.marshal_role
params['many'] = False
# when handling a PUT or PATCH request, self.obj will be set.. There might be a
# more robust way to handle this?
params['obj'] = getattr(self, 'obj', None)
params['partial'] = self.is_partial()
return params | python | def get_request_handler_params(self, **params):
"""Return a config object that will be used to configure the KimRequestHandler
:returns: a dictionary of config options
:rtype: dict
"""
params = super(KimEndpoint, self).get_request_handler_params(**params)
params['mapper_class'] = self.mapper_class
params['role'] = self.marshal_role
params['many'] = False
# when handling a PUT or PATCH request, self.obj will be set.. There might be a
# more robust way to handle this?
params['obj'] = getattr(self, 'obj', None)
params['partial'] = self.is_partial()
return params | Return a config object that will be used to configure the KimRequestHandler
:returns: a dictionary of config options
:rtype: dict | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L210-L226 |
mikeywaites/flask-arrested | arrested/mixins.py | GetListMixin.list_response | def list_response(self, status=200):
"""Pull the processed data from the response_handler and return a response.
:param status: The HTTP status code returned with the response
.. seealso::
:meth:`Endpoint.make_response`
:meth:`Endpoint.handle_get_request`
"""
return self._response(self.response.get_response_data(), status=status) | python | def list_response(self, status=200):
"""Pull the processed data from the response_handler and return a response.
:param status: The HTTP status code returned with the response
.. seealso::
:meth:`Endpoint.make_response`
:meth:`Endpoint.handle_get_request`
"""
return self._response(self.response.get_response_data(), status=status) | Pull the processed data from the response_handler and return a response.
:param status: The HTTP status code returned with the response
.. seealso::
:meth:`Endpoint.make_response`
:meth:`Endpoint.handle_get_request` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L29-L39 |
mikeywaites/flask-arrested | arrested/mixins.py | GetListMixin.handle_get_request | def handle_get_request(self):
"""Handle incoming GET request to an Endpoint and return an
array of results by calling :meth:`.GetListMixin.get_objects`.
.. seealso::
:meth:`GetListMixin.get_objects`
:meth:`Endpoint.get`
"""
self.objects = self.get_objects()
self.response = self.get_response_handler()
self.response.process(self.objects)
return self.list_response() | python | def handle_get_request(self):
"""Handle incoming GET request to an Endpoint and return an
array of results by calling :meth:`.GetListMixin.get_objects`.
.. seealso::
:meth:`GetListMixin.get_objects`
:meth:`Endpoint.get`
"""
self.objects = self.get_objects()
self.response = self.get_response_handler()
self.response.process(self.objects)
return self.list_response() | Handle incoming GET request to an Endpoint and return an
array of results by calling :meth:`.GetListMixin.get_objects`.
.. seealso::
:meth:`GetListMixin.get_objects`
:meth:`Endpoint.get` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L41-L54 |
mikeywaites/flask-arrested | arrested/mixins.py | CreateMixin.create_response | def create_response(self, status=201):
"""Generate a Response object for a POST request. By default, the newly created
object will be passed to the specified ResponseHandler and will be serialized
as the response body.
"""
self.response = self.get_response_handler()
self.response.process(self.obj)
return self._response(self.response.get_response_data(), status=status) | python | def create_response(self, status=201):
"""Generate a Response object for a POST request. By default, the newly created
object will be passed to the specified ResponseHandler and will be serialized
as the response body.
"""
self.response = self.get_response_handler()
self.response.process(self.obj)
return self._response(self.response.get_response_data(), status=status) | Generate a Response object for a POST request. By default, the newly created
object will be passed to the specified ResponseHandler and will be serialized
as the response body. | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L69-L77 |
mikeywaites/flask-arrested | arrested/mixins.py | CreateMixin.handle_post_request | def handle_post_request(self):
"""Handle incoming POST request to an Endpoint and marshal the request data
via the specified RequestHandler. :meth:`.CreateMixin.save_object`. is then
called and must be implemented by mixins implementing this interface.
.. seealso::
:meth:`CreateMixin.save_object`
:meth:`Endpoint.post`
"""
self.request = self.get_request_handler()
self.obj = self.request.process().data
self.save_object(self.obj)
return self.create_response() | python | def handle_post_request(self):
"""Handle incoming POST request to an Endpoint and marshal the request data
via the specified RequestHandler. :meth:`.CreateMixin.save_object`. is then
called and must be implemented by mixins implementing this interface.
.. seealso::
:meth:`CreateMixin.save_object`
:meth:`Endpoint.post`
"""
self.request = self.get_request_handler()
self.obj = self.request.process().data
self.save_object(self.obj)
return self.create_response() | Handle incoming POST request to an Endpoint and marshal the request data
via the specified RequestHandler. :meth:`.CreateMixin.save_object`. is then
called and must be implemented by mixins implementing this interface.
.. seealso::
:meth:`CreateMixin.save_object`
:meth:`Endpoint.post` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L79-L92 |
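For completeness, a plain (non-Kim) sketch of the two hooks the mixins leave abstract: GetListMixin only requires get_objects() and CreateMixin only requires save_object(). The package-root imports and the plain JSON ResponseHandler/RequestHandler are assumptions here; any handlers satisfying the interfaces above would do.

from arrested import (                            # assumed to be re-exported at the package root
    Resource, Endpoint, GetListMixin, CreateMixin,
    ResponseHandler, RequestHandler,
)

CHARACTERS = [{'name': 'Obi-Wan Kenobi'}]         # stand-in for a real data store

characters = Resource('characters', __name__, url_prefix='/characters')


class CharacterListCreateEndpoint(Endpoint, GetListMixin, CreateMixin):

    name = 'list'
    url = '/'
    response_handler = ResponseHandler            # plain JSON handlers assumed
    request_handler = RequestHandler

    def get_objects(self):
        # Feeds handle_get_request() above; normally an ORM query.
        return CHARACTERS

    def save_object(self, obj):
        # Called by handle_post_request() with whatever the request handler produced.
        CHARACTERS.append(obj)
        return obj


characters.add_endpoint(CharacterListCreateEndpoint)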
mikeywaites/flask-arrested | arrested/mixins.py | ObjectMixin.obj | def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.NotFound`
:returns: The result of :meth:`ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj | python | def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.NotFound`
:returns: The result of :meth:`ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj | Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.NotFound`
:returns: The result of :meth:`ObjectMixin.get_object` | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/mixins.py#L122-L135
qntm/greenery | greenery/fsm.py | null | def null(alphabet):
'''
An FSM accepting nothing (not even the empty string). This is
demonstrates that this is possible, and is also extremely useful
in some situations
'''
return fsm(
alphabet = alphabet,
states = {0},
initial = 0,
finals = set(),
map = {
0: dict([(symbol, 0) for symbol in alphabet]),
},
) | python | def null(alphabet):
'''
An FSM accepting nothing (not even the empty string). This
demonstrates that this is possible, and is also extremely useful
in some situations
'''
return fsm(
alphabet = alphabet,
states = {0},
initial = 0,
finals = set(),
map = {
0: dict([(symbol, 0) for symbol in alphabet]),
},
) | An FSM accepting nothing (not even the empty string). This
demonstrates that this is possible, and is also extremely useful
in some situations | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L686-L700 |
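A quick check of the two degenerate languages, assuming the greenery.fsm module layout shown in these paths (epsilon() is the companion constructor referenced by star() further down, accepting only the empty string):

from greenery.fsm import null, epsilon

n = null({"a", "b"})
n.accepts("")        # False -- not even the empty string
n.accepts("ab")      # False

e = epsilon({"a", "b"})
e.accepts("")        # True  -- only the empty string
e.accepts("a")       # False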
qntm/greenery | greenery/fsm.py | parallel | def parallel(fsms, test):
'''
Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
To determine whether a state in the larger FSM is final, pass all of the
finality statuses (e.g. [True, False, False]) to `test`.
'''
alphabet = set().union(*[fsm.alphabet for fsm in fsms])
initial = dict([(i, fsm.initial) for (i, fsm) in enumerate(fsms)])
# dedicated function accepts a "superset" and returns the next "superset"
# obtained by following this transition in the new FSM
def follow(current, symbol):
next = {}
for i in range(len(fsms)):
if symbol not in fsms[i].alphabet and anything_else in fsms[i].alphabet:
actual_symbol = anything_else
else:
actual_symbol = symbol
if i in current \
and current[i] in fsms[i].map \
and actual_symbol in fsms[i].map[current[i]]:
next[i] = fsms[i].map[current[i]][actual_symbol]
if len(next.keys()) == 0:
raise OblivionError
return next
# Determine the "is final?" condition of each substate, then pass it to the
# test to determine finality of the overall FSM.
def final(state):
accepts = [i in state and state[i] in fsm.finals for (i, fsm) in enumerate(fsms)]
return test(accepts)
return crawl(alphabet, initial, final, follow).reduce() | python | def parallel(fsms, test):
'''
Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
To determine whether a state in the larger FSM is final, pass all of the
finality statuses (e.g. [True, False, False]) to `test`.
'''
alphabet = set().union(*[fsm.alphabet for fsm in fsms])
initial = dict([(i, fsm.initial) for (i, fsm) in enumerate(fsms)])
# dedicated function accepts a "superset" and returns the next "superset"
# obtained by following this transition in the new FSM
def follow(current, symbol):
next = {}
for i in range(len(fsms)):
if symbol not in fsms[i].alphabet and anything_else in fsms[i].alphabet:
actual_symbol = anything_else
else:
actual_symbol = symbol
if i in current \
and current[i] in fsms[i].map \
and actual_symbol in fsms[i].map[current[i]]:
next[i] = fsms[i].map[current[i]][actual_symbol]
if len(next.keys()) == 0:
raise OblivionError
return next
# Determine the "is final?" condition of each substate, then pass it to the
# test to determine finality of the overall FSM.
def final(state):
accepts = [i in state and state[i] in fsm.finals for (i, fsm) in enumerate(fsms)]
return test(accepts)
return crawl(alphabet, initial, final, follow).reduce() | Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
To determine whether a state in the larger FSM is final, pass all of the
finality statuses (e.g. [True, False, False]) to `test`. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L715-L748
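Because `test` receives a plain list of booleans, the built-ins any and all give union and intersection directly. A small sketch using the constructor keywords shown in null() above:

from greenery.fsm import fsm, parallel

# strings over {a, b} that end in "b"
ends_in_b = fsm(
    alphabet={"a", "b"}, states={0, 1}, initial=0, finals={1},
    map={0: {"a": 0, "b": 1}, 1: {"a": 0, "b": 1}},
)
# strings over {a, b} that start with "a"
starts_with_a = fsm(
    alphabet={"a", "b"}, states={0, 1, 2}, initial=0, finals={1},
    map={0: {"a": 1, "b": 2}, 1: {"a": 1, "b": 1}, 2: {"a": 2, "b": 2}},
)

union = parallel([ends_in_b, starts_with_a], any)
intersection = parallel([ends_in_b, starts_with_a], all)
union.accepts("b")            # True  -- ends in "b"
intersection.accepts("ab")    # True  -- starts with "a" and ends in "b"
intersection.accepts("b")     # False -- does not start with "a"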
qntm/greenery | greenery/fsm.py | crawl | def crawl(alphabet, initial, final, follow):
'''
Given the above conditions and instructions, crawl a new unknown FSM,
mapping its states, final states and transitions. Return the new FSM.
This is a pretty powerful procedure which could potentially go on
forever if you supply an evil version of follow().
'''
states = [initial]
finals = set()
map = {}
# iterate over a growing list
i = 0
while i < len(states):
state = states[i]
# add to finals
if final(state):
finals.add(i)
# compute map for this state
map[i] = {}
for symbol in sorted(alphabet, key=key):
try:
next = follow(state, symbol)
try:
j = states.index(next)
except ValueError:
j = len(states)
states.append(next)
except OblivionError:
# Reached an oblivion state. Don't list it.
continue
map[i][symbol] = j
i += 1
return fsm(
alphabet = alphabet,
states = range(len(states)),
initial = 0,
finals = finals,
map = map,
) | python | def crawl(alphabet, initial, final, follow):
'''
Given the above conditions and instructions, crawl a new unknown FSM,
mapping its states, final states and transitions. Return the new FSM.
This is a pretty powerful procedure which could potentially go on
forever if you supply an evil version of follow().
'''
states = [initial]
finals = set()
map = {}
# iterate over a growing list
i = 0
while i < len(states):
state = states[i]
# add to finals
if final(state):
finals.add(i)
# compute map for this state
map[i] = {}
for symbol in sorted(alphabet, key=key):
try:
next = follow(state, symbol)
try:
j = states.index(next)
except ValueError:
j = len(states)
states.append(next)
except OblivionError:
# Reached an oblivion state. Don't list it.
continue
map[i][symbol] = j
i += 1
return fsm(
alphabet = alphabet,
states = range(len(states)),
initial = 0,
finals = finals,
map = map,
) | Given the above conditions and instructions, crawl a new unknown FSM,
mapping its states, final states and transitions. Return the new FSM.
This is a pretty powerful procedure which could potentially go on
forever if you supply an evil version of follow(). | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L750-L797 |
qntm/greenery | greenery/fsm.py | fsm.accepts | def accepts(self, input):
'''
Test whether the present FSM accepts the supplied string (iterable of
symbols). Equivalently, consider `self` as a possibly-infinite set of
strings and test whether `input` is a member of it.
This is actually mainly used for unit testing purposes.
If `fsm.anything_else` is in your alphabet, then any symbol not in your
alphabet will be converted to `fsm.anything_else`.
'''
state = self.initial
for symbol in input:
if anything_else in self.alphabet and not symbol in self.alphabet:
symbol = anything_else
# Missing transition = transition to dead state
if not (state in self.map and symbol in self.map[state]):
return False
state = self.map[state][symbol]
return state in self.finals | python | def accepts(self, input):
'''
Test whether the present FSM accepts the supplied string (iterable of
symbols). Equivalently, consider `self` as a possibly-infinite set of
strings and test whether `input` is a member of it.
This is actually mainly used for unit testing purposes.
If `fsm.anything_else` is in your alphabet, then any symbol not in your
alphabet will be converted to `fsm.anything_else`.
'''
state = self.initial
for symbol in input:
if anything_else in self.alphabet and not symbol in self.alphabet:
symbol = anything_else
# Missing transition = transition to dead state
if not (state in self.map and symbol in self.map[state]):
return False
state = self.map[state][symbol]
return state in self.finals | Test whether the present FSM accepts the supplied string (iterable of
symbols). Equivalently, consider `self` as a possibly-infinite set of
strings and test whether `input` is a member of it.
This is actually mainly used for unit testing purposes.
If `fsm.anything_else` is in your alphabet, then any symbol not in your
alphabet will be converted to `fsm.anything_else`. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L82-L101 |
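A minimal end-to-end check of the constructor arguments used throughout this file (alphabet, states, initial, finals, map) together with accepts():

from greenery.fsm import fsm

# Accepts exactly the strings over {a, b} that end in "b".
ends_in_b = fsm(
    alphabet={"a", "b"},
    states={0, 1},
    initial=0,
    finals={1},
    map={
        0: {"a": 0, "b": 1},
        1: {"a": 0, "b": 1},
    },
)

ends_in_b.accepts("ab")    # True
ends_in_b.accepts("aba")   # False
ends_in_b.accepts("")      # False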
qntm/greenery | greenery/fsm.py | fsm.concatenate | def concatenate(*fsms):
'''
Concatenate arbitrarily many finite state machines together.
'''
alphabet = set().union(*[fsm.alphabet for fsm in fsms])
def connect_all(i, substate):
'''
Take a state in the numbered FSM and return a set containing it, plus
(if it's final) the first state from the next FSM, plus (if that's
final) the first state from the next but one FSM, plus...
'''
result = {(i, substate)}
while i < len(fsms) - 1 and substate in fsms[i].finals:
i += 1
substate = fsms[i].initial
result.add((i, substate))
return result
# Use a superset containing states from all FSMs at once.
# We start at the start of the first FSM. If this state is final in the
# first FSM, then we are also at the start of the second FSM. And so on.
initial = set()
if len(fsms) > 0:
initial.update(connect_all(0, fsms[0].initial))
initial = frozenset(initial)
def final(state):
'''If you're in a final state of the final FSM, it's final'''
for (i, substate) in state:
if i == len(fsms) - 1 and substate in fsms[i].finals:
return True
return False
def follow(current, symbol):
'''
Follow the collection of states through all FSMs at once, jumping to the
next FSM if we reach the end of the current one
TODO: improve all follow() implementations to allow for dead metastates?
'''
next = set()
for (i, substate) in current:
fsm = fsms[i]
if substate in fsm.map and symbol in fsm.map[substate]:
next.update(connect_all(i, fsm.map[substate][symbol]))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce() | python | def concatenate(*fsms):
'''
Concatenate arbitrarily many finite state machines together.
'''
alphabet = set().union(*[fsm.alphabet for fsm in fsms])
def connect_all(i, substate):
'''
Take a state in the numbered FSM and return a set containing it, plus
(if it's final) the first state from the next FSM, plus (if that's
final) the first state from the next but one FSM, plus...
'''
result = {(i, substate)}
while i < len(fsms) - 1 and substate in fsms[i].finals:
i += 1
substate = fsms[i].initial
result.add((i, substate))
return result
# Use a superset containing states from all FSMs at once.
# We start at the start of the first FSM. If this state is final in the
# first FSM, then we are also at the start of the second FSM. And so on.
initial = set()
if len(fsms) > 0:
initial.update(connect_all(0, fsms[0].initial))
initial = frozenset(initial)
def final(state):
'''If you're in a final state of the final FSM, it's final'''
for (i, substate) in state:
if i == len(fsms) - 1 and substate in fsms[i].finals:
return True
return False
def follow(current, symbol):
'''
Follow the collection of states through all FSMs at once, jumping to the
next FSM if we reach the end of the current one
TODO: improve all follow() implementations to allow for dead metastates?
'''
next = set()
for (i, substate) in current:
fsm = fsms[i]
if substate in fsm.map and symbol in fsm.map[substate]:
next.update(connect_all(i, fsm.map[substate][symbol]))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce() | Concatenate arbitrarily many finite state machines together. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L170-L219 |
qntm/greenery | greenery/fsm.py | fsm.star | def star(self):
'''
If the present FSM accepts X, returns an FSM accepting X* (i.e. 0 or
more Xes). This is NOT as simple as naively connecting the final states
back to the initial state: see (b*ab)* for example.
'''
alphabet = self.alphabet
initial = {self.initial}
def follow(state, symbol):
next = set()
for substate in state:
if substate in self.map and symbol in self.map[substate]:
next.add(self.map[substate][symbol])
# If one of our substates is final, then we can also consider
# transitions from the initial state of the original FSM.
if substate in self.finals \
and self.initial in self.map \
and symbol in self.map[self.initial]:
next.add(self.map[self.initial][symbol])
if len(next) == 0:
raise OblivionError
return frozenset(next)
def final(state):
return any(substate in self.finals for substate in state)
return crawl(alphabet, initial, final, follow) | epsilon(alphabet) | python | def star(self):
'''
If the present FSM accepts X, returns an FSM accepting X* (i.e. 0 or
more Xes). This is NOT as simple as naively connecting the final states
back to the initial state: see (b*ab)* for example.
'''
alphabet = self.alphabet
initial = {self.initial}
def follow(state, symbol):
next = set()
for substate in state:
if substate in self.map and symbol in self.map[substate]:
next.add(self.map[substate][symbol])
# If one of our substates is final, then we can also consider
# transitions from the initial state of the original FSM.
if substate in self.finals \
and self.initial in self.map \
and symbol in self.map[self.initial]:
next.add(self.map[self.initial][symbol])
if len(next) == 0:
raise OblivionError
return frozenset(next)
def final(state):
return any(substate in self.finals for substate in state)
return crawl(alphabet, initial, final, follow) | epsilon(alphabet) | If the present FSM accepts X, returns an FSM accepting X* (i.e. 0 or
more Xes). This is NOT as simple as naively connecting the final states
back to the initial state: see (b*ab)* for example. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L231-L262 |
qntm/greenery | greenery/fsm.py | fsm.times | def times(self, multiplier):
'''
Given an FSM and a multiplier, return the multiplied FSM.
'''
if multiplier < 0:
raise Exception("Can't multiply an FSM by " + repr(multiplier))
alphabet = self.alphabet
# metastate is a set of iterations+states
initial = {(self.initial, 0)}
def final(state):
'''If the initial state is final then multiplying doesn't alter that'''
for (substate, iteration) in state:
if substate == self.initial \
and (self.initial in self.finals or iteration == multiplier):
return True
return False
def follow(current, symbol):
next = []
for (substate, iteration) in current:
if iteration < multiplier \
and substate in self.map \
and symbol in self.map[substate]:
next.append((self.map[substate][symbol], iteration))
# final of self? merge with initial on next iteration
if self.map[substate][symbol] in self.finals:
next.append((self.initial, iteration + 1))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce() | python | def times(self, multiplier):
'''
Given an FSM and a multiplier, return the multiplied FSM.
'''
if multiplier < 0:
raise Exception("Can't multiply an FSM by " + repr(multiplier))
alphabet = self.alphabet
# metastate is a set of iterations+states
initial = {(self.initial, 0)}
def final(state):
'''If the initial state is final then multiplying doesn't alter that'''
for (substate, iteration) in state:
if substate == self.initial \
and (self.initial in self.finals or iteration == multiplier):
return True
return False
def follow(current, symbol):
next = []
for (substate, iteration) in current:
if iteration < multiplier \
and substate in self.map \
and symbol in self.map[substate]:
next.append((self.map[substate][symbol], iteration))
# final of self? merge with initial on next iteration
if self.map[substate][symbol] in self.finals:
next.append((self.initial, iteration + 1))
if len(next) == 0:
raise OblivionError
return frozenset(next)
return crawl(alphabet, initial, final, follow).reduce() | Given an FSM and a multiplier, return the multiplied FSM. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L264-L298 |
qntm/greenery | greenery/fsm.py | fsm.everythingbut | def everythingbut(self):
'''
Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified.
'''
alphabet = self.alphabet
initial = {0 : self.initial}
def follow(current, symbol):
next = {}
if 0 in current and current[0] in self.map and symbol in self.map[current[0]]:
next[0] = self.map[current[0]][symbol]
return next
# state is final unless the original was
def final(state):
return not (0 in state and state[0] in self.finals)
return crawl(alphabet, initial, final, follow).reduce() | python | def everythingbut(self):
'''
Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified.
'''
alphabet = self.alphabet
initial = {0 : self.initial}
def follow(current, symbol):
next = {}
if 0 in current and current[0] in self.map and symbol in self.map[current[0]]:
next[0] = self.map[current[0]][symbol]
return next
# state is final unless the original was
def final(state):
return not (0 in state and state[0] in self.finals)
return crawl(alphabet, initial, final, follow).reduce() | Return a finite state machine which will accept any string NOT
accepted by self, and will not accept any string accepted by self.
This is more complicated if there are missing transitions, because the
missing "dead" state must now be reified. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L358-L379 |
qntm/greenery | greenery/fsm.py | fsm.reversed | def reversed(self):
'''
Return a new FSM such that for every string that self accepts (e.g.
"beer", the new FSM accepts the reversed string ("reeb").
'''
alphabet = self.alphabet
# Start from a composite "state-set" consisting of all final states.
# If there are no final states, this set is empty and we'll find that
# no other states get generated.
initial = frozenset(self.finals)
# Find every possible way to reach the current state-set
# using this symbol.
def follow(current, symbol):
next = frozenset([
prev
for prev in self.map
for state in current
if symbol in self.map[prev] and self.map[prev][symbol] == state
])
if len(next) == 0:
raise OblivionError
return next
# A state-set is final if the initial state is in it.
def final(state):
return self.initial in state
# Man, crawl() is the best!
return crawl(alphabet, initial, final, follow) | python | def reversed(self):
'''
Return a new FSM such that for every string that self accepts (e.g.
"beer", the new FSM accepts the reversed string ("reeb").
'''
alphabet = self.alphabet
# Start from a composite "state-set" consisting of all final states.
# If there are no final states, this set is empty and we'll find that
# no other states get generated.
initial = frozenset(self.finals)
# Find every possible way to reach the current state-set
# using this symbol.
def follow(current, symbol):
next = frozenset([
prev
for prev in self.map
for state in current
if symbol in self.map[prev] and self.map[prev][symbol] == state
])
if len(next) == 0:
raise OblivionError
return next
# A state-set is final if the initial state is in it.
def final(state):
return self.initial in state
# Man, crawl() is the best!
return crawl(alphabet, initial, final, follow) | Return a new FSM such that for every string that self accepts (e.g.
"beer", the new FSM accepts the reversed string ("reeb"). | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L381-L411 |
qntm/greenery | greenery/fsm.py | fsm.islive | def islive(self, state):
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False | python | def islive(self, state):
'''A state is "live" if a final state can be reached from it.'''
reachable = [state]
i = 0
while i < len(reachable):
current = reachable[i]
if current in self.finals:
return True
if current in self.map:
for symbol in self.map[current]:
next = self.map[current][symbol]
if next not in reachable:
reachable.append(next)
i += 1
return False | A state is "live" if a final state can be reached from it. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L421-L435 |
qntm/greenery | greenery/fsm.py | fsm.strings | def strings(self):
'''
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
'''
# Many FSMs have "dead states". Once you reach a dead state, you can no
# longer reach a final state. Since many strings may end up here, it's
# advantageous to constrain our search to live states only.
livestates = set(state for state in self.states if self.islive(state))
# We store a list of tuples. Each tuple consists of an input string and the
# state that this input string leads to. This means we don't have to run the
# state machine from the very beginning every time we want to check a new
# string.
strings = []
# Initial entry (or possibly not, in which case this is a short one)
cstate = self.initial
cstring = []
if cstate in livestates:
if cstate in self.finals:
yield cstring
strings.append((cstring, cstate))
# Fixed point calculation
i = 0
while i < len(strings):
(cstring, cstate) = strings[i]
if cstate in self.map:
for symbol in sorted(self.map[cstate], key=key):
nstate = self.map[cstate][symbol]
nstring = cstring + [symbol]
if nstate in livestates:
if nstate in self.finals:
yield nstring
strings.append((nstring, nstate))
i += 1 | python | def strings(self):
'''
Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions.
'''
# Many FSMs have "dead states". Once you reach a dead state, you can no
# longer reach a final state. Since many strings may end up here, it's
# advantageous to constrain our search to live states only.
livestates = set(state for state in self.states if self.islive(state))
# We store a list of tuples. Each tuple consists of an input string and the
# state that this input string leads to. This means we don't have to run the
# state machine from the very beginning every time we want to check a new
# string.
strings = []
# Initial entry (or possibly not, in which case this is a short one)
cstate = self.initial
cstring = []
if cstate in livestates:
if cstate in self.finals:
yield cstring
strings.append((cstring, cstate))
# Fixed point calculation
i = 0
while i < len(strings):
(cstring, cstate) = strings[i]
if cstate in self.map:
for symbol in sorted(self.map[cstate], key=key):
nstate = self.map[cstate][symbol]
nstring = cstring + [symbol]
if nstate in livestates:
if nstate in self.finals:
yield nstring
strings.append((nstring, nstate))
i += 1 | Generate strings (lists of symbols) that this FSM accepts. Since there may
be infinitely many of these we use a generator instead of constructing a
static list. Strings will be sorted in order of length and then lexically.
This procedure uses arbitrary amounts of memory but is very fast. There
may be more efficient ways to do this, that I haven't investigated yet.
You can use this in list comprehensions. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L447-L488 |
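Because strings() is a generator yielding lists of symbols in shortest-first order, itertools.islice is a convenient way to peek at the start of an infinite language:

from itertools import islice

from greenery.fsm import fsm

ends_in_b = fsm(
    alphabet={"a", "b"}, states={0, 1}, initial=0, finals={1},
    map={0: {"a": 0, "b": 1}, 1: {"a": 0, "b": 1}},
)

print(["".join(s) for s in islice(ends_in_b.strings(), 5)])
# ['b', 'ab', 'bb', 'aab', 'abb']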
qntm/greenery | greenery/fsm.py | fsm.cardinality | def cardinality(self):
'''
Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many
'''
num_strings = {}
def get_num_strings(state):
# Many FSMs have at least one oblivion state
if self.islive(state):
if state in num_strings:
if num_strings[state] is None: # "computing..."
# Recursion! There are infinitely many strings recognised
raise OverflowError(state)
return num_strings[state]
num_strings[state] = None # i.e. "computing..."
n = 0
if state in self.finals:
n += 1
if state in self.map:
for symbol in self.map[state]:
n += get_num_strings(self.map[state][symbol])
num_strings[state] = n
else:
# Dead state
num_strings[state] = 0
return num_strings[state]
return get_num_strings(self.initial) | python | def cardinality(self):
'''
Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many
'''
num_strings = {}
def get_num_strings(state):
# Many FSMs have at least one oblivion state
if self.islive(state):
if state in num_strings:
if num_strings[state] is None: # "computing..."
# Recursion! There are infinitely many strings recognised
raise OverflowError(state)
return num_strings[state]
num_strings[state] = None # i.e. "computing..."
n = 0
if state in self.finals:
n += 1
if state in self.map:
for symbol in self.map[state]:
n += get_num_strings(self.map[state][symbol])
num_strings[state] = n
else:
# Dead state
num_strings[state] = 0
return num_strings[state]
return get_num_strings(self.initial) | Consider the FSM as a set of strings and return the cardinality of that
set, or raise an OverflowError if there are infinitely many | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L535-L565 |
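cardinality() counts the language when it is finite and raises OverflowError as soon as a loop makes it infinite:

from greenery.fsm import fsm

# Accepts exactly "a".
just_a = fsm(
    alphabet={"a"}, states={0, 1, 2}, initial=0, finals={1},
    map={0: {"a": 1}, 1: {"a": 2}, 2: {"a": 2}},
)

just_a.cardinality()            # 1 -- only "a"
just_a.times(2).cardinality()   # 1 -- only "aa"

try:
    just_a.star().cardinality()
except OverflowError:
    print("infinitely many strings")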
qntm/greenery | greenery/fsm.py | fsm.copy | def copy(self):
'''
For completeness only, since `set.copy()` also exists. FSM objects are
immutable, so I can see only very odd reasons to need this.
'''
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = self.initial,
finals = self.finals,
map = self.map,
) | python | def copy(self):
'''
For completeness only, since `set.copy()` also exists. FSM objects are
immutable, so I can see only very odd reasons to need this.
'''
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = self.initial,
finals = self.finals,
map = self.map,
) | For completeness only, since `set.copy()` also exists. FSM objects are
immutable, so I can see only very odd reasons to need this. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L636-L647 |
qntm/greenery | greenery/fsm.py | fsm.derive | def derive(self, input):
'''
Compute the Brzozowski derivative of this FSM with respect to the input
string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative>
If any of the symbols are not members of the alphabet, that's a KeyError.
If you fall into oblivion, then the derivative is an FSM accepting no
strings.
'''
try:
# Consume the input string.
state = self.initial
for symbol in input:
if not symbol in self.alphabet:
if not anything_else in self.alphabet:
raise KeyError(symbol)
symbol = anything_else
# Missing transition = transition to dead state
if not (state in self.map and symbol in self.map[state]):
raise OblivionError
state = self.map[state][symbol]
# OK so now we have consumed that string, use the new location as the
# starting point.
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = state,
finals = self.finals,
map = self.map,
)
except OblivionError:
# Fell out of the FSM. The derivative of this FSM is the empty FSM.
return null(self.alphabet) | python | def derive(self, input):
'''
Compute the Brzozowski derivative of this FSM with respect to the input
string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative>
If any of the symbols are not members of the alphabet, that's a KeyError.
If you fall into oblivion, then the derivative is an FSM accepting no
strings.
'''
try:
# Consume the input string.
state = self.initial
for symbol in input:
if not symbol in self.alphabet:
if not anything_else in self.alphabet:
raise KeyError(symbol)
symbol = anything_else
# Missing transition = transition to dead state
if not (state in self.map and symbol in self.map[state]):
raise OblivionError
state = self.map[state][symbol]
# OK so now we have consumed that string, use the new location as the
# starting point.
return fsm(
alphabet = self.alphabet,
states = self.states,
initial = state,
finals = self.finals,
map = self.map,
)
except OblivionError:
# Fell out of the FSM. The derivative of this FSM is the empty FSM.
return null(self.alphabet) | Compute the Brzozowski derivative of this FSM with respect to the input
string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative>
If any of the symbols are not members of the alphabet, that's a KeyError.
If you fall into oblivion, then the derivative is an FSM accepting no
strings. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L649-L684 |
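The Brzozowski derivative simply re-roots the machine at wherever the input leads, so derive(prefix).accepts(rest) is equivalent to accepts(prefix + rest):

from greenery.fsm import fsm

ends_in_b = fsm(
    alphabet={"a", "b"}, states={0, 1}, initial=0, finals={1},
    map={0: {"a": 0, "b": 1}, 1: {"a": 0, "b": 1}},
)

d = ends_in_b.derive("b")
d.accepts("")                         # True  -- "b" on its own was accepted
d.accepts("ab")                       # True  -- "bab" was accepted
ends_in_b.derive("a").accepts("b")    # True  -- "ab" was accepted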
mikeywaites/flask-arrested | arrested/resource.py | Resource.add_endpoint | def add_endpoint(self, endpoint):
"""Register an :class:`.Endpoint` aginst this resource.
:param endpoint: :class:`.Endpoint` API Endpoint class
Usage::
foo_resource = Resource('example', __name__)
class MyEndpoint(Endpoint):
url = '/example'
name = 'myendpoint'
foo_resource.add_endpoint(MyEndpoint)
"""
if self.url_prefix:
url = '{prefix}{url}'.format(prefix=self.url_prefix, url=endpoint.url)
else:
url = endpoint.url
self.add_url_rule(
url,
view_func=endpoint.as_view(endpoint.get_name()),
) | python | def add_endpoint(self, endpoint):
"""Register an :class:`.Endpoint` aginst this resource.
:param endpoint: :class:`.Endpoint` API Endpoint class
Usage::
foo_resource = Resource('example', __name__)
class MyEndpoint(Endpoint):
url = '/example'
name = 'myendpoint'
foo_resource.add_endpoint(MyEndpoint)
"""
if self.url_prefix:
url = '{prefix}{url}'.format(prefix=self.url_prefix, url=endpoint.url)
else:
url = endpoint.url
self.add_url_rule(
url,
view_func=endpoint.as_view(endpoint.get_name()),
) | Register an :class:`.Endpoint` against this resource.
:param endpoint: :class:`.Endpoint` API Endpoint class
Usage::
foo_resource = Resource('example', __name__)
class MyEndpoint(Endpoint):
url = '/example'
name = 'myendpoint'
foo_resource.add_endpoint(MyEndpoint) | https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/resource.py#L80-L104 |
qntm/greenery | greenery/lego.py | reduce_after | def reduce_after(method):
'''reduce() the result of this method call (unless you already reduced it).'''
def new_method(self, *args, **kwargs):
result = method(self, *args, **kwargs)
if result == self:
return result
return result.reduce()
return new_method | python | def reduce_after(method):
'''reduce() the result of this method call (unless you already reduced it).'''
def new_method(self, *args, **kwargs):
result = method(self, *args, **kwargs)
if result == self:
return result
return result.reduce()
return new_method | reduce() the result of this method call (unless you already reduced it). | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L41-L48 |
qntm/greenery | greenery/lego.py | call_fsm | def call_fsm(method):
'''
Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations.
'''
fsm_method = getattr(fsm.fsm, method.__name__)
def new_method(*legos):
alphabet = set().union(*[lego.alphabet() for lego in legos])
return from_fsm(fsm_method(*[lego.to_fsm(alphabet) for lego in legos]))
return new_method | python | def call_fsm(method):
'''
Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations.
'''
fsm_method = getattr(fsm.fsm, method.__name__)
def new_method(*legos):
alphabet = set().union(*[lego.alphabet() for lego in legos])
return from_fsm(fsm_method(*[lego.to_fsm(alphabet) for lego in legos]))
return new_method | Take a method which acts on 0 or more regular expression objects... return a
new method which simply converts them all to FSMs, calls the FSM method
on them instead, then converts the result back to a regular expression.
We do this for several of the more annoying operations. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L50-L61 |
qntm/greenery | greenery/lego.py | from_fsm | def from_fsm(f):
'''
Turn the supplied finite state machine into a `lego` object. This is
accomplished using the Brzozowski algebraic method.
'''
# Make sure the supplied alphabet is kosher. It must contain only single-
# character strings or `fsm.anything_else`.
for symbol in f.alphabet:
if symbol == fsm.anything_else:
continue
if isinstance(symbol, str) and len(symbol) == 1:
continue
raise Exception("Symbol " + repr(symbol) + " cannot be used in a regular expression")
# We need a new state not already used
outside = object()
# The set of strings that would be accepted by this FSM if you started
# at state i is represented by the regex R_i.
# If state i has a sole transition "a" to state j, then we know R_i = a R_j.
# If state i is final, then the empty string is also accepted by this regex.
# And so on...
# From this we can build a set of simultaneous equations in len(f.states)
# variables. This system is easily solved for all variables, but we only
# need one: R_a, where a is the starting state.
# The first thing we need to do is organise the states into order of depth,
# so that when we perform our back-substitutions, we can start with the
# last (deepest) state and therefore finish with R_a.
states = [f.initial]
i = 0
while i < len(states):
current = states[i]
if current in f.map:
for symbol in sorted(f.map[current], key=fsm.key):
next = f.map[current][symbol]
if next not in states:
states.append(next)
i += 1
# Our system of equations is represented like so:
brz = {}
for a in f.states:
brz[a] = {}
for b in f.states | {outside}:
brz[a][b] = nothing
# Populate it with some initial data.
for a in f.map:
for symbol in f.map[a]:
b = f.map[a][symbol]
if symbol == fsm.anything_else:
brz[a][b] |= ~charclass(f.alphabet - {fsm.anything_else})
else:
brz[a][b] |= charclass({symbol})
if a in f.finals:
brz[a][outside] |= emptystring
# Now perform our back-substitution
for i in reversed(range(len(states))):
a = states[i]
# Before the equation for R_a can be substituted into the other
# equations, we need to resolve the self-transition (if any).
# e.g. R_a = 0 R_a | 1 R_b | 2 R_c
# becomes R_a = 0*1 R_b | 0*2 R_c
loop = brz[a][a] * star # i.e. "0*"
del brz[a][a]
for right in brz[a]:
brz[a][right] = loop + brz[a][right]
# Note: even if we're down to our final equation, the above step still
# needs to be performed before anything is returned.
# Now we can substitute this equation into all of the previous ones.
for j in range(i):
b = states[j]
# e.g. substituting R_a = 0*1 R_b | 0*2 R_c
# into R_b = 3 R_a | 4 R_c | 5 R_d
# yields R_b = 30*1 R_b | (30*2|4) R_c | 5 R_d
univ = brz[b][a] # i.e. "3"
del brz[b][a]
for right in brz[a]:
brz[b][right] |= univ + brz[a][right]
return brz[f.initial][outside].reduce() | python | def from_fsm(f):
'''
Turn the supplied finite state machine into a `lego` object. This is
accomplished using the Brzozowski algebraic method.
'''
# Make sure the supplied alphabet is kosher. It must contain only single-
# character strings or `fsm.anything_else`.
for symbol in f.alphabet:
if symbol == fsm.anything_else:
continue
if isinstance(symbol, str) and len(symbol) == 1:
continue
raise Exception("Symbol " + repr(symbol) + " cannot be used in a regular expression")
# We need a new state not already used
outside = object()
# The set of strings that would be accepted by this FSM if you started
# at state i is represented by the regex R_i.
# If state i has a sole transition "a" to state j, then we know R_i = a R_j.
# If state i is final, then the empty string is also accepted by this regex.
# And so on...
# From this we can build a set of simultaneous equations in len(f.states)
# variables. This system is easily solved for all variables, but we only
# need one: R_a, where a is the starting state.
# The first thing we need to do is organise the states into order of depth,
# so that when we perform our back-substitutions, we can start with the
# last (deepest) state and therefore finish with R_a.
states = [f.initial]
i = 0
while i < len(states):
current = states[i]
if current in f.map:
for symbol in sorted(f.map[current], key=fsm.key):
next = f.map[current][symbol]
if next not in states:
states.append(next)
i += 1
# Our system of equations is represented like so:
brz = {}
for a in f.states:
brz[a] = {}
for b in f.states | {outside}:
brz[a][b] = nothing
# Populate it with some initial data.
for a in f.map:
for symbol in f.map[a]:
b = f.map[a][symbol]
if symbol == fsm.anything_else:
brz[a][b] |= ~charclass(f.alphabet - {fsm.anything_else})
else:
brz[a][b] |= charclass({symbol})
if a in f.finals:
brz[a][outside] |= emptystring
# Now perform our back-substitution
for i in reversed(range(len(states))):
a = states[i]
# Before the equation for R_a can be substituted into the other
# equations, we need to resolve the self-transition (if any).
# e.g. R_a = 0 R_a | 1 R_b | 2 R_c
# becomes R_a = 0*1 R_b | 0*2 R_c
loop = brz[a][a] * star # i.e. "0*"
del brz[a][a]
for right in brz[a]:
brz[a][right] = loop + brz[a][right]
# Note: even if we're down to our final equation, the above step still
# needs to be performed before anything is returned.
# Now we can substitute this equation into all of the previous ones.
for j in range(i):
b = states[j]
# e.g. substituting R_a = 0*1 R_b | 0*2 R_c
# into R_b = 3 R_a | 4 R_c | 5 R_d
# yields R_b = 30*1 R_b | (30*2|4) R_c | 5 R_d
univ = brz[b][a] # i.e. "3"
del brz[b][a]
for right in brz[a]:
brz[b][right] |= univ + brz[a][right]
return brz[f.initial][outside].reduce() | Turn the supplied finite state machine into a `lego` object. This is
accomplished using the Brzozowski algebraic method. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L70-L159 |
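
A quick usage sketch for from_fsm (hedged: the fsm constructor keywords are assumed from how f.alphabet, f.initial, f.finals and f.map are read above; the dead state 2 only keeps the transition map total):

from greenery import fsm, lego

# Machine accepting "a" followed by any number of "b"s; state 2 is a dead state.
machine = fsm.fsm(
    alphabet={'a', 'b'},
    states={0, 1, 2},
    initial=0,
    finals={1},
    map={
        0: {'a': 1, 'b': 2},
        1: {'a': 2, 'b': 1},
        2: {'a': 2, 'b': 2},
    },
)
print(lego.from_fsm(machine))  # expected to reduce to something like ab*
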
qntm/greenery | greenery/lego.py | lego.parse | def parse(cls, string):
'''
Parse the entire supplied string as an instance of the present class.
Mainly for internal use in unit tests because it drops through to match()
in a convenient way.
'''
obj, i = cls.match(string, 0)
if i != len(string):
raise Exception("Could not parse '" + string + "' beyond index " + str(i))
return obj | python | def parse(cls, string):
'''
Parse the entire supplied string as an instance of the present class.
Mainly for internal use in unit tests because it drops through to match()
in a convenient way.
'''
obj, i = cls.match(string, 0)
if i != len(string):
raise Exception("Could not parse '" + string + "' beyond index " + str(i))
return obj | Parse the entire supplied string as an instance of the present class.
Mainly for internal use in unit tests because it drops through to match()
in a convenient way. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L236-L245 |
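
A small illustration of parse() versus a trailing unparsable character (assuming, as the class hierarchy in this file suggests, that concrete subclasses such as pattern inherit this classmethod):

from greenery.lego import pattern

print(pattern.parse("abc|def"))    # consumes the whole string, returns a pattern

try:
    pattern.parse("abc)def")       # ")" at index 3 cannot be consumed
except Exception as err:
    print(err)                     # roughly: Could not parse 'abc)def' beyond index 3
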
qntm/greenery | greenery/lego.py | lego.strings | def strings(self, otherchar=None):
'''
Each time next() is called on this iterator, a new string is returned
which the present lego piece can match. StopIteration is raised once
all such strings have been returned, although a regex with a * in it may
match infinitely many strings.
'''
# In the case of a regex like "[^abc]", there are infinitely many (well, a
# very large finite number of) single characters which will match. It's not
# productive to iterate over all of these giving every single example.
# You must supply your own "otherchar" to stand in for all of these
# possibilities.
for string in self.to_fsm().strings():
# Have to represent `fsm.anything_else` somehow.
if fsm.anything_else in string:
if otherchar == None:
raise Exception("Please choose an 'otherchar'")
string = [
otherchar if char == fsm.anything_else else char
for char in string
]
yield "".join(string) | python | def strings(self, otherchar=None):
'''
Each time next() is called on this iterator, a new string is returned
which the present lego piece can match. StopIteration is raised once
all such strings have been returned, although a regex with a * in it may
match infinitely many strings.
'''
# In the case of a regex like "[^abc]", there are infinitely many (well, a
# very large finite number of) single characters which will match. It's not
# productive to iterate over all of these giving every single example.
# You must supply your own "otherchar" to stand in for all of these
# possibilities.
for string in self.to_fsm().strings():
# Have to represent `fsm.anything_else` somehow.
if fsm.anything_else in string:
if otherchar == None:
raise Exception("Please choose an 'otherchar'")
string = [
otherchar if char == fsm.anything_else else char
for char in string
]
yield "".join(string) | Each time next() is called on this iterator, a new string is returned
which will the present lego piece can match. StopIteration is raised once
all such strings have been returned, although a regex with a * in may
match infinitely many strings. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L395-L419 |
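
A hedged sketch of the otherchar behaviour described in that docstring (the module-level parse helper is assumed to be importable from this file):

from greenery.lego import parse

gen = parse("[^ab]c").strings(otherchar="?")
print(next(gen))   # e.g. "?c" or "cc"; "?" stands in for fsm.anything_else

# Without otherchar, the same call raises: "Please choose an 'otherchar'"
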
qntm/greenery | greenery/lego.py | multiplier.canmultiplyby | def canmultiplyby(self, other):
'''
Multiplication is not well-defined for all pairs of multipliers because
the resulting possibilities do not necessarily form a continuous range.
For example:
{0,x} * {0,y} = {0,x*y}
{2} * {3} = {6}
{2} * {1,2} = ERROR
The proof isn't simple but suffice it to say that {p,p+q} * {r,r+s} is
equal to {pr, (p+q)(r+s)} only if s=0 or qr+1 >= p. If not, then at least
one gap appears in the range. The first inaccessible number is (p+q)r + 1.
'''
return other.optional == bound(0) or \
self.optional * other.mandatory + bound(1) >= self.mandatory | python | def canmultiplyby(self, other):
'''
Multiplication is not well-defined for all pairs of multipliers because
the resulting possibilities do not necessarily form a continuous range.
For example:
{0,x} * {0,y} = {0,x*y}
{2} * {3} = {6}
{2} * {1,2} = ERROR
The proof isn't simple but suffice it to say that {p,p+q} * {r,r+s} is
equal to {pr, (p+q)(r+s)} only if s=0 or qr+1 >= p. If not, then at least
one gap appears in the range. The first inaccessible number is (p+q)r + 1.
'''
return other.optional == bound(0) or \
self.optional * other.mandatory + bound(1) >= self.mandatory | Multiplication is not well-defined for all pairs of multipliers because
the resulting possibilities do not necessarily form a continuous range.
For example:
{0,x} * {0,y} = {0,x*y}
{2} * {3} = {6}
{2} * {1,2} = ERROR
The proof isn't simple but suffice it to say that {p,p+q} * {r,r+s} is
equal to {pr, (p+q)(r+s)} only if s=0 or qr+1 >= p. If not, then at least
one gap appears in the range. The first inaccessible number is (p+q)r + 1. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1057-L1071 |
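
To make the gap condition concrete, the {2} * {1,2} case from the docstring can be checked with plain integers (no greenery objects involved):

# self = {p, p+q} = {2}, other = {r, r+s} = {1,2}
p, q = 2, 0
r, s = 1, 1

print((s == 0) or (q * r + 1 >= p))   # False: a{2} repeated {1,2} times yields lengths {2, 4} only
print((p + q) * r + 1)                # 3, the first inaccessible length in that range
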
qntm/greenery | greenery/lego.py | multiplier.canintersect | def canintersect(self, other):
'''
Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR
'''
return not (self.max < other.min or other.max < self.min) | python | def canintersect(self, other):
'''
Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR
'''
return not (self.max < other.min or other.max < self.min) | Intersection is not well-defined for all pairs of multipliers.
For example:
{2,3} & {3,4} = {3}
{2,} & {1,7} = {2,7}
{2} & {5} = ERROR | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1093-L1101 |
qntm/greenery | greenery/lego.py | multiplier.canunion | def canunion(self, other):
'''Union is not defined for all pairs of multipliers. e.g. {0,1} | {3,4}'''
return not (self.max + bound(1) < other.min or other.max + bound(1) < self.min) | python | def canunion(self, other):
'''Union is not defined for all pairs of multipliers. e.g. {0,1} | {3,4}'''
return not (self.max + bound(1) < other.min or other.max + bound(1) < self.min) | Union is not defined for all pairs of multipliers. e.g. {0,1} | {3,4} | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1115-L1117 |
qntm/greenery | greenery/lego.py | multiplier.common | def common(self, other):
'''
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
'''
mandatory = min(self.mandatory, other.mandatory)
optional = min(self.optional, other.optional)
return multiplier(mandatory, mandatory + optional) | python | def common(self, other):
'''
Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier.
'''
mandatory = min(self.mandatory, other.mandatory)
optional = min(self.optional, other.optional)
return multiplier(mandatory, mandatory + optional) | Find the shared part of two multipliers. This is the largest multiplier
which can be safely subtracted from both the originals. This may
return the "zero" multiplier. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1131-L1139 |
qntm/greenery | greenery/lego.py | mult.dock | def dock(self, other):
'''
"Dock" another mult from this one (i.e. remove part of the tail) and
return the result. The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2}
'''
if other.multiplicand != self.multiplicand:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return mult(self.multiplicand, self.multiplier - other.multiplier) | python | def dock(self, other):
'''
"Dock" another mult from this one (i.e. remove part of the tail) and
return the result. The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2}
'''
if other.multiplicand != self.multiplicand:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return mult(self.multiplicand, self.multiplier - other.multiplier) | "Dock" another mult from this one (i.e. remove part of the tail) and
return the result. The reverse of concatenation. This is a lot trickier.
e.g. a{4,5} - a{3} = a{1,2} | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1189-L1197 |
qntm/greenery | greenery/lego.py | mult.common | def common(self, other):
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero) | python | def common(self, other):
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero) | Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1199-L1210 |
qntm/greenery | greenery/lego.py | conc.common | def common(self, other, suffix=False):
'''
Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> ""
'''
mults = []
indices = range(min(len(self.mults), len(other.mults))) # e.g. [0, 1, 2, 3]
# Work backwards from the end of both concs instead.
if suffix:
indices = [-i - 1 for i in indices] # e.g. [-1, -2, -3, -4]
for i in indices:
common = self.mults[i].common(other.mults[i])
# Happens when multiplicands disagree (e.g. "A.common(B)") or if
# the multiplicand is shared but the common multiplier is zero
# (e.g. "ABZ*.common(CZ)".)
if common.multiplier == zero:
break
mults.append(common)
# If we did not remove the entirety of both mults, we cannot continue.
if common != self.mults[i] or common != other.mults[i]:
break
if suffix:
mults = reversed(mults)
return conc(*mults) | python | def common(self, other, suffix=False):
'''
Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> ""
'''
mults = []
indices = range(min(len(self.mults), len(other.mults))) # e.g. [0, 1, 2, 3]
# Work backwards from the end of both concs instead.
if suffix:
indices = [-i - 1 for i in indices] # e.g. [-1, -2, -3, -4]
for i in indices:
common = self.mults[i].common(other.mults[i])
# Happens when multiplicands disagree (e.g. "A.common(B)") or if
# the multiplicand is shared but the common multiplier is zero
# (e.g. "ABZ*.common(CZ)".)
if common.multiplier == zero:
break
mults.append(common)
# If we did not remove the entirety of both mults, we cannot continue.
if common != self.mults[i] or common != other.mults[i]:
break
if suffix:
mults = reversed(mults)
return conc(*mults) | Return the common prefix of these two concs; that is, the largest conc
which can be safely beheaded() from the front of both.
The result could be emptystring.
"ZYAA, ZYBB" -> "ZY"
"CZ, CZ" -> "CZ"
"YC, ZC" -> ""
With the "suffix" flag set, works from the end. E.g.:
"AAZY, BBZY" -> "ZY"
"CZ, CZ" -> "CZ"
"CY, CZ" -> "" | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1528-L1569 |
qntm/greenery | greenery/lego.py | conc.dock | def dock(self, other):
'''
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7
# e.g. other has mults at indices [0, 1, 2] len=3
new = list(self.mults)
for i in reversed(range(len(other.mults))): # [2, 1, 0]
# e.g. i = 1, j = 7 - 3 + 1 = 5
j = len(self.mults) - len(other.mults) + i
new[j] = new[j].dock(other.mults[i])
if new[j].multiplier == zero:
# omit that mult entirely since it has been factored out
del new[j]
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "ABC{2} - BC"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != 0:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return conc(*new) | python | def dock(self, other):
'''
Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC.
'''
# e.g. self has mults at indices [0, 1, 2, 3, 4, 5, 6] len=7
# e.g. other has mults at indices [0, 1, 2] len=3
new = list(self.mults)
for i in reversed(range(len(other.mults))): # [2, 1, 0]
# e.g. i = 1, j = 7 - 3 + 1 = 5
j = len(self.mults) - len(other.mults) + i
new[j] = new[j].dock(other.mults[i])
if new[j].multiplier == zero:
# omit that mult entirely since it has been factored out
del new[j]
# If the subtraction is incomplete but there is more to
# other.mults, then we have a problem. For example, "ABC{2} - BC"
# subtracts the C successfully but leaves something behind,
# then tries to subtract the B too, which isn't possible
else:
if i != 0:
raise Exception("Can't subtract " + repr(other) + " from " + repr(self))
return conc(*new) | Subtract another conc from this one.
This is the opposite of concatenation. For example, if ABC + DEF = ABCDEF,
then logically ABCDEF - DEF = ABC. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1571-L1598 |
qntm/greenery | greenery/lego.py | pattern.dock | def dock(self, other):
'''
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
'''
return pattern(*[c.dock(other) for c in self.concs]) | python | def dock(self, other):
'''
The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C.
'''
return pattern(*[c.dock(other) for c in self.concs]) | The opposite of concatenation. Remove a common suffix from the present
pattern; that is, from each of its constituent concs.
AYZ|BYZ|CYZ - YZ = A|B|C. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1831-L1837 |
qntm/greenery | greenery/lego.py | pattern.behead | def behead(self, other):
'''
Like dock() but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C
'''
return pattern(*[c.behead(other) for c in self.concs]) | python | def behead(self, other):
'''
Like dock() but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C
'''
return pattern(*[c.behead(other) for c in self.concs]) | Like dock() but the other way around. Remove a common prefix from the
present pattern; that is, from each of its constituent concs.
ZA|ZB|ZC.behead(Z) = A|B|C | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1839-L1845 |
qntm/greenery | greenery/lego.py | pattern._commonconc | def _commonconc(self, suffix=False):
'''
Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Return the common prefix along with all
the leftovers after truncating that common prefix from each conc.
"ZA|ZB|ZC" -> "Z", "(A|B|C)"
"ZA|ZB|ZC|Z" -> "Z", "(A|B|C|)"
"CZ|CZ" -> "CZ", "()"
If "suffix" is True, the same result but for suffixes.
'''
if len(self.concs) == 0:
raise Exception("Can't call _commonconc on " + repr(self))
from functools import reduce
return reduce(
lambda x, y: x.common(y, suffix=suffix),
self.concs
) | python | def _commonconc(self, suffix=False):
'''
Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Return the common prefix along with all
the leftovers after truncating that common prefix from each conc.
"ZA|ZB|ZC" -> "Z", "(A|B|C)"
"ZA|ZB|ZC|Z" -> "Z", "(A|B|C|)"
"CZ|CZ" -> "CZ", "()"
If "suffix" is True, the same result but for suffixes.
'''
if len(self.concs) == 0:
raise Exception("Can't call _commonconc on " + repr(self))
from functools import reduce
return reduce(
lambda x, y: x.common(y, suffix=suffix),
self.concs
) | Find the longest conc which acts as prefix to every conc in this pattern.
This could be the empty string. Return the common prefix along with all
the leftovers after truncating that common prefix from each conc.
"ZA|ZB|ZC" -> "Z", "(A|B|C)"
"ZA|ZB|ZC|Z" -> "Z", "(A|B|C|)"
"CZ|CZ" -> "CZ", "()"
If "suffix" is True, the same result but for suffixes. | https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/lego.py#L1847-L1865 |
iurisilvio/bottle-sqlalchemy | examples/basic.py | delete_name | def delete_name(name):
''' This function doesn't use the plugin. '''
session = create_session()
try:
user = session.query(User).filter_by(name=name).first()
session.delete(user)
session.commit()
except SQLAlchemyError, e:
session.rollback()
raise bottle.HTTPError(500, "Database Error", e)
finally:
session.close() | python | def delete_name(name):
''' This function doesn't use the plugin. '''
session = create_session()
try:
user = session.query(User).filter_by(name=name).first()
session.delete(user)
session.commit()
except SQLAlchemyError, e:
session.rollback()
raise bottle.HTTPError(500, "Database Error", e)
finally:
session.close() | This function doesn't use the plugin. | https://github.com/iurisilvio/bottle-sqlalchemy/blob/e14779472028c26ccde5f765fe4942df8e7314bb/examples/basic.py#L45-L56
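
For contrast, a handler that does use the plugin simply accepts the session through the plugin keyword; this is a sketch assuming the default keyword 'db' and the User model above, not code taken from the example file:

@bottle.route('/show/:name')
def show(name, db):
    # "db" is a session injected by SQLAlchemyPlugin; commit, rollback and
    # close are handled by the plugin around this handler.
    user = db.query(User).filter_by(name=name).first()
    if user:
        return {'name': user.name}
    return bottle.HTTPError(404, 'User not found')
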
iurisilvio/bottle-sqlalchemy | bottle_sqlalchemy.py | SQLAlchemyPlugin.setup | def setup(self, app):
''' Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available.'''
for other in app.plugins:
if not isinstance(other, SQLAlchemyPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another SQLAlchemy plugin with "\
"conflicting settings (non-unique keyword).")
elif other.name == self.name:
self.name += '_%s' % self.keyword
if self.create and not self.metadata:
raise bottle.PluginError('Define metadata value to create database.') | python | def setup(self, app):
''' Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available.'''
for other in app.plugins:
if not isinstance(other, SQLAlchemyPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another SQLAlchemy plugin with "\
"conflicting settings (non-unique keyword).")
elif other.name == self.name:
self.name += '_%s' % self.keyword
if self.create and not self.metadata:
raise bottle.PluginError('Define metadata value to create database.') | Make sure that other installed plugins don't affect the same
keyword argument and check if metadata is available. | https://github.com/iurisilvio/bottle-sqlalchemy/blob/e14779472028c26ccde5f765fe4942df8e7314bb/bottle_sqlalchemy.py#L101-L113 |
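
A minimal installation sketch showing what this setup() guards against (the constructor signature is assumed from common bottle-sqlalchemy usage, not from this snippet):

import bottle
from sqlalchemy import create_engine
from bottle_sqlalchemy import SQLAlchemyPlugin

app = bottle.Bottle()
engine = create_engine('sqlite:///:memory:')
app.install(SQLAlchemyPlugin(engine, keyword='db'))

# Installing a second SQLAlchemyPlugin with keyword='db' would make setup()
# raise bottle.PluginError because of the non-unique keyword.
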
tmc/gevent-zeromq | gevent_zeromq/core.py | GreenSocket.send_multipart | def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg | python | def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg | wrap send_multipart to prevent state_changed on each partial send | https://github.com/tmc/gevent-zeromq/blob/b15d50deedda3d2cdb701106d4b315c7a06353e3/gevent_zeromq/core.py#L148-L156 |
tmc/gevent-zeromq | gevent_zeromq/core.py | GreenSocket.recv_multipart | def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(GreenSocket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg | python | def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(GreenSocket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg | wrap recv_multipart to prevent state_changed on each partial recv | https://github.com/tmc/gevent-zeromq/blob/b15d50deedda3d2cdb701106d4b315c7a06353e3/gevent_zeromq/core.py#L158-L166 |
konstantinstadler/pymrio | pymrio/core/fileio.py | load_all | def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
""" Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
latter case, and if there are several mrios in the zip file, the
parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file with the systemtype entry
'IOSystem').
"""
def clean(varStr):
""" get valid python name from folder
"""
return re.sub('\W|^(?=\d)', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8')
)['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
raise ReadError('Mulitple mrio archives found in {}. '
'Specify one by the '
'parameter "path_in_arc"'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io | python | def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
""" Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
latter case, and if there are several mrios in the zip file, the
parameter 'path_in_arc' needs to be specified to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the parameter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file with the systemtype entry
'IOSystem').
"""
def clean(varStr):
""" get valid python name from folder
"""
return re.sub('\W|^(?=\d)', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8')
)['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
raise ReadError('Mulitple mrio archives found in {}. '
'Specify one by the '
'parameter "path_in_arc"'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io | Loads a full IO system with all extensions in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
later case and if there are several mrio's in the zip file the
parameter 'path_in_arc' need to be specifiec to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the paramter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file with the systemtype entry
'IOSystem'). | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L34-L181
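
A hedged usage sketch for load_all (paths are placeholders; the package-level re-export pymrio.load_all is assumed):

import pymrio

# Uncompressed folder: every subfolder carrying a file-parameters file is attached as an extension.
io = pymrio.load_all('path/to/saved_mrio')

# Zip archive holding a single MRIO: path_in_arc can stay None.
io_zip = pymrio.load_all('path/to/saved_mrio.zip')

# Zip archive holding several MRIOs: point path_in_arc at the wanted one.
io_v1 = pymrio.load_all('path/to/archive.zip', path_in_arc='mrio_v1/')
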
konstantinstadler/pymrio | pymrio/core/fileio.py | load | def load(path, include_core=True, path_in_arc=''):
""" Loads a IOSystem or Extension previously saved with pymrio
This function can be used to load a IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors
"""
path = Path(path)
if not path.exists():
raise ReadError('Given path does not exist')
file_para = get_file_para(path=path, path_in_arc=path_in_arc)
if file_para.content['systemtype'] == GENERIC_NAMES['iosys']:
if zipfile.is_zipfile(str(path)):
ret_system = IOSystem(meta=MRIOMetaData(
location=path,
path_in_arc=os.path.join(file_para.folder,
DEFAULT_FILE_NAMES['metadata'])))
ret_system.meta._add_fileio(
"Loaded IO system from {} - {}".format(path, path_in_arc))
else:
ret_system = IOSystem(meta=MRIOMetaData(
location=path / DEFAULT_FILE_NAMES['metadata']))
ret_system.meta._add_fileio(
"Loaded IO system from {}".format(path))
elif file_para.content['systemtype'] == GENERIC_NAMES['ext']:
ret_system = Extension(file_para.content['name'])
else:
raise ReadError('Type of system no defined in the file parameters')
return None
for key in file_para.content['files']:
if not include_core and key not in ['A', 'L', 'Z']:
continue
file_name = file_para.content['files'][key]['name']
nr_index_col = file_para.content['files'][key]['nr_index_col']
nr_header = file_para.content['files'][key]['nr_header']
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
_index_col = 0 if _index_col == [0] else _index_col
_header = 0 if _header == [0] else _header
if zipfile.is_zipfile(str(path)):
full_file_name = os.path.join(file_para.folder, file_name)
logging.info('Load data from {}'.format(full_file_name))
with zipfile.ZipFile(file=str(path)) as zf:
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(zf.open(full_file_name)))
else:
setattr(ret_system, key,
pd.read_table(zf.open(full_file_name),
index_col=_index_col,
header=_header))
else:
full_file_name = path / file_name
logging.info('Load data from {}'.format(full_file_name))
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(full_file_name))
else:
setattr(ret_system, key,
pd.read_table(full_file_name,
index_col=_index_col,
header=_header))
return ret_system | python | def load(path, include_core=True, path_in_arc=''):
""" Loads a IOSystem or Extension previously saved with pymrio
This function can be used to load a IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors
"""
path = Path(path)
if not path.exists():
raise ReadError('Given path does not exist')
file_para = get_file_para(path=path, path_in_arc=path_in_arc)
if file_para.content['systemtype'] == GENERIC_NAMES['iosys']:
if zipfile.is_zipfile(str(path)):
ret_system = IOSystem(meta=MRIOMetaData(
location=path,
path_in_arc=os.path.join(file_para.folder,
DEFAULT_FILE_NAMES['metadata'])))
ret_system.meta._add_fileio(
"Loaded IO system from {} - {}".format(path, path_in_arc))
else:
ret_system = IOSystem(meta=MRIOMetaData(
location=path / DEFAULT_FILE_NAMES['metadata']))
ret_system.meta._add_fileio(
"Loaded IO system from {}".format(path))
elif file_para.content['systemtype'] == GENERIC_NAMES['ext']:
ret_system = Extension(file_para.content['name'])
else:
raise ReadError('Type of system no defined in the file parameters')
return None
for key in file_para.content['files']:
if not include_core and key not in ['A', 'L', 'Z']:
continue
file_name = file_para.content['files'][key]['name']
nr_index_col = file_para.content['files'][key]['nr_index_col']
nr_header = file_para.content['files'][key]['nr_header']
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
_index_col = 0 if _index_col == [0] else _index_col
_header = 0 if _header == [0] else _header
if zipfile.is_zipfile(str(path)):
full_file_name = os.path.join(file_para.folder, file_name)
logging.info('Load data from {}'.format(full_file_name))
with zipfile.ZipFile(file=str(path)) as zf:
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(zf.open(full_file_name)))
else:
setattr(ret_system, key,
pd.read_table(zf.open(full_file_name),
index_col=_index_col,
header=_header))
else:
full_file_name = path / file_name
logging.info('Load data from {}'.format(full_file_name))
if (os.path.splitext(str(full_file_name))[1] == '.pkl' or
os.path.splitext(str(full_file_name))[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(full_file_name))
else:
setattr(ret_system, key,
pd.read_table(full_file_name,
index_col=_index_col,
header=_header))
return ret_system | Loads an IOSystem or Extension previously saved with pymrio
This function can be used to load an IOSystem or Extension specified in a
metadata file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json)
DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other cases
the tables are assumed to be in .txt format.
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load. This must
either point to the directory containing the uncompressed data or
the location of a compressed zip file with the data. In the
latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to a compressed zip file.
Returns
-------
IOSystem or Extension class depending on systemtype in the json file
None in case of errors | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L184-L289 |
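
And a corresponding sketch for loading a single system or extension (again with placeholder paths and the assumed package-level name pymrio.load):

import pymrio

# Load one previously saved extension from an uncompressed folder.
emissions = pymrio.load('path/to/saved_mrio/emissions')

# Load from inside a zip archive; path_in_arc points at the folder holding
# the file-parameters file for that system.
emissions_zip = pymrio.load('path/to/archive.zip', path_in_arc='emissions/')
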
konstantinstadler/pymrio | pymrio/core/fileio.py | archive | def archive(source, archive, path_in_arc=None, remove_source=False,
compression=zipfile.ZIP_DEFLATED, compresslevel=-1):
"""Archives a MRIO database as zip file
This function is a wrapper around zipfile.write,
to ease the writing of an archive and removing the source data.
Note
----
In contrast to zipfile.write, this function raises an
error if the data (path + filename) are identical in the zip archive.
Background: the zip standard allows that files with the same name and path
are stored side by side in a zip file. This becomes an issue when unpacking
these files as they overwrite each other upon extraction.
Parameters
----------
source: str or pathlib.Path or list of these
Location of the mrio data (folder).
If not all data should be archived, pass a list of
all files which should be included in the archive (absolute path)
archive: str or pathlib.Path
Full path with filename for the archive.
path_in_arc: string, optional
Path within the archive zip file where data should be stored.
'path_in_arc' must be given without leading dot and slash.
Thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'mrio_v1' pass 'mrio_v1/'.
If None (default) data will be stored in the root of the archive.
remove_source: boolean, optional
If True, deletes the source file from the disk (all files
specified in 'source' or the specified directory, depending if a
list of files or directory was passed). If False, leaves the
original files on disk. Also removes all empty directories
in source including source.
compression: ZIP compression method, optional
This is passed to zipfile.write. By default it is set to ZIP_DEFLATED.
NB: This is different from the zipfile default (ZIP_STORED) which would
not give any compression. See
https://docs.python.org/3/library/zipfile.html#zipfile-objects for
further information. Depending on the value given here additional
modules might be necessary (e.g. zlib for ZIP_DEFLATED). Further
information on this can also be found in the zipfile python docs.
compresslevel: int, optional
This is passed to zipfile.write and specifies the compression level.
Acceptable values depend on the method specified at the parameter
'compression'. By default, it is set to -1 which gives a compromise
between speed and size for the ZIP_DEFLATED compression (this is
internally interpreted as 6 as described here:
https://docs.python.org/3/library/zlib.html#zlib.compressobj )
NB: This is only used if python version >= 3.7
Raises
------
FileExistsError: In case a file to be archived is already present in the
archive.
"""
archive = Path(archive)
if type(source) is not list:
source_root = str(source)
source_files = [f for f in Path(source).glob('**/*') if f.is_file()]
else:
source_root = os.path.commonpath([str(f) for f in source])
source_files = [Path(f) for f in source]
path_in_arc = '' if not path_in_arc else path_in_arc
arc_file_names = {
str(f): os.path.join(path_in_arc, str(f.relative_to(source_root)))
for f in source_files}
if archive.exists():
with zipfile.ZipFile(file=str(archive), mode='r') as zf:
already_present = zf.namelist()
duplicates = {ff: zf for ff, zf in arc_file_names.items()
if zf in already_present}
if duplicates:
raise FileExistsError(
'These files already exists in {arc} for '
'path_in_arc "{pa}":\n {filelist}'.format(
pa=path_in_arc, arc=archive,
filelist='\n '.join(duplicates.values())))
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression,
compresslevel=compresslevel)
else:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression)
with zipfile.ZipFile(**zip_open_para) as zz:
for fullpath, zippath in arc_file_names.items():
zz.write(str(fullpath), str(zippath))
if remove_source:
for f in source_files:
os.remove(str(f))
for root, dirs, files in os.walk(source_root, topdown=False):
for name in dirs:
dir_path = os.path.join(root, name)
if not os.listdir(dir_path):
os.rmdir(os.path.join(root, name))
try:
os.rmdir(source_root)
except OSError:
pass | python | def archive(source, archive, path_in_arc=None, remove_source=False,
compression=zipfile.ZIP_DEFLATED, compresslevel=-1):
"""Archives a MRIO database as zip file
This function is a wrapper around zipfile.write,
to ease the writing of an archive and removing the source data.
Note
----
In contrast to zipfile.write, this function raises an
error if the data (path + filename) are identical in the zip archive.
Background: the zip standard allows that files with the same name and path
are stored side by side in a zip file. This becomes an issue when unpacking
these files as they overwrite each other upon extraction.
Parameters
----------
source: str or pathlib.Path or list of these
Location of the mrio data (folder).
If not all data should be archived, pass a list of
all files which should be included in the archive (absolute path)
archive: str or pathlib.Path
Full path with filename for the archive.
path_in_arc: string, optional
Path within the archive zip file where data should be stored.
'path_in_arc' must be given without leading dot and slash.
Thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'mrio_v1' pass 'mrio_v1/'.
If None (default) data will be stored in the root of the archive.
remove_source: boolean, optional
If True, deletes the source file from the disk (all files
specified in 'source' or the specified directory, depending if a
list of files or directory was passed). If False, leaves the
original files on disk. Also removes all empty directories
in source including source.
compression: ZIP compression method, optional
This is passed to zipfile.write. By default it is set to ZIP_DEFLATED.
NB: This is different from the zipfile default (ZIP_STORED) which would
not give any compression. See
https://docs.python.org/3/library/zipfile.html#zipfile-objects for
further information. Depending on the value given here additional
modules might be necessary (e.g. zlib for ZIP_DEFLATED). Further
information on this can also be found in the zipfile python docs.
compresslevel: int, optional
This is passed to zipfile.write and specifies the compression level.
Acceptable values depend on the method specified at the parameter
'compression'. By default, it is set to -1 which gives a compromise
between speed and size for the ZIP_DEFLATED compression (this is
internally interpreted as 6 as described here:
https://docs.python.org/3/library/zlib.html#zlib.compressobj )
NB: This is only used if python version >= 3.7
Raises
------
FileExistsError: In case a file to be archived is already present in the
archive.
"""
archive = Path(archive)
if type(source) is not list:
source_root = str(source)
source_files = [f for f in Path(source).glob('**/*') if f.is_file()]
else:
source_root = os.path.commonpath([str(f) for f in source])
source_files = [Path(f) for f in source]
path_in_arc = '' if not path_in_arc else path_in_arc
arc_file_names = {
str(f): os.path.join(path_in_arc, str(f.relative_to(source_root)))
for f in source_files}
if archive.exists():
with zipfile.ZipFile(file=str(archive), mode='r') as zf:
already_present = zf.namelist()
duplicates = {ff: zf for ff, zf in arc_file_names.items()
if zf in already_present}
if duplicates:
raise FileExistsError(
'These files already exists in {arc} for '
'path_in_arc "{pa}":\n {filelist}'.format(
pa=path_in_arc, arc=archive,
filelist='\n '.join(duplicates.values())))
if sys.version_info.major == 3 and sys.version_info.minor >= 7:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression,
compresslevel=compresslevel)
else:
zip_open_para = dict(file=str(archive), mode='a',
compression=compression)
with zipfile.ZipFile(**zip_open_para) as zz:
for fullpath, zippath in arc_file_names.items():
zz.write(str(fullpath), str(zippath))
if remove_source:
for f in source_files:
os.remove(str(f))
for root, dirs, files in os.walk(source_root, topdown=False):
for name in dirs:
dir_path = os.path.join(root, name)
if not os.listdir(dir_path):
os.rmdir(os.path.join(root, name))
try:
os.rmdir(source_root)
except OSError:
pass | Archives an MRIO database as a zip file
This function is a wrapper around zipfile.write,
to ease the writing of an archive and removing the source data.
Note
----
In contrast to zipfile.write, this function raises an
error if the data (path + filename) are identical in the zip archive.
Background: the zip standard allows that files with the same name and path
are stored side by side in a zip file. This becomes an issue when unpacking
these files as they overwrite each other upon extraction.
Parameters
----------
source: str or pathlib.Path or list of these
Location of the mrio data (folder).
If not all data should be archived, pass a list of
all files which should be included in the archive (absolute path)
archive: str or pathlib.Path
Full path with filename for the archive.
path_in_arc: string, optional
Path within the archive zip file where data should be stored.
'path_in_arc' must be given without leading dot and slash.
Thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'mrio_v1' pass 'mrio_v1/'.
If None (default) data will be stored in the root of the archive.
remove_source: boolean, optional
If True, deletes the source file from the disk (all files
specified in 'source' or the specified directory, depending if a
list of files or directory was passed). If False, leaves the
original files on disk. Also removes all empty directories
in source including source.
compression: ZIP compression method, optional
This is passed to zipfile.write. By default it is set to ZIP_DEFLATED.
NB: This is different from the zipfile default (ZIP_STORED) which would
not give any compression. See
https://docs.python.org/3/library/zipfile.html#zipfile-objects for
further information. Depending on the value given here additional
modules might be necessary (e.g. zlib for ZIP_DEFLATED). Further
information on this can also be found in the zipfile python docs.
compresslevel: int, optional
This is passed to zipfile.write and specifies the compression level.
Acceptable values depend on the method specified at the parameter
'compression'. By default, it is set to -1 which gives a compromise
between speed and size for the ZIP_DEFLATED compression (this is
internally interpreted as 6 as described here:
https://docs.python.org/3/library/zlib.html#zlib.compressobj )
NB: This is only used if python version >= 3.7
Raises
------
FileExistsError: In case a file to be archived is already present in the
archive. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L292-L408 |
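
A short archiving sketch (placeholder paths; pymrio.archive is assumed to be the package-level name for this function):

import pymrio

# Pack a saved MRIO folder into a zip, stored under 'mrio_v1/' inside the archive.
pymrio.archive(source='path/to/saved_mrio',
               archive='path/to/mrios.zip',
               path_in_arc='mrio_v1/',
               remove_source=False)

# Repeating the call with the same path_in_arc raises FileExistsError,
# because the same member names would appear twice in the zip.
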
konstantinstadler/pymrio | pymrio/core/fileio.py | _load_ini_based_io | def _load_ini_based_io(path, recursive=False, ini=None,
subini={}, include_core=True,
only_coefficients=False):
""" DEPRECATED: For convert a previous version to the new json format
Loads a IOSystem or Extension from a ini files
This function can be used to load a IOSystem or Extension specified in a
ini file. DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : string
path or ini file name for the data to load
recursive : boolean, optional
If True, load also the data in the subfolders and add them as
extensions to the IOSystem (in that case path must point to the root).
Only first order subfolders are considered (no subfolders in
subfolders) and if a folder does not contain a ini file it's skipped.
Use the subini parameter in case of multiple ini files in a subfolder.
Attribute name of the extension in the IOSystem are based on the
subfolder name. Default is False
ini : string, optional
If there are several ini files in the root folder, take this one for
loading the data If None (default) take the ini found in the folder,
error if several are found
subini : dict, optional
If there are multiple ini in the subfolder, use the ini given in the
dict. Format: 'subfoldername':'ininame' If a key for a subfolder is
not found or None (default), the ini found in the folder will be taken,
error if several are found
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
Returns
-------
IOSystem or Extension class depending on systemtype in the ini file
None in case of errors
"""
# check path and given parameter
ini_file_name = None
path = os.path.abspath(os.path.normpath(path))
if os.path.splitext(path)[1] == '.ini':
(path, ini_file_name) = os.path.split(path)
if ini:
ini_file_name = ini
if not os.path.exists(path):
raise ReadError('Given path does not exist')
return None
if not ini_file_name:
_inifound = False
for file in os.listdir(path):
if os.path.splitext(file)[1] == '.ini':
if _inifound:
raise ReadError(
'Found multiple ini files in folder - specify one')
return None
ini_file_name = file
_inifound = True
# read the ini
io_ini = configparser.RawConfigParser()
io_ini.optionxform = lambda option: option
io_ini.read(os.path.join(path, ini_file_name))
systemtype = io_ini.get('systemtype', 'systemtype', fallback=None)
name = io_ini.get('meta', 'name',
fallback=os.path.splitext(ini_file_name)[0])
if systemtype == 'IOSystem':
ret_system = IOSystem(name=name)
elif systemtype == 'Extension':
ret_system = Extension(name=name)
else:
raise ReadError('System not defined in ini')
return None
for key in io_ini['meta']:
setattr(ret_system, key, io_ini.get('meta', key, fallback=None))
for key in io_ini['files']:
if '_nr_index_col' in key:
continue
if '_nr_header' in key:
continue
if not include_core:
not_to_load = ['A', 'L', 'Z']
if key in not_to_load:
continue
if only_coefficients:
_io = IOSystem()
if key not in _io.__coefficients__ + ['unit']:
continue
file_name = io_ini.get('files', key)
nr_index_col = io_ini.get(
'files', key + '_nr_index_col', fallback=None)
nr_header = io_ini.get('files', key + '_nr_header', fallback=None)
if (nr_index_col is None) or (nr_header is None):
raise ReadError(
'Index or column specification missing for {}'.
format(str(file_name)))
return None
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
if _index_col == [0]:
_index_col = 0
if _header == [0]:
_header = 0
file = os.path.join(path, file_name)
logging.info('Load data from {}'.format(file))
if (os.path.splitext(file)[1] == '.pkl' or
os.path.splitext(file)[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(file))
else:
setattr(ret_system, key,
pd.read_table(file,
index_col=_index_col,
header=_header))
if recursive:
# look for subfolder in the given path
subfolder_list = os.walk(path).__next__()[1]
# loop all subfolder and append extension based on
# ini file in subfolder
for subfolder in subfolder_list:
subini_file_name = subini.get(subfolder)
subpath = os.path.abspath(os.path.join(path, subfolder))
if not subini_file_name:
_inifound = False
for file in os.listdir(subpath):
if os.path.splitext(file)[1] == '.ini':
if _inifound:
raise ReadError(
'Found multiple ini files in subfolder '
'{} - specify one'.format(subpath))
return None
subini_file_name = file
_inifound = True
if not _inifound:
continue
# read the ini
subio_ini = configparser.RawConfigParser()
subio_ini.optionxform = lambda option: option
subio_ini.read(os.path.join(subpath, subini_file_name))
systemtype = subio_ini.get('systemtype', 'systemtype',
fallback=None)
name = subio_ini.get('meta', 'name',
fallback=os.path.splitext(
subini_file_name)[0])
if systemtype == 'IOSystem':
raise ReadError('IOSystem found in subfolder {} - '
'only extensions expected'.format(subpath))
return None
elif systemtype == 'Extension':
sub_system = Extension(name=name)
else:
raise ReadError('System not defined in ini')
return None
for key in subio_ini['meta']:
setattr(sub_system, key, subio_ini.get('meta', key,
fallback=None))
for key in subio_ini['files']:
if '_nr_index_col' in key:
continue
if '_nr_header' in key:
continue
if only_coefficients:
_ext = Extension('temp')
if key not in _ext.__coefficients__ + ['unit']:
continue
file_name = subio_ini.get('files', key)
nr_index_col = subio_ini.get('files', key + '_nr_index_col',
fallback=None)
nr_header = subio_ini.get('files', key + '_nr_header',
fallback=None)
if (nr_index_col is None) or (nr_header is None):
raise ReadError('Index or column specification missing '
'for {}'.format(str(file_name)))
return None
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
if _index_col == [0]:
_index_col = 0
if _header == [0]:
_header = 0
file = os.path.join(subpath, file_name)
logging.info('Load data from {}'.format(file))
if (os.path.splitext(file)[1] == '.pkl' or
os.path.splitext(file)[1] == '.pickle'):
setattr(sub_system, key,
pd.read_pickle(file))
else:
setattr(sub_system, key,
pd.read_table(file,
index_col=_index_col,
header=_header))
# get valid python name from folder
def clean(varStr):
return re.sub('\W|^(?=\d)', '_', str(varStr))
setattr(ret_system, clean(subfolder), sub_system)
return ret_system | python | def _load_ini_based_io(path, recursive=False, ini=None,
subini={}, include_core=True,
only_coefficients=False):
""" DEPRECATED: For convert a previous version to the new json format
Loads a IOSystem or Extension from a ini files
This function can be used to load a IOSystem or Extension specified in a
ini file. DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed, in all other case
the tables are assumed to be in .txt format.
Parameters
----------
path : string
path or ini file name for the data to load
recursive : boolean, optional
If True, load also the data in the subfolders and add them as
extensions to the IOSystem (in that case path must point to the root).
Only first order subfolders are considered (no subfolders in
subfolders) and if a folder does not contain a ini file it's skipped.
Use the subini parameter in case of multiple ini files in a subfolder.
Attribute names of the extensions in the IOSystem are based on the
subfolder name. Default is False
ini : string, optional
If there are several ini files in the root folder, take this one for
loading the data. If None (default) take the ini found in the folder,
error if several are found
subini : dict, optional
If there are multiple ini in the subfolder, use the ini given in the
dict. Format: 'subfoldername':'ininame'. If a key for a subfolder is
not found or None (default), the ini found in the folder will be taken,
error if several are found
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
Returns
-------
IOSystem or Extension class depending on systemtype in the ini file
None in case of errors
"""
# check path and given parameter
ini_file_name = None
path = os.path.abspath(os.path.normpath(path))
if os.path.splitext(path)[1] == '.ini':
(path, ini_file_name) = os.path.split(path)
if ini:
ini_file_name = ini
if not os.path.exists(path):
raise ReadError('Given path does not exist')
return None
if not ini_file_name:
_inifound = False
for file in os.listdir(path):
if os.path.splitext(file)[1] == '.ini':
if _inifound:
raise ReadError(
'Found multiple ini files in folder - specify one')
return None
ini_file_name = file
_inifound = True
# read the ini
io_ini = configparser.RawConfigParser()
io_ini.optionxform = lambda option: option
io_ini.read(os.path.join(path, ini_file_name))
systemtype = io_ini.get('systemtype', 'systemtype', fallback=None)
name = io_ini.get('meta', 'name',
fallback=os.path.splitext(ini_file_name)[0])
if systemtype == 'IOSystem':
ret_system = IOSystem(name=name)
elif systemtype == 'Extension':
ret_system = Extension(name=name)
else:
raise ReadError('System not defined in ini')
return None
for key in io_ini['meta']:
setattr(ret_system, key, io_ini.get('meta', key, fallback=None))
for key in io_ini['files']:
if '_nr_index_col' in key:
continue
if '_nr_header' in key:
continue
if not include_core:
not_to_load = ['A', 'L', 'Z']
if key in not_to_load:
continue
if only_coefficients:
_io = IOSystem()
if key not in _io.__coefficients__ + ['unit']:
continue
file_name = io_ini.get('files', key)
nr_index_col = io_ini.get(
'files', key + '_nr_index_col', fallback=None)
nr_header = io_ini.get('files', key + '_nr_header', fallback=None)
if (nr_index_col is None) or (nr_header is None):
raise ReadError(
'Index or column specification missing for {}'.
format(str(file_name)))
return None
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
if _index_col == [0]:
_index_col = 0
if _header == [0]:
_header = 0
file = os.path.join(path, file_name)
logging.info('Load data from {}'.format(file))
if (os.path.splitext(file)[1] == '.pkl' or
os.path.splitext(file)[1] == '.pickle'):
setattr(ret_system, key,
pd.read_pickle(file))
else:
setattr(ret_system, key,
pd.read_table(file,
index_col=_index_col,
header=_header))
if recursive:
# look for subfolder in the given path
subfolder_list = os.walk(path).__next__()[1]
# loop all subfolder and append extension based on
# ini file in subfolder
for subfolder in subfolder_list:
subini_file_name = subini.get(subfolder)
subpath = os.path.abspath(os.path.join(path, subfolder))
if not subini_file_name:
_inifound = False
for file in os.listdir(subpath):
if os.path.splitext(file)[1] == '.ini':
if _inifound:
raise ReadError(
'Found multiple ini files in subfolder '
'{} - specify one'.format(subpath))
return None
subini_file_name = file
_inifound = True
if not _inifound:
continue
# read the ini
subio_ini = configparser.RawConfigParser()
subio_ini.optionxform = lambda option: option
subio_ini.read(os.path.join(subpath, subini_file_name))
systemtype = subio_ini.get('systemtype', 'systemtype',
fallback=None)
name = subio_ini.get('meta', 'name',
fallback=os.path.splitext(
subini_file_name)[0])
if systemtype == 'IOSystem':
raise ReadError('IOSystem found in subfolder {} - '
'only extensions expected'.format(subpath))
return None
elif systemtype == 'Extension':
sub_system = Extension(name=name)
else:
raise ReadError('System not defined in ini')
return None
for key in subio_ini['meta']:
setattr(sub_system, key, subio_ini.get('meta', key,
fallback=None))
for key in subio_ini['files']:
if '_nr_index_col' in key:
continue
if '_nr_header' in key:
continue
if only_coefficients:
_ext = Extension('temp')
if key not in _ext.__coefficients__ + ['unit']:
continue
file_name = subio_ini.get('files', key)
nr_index_col = subio_ini.get('files', key + '_nr_index_col',
fallback=None)
nr_header = subio_ini.get('files', key + '_nr_header',
fallback=None)
if (nr_index_col is None) or (nr_header is None):
raise ReadError('Index or column specification missing '
'for {}'.format(str(file_name)))
return None
_index_col = list(range(int(nr_index_col)))
_header = list(range(int(nr_header)))
if _index_col == [0]:
_index_col = 0
if _header == [0]:
_header = 0
file = os.path.join(subpath, file_name)
logging.info('Load data from {}'.format(file))
if (os.path.splitext(file)[1] == '.pkl' or
os.path.splitext(file)[1] == '.pickle'):
setattr(sub_system, key,
pd.read_pickle(file))
else:
setattr(sub_system, key,
pd.read_table(file,
index_col=_index_col,
header=_header))
# get valid python name from folder
def clean(varStr):
return re.sub('\W|^(?=\d)', '_', str(varStr))
setattr(ret_system, clean(subfolder), sub_system)
return ret_system | DEPRECATED: For converting a previous version to the new json format
Loads an IOSystem or Extension from ini files
This function can be used to load an IOSystem or Extension specified in an
ini file. DataFrames (tables) are loaded from text or binary pickle files.
For the latter, the extension .pkl or .pickle is assumed; in all other cases
the tables are assumed to be in .txt format.
Parameters
----------
path : string
path or ini file name for the data to load
recursive : boolean, optional
If True, load also the data in the subfolders and add them as
extensions to the IOSystem (in that case path must point to the root).
Only first order subfolders are considered (no subfolders in
subfolders) and if a folder does not contain a ini file it's skipped.
Use the subini parameter in case of multiple ini files in a subfolder.
Attribute names of the extensions in the IOSystem are based on the
subfolder name. Default is False
ini : string, optional
If there are several ini files in the root folder, take this one for
loading the data. If None (default) take the ini found in the folder,
error if several are found
subini : dict, optional
If there are multiple ini in the subfolder, use the ini given in the
dict. Format: 'subfoldername':'ininame'. If a key for a subfolder is
not found or None (default), the ini found in the folder will be taken,
error if several are found
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
Returns
-------
IOSystem or Extension class depending on systemtype in the ini file
None in case of errors | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L434-L676 |
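A minimal usage sketch for the deprecated loader above; the folder path is a placeholder for a legacy ini-based storage.
from pymrio.core.fileio import _load_ini_based_io

# Load a legacy ini-based IOSystem, including extensions found in
# first-order subfolders; '/tmp/legacy_mrio' is a placeholder path.
io = _load_ini_based_io('/tmp/legacy_mrio', recursive=True,
                        include_core=True, only_coefficients=False)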
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_exio12_ext | def parse_exio12_ext(ext_file, index_col, name, drop_compartment=True,
version=None, year=None, iosystem=None, sep=','):
""" Parse an EXIOBASE version 1 or 2 like extension file into pymrio.Extension
EXIOBASE-like extension files are assumed to have two
rows which are used as a column multiindex (region and sector)
and up to three columns for the row index (see Parameters).
For EXIOBASE 3 - extension can be loaded directly with pymrio.load
Notes
-----
So far this only parses factor of production extensions F (not
final demand extensions FY nor coefficients S).
Parameters
----------
ext_file : string or pathlib.Path
File to parse
index_col : int
The number of columns (1 to 3) at the beginning of the file
to use as the index. The order of the index_col must be
- 1 index column: ['stressor']
- 2 index columns: ['stressor', 'unit']
- 3 index columns: ['stressor', 'compartment', 'unit']
- > 3: everything up to three index columns will be removed
name : string
Name of the extension
drop_compartment : boolean, optional
If True (default) removes the compartment from the index.
version : string, optional
see pymrio.Extension
iosystem : string, optional
see pymrio.Extension
year : string or int
see pymrio.Extension
sep : string, optional
Delimiter to use; default ','
Returns
-------
pymrio.Extension
with F (and unit if available)
"""
ext_file = os.path.abspath(str(ext_file))
F = pd.read_table(
ext_file,
header=[0, 1],
index_col=list(range(index_col)),
sep=sep)
F.columns.names = ['region', 'sector']
if index_col == 1:
F.index.names = ['stressor']
elif index_col == 2:
F.index.names = ['stressor', 'unit']
elif index_col == 3:
F.index.names = ['stressor', 'compartment', 'unit']
else:
F.reset_index(level=list(range(3, index_col)),
drop=True,
inplace=True)
F.index.names = ['stressor', 'compartment', 'unit']
unit = None
if index_col > 1:
unit = pd.DataFrame(F.iloc[:, 0].
reset_index(level='unit').unit)
F.reset_index(level='unit', drop=True, inplace=True)
if drop_compartment:
F.reset_index(level='compartment',
drop=True, inplace=True)
unit.reset_index(level='compartment',
drop=True, inplace=True)
return Extension(name=name,
F=F,
unit=unit,
iosystem=iosystem,
version=version,
year=year,
) | python | def parse_exio12_ext(ext_file, index_col, name, drop_compartment=True,
version=None, year=None, iosystem=None, sep=','):
""" Parse an EXIOBASE version 1 or 2 like extension file into pymrio.Extension
EXIOBASE-like extension files are assumed to have two
rows which are used as a column multiindex (region and sector)
and up to three columns for the row index (see Parameters).
For EXIOBASE 3 - extension can be loaded directly with pymrio.load
Notes
-----
So far this only parses factor of production extensions F (not
final demand extensions FY nor coefficients S).
Parameters
----------
ext_file : string or pathlib.Path
File to parse
index_col : int
The number of columns (1 to 3) at the beginning of the file
to use as the index. The order of the index_col must be
- 1 index column: ['stressor']
- 2 index columns: ['stressor', 'unit']
- 3 index columns: ['stressor', 'compartment', 'unit']
- > 3: everything up to three index columns will be removed
name : string
Name of the extension
drop_compartment : boolean, optional
If True (default) removes the compartment from the index.
version : string, optional
see pymrio.Extension
iosystem : string, optional
see pymrio.Extension
year : string or int
see pymrio.Extension
sep : string, optional
Delimiter to use; default ','
Returns
-------
pymrio.Extension
with F (and unit if available)
"""
ext_file = os.path.abspath(str(ext_file))
F = pd.read_table(
ext_file,
header=[0, 1],
index_col=list(range(index_col)),
sep=sep)
F.columns.names = ['region', 'sector']
if index_col == 1:
F.index.names = ['stressor']
elif index_col == 2:
F.index.names = ['stressor', 'unit']
elif index_col == 3:
F.index.names = ['stressor', 'compartment', 'unit']
else:
F.reset_index(level=list(range(3, index_col)),
drop=True,
inplace=True)
F.index.names = ['stressor', 'compartment', 'unit']
unit = None
if index_col > 1:
unit = pd.DataFrame(F.iloc[:, 0].
reset_index(level='unit').unit)
F.reset_index(level='unit', drop=True, inplace=True)
if drop_compartment:
F.reset_index(level='compartment',
drop=True, inplace=True)
unit.reset_index(level='compartment',
drop=True, inplace=True)
return Extension(name=name,
F=F,
unit=unit,
iosystem=iosystem,
version=version,
year=year,
) | Parse an EXIOBASE version 1 or 2 like extension file into pymrio.Extension
EXIOBASE-like extension files are assumed to have two
rows which are used as a column multiindex (region and sector)
and up to three columns for the row index (see Parameters).
For EXIOBASE 3 - extension can be loaded directly with pymrio.load
Notes
-----
So far this only parses factor of production extensions F (not
final demand extensions FY nor coefficients S).
Parameters
----------
ext_file : string or pathlib.Path
File to parse
index_col : int
The number of columns (1 to 3) at the beginning of the file
to use as the index. The order of the index_col must be
- 1 index column: ['stressor']
- 2 index columns: ['stressor', 'unit']
- 3 index columns: ['stressor', 'compartment', 'unit']
- > 3: everything up to three index columns will be removed
name : string
Name of the extension
drop_compartment : boolean, optional
If True (default) removes the compartment from the index.
version : string, optional
see pymrio.Extension
iosystem : string, optional
see pymrio.Extension
year : string or int
see pymrio.Extension
sep : string, optional
Delimiter to use; default ','
Returns
-------
pymrio.Extension
with F (and unit if available) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L66-L163 |
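A hedged sketch of parsing an EXIOBASE 1/2 style emission file with the function above; the file name is a placeholder and tab separation is an assumption.
from pymrio.tools.ioparser import parse_exio12_ext

emissions = parse_exio12_ext('mrEmissions_version2.2.2.txt',  # placeholder file name
                             index_col=3,            # stressor, compartment, unit
                             name='emissions',
                             drop_compartment=False,
                             sep='\t')               # assumed tab-separated
print(emissions.F.head())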
konstantinstadler/pymrio | pymrio/tools/ioparser.py | get_exiobase12_version | def get_exiobase12_version(filename):
""" Returns the EXIOBASE version for the given filename,
None if not found
"""
try:
ver_match = re.search(r'(\d+\w*(\.|\-|\_))*\d+\w*', filename)
version = ver_match.string[ver_match.start():ver_match.end()]
if re.search('\_\d\d\d\d', version[-5:]):
version = version[:-5]
except AttributeError:
version = None
return version | python | def get_exiobase12_version(filename):
""" Returns the EXIOBASE version for the given filename,
None if not found
"""
try:
ver_match = re.search(r'(\d+\w*(\.|\-|\_))*\d+\w*', filename)
version = ver_match.string[ver_match.start():ver_match.end()]
if re.search('\_\d\d\d\d', version[-5:]):
version = version[:-5]
except AttributeError:
version = None
return version | Returns the EXIOBASE version for the given filename,
None if not found | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L166-L178 |
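Based on the regex above, the version is taken from the file name and a trailing year suffix is stripped; a short illustration (expected values inferred from the code, not verified output):
from pymrio.tools.ioparser import get_exiobase12_version

print(get_exiobase12_version('mrIot_version2.2.2.txt'))  # expected: '2.2.2'
print(get_exiobase12_version('mrIot.txt'))               # expected: None (no version found)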
konstantinstadler/pymrio | pymrio/tools/ioparser.py | get_exiobase_files | def get_exiobase_files(path, coefficients=True):
""" Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
"""
path = os.path.normpath(str(path))
if coefficients:
exio_core_regex = dict(
# don't match file if starting with _
A=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
S_factor_inputs=re.compile('(?<!\_)mrFactorInputs.*txt'),
S_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
S_materials=re.compile('(?<!\_)mrMaterials.*txt'),
S_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_resources=re.compile('(?<!\_)mrFDResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
else:
exio_core_regex = dict(
# don't match file if starting with _
Z=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
F_fac=re.compile('(?<!\_)mrFactorInputs.*txt'),
F_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
F_materials=re.compile('(?<!\_)mrMaterials.*txt'),
F_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
repo_content = get_repo_content(path)
exio_files = dict()
for kk, vv in exio_core_regex.items():
found_file = [vv.search(ff).string for ff in repo_content.filelist
if vv.search(ff)]
if len(found_file) > 1:
logging.warning(
"Multiple files found for {}: {}"
" - USING THE FIRST ONE".format(kk, found_file))
found_file = found_file[0:1]
elif len(found_file) == 0:
continue
else:
if repo_content.iszip:
format_para = sniff_csv_format(found_file[0],
zip_file=path)
else:
format_para = sniff_csv_format(os.path.join(path,
found_file[0]))
exio_files[kk] = dict(
root_repo=path,
file_path=found_file[0],
version=get_exiobase12_version(
os.path.basename(found_file[0])),
index_rows=format_para['nr_header_row'],
index_col=format_para['nr_index_col'],
unit_col=format_para['nr_index_col'] - 1,
sep=format_para['sep'])
return exio_files | python | def get_exiobase_files(path, coefficients=True):
""" Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict
"""
path = os.path.normpath(str(path))
if coefficients:
exio_core_regex = dict(
# don't match file if starting with _
A=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
S_factor_inputs=re.compile('(?<!\_)mrFactorInputs.*txt'),
S_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
S_materials=re.compile('(?<!\_)mrMaterials.*txt'),
S_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_resources=re.compile('(?<!\_)mrFDResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
else:
exio_core_regex = dict(
# don't match file if starting with _
Z=re.compile('(?<!\_)mrIot.*txt'),
Y=re.compile('(?<!\_)mrFinalDemand.*txt'),
F_fac=re.compile('(?<!\_)mrFactorInputs.*txt'),
F_emissions=re.compile('(?<!\_)mrEmissions.*txt'),
F_materials=re.compile('(?<!\_)mrMaterials.*txt'),
F_resources=re.compile('(?<!\_)mrResources.*txt'),
FY_emissions=re.compile('(?<!\_)mrFDEmissions.*txt'),
FY_materials=re.compile('(?<!\_)mrFDMaterials.*txt'),
)
repo_content = get_repo_content(path)
exio_files = dict()
for kk, vv in exio_core_regex.items():
found_file = [vv.search(ff).string for ff in repo_content.filelist
if vv.search(ff)]
if len(found_file) > 1:
logging.warning(
"Multiple files found for {}: {}"
" - USING THE FIRST ONE".format(kk, found_file))
found_file = found_file[0:1]
elif len(found_file) == 0:
continue
else:
if repo_content.iszip:
format_para = sniff_csv_format(found_file[0],
zip_file=path)
else:
format_para = sniff_csv_format(os.path.join(path,
found_file[0]))
exio_files[kk] = dict(
root_repo=path,
file_path=found_file[0],
version=get_exiobase12_version(
os.path.basename(found_file[0])),
index_rows=format_para['nr_header_row'],
index_col=format_para['nr_index_col'],
unit_col=format_para['nr_index_col'] - 1,
sep=format_para['sep'])
return exio_files | Gets the EXIOBASE files in path (which can be a zip file)
Parameters
----------
path: str or pathlib.Path
Path to exiobase files or zip file
coefficients: boolean, optional
If True (default), considers the mrIot file as A matrix,
and the extensions as S matrices. Otherwise as Z and F, respectively
Returns
-------
dict of dict | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L181-L253 |
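A short sketch of inspecting a downloaded EXIOBASE archive with the helper above; the zip path is a placeholder.
from pymrio.tools.ioparser import get_exiobase_files

exio_files = get_exiobase_files('/tmp/mrIOT_PxP_ita_coefficient_version2.2.2.zip')
for component, para in exio_files.items():
    # each entry documents where the component file lives and how to read it
    print(component, para['file_path'], para['version'], para['sep'])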
konstantinstadler/pymrio | pymrio/tools/ioparser.py | generic_exiobase12_parser | def generic_exiobase12_parser(exio_files, system=None):
""" Generic EXIOBASE version 1 and 2 parser
This is used internally by parse_exiobase1 / 2 functions to
parse exiobase files. In most cases, these top-level functions
should just work, but in case of archived exiobase versions
it might be necessary to use this low-level function directly.
Parameters
----------
exio_files: dict of dict
system: str (pxp or ixi)
Only used for the metadata
"""
version = ' & '.join({dd.get('version', '')
for dd in exio_files.values()
if dd.get('version', '')})
meta_rec = MRIOMetaData(system=system,
name="EXIOBASE",
version=version)
if len(version) == 0:
meta_rec.note("No version information found, assuming exiobase 1")
meta_rec.change_meta('version', 1)
version = '1'
core_components = ['A', 'Y', 'Z']
core_data = dict()
ext_data = dict()
for tt, tpara in exio_files.items():
full_file_path = os.path.join(tpara['root_repo'], tpara['file_path'])
logging.debug("Parse {}".format(full_file_path))
if tpara['root_repo'][-3:] == 'zip':
with zipfile.ZipFile(tpara['root_repo'], 'r') as zz:
raw_data = pd.read_table(
zz.open(tpara['file_path']),
index_col=list(range(tpara['index_col'])),
header=list(range(tpara['index_rows'])))
else:
raw_data = pd.read_table(
full_file_path,
index_col=list(range(tpara['index_col'])),
header=list(range(tpara['index_rows'])))
meta_rec._add_fileio('EXIOBASE data {} parsed from {}'.format(
tt, full_file_path))
if tt in core_components:
core_data[tt] = raw_data
else:
ext_data[tt] = raw_data
for table in core_data:
core_data[table].index.names = ['region', 'sector', 'unit']
if table == 'A' or table == 'Z':
core_data[table].columns.names = ['region', 'sector']
_unit = pd.DataFrame(
core_data[table].iloc[:, 0]).reset_index(
level='unit').unit
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
if table == 'Y':
core_data[table].columns.names = ['region', 'category']
core_data[table].reset_index(level='unit', drop=True, inplace=True)
core_data['unit'] = _unit
mon_unit = core_data['unit'].iloc[0, 0]
if '/' in mon_unit:
mon_unit = mon_unit.split('/')[0]
core_data['unit'].unit = mon_unit
extensions = dict()
for tt, tpara in exio_files.items():
if tt in core_components:
continue
ext_name = '_'.join(tt.split('_')[1:])
table_type = tt.split('_')[0]
if tpara['index_col'] == 3:
ext_data[tt].index.names = [
'stressor', 'compartment', 'unit']
elif tpara['index_col'] == 2:
ext_data[tt].index.names = [
'stressor', 'unit']
else:
raise ParserError('Unknown EXIOBASE file structure')
if table_type == 'FY':
ext_data[tt].columns.names = [
'region', 'category']
else:
ext_data[tt].columns.names = [
'region', 'sector']
try:
_unit = pd.DataFrame(
ext_data[tt].iloc[:, 0]
).reset_index(level='unit').unit
except IndexError:
_unit = pd.DataFrame(
ext_data[tt].iloc[:, 0])
_unit.columns = ['unit']
_unit['unit'] = 'undef'
_unit.reset_index(level='unit', drop=True, inplace=True)
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
_new_unit = _unit.unit.str.replace('/'+mon_unit, '')
_new_unit[_new_unit == ''] = _unit.unit[
_new_unit == ''].str.replace('/', '')
_unit.unit = _new_unit
ext_data[tt].reset_index(level='unit', drop=True, inplace=True)
ext_dict = extensions.get(ext_name, dict())
ext_dict.update({table_type: ext_data[tt],
'unit': _unit,
'name': ext_name})
extensions.update({ext_name: ext_dict})
if version[0] == '1':
year = 2000
elif version[0] == '2':
year = 2000
elif version[0] == '3':
raise ParserError(
"This function can not be used to parse EXIOBASE 3")
else:
logging.warning("Unknown EXIOBASE version")
year = None
return IOSystem(version=version,
price='current',
year=year,
meta=meta_rec,
**dict(core_data, **extensions)) | python | def generic_exiobase12_parser(exio_files, system=None):
""" Generic EXIOBASE version 1 and 2 parser
This is used internally by parse_exiobase1 / 2 functions to
parse exiobase files. In most cases, these top-level functions
should just work, but in case of archived exiobase versions
it might be necessary to use this low-level function directly.
Parameters
----------
exio_files: dict of dict
system: str (pxp or ixi)
Only used for the metadata
"""
version = ' & '.join({dd.get('version', '')
for dd in exio_files.values()
if dd.get('version', '')})
meta_rec = MRIOMetaData(system=system,
name="EXIOBASE",
version=version)
if len(version) == 0:
meta_rec.note("No version information found, assuming exiobase 1")
meta_rec.change_meta('version', 1)
version = '1'
core_components = ['A', 'Y', 'Z']
core_data = dict()
ext_data = dict()
for tt, tpara in exio_files.items():
full_file_path = os.path.join(tpara['root_repo'], tpara['file_path'])
logging.debug("Parse {}".format(full_file_path))
if tpara['root_repo'][-3:] == 'zip':
with zipfile.ZipFile(tpara['root_repo'], 'r') as zz:
raw_data = pd.read_table(
zz.open(tpara['file_path']),
index_col=list(range(tpara['index_col'])),
header=list(range(tpara['index_rows'])))
else:
raw_data = pd.read_table(
full_file_path,
index_col=list(range(tpara['index_col'])),
header=list(range(tpara['index_rows'])))
meta_rec._add_fileio('EXIOBASE data {} parsed from {}'.format(
tt, full_file_path))
if tt in core_components:
core_data[tt] = raw_data
else:
ext_data[tt] = raw_data
for table in core_data:
core_data[table].index.names = ['region', 'sector', 'unit']
if table == 'A' or table == 'Z':
core_data[table].columns.names = ['region', 'sector']
_unit = pd.DataFrame(
core_data[table].iloc[:, 0]).reset_index(
level='unit').unit
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
if table == 'Y':
core_data[table].columns.names = ['region', 'category']
core_data[table].reset_index(level='unit', drop=True, inplace=True)
core_data['unit'] = _unit
mon_unit = core_data['unit'].iloc[0, 0]
if '/' in mon_unit:
mon_unit = mon_unit.split('/')[0]
core_data['unit'].unit = mon_unit
extensions = dict()
for tt, tpara in exio_files.items():
if tt in core_components:
continue
ext_name = '_'.join(tt.split('_')[1:])
table_type = tt.split('_')[0]
if tpara['index_col'] == 3:
ext_data[tt].index.names = [
'stressor', 'compartment', 'unit']
elif tpara['index_col'] == 2:
ext_data[tt].index.names = [
'stressor', 'unit']
else:
raise ParserError('Unknown EXIOBASE file structure')
if table_type == 'FY':
ext_data[tt].columns.names = [
'region', 'category']
else:
ext_data[tt].columns.names = [
'region', 'sector']
try:
_unit = pd.DataFrame(
ext_data[tt].iloc[:, 0]
).reset_index(level='unit').unit
except IndexError:
_unit = pd.DataFrame(
ext_data[tt].iloc[:, 0])
_unit.columns = ['unit']
_unit['unit'] = 'undef'
_unit.reset_index(level='unit', drop=True, inplace=True)
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
_unit = pd.DataFrame(_unit)
_unit.columns = ['unit']
_new_unit = _unit.unit.str.replace('/'+mon_unit, '')
_new_unit[_new_unit == ''] = _unit.unit[
_new_unit == ''].str.replace('/', '')
_unit.unit = _new_unit
ext_data[tt].reset_index(level='unit', drop=True, inplace=True)
ext_dict = extensions.get(ext_name, dict())
ext_dict.update({table_type: ext_data[tt],
'unit': _unit,
'name': ext_name})
extensions.update({ext_name: ext_dict})
if version[0] == '1':
year = 2000
elif version[0] == '2':
year = 2000
elif version[0] == '3':
raise ParserError(
"This function can not be used to parse EXIOBASE 3")
else:
logging.warning("Unknown EXIOBASE version")
year = None
return IOSystem(version=version,
price='current',
year=year,
meta=meta_rec,
**dict(core_data, **extensions)) | Generic EXIOBASE version 1 and 2 parser
This is used internally by parse_exiobase1 / 2 functions to
parse exiobase files. In most cases, these top-level functions
should just work, but in case of archived exiobase versions
it might be necessary to use this low-level function directly.
Parameters
----------
exio_files: dict of dict
system: str (pxp or ixi)
Only used for the metadata | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L256-L396 |
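The low-level route mentioned in the docstring can be sketched as follows; the folder path is a placeholder.
from pymrio.tools.ioparser import get_exiobase_files, generic_exiobase12_parser

exio_files = get_exiobase_files('/tmp/exiobase2_pxp')    # placeholder folder or zip
io = generic_exiobase12_parser(exio_files, system='pxp')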
konstantinstadler/pymrio | pymrio/tools/ioparser.py | _get_MRIO_system | def _get_MRIO_system(path):
""" Extract system information (ixi, pxp) from file path.
Returns 'ixi' or 'pxp', None if undetermined
"""
ispxp = True if re.search('pxp', path, flags=re.IGNORECASE) else False
isixi = True if re.search('ixi', path, flags=re.IGNORECASE) else False
if ispxp == isixi:
system = None
else:
system = 'pxp' if ispxp else 'ixi'
return system | python | def _get_MRIO_system(path):
""" Extract system information (ixi, pxp) from file path.
Returns 'ixi' or 'pxp', None if undetermined
"""
ispxp = True if re.search('pxp', path, flags=re.IGNORECASE) else False
isixi = True if re.search('ixi', path, flags=re.IGNORECASE) else False
if ispxp == isixi:
system = None
else:
system = 'pxp' if ispxp else 'ixi'
return system | Extract system information (ixi, pxp) from file path.
Returns 'ixi' or 'pxp', None if undetermined | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L399-L411
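Illustration of the path-based detection; the expected outputs are inferred from the code above.
from pymrio.tools.ioparser import _get_MRIO_system

print(_get_MRIO_system('/data/mrIOT_PxP_ita_coefficient_version2.2.2.zip'))  # expected: 'pxp'
print(_get_MRIO_system('/data/exiobase_download.zip'))                       # expected: None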
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_exiobase1 | def parse_exiobase1(path):
""" Parse the exiobase1 raw data files.
This function works with
- pxp_ita_44_regions_coeff_txt
- ixi_fpa_44_regions_coeff_txt
- pxp_ita_44_regions_coeff_src_txt
- ixi_fpa_44_regions_coeff_src_txt
which can be found on www.exiobase.eu
The parser works with the compressed (zip) files as well as the unpacked
files.
Parameters
----------
path : pathlib.Path or string
Path of the exiobase 1 data
Returns
-------
pymrio.IOSystem with exio1 data
"""
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
return io | python | def parse_exiobase1(path):
""" Parse the exiobase1 raw data files.
This function works with
- pxp_ita_44_regions_coeff_txt
- ixi_fpa_44_regions_coeff_txt
- pxp_ita_44_regions_coeff_src_txt
- ixi_fpa_44_regions_coeff_src_txt
which can be found on www.exiobase.eu
The parser works with the compressed (zip) files as well as the unpacked
files.
Parameters
----------
path : pathlib.Path or string
Path of the exiobase 1 data
Returns
-------
pymrio.IOSystem with exio1 data
"""
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
return io | Parse the exiobase1 raw data files.
This function works with
- pxp_ita_44_regions_coeff_txt
- ixi_fpa_44_regions_coeff_txt
- pxp_ita_44_regions_coeff_src_txt
- ixi_fpa_44_regions_coeff_src_txt
which can be found on www.exiobase.eu
The parser works with the compressed (zip) files as well as the unpacked
files.
Parameters
----------
path : pathlib.Path or string
Path of the exiobase 1 data
Returns
-------
pymrio.IOSystem with exio1 data | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L414-L451 |
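Typical usage sketch for the EXIOBASE 1 parser; the zip path is a placeholder, and calc_all is the standard pymrio routine for computing the missing accounts.
from pymrio.tools.ioparser import parse_exiobase1

exio1 = parse_exiobase1('/tmp/pxp_ita_44_regions_coeff_txt.zip')  # placeholder download
exio1.calc_all()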
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_exiobase2 | def parse_exiobase2(path, charact=True, popvector='exio2'):
""" Parse the exiobase 2.2.2 source files for the IOSystem
The function parses the product by product and industry by industry source
files in the coefficient form (A and S).
Filenames are hardcoded in the parser - for any other file naming scheme the
code has to be adapted. Check git comments to find older versions.
Parameters
----------
path : string or pathlib.Path
Path to the EXIOBASE source files
charact : string or boolean, optional
Filename with path to the characterisation matrices for the extensions
(xls). This is provided together with the EXIOBASE system and given as
an xls file. The four sheets Q_factorinputs, Q_emission, Q_materials
and Q_resources are read and used to generate one new extension with
the impacts.
If set to True, the characterisation file found in path is used (
can be in the zip or extracted). If a string, it is assumed that
it points to a valid characterisation file. If False or None, no
characterisation file will be used.
popvector : string or pd.DataFrame, optional
The population vector for the countries. This can be given as
pd.DataFrame(index = population, columns = countrynames) or, (default)
will be taken from the pymrio module. If popvector = None no population
data will be passed to the IOSystem.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 2 data
Raises
------
ParserError
If the exiobase source files are not complete in the given path
"""
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
# read the characterisation matrices if available
# and build one extension with the impacts
if charact:
logging.debug('Parse characterisation matrix')
# dict with correspondence to the extensions
Qsheets = {'Q_factorinputs': 'factor_inputs',
'Q_emission': 'emissions',
'Q_materials': 'materials',
'Q_resources': 'resources'}
Q_head_col = dict()
Q_head_row = dict()
Q_head_col_rowname = dict()
Q_head_col_rowunit = dict()
# Q_head_col_metadata = dict()
# number of cols containing row headers at the beginning
Q_head_col['Q_emission'] = 4
# number of rows containing col headers at the top - this will be
# skipped
Q_head_row['Q_emission'] = 3
# assuming the same classification as in the extensions
Q_head_col['Q_factorinputs'] = 2
Q_head_row['Q_factorinputs'] = 2
Q_head_col['Q_resources'] = 2
Q_head_row['Q_resources'] = 3
Q_head_col['Q_materials'] = 2
Q_head_row['Q_materials'] = 2
# column to use as name for the rows
Q_head_col_rowname['Q_emission'] = 1
Q_head_col_rowname['Q_factorinputs'] = 0
Q_head_col_rowname['Q_resources'] = 0
Q_head_col_rowname['Q_materials'] = 0
# column to use as unit for the rows which gives also the last column
# before the data
Q_head_col_rowunit['Q_emission'] = 3
Q_head_col_rowunit['Q_factorinputs'] = 1
Q_head_col_rowunit['Q_resources'] = 1
Q_head_col_rowunit['Q_materials'] = 1
if isinstance(charact, str):
charac_data = {Qname: pd.read_excel(
charact,
sheet_name=Qname,
skiprows=list(range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
else:
_content = get_repo_content(path)
charac_regex = re.compile('(?<!\_)(?<!\.)characterisation.*xlsx')
charac_files = [ff for ff in _content.filelist if
re.search(charac_regex, ff)]
if len(charac_files) > 1:
raise ParserError(
"Found multiple characcterisation files "
"in {} - specify one: {}".format(path, charac_files))
elif len(charac_files) == 0:
raise ParserError(
"No characcterisation file found "
"in {}".format(path))
else:
if _content.iszip:
with zipfile.ZipFile(path, 'r') as zz:
charac_data = {Qname: pd.read_excel(
zz.open(charac_files[0]),
sheet_name=Qname,
skiprows=list(
range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
else:
charac_data = {Qname: pd.read_excel(
os.path.join(path, charac_files[0]),
sheet_name=Qname,
skiprows=list(range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
_unit = dict()
# temp for the calculated impacts which then
# get summarized in the 'impact'
_impact = dict()
impact = dict()
for Qname in Qsheets:
# unfortunately the names in Q_emissions are
# not completely unique - fix that
if Qname == 'Q_emission':
_index = charac_data[Qname][Q_head_col_rowname[Qname]].copy()
_index.iloc[42] = _index.iloc[42] + ' 2008'
_index.iloc[43] = _index.iloc[43] + ' 2008'
_index.iloc[44] = _index.iloc[44] + ' 2010'
_index.iloc[45] = _index.iloc[45] + ' 2010'
charac_data[Qname][Q_head_col_rowname[Qname]] = _index
charac_data[Qname].index = (
charac_data[Qname][Q_head_col_rowname[Qname]])
_unit[Qname] = pd.DataFrame(
charac_data[Qname].iloc[:, Q_head_col_rowunit[Qname]])
_unit[Qname].columns = ['unit']
_unit[Qname].index.name = 'impact'
charac_data[Qname] = charac_data[Qname].iloc[
:, Q_head_col_rowunit[Qname]+1:]
charac_data[Qname].index.name = 'impact'
try:
_FY = io.__dict__[Qsheets[Qname]].FY.values
except AttributeError:
_FY = np.zeros([io.__dict__[Qsheets[Qname]].S.shape[0],
io.Y.shape[1]])
_impact[Qname] = {'S': charac_data[Qname].dot(
io.__dict__[Qsheets[Qname]].S.values),
'FY': charac_data[Qname].dot(_FY),
'unit': _unit[Qname]
}
impact['S'] = (_impact['Q_factorinputs']['S']
.append(_impact['Q_emission']['S'])
.append(_impact['Q_materials']['S'])
.append(_impact['Q_resources']['S']))
impact['FY'] = (_impact['Q_factorinputs']['FY']
.append(_impact['Q_emission']['FY'])
.append(_impact['Q_materials']['FY'])
.append(_impact['Q_resources']['FY']))
impact['S'].columns = io.emissions.S.columns
impact['FY'].columns = io.emissions.FY.columns
impact['unit'] = (_impact['Q_factorinputs']['unit']
.append(_impact['Q_emission']['unit'])
.append(_impact['Q_materials']['unit'])
.append(_impact['Q_resources']['unit']))
impact['name'] = 'impact'
io.impact = Extension(**impact)
if popvector == 'exio2':
logging.debug('Read population vector')
io.population = pd.read_table(os.path.join(PYMRIO_PATH['exio20'],
'./misc/population.txt'),
index_col=0).astype(float)
else:
io.population = popvector
return io | python | def parse_exiobase2(path, charact=True, popvector='exio2'):
""" Parse the exiobase 2.2.2 source files for the IOSystem
The function parses the product by product and industry by industry source
files in the coefficient form (A and S).
Filenames are hardcoded in the parser - for any other file naming scheme the
code has to be adapted. Check git comments to find older versions.
Parameters
----------
path : string or pathlib.Path
Path to the EXIOBASE source files
charact : string or boolean, optional
Filename with path to the characterisation matrices for the extensions
(xls). This is provided together with the EXIOBASE system and given as
an xls file. The four sheets Q_factorinputs, Q_emission, Q_materials
and Q_resources are read and used to generate one new extension with
the impacts.
If set to True, the characterisation file found in path is used (
can be in the zip or extracted). If a string, it is assumed that
it points to a valid characterisation file. If False or None, no
characterisation file will be used.
popvector : string or pd.DataFrame, optional
The population vector for the countries. This can be given as
pd.DataFrame(index = population, columns = countrynames) or, (default)
will be taken from the pymrio module. If popvector = None no population
data will be passed to the IOSystem.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 2 data
Raises
------
ParserError
If the exiobase source files are not complete in the given path
"""
path = os.path.abspath(os.path.normpath(str(path)))
exio_files = get_exiobase_files(path)
if len(exio_files) == 0:
raise ParserError("No EXIOBASE files found at {}".format(path))
system = _get_MRIO_system(path)
if not system:
logging.warning("Could not determine system (pxp or ixi)"
" set system parameter manually")
io = generic_exiobase12_parser(exio_files, system=system)
# read the characterisation matrices if available
# and build one extension with the impacts
if charact:
logging.debug('Parse characterisation matrix')
# dict with correspondence to the extensions
Qsheets = {'Q_factorinputs': 'factor_inputs',
'Q_emission': 'emissions',
'Q_materials': 'materials',
'Q_resources': 'resources'}
Q_head_col = dict()
Q_head_row = dict()
Q_head_col_rowname = dict()
Q_head_col_rowunit = dict()
# Q_head_col_metadata = dict()
# number of cols containing row headers at the beginning
Q_head_col['Q_emission'] = 4
# number of rows containing col headers at the top - this will be
# skipped
Q_head_row['Q_emission'] = 3
# assuming the same classification as in the extensions
Q_head_col['Q_factorinputs'] = 2
Q_head_row['Q_factorinputs'] = 2
Q_head_col['Q_resources'] = 2
Q_head_row['Q_resources'] = 3
Q_head_col['Q_materials'] = 2
Q_head_row['Q_materials'] = 2
# column to use as name for the rows
Q_head_col_rowname['Q_emission'] = 1
Q_head_col_rowname['Q_factorinputs'] = 0
Q_head_col_rowname['Q_resources'] = 0
Q_head_col_rowname['Q_materials'] = 0
# column to use as unit for the rows which gives also the last column
# before the data
Q_head_col_rowunit['Q_emission'] = 3
Q_head_col_rowunit['Q_factorinputs'] = 1
Q_head_col_rowunit['Q_resources'] = 1
Q_head_col_rowunit['Q_materials'] = 1
if isinstance(charact, str):
charac_data = {Qname: pd.read_excel(
charact,
sheet_name=Qname,
skiprows=list(range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
else:
_content = get_repo_content(path)
charac_regex = re.compile('(?<!\_)(?<!\.)characterisation.*xlsx')
charac_files = [ff for ff in _content.filelist if
re.search(charac_regex, ff)]
if len(charac_files) > 1:
raise ParserError(
"Found multiple characcterisation files "
"in {} - specify one: {}".format(path, charac_files))
elif len(charac_files) == 0:
raise ParserError(
"No characcterisation file found "
"in {}".format(path))
else:
if _content.iszip:
with zipfile.ZipFile(path, 'r') as zz:
charac_data = {Qname: pd.read_excel(
zz.open(charac_files[0]),
sheet_name=Qname,
skiprows=list(
range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
else:
charac_data = {Qname: pd.read_excel(
os.path.join(path, charac_files[0]),
sheet_name=Qname,
skiprows=list(range(0, Q_head_row[Qname])),
header=None)
for Qname in Qsheets}
_unit = dict()
# temp for the calculated impacts which then
# get summarized in the 'impact'
_impact = dict()
impact = dict()
for Qname in Qsheets:
# unfortunately the names in Q_emissions are
# not completely unique - fix that
if Qname == 'Q_emission':
_index = charac_data[Qname][Q_head_col_rowname[Qname]].copy()
_index.iloc[42] = _index.iloc[42] + ' 2008'
_index.iloc[43] = _index.iloc[43] + ' 2008'
_index.iloc[44] = _index.iloc[44] + ' 2010'
_index.iloc[45] = _index.iloc[45] + ' 2010'
charac_data[Qname][Q_head_col_rowname[Qname]] = _index
charac_data[Qname].index = (
charac_data[Qname][Q_head_col_rowname[Qname]])
_unit[Qname] = pd.DataFrame(
charac_data[Qname].iloc[:, Q_head_col_rowunit[Qname]])
_unit[Qname].columns = ['unit']
_unit[Qname].index.name = 'impact'
charac_data[Qname] = charac_data[Qname].iloc[
:, Q_head_col_rowunit[Qname]+1:]
charac_data[Qname].index.name = 'impact'
try:
_FY = io.__dict__[Qsheets[Qname]].FY.values
except AttributeError:
_FY = np.zeros([io.__dict__[Qsheets[Qname]].S.shape[0],
io.Y.shape[1]])
_impact[Qname] = {'S': charac_data[Qname].dot(
io.__dict__[Qsheets[Qname]].S.values),
'FY': charac_data[Qname].dot(_FY),
'unit': _unit[Qname]
}
impact['S'] = (_impact['Q_factorinputs']['S']
.append(_impact['Q_emission']['S'])
.append(_impact['Q_materials']['S'])
.append(_impact['Q_resources']['S']))
impact['FY'] = (_impact['Q_factorinputs']['FY']
.append(_impact['Q_emission']['FY'])
.append(_impact['Q_materials']['FY'])
.append(_impact['Q_resources']['FY']))
impact['S'].columns = io.emissions.S.columns
impact['FY'].columns = io.emissions.FY.columns
impact['unit'] = (_impact['Q_factorinputs']['unit']
.append(_impact['Q_emission']['unit'])
.append(_impact['Q_materials']['unit'])
.append(_impact['Q_resources']['unit']))
impact['name'] = 'impact'
io.impact = Extension(**impact)
if popvector == 'exio2':
logging.debug('Read population vector')
io.population = pd.read_table(os.path.join(PYMRIO_PATH['exio20'],
'./misc/population.txt'),
index_col=0).astype(float)
else:
io.population = popvector
return io | Parse the exiobase 2.2.2 source files for the IOSystem
The function parses the product by product and industry by industry source
files in the coefficient form (A and S).
Filenames are hardcoded in the parser - for any other file naming scheme the
code has to be adapted. Check git comments to find older versions.
Parameters
----------
path : string or pathlib.Path
Path to the EXIOBASE source files
charact : string or boolean, optional
Filename with path to the characterisation matrices for the extensions
(xls). This is provided together with the EXIOBASE system and given as
an xls file. The four sheets Q_factorinputs, Q_emission, Q_materials
and Q_resources are read and used to generate one new extension with
the impacts.
If set to True, the characterisation file found in path is used (
can be in the zip or extracted). If a string, it is assumed that
it points to a valid characterisation file. If False or None, no
characterisation file will be used.
popvector : string or pd.DataFrame, optional
The population vector for the countries. This can be given as
pd.DataFrame(index = population, columns = countrynames) or, (default)
will be taken from the pymrio module. If popvector = None no population
data will be passed to the IOSystem.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 2 data
Raises
------
ParserError
If the exiobase source files are not complete in the given path | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L454-L651 |
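Usage sketch for the EXIOBASE 2 parser, building the additional 'impact' extension from the characterisation file bundled with the download; the path is a placeholder.
from pymrio.tools.ioparser import parse_exiobase2

exio2 = parse_exiobase2('/tmp/mrIOT_PxP_ita_coefficient_version2.2.2.zip',
                        charact=True, popvector='exio2')
print(exio2.impact.unit.head())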
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_exiobase3 | def parse_exiobase3(path):
""" Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does so far not include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 3 data
"""
io = load_all(path)
# need to rename the final demand satellite,
# wrong name in the standard distribution
try:
io.satellite.FY = io.satellite.F_hh.copy()
del io.satellite.F_hh
except AttributeError:
pass
# some ixi in the exiobase 3.4 official distribution
# have a country name mixup. Clean it here:
io.rename_regions(
{'AUS': 'AU',
'AUT': 'AT',
'BEL': 'BE',
'BGR': 'BG',
'BRA': 'BR',
'CAN': 'CA',
'CHE': 'CH',
'CHN': 'CN',
'CYP': 'CY',
'CZE': 'CZ',
'DEU': 'DE',
'DNK': 'DK',
'ESP': 'ES',
'EST': 'EE',
'FIN': 'FI',
'FRA': 'FR',
'GBR': 'GB',
'GRC': 'GR',
'HRV': 'HR',
'HUN': 'HU',
'IDN': 'ID',
'IND': 'IN',
'IRL': 'IE',
'ITA': 'IT',
'JPN': 'JP',
'KOR': 'KR',
'LTU': 'LT',
'LUX': 'LU',
'LVA': 'LV',
'MEX': 'MX',
'MLT': 'MT',
'NLD': 'NL',
'NOR': 'NO',
'POL': 'PL',
'PRT': 'PT',
'ROM': 'RO',
'RUS': 'RU',
'SVK': 'SK',
'SVN': 'SI',
'SWE': 'SE',
'TUR': 'TR',
'TWN': 'TW',
'USA': 'US',
'ZAF': 'ZA',
'WWA': 'WA',
'WWE': 'WE',
'WWF': 'WF',
'WWL': 'WL',
'WWM': 'WM'})
return io | python | def parse_exiobase3(path):
""" Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does so far not include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 3 data
"""
io = load_all(path)
# need to rename the final demand satellite,
# wrong name in the standard distribution
try:
io.satellite.FY = io.satellite.F_hh.copy()
del io.satellite.F_hh
except AttributeError:
pass
# some ixi in the exiobase 3.4 official distribution
# have a country name mixup. Clean it here:
io.rename_regions(
{'AUS': 'AU',
'AUT': 'AT',
'BEL': 'BE',
'BGR': 'BG',
'BRA': 'BR',
'CAN': 'CA',
'CHE': 'CH',
'CHN': 'CN',
'CYP': 'CY',
'CZE': 'CZ',
'DEU': 'DE',
'DNK': 'DK',
'ESP': 'ES',
'EST': 'EE',
'FIN': 'FI',
'FRA': 'FR',
'GBR': 'GB',
'GRC': 'GR',
'HRV': 'HR',
'HUN': 'HU',
'IDN': 'ID',
'IND': 'IN',
'IRL': 'IE',
'ITA': 'IT',
'JPN': 'JP',
'KOR': 'KR',
'LTU': 'LT',
'LUX': 'LU',
'LVA': 'LV',
'MEX': 'MX',
'MLT': 'MT',
'NLD': 'NL',
'NOR': 'NO',
'POL': 'PL',
'PRT': 'PT',
'ROM': 'RO',
'RUS': 'RU',
'SVK': 'SK',
'SVN': 'SI',
'SWE': 'SE',
'TUR': 'TR',
'TWN': 'TW',
'USA': 'US',
'ZAF': 'ZA',
'WWA': 'WA',
'WWE': 'WE',
'WWF': 'WF',
'WWL': 'WL',
'WWM': 'WM'})
return io | Parses the public EXIOBASE 3 system
This parser works with either the compressed zip
archive as downloaded or the extracted system.
Note
----
The exiobase 3 parser does so far not include
population and characterization data.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the EXIOBASE files
or the compressed archive.
Returns
-------
IOSystem
An IOSystem with the parsed exiobase 3 data | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L654-L740
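Usage sketch for the EXIOBASE 3 parser, which works on the zip archive as downloaded or the extracted folder; the path is a placeholder.
from pymrio.tools.ioparser import parse_exiobase3

exio3 = parse_exiobase3('/tmp/IOT_2011_pxp.zip')  # placeholder archive
print(exio3.get_regions())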
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_wiod | def parse_wiod(path, year=None, names=('isic', 'c_codes'),
popvector=None):
""" Parse the wiod source files for the IOSystem
WIOD provides the MRIO tables in excel - format (xlsx) at
http://www.wiod.org/new_site/database/wiots.htm (release November 2013).
To use WIOD in pymrio these (for the year of analysis) must be downloaded.
The interindustry matrix of these files gets parsed in IOSystem.Z, the
additional information is included as factor_input extension (value
added,...)
The folder with these xlsx files must then be passed to the WIOD parsing
function. This folder may contain folders with the extension data. Every
folder within the wiod root folder will be parsed for extension data and
will be added to the IOSystem. The WIOD database offers the download of
the environmental extensions as zip files. These can be read directly by
the parser. In case a zip file and a folder with the same name are
available, the data is read from the folder. If the zip files are
extracted into folders, the folders must have the same name as the
corresponding zip file (without the 'zip' extension).
If a WIOD SEA file is present (at the root of path or in a folder named
'SEA' - only one file!), the labor data of this file gets included in the
factor_input extension (calculated for the three skill levels
available). The monetary data in this file is not added because it is only
given in national currency.
Since the "World Input-Output Tables in previous years' prices" are still
under construction (20141129), no parser for these is provided.
Some of the meta-parameter of the IOSystem are set automatically based on
the values given in the first four cells and the name of the WIOD data
files (base year, version, price, iosystem).
These can be overwritten afterwards if needed.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the WIOD source files. In case that the path
to a specific file is given, only this will be parsed irrespective of
the values given in year.
year : int or str
Which year in the path should be parsed. The years can be given with
four or two digits (eg [2012 or 12]). If the given path contains a
specific file, the value of year will not be used (but inferred from
the meta data) - otherwise it must be given. For the monetary data the
parser searches for files with 'wiot - two digit year'.
names : string or tuple, optional
WIOD provides three different sector/final demand categories naming
schemes. These can be specified for the IOSystem. Pass:
1) 'isic': ISIC rev 3 Codes - available for interindustry flows
and final demand rows.
2) 'full': Full names - available for final demand rows and
final demand columns (categories) and interindustry flows.
3) 'c_codes' : WIOD specific sector numbers, available for final
demand rows and columns (categories) and interindustry flows.
Internally, the parser relies on 1) for the interindustry flows and 3)
for the final demand categories. This is the default and will also be
used if just 'isic' gets passed ('c_codes' also replaces 'isic' if this
was passed for final demand categories). To specify different final
consumption category names, pass a tuple with (sectors/interindustry
classification, fd categories), eg ('isic', 'full'). Names are case
insensitive and passing the first character is sufficient.
TODO popvector : TO BE IMPLEMENTED (consistent with EXIOBASE)
Yields
-------
IOSystem
Raises
------
ParserError
If the WIOD source files are not complete or are inconsistent
"""
# Path manipulation, should work cross platform
path = os.path.abspath(os.path.normpath(str(path)))
# wiot start and end
wiot_ext = '.xlsx'
wiot_start = 'wiot'
# determine which wiod file to be parsed
if not os.path.isdir(path):
# 1. case - one file specified in path
if os.path.isfile(path):
wiot_file = path
else:
# just in case the ending was forgotten
wiot_file = path + wiot_ext
else:
# 2. case: directory given-build wiot_file with the value given in year
if not year:
raise ParserError('No year specified '
'(either specify a specific file '
'or a path and year)')
year_two_digit = str(year)[-2:]
wiot_file_list = [fl for fl in os.listdir(path)
if (fl[:6] == wiot_start + year_two_digit and
os.path.splitext(fl)[1] == wiot_ext)]
if len(wiot_file_list) != 1:
raise ParserError('Multiple files for a given year or file not '
'found (specify a specific file in parameters)')
wiot_file = os.path.join(path, wiot_file_list[0])
wiot_file = wiot_file
root_path = os.path.split(wiot_file)[0]
if not os.path.exists(wiot_file):
raise ParserError('WIOD file not found in the specified folder.')
meta_rec = MRIOMetaData(location=root_path)
# wiot file structure
wiot_meta = {
'col': 0, # column of the meta information
'year': 0, # rest: rows with the data
'iosystem': 2,
'unit': 3,
'end_row': 4,
}
wiot_header = {
# the header indexes are the same for rows after removing the first
# two lines (wiot_empty_top_rows)
'code': 0,
'sector_names': 1,
'region': 2,
'c_code': 3,
}
wiot_empty_top_rows = [0, 1]
wiot_marks = { # special marks
'last_interindsec': 'c35', # last sector of the interindustry
'tot_facinp': ['r60', 'r69'], # useless totals to remove from factinp
'total_column': [-1], # the total column in the whole data
}
wiot_sheet = 0 # assume the first one is the one with the data.
# Wiod has an unfortunate file structure with overlapping metadata and
# header. In order to deal with that first the full file is read.
wiot_data = pd.read_excel(wiot_file,
sheet_name=wiot_sheet,
header=None)
meta_rec._add_fileio('WIOD data parsed from {}'.format(wiot_file))
# get meta data
wiot_year = wiot_data.iloc[wiot_meta['year'], wiot_meta['col']][-4:]
wiot_iosystem = wiot_data.iloc[
wiot_meta['iosystem'], wiot_meta['col']].rstrip(')').lstrip('(')
meta_rec.change_meta('system', wiot_iosystem)
_wiot_unit = wiot_data.iloc[
wiot_meta['unit'], wiot_meta['col']].rstrip(')').lstrip('(')
# remove meta data, empty rows, total column
wiot_data.iloc[0:wiot_meta['end_row'], wiot_meta['col']] = np.NaN
wiot_data.drop(wiot_empty_top_rows,
axis=0, inplace=True)
wiot_data.drop(wiot_data.columns[wiot_marks['total_column']],
axis=1, inplace=True)
# at this stage row and column header should have the same size but
# the index starts now at two - replace/reset to row numbers
wiot_data.index = range(wiot_data.shape[0])
# Early years in WIOD tables have a different name for Romania:
# 'ROM' which should be 'ROU'. The latter is also consistent with
# the environmental extensions names.
wiot_data.iloc[wiot_header['region'], :] = wiot_data.iloc[
wiot_header['region'], :].str.replace('ROM', 'ROU')
wiot_data.iloc[:, wiot_header['region']] = wiot_data.iloc[
:, wiot_header['region']].str.replace('ROM', 'ROU')
# get the end of the interindustry matrix
_lastZcol = wiot_data[
wiot_data.iloc[
:, wiot_header['c_code']] == wiot_marks['last_interindsec']
].index[-1]
_lastZrow = wiot_data[
wiot_data[wiot_header['c_code']] == wiot_marks['last_interindsec']
].index[-1]
if _lastZcol != _lastZrow:
raise ParserError(
'Interindustry matrix not symmetric in the WIOD source file')
else:
Zshape = (_lastZrow, _lastZcol)
# separate factor input extension and remove
# totals in the first and last row
facinp = wiot_data.iloc[Zshape[0]+1:, :]
facinp = facinp.drop(
facinp[facinp[wiot_header['c_code']].isin(
wiot_marks['tot_facinp'])].index, axis=0
)
Z = wiot_data.iloc[:Zshape[0]+1, :Zshape[1]+1].copy()
Y = wiot_data.iloc[:Zshape[0]+1, Zshape[1]+1:].copy()
F_fac = facinp.iloc[:, :Zshape[1]+1].copy()
FY_fac = facinp.iloc[:, Zshape[1]+1:].copy()
index_wiot_headers = [nr for nr in wiot_header.values()]
# Save lookup of sectors and codes - to be used at the end of the parser
# Assuming USA is present in every WIOT year
wiot_sector_lookup = wiot_data[
wiot_data[wiot_header['region']] == 'USA'].iloc[
:, 0:max(index_wiot_headers)+1].applymap(str)
wiot_sector_lookup.columns = [
entry[1] for entry in sorted(
zip(wiot_header.values(), wiot_header.keys()))]
wiot_sector_lookup.set_index('code', inplace=True, drop=False)
_Y = Y.T.iloc[:, [
wiot_header['code'], # Included to be consistent with wiot_header
wiot_header['sector_names'],
wiot_header['region'],
wiot_header['c_code'],
]]
wiot_fd_lookup = _Y[_Y.iloc[
:, wiot_header['region']] == 'USA'].applymap(str)
wiot_fd_lookup.columns = [
entry[1] for entry in
sorted(zip(wiot_header.values(), wiot_header.keys()))]
wiot_fd_lookup.set_index('c_code', inplace=True, drop=False)
wiot_fd_lookup.index.name = 'code'
# set the index/columns, work with code b/c these are also used in the
# extensions
Z[wiot_header['code']] = Z[wiot_header['code']].astype(str)
Z.set_index([wiot_header['region'],
wiot_header['code']], inplace=True, drop=False)
Z = Z.iloc[max(index_wiot_headers)+1:, max(index_wiot_headers)+1:]
Z.index.names = IDX_NAMES['Z_col']
Z.columns = Z.index
indexY_col_head = Y.iloc[[wiot_header['region'],
wiot_header['c_code']], :]
Y.columns = pd.MultiIndex.from_arrays(indexY_col_head.values,
names=IDX_NAMES['Y_col2'])
Y = Y.iloc[max(index_wiot_headers)+1:, :]
Y.index = Z.index
F_fac.set_index([wiot_header['sector_names']],
inplace=True, drop=False) # c_code missing, use names
F_fac.index.names = ['inputtype']
F_fac = F_fac.iloc[:, max(index_wiot_headers)+1:]
F_fac.columns = Z.columns
FY_fac.columns = Y.columns
FY_fac.index = F_fac.index
# convert from object to float (was object because mixed float,str)
Z = Z.astype('float')
Y = Y.astype('float')
F_fac = F_fac.astype('float')
FY_fac = FY_fac.astype('float')
# save the units
Z_unit = pd.DataFrame(Z.iloc[:, 0])
Z_unit.columns = ['unit']
Z_unit['unit'] = _wiot_unit
F_fac_unit = pd.DataFrame(F_fac.iloc[:, 0])
F_fac_unit.columns = ['unit']
F_fac_unit['unit'] = _wiot_unit
ll_countries = list(Z.index.get_level_values('region').unique())
# Finalize the factor inputs extension
ext = dict()
ext['factor_inputs'] = {'F': F_fac,
'FY': FY_fac,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': F_fac_unit,
'name': 'factor input',
}
# SEA extension
_F_sea_data, _F_sea_unit = __get_WIOD_SEA_extension(
root_path=root_path, year=year)
if _F_sea_data is not None:
# None if no SEA file present
_FY_sea = pd.DataFrame(index=_F_sea_data.index,
columns=FY_fac.columns, data=0)
_FY_sea = _FY_sea.astype('float')
ext['SEA'] = {'F': _F_sea_data,
'FY': _FY_sea,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': _F_sea_unit,
'name': 'SEA',
}
meta_rec._add_fileio('SEA file extension parsed from {}'.format(
root_path))
# Environmental extensions, names follow the name given
# in the meta sheet (except for CO2 to get a better description).
# Units are hardcoded if no consistent place to read them
# within the files (for all extensions in upper case).
# The units names must exactly match!
# Start must identify exactly one folder or a zip file to
# read the extension.
# Within the folder, the routine looks for xls files
# starting with the country code.
dl_envext_para = {
'AIR': {'name': 'Air Emission Accounts',
'start': 'AIR_',
'ext': '.xls',
'unit': {
'CO2': 'Gg',
'CH4': 't',
'N2O': 't',
'NOx': 't',
'SOx': 't',
'CO': 't',
'NMVOC': 't',
'NH3': 't',
},
},
'CO2': {'name': 'CO2 emissions - per source',
'start': 'CO2_',
'ext': '.xls',
'unit': {
'all': 'Gg'}
},
'EM': {'name': 'Emission relevant energy use',
'start': 'EM_',
'ext': '.xls',
'unit': {
'all': 'TJ'}
},
'EU': {'name': 'Gross energy use',
'start': 'EU_',
'ext': '.xls',
'unit': {
'all': 'TJ'}
},
'lan': {'name': 'land use',
'start': 'lan_',
'ext': '.xls',
'unit': {
'all': None}
},
'mat': {'name': 'material use',
'start': 'mat_',
'ext': '.xls',
'unit': {
'all': None}
},
'wat': {'name': 'water use',
'start': 'wat_',
'ext': '.xls',
'unit': {
'all': None}
},
}
_FY_template = pd.DataFrame(columns=FY_fac.columns)
_ss_FY_pressure_column = 'c37'
for ik_ext in dl_envext_para:
_dl_ex = __get_WIOD_env_extension(root_path=root_path,
year=year,
ll_co=ll_countries,
para=dl_envext_para[ik_ext])
if _dl_ex is not None:
# None if extension not available
_FY = _dl_ex['FY']
_FY.columns = pd.MultiIndex.from_product([
_FY.columns, [_ss_FY_pressure_column]])
_FY = _FY_template.append(_FY)
_FY.fillna(0, inplace=True)
_FY.index.names = _dl_ex['F'].index.names
_FY.columns.names = _FY_template.columns.names
_FY = _FY[ll_countries]
_FY = _FY.astype('float')
ext[ik_ext] = {
'F': _dl_ex['F'],
'FY': _FY,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': _dl_ex['unit'],
'name': dl_envext_para[ik_ext]['name'],
}
meta_rec._add_fileio('Extension {} parsed from {}'.format(
ik_ext, root_path))
# Build system
wiod = IOSystem(Z=Z, Y=Y,
unit=Z_unit,
meta=meta_rec,
**ext)
# Replace sector/final demand category names
if type(names) is str:
names = (names, names)
ll_names = [w[0].lower() for w in names]
if ll_names[0] == 'c':
dd_sec_rename = wiot_sector_lookup.c_code.to_dict()
elif ll_names[0] == 'i':
dd_sec_rename = wiot_sector_lookup.code.to_dict()
elif ll_names[0] == 'f':
dd_sec_rename = wiot_sector_lookup.sector_names.to_dict()
else:
dd_sec_rename = wiot_sector_lookup.code.to_dict()
warnings.warn('Parameter for names not understood - '
'used ISIC codes as sector names')
if ll_names[1] == 'c':
dd_fd_rename = wiot_fd_lookup.c_code.to_dict()
elif ll_names[1] == 'i':
dd_fd_rename = wiot_fd_lookup.c_code.to_dict()
elif ll_names[1] == 'f':
dd_fd_rename = wiot_fd_lookup.sector_names.to_dict()
else:
warnings.warn('Parameter for names not understood - '
'used c_codes as final demand category names')
wiod.Z.rename(columns=dd_sec_rename, index=dd_sec_rename, inplace=True)
wiod.Y.rename(columns=dd_fd_rename, index=dd_sec_rename, inplace=True)
for ext in wiod.get_extensions(data=True):
ext.F.rename(columns=dd_sec_rename, inplace=True)
ext.FY.rename(columns=dd_fd_rename, inplace=True)
return wiod | python | def parse_wiod(path, year=None, names=('isic', 'c_codes'),
popvector=None):
""" Parse the wiod source files for the IOSystem
WIOD provides the MRIO tables in excel - format (xlsx) at
http://www.wiod.org/new_site/database/wiots.htm (release November 2013).
To use WIOD in pymrio these (for the year of analysis) must be downloaded.
The interindustry matrix of these files gets parsed in IOSystem.Z, the
additional information is included as factor_input extension (value
added,...)
The folder with these xlsx files must then be passed to the WIOD parsing
function. This folder may contain folders with the extension data. Every
folder within the wiod root folder will be parsed for extension data and
will be added to the IOSystem. The WIOD database offers the download of
the environmental extensions as zip files. These can be read directly by
the parser. In case a zip file and a folder with the same name are
available, the data is read from the folder. If the zip files are
extracted into folder, the folders must have the same name as the
corresponding zip file (without the 'zip' extension).
If a WIOD SEA file is present (at the root of path or in a folder named
'SEA' - only one file!), the labor data of this file gets included in the
factor_input extension (calculated for the three skill levels
available). The monetary data in this file is not added because it is only
given in national currency.
Since the "World Input-Output Tables in previous years' prices" are still
under construction (20141129), no parser for these is provided.
Some of the meta-parameter of the IOSystem are set automatically based on
the values given in the first four cells and the name of the WIOD data
files (base year, version, price, iosystem).
These can be overwritten afterwards if needed.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the WIOD source files. In case that the path
to a specific file is given, only this will be parsed irrespective of
the values given in year.
year : int or str
Which year in the path should be parsed. The years can be given with
four or two digits (eg [2012 or 12]). If the given path contains a
specific file, the value of year will not be used (but inferred from
the meta data) - otherwise it must be given. For the monetary data the
parser searches for files with 'wiot - two digit year'.
names : string or tuple, optional
WIOD provides three different sector/final demand categories naming
schemes. These can be specified for the IOSystem. Pass:
1) 'isic': ISIC rev 3 Codes - available for interindustry flows
and final demand rows.
2) 'full': Full names - available for final demand rows and
final demand columns (categories) and interindustry flows.
3) 'c_codes' : WIOD specific sector numbers, available for final
demand rows and columns (categories) and interindustry flows.
Internally, the parser relies on 1) for the interindustry flows and 3)
for the final demand categories. This is the default and will also be
used if just 'isic' gets passed ('c_codes' also replace 'isic' if this
was passed for final demand categories). To specify different final
consumption category names, pass a tuple with (sectors/interindustry
classification, fd categories), eg ('isic', 'full'). Names are case
insensitive and passing the first character is sufficient.
TODO popvector : TO BE IMPLEMENTED (consistent with EXIOBASE)
Yields
-------
IOSystem
Raises
------
ParserError
If the WIOD source files are not complete or are inconsistent
"""
# Path manipulation, should work cross platform
path = os.path.abspath(os.path.normpath(str(path)))
# wiot start and end
wiot_ext = '.xlsx'
wiot_start = 'wiot'
# determine which wiod file to be parsed
if not os.path.isdir(path):
# 1. case - one file specified in path
if os.path.isfile(path):
wiot_file = path
else:
# just in case the ending was forgotten
wiot_file = path + wiot_ext
else:
# 2. case: directory given-build wiot_file with the value given in year
if not year:
raise ParserError('No year specified '
'(either specify a specific file '
'or a path and year)')
year_two_digit = str(year)[-2:]
wiot_file_list = [fl for fl in os.listdir(path)
if (fl[:6] == wiot_start + year_two_digit and
os.path.splitext(fl)[1] == wiot_ext)]
if len(wiot_file_list) != 1:
raise ParserError('Multiple files for a given year or file not '
'found (specify a specific file in parameters)')
wiot_file = os.path.join(path, wiot_file_list[0])
wiot_file = wiot_file
root_path = os.path.split(wiot_file)[0]
if not os.path.exists(wiot_file):
raise ParserError('WIOD file not found in the specified folder.')
meta_rec = MRIOMetaData(location=root_path)
# wiot file structure
wiot_meta = {
'col': 0, # column of the meta information
'year': 0, # rest: rows with the data
'iosystem': 2,
'unit': 3,
'end_row': 4,
}
wiot_header = {
# the header indexes are the same for rows after removing the first
# two lines (wiot_empty_top_rows)
'code': 0,
'sector_names': 1,
'region': 2,
'c_code': 3,
}
wiot_empty_top_rows = [0, 1]
wiot_marks = { # special marks
'last_interindsec': 'c35', # last sector of the interindustry
'tot_facinp': ['r60', 'r69'], # useless totals to remove from factinp
'total_column': [-1], # the total column in the whole data
}
wiot_sheet = 0 # assume the first one is the one with the data.
# Wiod has an unfortunate file structure with overlapping metadata and
# header. In order to deal with that first the full file is read.
wiot_data = pd.read_excel(wiot_file,
sheet_name=wiot_sheet,
header=None)
meta_rec._add_fileio('WIOD data parsed from {}'.format(wiot_file))
# get meta data
wiot_year = wiot_data.iloc[wiot_meta['year'], wiot_meta['col']][-4:]
wiot_iosystem = wiot_data.iloc[
wiot_meta['iosystem'], wiot_meta['col']].rstrip(')').lstrip('(')
meta_rec.change_meta('system', wiot_iosystem)
_wiot_unit = wiot_data.iloc[
wiot_meta['unit'], wiot_meta['col']].rstrip(')').lstrip('(')
# remove meta data, empty rows, total column
wiot_data.iloc[0:wiot_meta['end_row'], wiot_meta['col']] = np.NaN
wiot_data.drop(wiot_empty_top_rows,
axis=0, inplace=True)
wiot_data.drop(wiot_data.columns[wiot_marks['total_column']],
axis=1, inplace=True)
# at this stage row and column header should have the same size but
# the index starts now at two - replace/reset to row numbers
wiot_data.index = range(wiot_data.shape[0])
# Early years in WIOD tables have a different name for Romania:
# 'ROM' which should be 'ROU'. The latter is also consistent with
# the environmental extensions names.
wiot_data.iloc[wiot_header['region'], :] = wiot_data.iloc[
wiot_header['region'], :].str.replace('ROM', 'ROU')
wiot_data.iloc[:, wiot_header['region']] = wiot_data.iloc[
:, wiot_header['region']].str.replace('ROM', 'ROU')
# get the end of the interindustry matrix
_lastZcol = wiot_data[
wiot_data.iloc[
:, wiot_header['c_code']] == wiot_marks['last_interindsec']
].index[-1]
_lastZrow = wiot_data[
wiot_data[wiot_header['c_code']] == wiot_marks['last_interindsec']
].index[-1]
if _lastZcol != _lastZrow:
raise ParserError(
'Interindustry matrix not symmetric in the WIOD source file')
else:
Zshape = (_lastZrow, _lastZcol)
# separate factor input extension and remove
# totals in the first and last row
facinp = wiot_data.iloc[Zshape[0]+1:, :]
facinp = facinp.drop(
facinp[facinp[wiot_header['c_code']].isin(
wiot_marks['tot_facinp'])].index, axis=0
)
Z = wiot_data.iloc[:Zshape[0]+1, :Zshape[1]+1].copy()
Y = wiot_data.iloc[:Zshape[0]+1, Zshape[1]+1:].copy()
F_fac = facinp.iloc[:, :Zshape[1]+1].copy()
FY_fac = facinp.iloc[:, Zshape[1]+1:].copy()
index_wiot_headers = [nr for nr in wiot_header.values()]
# Save lookup of sectors and codes - to be used at the end of the parser
# Assuming USA is present in every WIOT year
wiot_sector_lookup = wiot_data[
wiot_data[wiot_header['region']] == 'USA'].iloc[
:, 0:max(index_wiot_headers)+1].applymap(str)
wiot_sector_lookup.columns = [
entry[1] for entry in sorted(
zip(wiot_header.values(), wiot_header.keys()))]
wiot_sector_lookup.set_index('code', inplace=True, drop=False)
_Y = Y.T.iloc[:, [
wiot_header['code'], # Included to be consistent with wiot_header
wiot_header['sector_names'],
wiot_header['region'],
wiot_header['c_code'],
]]
wiot_fd_lookup = _Y[_Y.iloc[
:, wiot_header['region']] == 'USA'].applymap(str)
wiot_fd_lookup.columns = [
entry[1] for entry in
sorted(zip(wiot_header.values(), wiot_header.keys()))]
wiot_fd_lookup.set_index('c_code', inplace=True, drop=False)
wiot_fd_lookup.index.name = 'code'
# set the index/columns, work with code b/c these are also used in the
# extensions
Z[wiot_header['code']] = Z[wiot_header['code']].astype(str)
Z.set_index([wiot_header['region'],
wiot_header['code']], inplace=True, drop=False)
Z = Z.iloc[max(index_wiot_headers)+1:, max(index_wiot_headers)+1:]
Z.index.names = IDX_NAMES['Z_col']
Z.columns = Z.index
indexY_col_head = Y.iloc[[wiot_header['region'],
wiot_header['c_code']], :]
Y.columns = pd.MultiIndex.from_arrays(indexY_col_head.values,
names=IDX_NAMES['Y_col2'])
Y = Y.iloc[max(index_wiot_headers)+1:, :]
Y.index = Z.index
F_fac.set_index([wiot_header['sector_names']],
inplace=True, drop=False) # c_code missing, use names
F_fac.index.names = ['inputtype']
F_fac = F_fac.iloc[:, max(index_wiot_headers)+1:]
F_fac.columns = Z.columns
FY_fac.columns = Y.columns
FY_fac.index = F_fac.index
# convert from object to float (was object because mixed float,str)
Z = Z.astype('float')
Y = Y.astype('float')
F_fac = F_fac.astype('float')
FY_fac = FY_fac.astype('float')
# save the units
Z_unit = pd.DataFrame(Z.iloc[:, 0])
Z_unit.columns = ['unit']
Z_unit['unit'] = _wiot_unit
F_fac_unit = pd.DataFrame(F_fac.iloc[:, 0])
F_fac_unit.columns = ['unit']
F_fac_unit['unit'] = _wiot_unit
ll_countries = list(Z.index.get_level_values('region').unique())
# Finalize the factor inputs extension
ext = dict()
ext['factor_inputs'] = {'F': F_fac,
'FY': FY_fac,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': F_fac_unit,
'name': 'factor input',
}
# SEA extension
_F_sea_data, _F_sea_unit = __get_WIOD_SEA_extension(
root_path=root_path, year=year)
if _F_sea_data is not None:
# None if no SEA file present
_FY_sea = pd.DataFrame(index=_F_sea_data.index,
columns=FY_fac.columns, data=0)
_FY_sea = _FY_sea.astype('float')
ext['SEA'] = {'F': _F_sea_data,
'FY': _FY_sea,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': _F_sea_unit,
'name': 'SEA',
}
meta_rec._add_fileio('SEA file extension parsed from {}'.format(
root_path))
# Environmental extensions, names follow the name given
# in the meta sheet (except for CO2 to get a better description).
# Units are hardcoded if no consistent place to read them
# within the files (for all extensions in upper case).
# The units names must exactly match!
# Start must identify exactly one folder or a zip file to
# read the extension.
# Within the folder, the routine looks for xls files
# starting with the country code.
dl_envext_para = {
'AIR': {'name': 'Air Emission Accounts',
'start': 'AIR_',
'ext': '.xls',
'unit': {
'CO2': 'Gg',
'CH4': 't',
'N2O': 't',
'NOx': 't',
'SOx': 't',
'CO': 't',
'NMVOC': 't',
'NH3': 't',
},
},
'CO2': {'name': 'CO2 emissions - per source',
'start': 'CO2_',
'ext': '.xls',
'unit': {
'all': 'Gg'}
},
'EM': {'name': 'Emission relevant energy use',
'start': 'EM_',
'ext': '.xls',
'unit': {
'all': 'TJ'}
},
'EU': {'name': 'Gross energy use',
'start': 'EU_',
'ext': '.xls',
'unit': {
'all': 'TJ'}
},
'lan': {'name': 'land use',
'start': 'lan_',
'ext': '.xls',
'unit': {
'all': None}
},
'mat': {'name': 'material use',
'start': 'mat_',
'ext': '.xls',
'unit': {
'all': None}
},
'wat': {'name': 'water use',
'start': 'wat_',
'ext': '.xls',
'unit': {
'all': None}
},
}
_FY_template = pd.DataFrame(columns=FY_fac.columns)
_ss_FY_pressure_column = 'c37'
for ik_ext in dl_envext_para:
_dl_ex = __get_WIOD_env_extension(root_path=root_path,
year=year,
ll_co=ll_countries,
para=dl_envext_para[ik_ext])
if _dl_ex is not None:
# None if extension not available
_FY = _dl_ex['FY']
_FY.columns = pd.MultiIndex.from_product([
_FY.columns, [_ss_FY_pressure_column]])
_FY = _FY_template.append(_FY)
_FY.fillna(0, inplace=True)
_FY.index.names = _dl_ex['F'].index.names
_FY.columns.names = _FY_template.columns.names
_FY = _FY[ll_countries]
_FY = _FY.astype('float')
ext[ik_ext] = {
'F': _dl_ex['F'],
'FY': _FY,
'year': wiot_year,
'iosystem': wiot_iosystem,
'unit': _dl_ex['unit'],
'name': dl_envext_para[ik_ext]['name'],
}
meta_rec._add_fileio('Extension {} parsed from {}'.format(
ik_ext, root_path))
# Build system
wiod = IOSystem(Z=Z, Y=Y,
unit=Z_unit,
meta=meta_rec,
**ext)
# Replace sector/final demand category names
if type(names) is str:
names = (names, names)
ll_names = [w[0].lower() for w in names]
if ll_names[0] == 'c':
dd_sec_rename = wiot_sector_lookup.c_code.to_dict()
elif ll_names[0] == 'i':
dd_sec_rename = wiot_sector_lookup.code.to_dict()
elif ll_names[0] == 'f':
dd_sec_rename = wiot_sector_lookup.sector_names.to_dict()
else:
dd_sec_rename = wiot_sector_lookup.code.to_dict()
warnings.warn('Parameter for names not understood - '
'used ISIC codes as sector names')
if ll_names[1] == 'c':
dd_fd_rename = wiot_fd_lookup.c_code.to_dict()
elif ll_names[1] == 'i':
dd_fd_rename = wiot_fd_lookup.c_code.to_dict()
elif ll_names[1] == 'f':
dd_fd_rename = wiot_fd_lookup.sector_names.to_dict()
else:
warnings.warn('Parameter for names not understood - '
'used c_codes as final demand category names')
wiod.Z.rename(columns=dd_sec_rename, index=dd_sec_rename, inplace=True)
wiod.Y.rename(columns=dd_fd_rename, index=dd_sec_rename, inplace=True)
for ext in wiod.get_extensions(data=True):
ext.F.rename(columns=dd_sec_rename, inplace=True)
ext.FY.rename(columns=dd_fd_rename, inplace=True)
return wiod | Parse the wiod source files for the IOSystem
WIOD provides the MRIO tables in excel - format (xlsx) at
http://www.wiod.org/new_site/database/wiots.htm (release November 2013).
To use WIOD in pymrio these (for the year of analysis) must be downloaded.
The interindustry matrix of these files gets parsed in IOSystem.Z, the
additional information is included as factor_input extension (value
added,...)
The folder with these xlsx files must then be passed to the WIOD parsing
function. This folder may contain folders with the extension data. Every
folder within the wiod root folder will be parsed for extension data and
will be added to the IOSystem. The WIOD database offers the download of
the environmental extensions as zip files. These can be read directly by
the parser. In case a zip file and a folder with the same name are
available, the data is read from the folder. If the zip files are
extracted into folder, the folders must have the same name as the
corresponding zip file (without the 'zip' extension).
If a WIOD SEA file is present (at the root of path or in a folder named
'SEA' - only one file!), the labor data of this file gets included in the
factor_input extension (calculated for the three skill levels
available). The monetary data in this file is not added because it is only
given in national currency.
Since the "World Input-Output Tables in previous years' prices" are still
under construction (20141129), no parser for these is provided.
Some of the meta-parameter of the IOSystem are set automatically based on
the values given in the first four cells and the name of the WIOD data
files (base year, version, price, iosystem).
These can be overwritten afterwards if needed.
Parameters
----------
path : string or pathlib.Path
Path to the folder with the WIOD source files. In case that the path
to a specific file is given, only this will be parsed irrespective of
the values given in year.
year : int or str
Which year in the path should be parsed. The years can be given with
four or two digits (eg [2012 or 12]). If the given path contains a
specific file, the value of year will not be used (but inferred from
the meta data) - otherwise it must be given. For the monetary data the
parser searches for files with 'wiot - two digit year'.
names : string or tuple, optional
WIOD provides three different sector/final demand categories naming
schemes. These can be specified for the IOSystem. Pass:
1) 'isic': ISIC rev 3 Codes - available for interindustry flows
and final demand rows.
2) 'full': Full names - available for final demand rows and
final demand columns (categories) and interindustry flows.
3) 'c_codes' : WIOD specific sector numbers, available for final
demand rows and columns (categories) and interindustry flows.
Internally, the parser relies on 1) for the interindustry flows and 3)
for the final demand categories. This is the default and will also be
used if just 'isic' gets passed ('c_codes' also replace 'isic' if this
was passed for final demand categories). To specify different final
consumption category names, pass a tuple with (sectors/interindustry
classification, fd categories), eg ('isic', 'full'). Names are case
insensitive and passing the first character is sufficient.
TODO popvector : TO BE IMPLEMENTED (consistent with EXIOBASE)
Yields
-------
IOSystem
Raises
------
ParserError
If the WIOD source files are not complete or are inconsistent | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L743-L1173 |
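A minimal usage sketch for the parser above (not part of the repository record); the storage folder and year are placeholder assumptions pointing at a local WIOD download:
import pymrio
# assumed folder holding the downloaded wiot*.xlsx table and the extension zip files
wiod_storage = '/tmp/mrios/WIOD2013'
wiod = pymrio.parse_wiod(path=wiod_storage, year=2009, names=('isic', 'c_codes'))
wiod.calc_all()            # derive coefficient matrices and footprint accounts
print(wiod.get_sectors())  # ISIC rev 3 sector codes after renaming
Any extension raw data found next to the table (AIR, CO2, EM, EU, lan, mat, wat, SEA) is attached to the returned IOSystem together with factor_inputs.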
konstantinstadler/pymrio | pymrio/tools/ioparser.py | __get_WIOD_env_extension | def __get_WIOD_env_extension(root_path, year, ll_co, para):
""" Parses the wiod environmental extension
Extension can either be given as original .zip files or as extracted
data in a folder with the same name as the corresponding zip file (without
the extension).
This function is based on the structure of the extensions from _may12.
Note
----
The function deletes 'secQ' which is not present in the economic tables.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the
extension data folder or zip file.
year : str or int
Year to return for the extension = valid sheetname for the xls file.
ll_co : list like
List of countries in WIOD - used for finding and matching
extension data in the given folder.
para : dict
Defining the parameters for reading the extension.
Returns
-------
dict with keys
F : pd.DataFrame with index 'stressor' and columns 'region', 'sector'
FY : pd.DataFrame with index 'stressor' and column 'region'
This data is for household stressors - must be applied to the right
final demand column afterwards.
unit : pd.DataFrame with index 'stressor' and column 'unit'
"""
ll_root_content = [ff for ff in os.listdir(root_path) if
ff.startswith(para['start'])]
if len(ll_root_content) < 1:
warnings.warn(
'Extension data for {} not found - '
'Extension not included'.format(para['start']), ParserWarning)
return None
elif len(ll_root_content) > 1:
raise ParserError(
'Several raw data for extension '
'{} available - clean extension folder.'.format(para['start']))
pf_env = os.path.join(root_path, ll_root_content[0])
if pf_env.endswith('.zip'):
rf_zip = zipfile.ZipFile(pf_env)
ll_env_content = [ff for ff in rf_zip.namelist() if
ff.endswith(para['ext'])]
else:
ll_env_content = [ff for ff in os.listdir(pf_env) if
ff.endswith(para['ext'])]
dl_env = dict()
dl_env_hh = dict()
for co in ll_co:
ll_pff_read = [ff for ff in ll_env_content if
ff.endswith(para['ext']) and
(ff.startswith(co.upper()) or
ff.startswith(co.lower()))]
if len(ll_pff_read) < 1:
raise ParserError('Country data not complete for Extension '
'{} - missing {}.'.format(para['start'], co))
elif len(ll_pff_read) > 1:
raise ParserError('Multiple country data for Extension '
'{} - country {}.'.format(para['start'], co))
pff_read = ll_pff_read[0]
if pf_env.endswith('.zip'):
ff_excel = pd.ExcelFile(rf_zip.open(pff_read))
else:
ff_excel = pd.ExcelFile(os.path.join(pf_env, pff_read))
if str(year) in ff_excel.sheet_names:
df_env = ff_excel.parse(sheet_name=str(year),
index_col=None,
header=0
)
else:
warnings.warn('Extension {} does not include '
'data for the year {} - '
'Extension not included'.format(para['start'], year),
ParserWarning)
return None
if not df_env.index.is_numeric():
# upper case letter extensions gets parsed with multiindex, not
# quite sure why...
df_env.reset_index(inplace=True)
# unit can be taken from the first cell in the excel sheet
if df_env.columns[0] != 'level_0':
para['unit']['all'] = df_env.columns[0]
# two clean up cases - can be identified by lower/upper case extension
# description
if para['start'].islower():
pass
elif para['start'].isupper():
df_env = df_env.iloc[:, 1:]
else:
raise ParserError('Format of extension not given.')
df_env.dropna(axis=0, how='all', inplace=True)
df_env = df_env[df_env.iloc[:, 0] != 'total']
df_env = df_env[df_env.iloc[:, 0] != 'secTOT']
df_env = df_env[df_env.iloc[:, 0] != 'secQ']
df_env.iloc[:, 0].astype(str, inplace=True)
df_env.iloc[:, 0].replace(to_replace='sec',
value='',
regex=True,
inplace=True)
df_env.set_index([df_env.columns[0]], inplace=True)
df_env.index.names = ['sector']
df_env = df_env.T
ikc_hh = 'FC_HH'
dl_env_hh[co] = df_env[ikc_hh]
del df_env[ikc_hh]
dl_env[co] = df_env
df_F = pd.concat(dl_env, axis=1)[ll_co]
df_FY = pd.concat(dl_env_hh, axis=1)[ll_co]
df_F.fillna(0, inplace=True)
df_FY.fillna(0, inplace=True)
df_F.columns.names = IDX_NAMES['F_col']
df_F.index.names = IDX_NAMES['F_row_single']
df_FY.columns.names = IDX_NAMES['Y_col1']
df_FY.index.names = IDX_NAMES['F_row_single']
# build the unit df
df_unit = pd.DataFrame(index=df_F.index, columns=['unit'])
_ss_unit = para['unit'].get('all', 'undef')
for ikr in df_unit.index:
df_unit.ix[ikr, 'unit'] = para['unit'].get(ikr, _ss_unit)
df_unit.columns.names = ['unit']
df_unit.index.names = ['stressor']
if pf_env.endswith('.zip'):
rf_zip.close()
return {'F': df_F,
'FY': df_FY,
'unit': df_unit
} | python | def __get_WIOD_env_extension(root_path, year, ll_co, para):
""" Parses the wiod environmental extension
Extension can either be given as original .zip files or as extracted
data in a folder with the same name as the corresponding zip file (without
the extension).
This function is based on the structure of the extensions from _may12.
Note
----
The function deletes 'secQ' which is not present in the economic tables.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the
extension data folder or zip file.
year : str or int
Year to return for the extension = valid sheetname for the xls file.
ll_co : list like
List of countries in WIOD - used for finding and matching
extension data in the given folder.
para : dict
Defining the parameters for reading the extension.
Returns
-------
dict with keys
F : pd.DataFrame with index 'stressor' and columns 'region', 'sector'
FY : pd.DataFrame with index 'stressor' and column 'region'
This data is for household stressors - must be applied to the right
final demand column afterwards.
unit : pd.DataFrame with index 'stressor' and column 'unit'
"""
ll_root_content = [ff for ff in os.listdir(root_path) if
ff.startswith(para['start'])]
if len(ll_root_content) < 1:
warnings.warn(
'Extension data for {} not found - '
'Extension not included'.format(para['start']), ParserWarning)
return None
elif len(ll_root_content) > 1:
raise ParserError(
'Several raw data for extension '
'{} available - clean extension folder.'.format(para['start']))
pf_env = os.path.join(root_path, ll_root_content[0])
if pf_env.endswith('.zip'):
rf_zip = zipfile.ZipFile(pf_env)
ll_env_content = [ff for ff in rf_zip.namelist() if
ff.endswith(para['ext'])]
else:
ll_env_content = [ff for ff in os.listdir(pf_env) if
ff.endswith(para['ext'])]
dl_env = dict()
dl_env_hh = dict()
for co in ll_co:
ll_pff_read = [ff for ff in ll_env_content if
ff.endswith(para['ext']) and
(ff.startswith(co.upper()) or
ff.startswith(co.lower()))]
if len(ll_pff_read) < 1:
raise ParserError('Country data not complete for Extension '
'{} - missing {}.'.format(para['start'], co))
elif len(ll_pff_read) > 1:
raise ParserError('Multiple country data for Extension '
'{} - country {}.'.format(para['start'], co))
pff_read = ll_pff_read[0]
if pf_env.endswith('.zip'):
ff_excel = pd.ExcelFile(rf_zip.open(pff_read))
else:
ff_excel = pd.ExcelFile(os.path.join(pf_env, pff_read))
if str(year) in ff_excel.sheet_names:
df_env = ff_excel.parse(sheet_name=str(year),
index_col=None,
header=0
)
else:
warnings.warn('Extension {} does not include '
'data for the year {} - '
'Extension not included'.format(para['start'], year),
ParserWarning)
return None
if not df_env.index.is_numeric():
# upper case letter extensions gets parsed with multiindex, not
# quite sure why...
df_env.reset_index(inplace=True)
# unit can be taken from the first cell in the excel sheet
if df_env.columns[0] != 'level_0':
para['unit']['all'] = df_env.columns[0]
# two clean up cases - can be identified by lower/upper case extension
# description
if para['start'].islower():
pass
elif para['start'].isupper():
df_env = df_env.iloc[:, 1:]
else:
raise ParserError('Format of extension not given.')
df_env.dropna(axis=0, how='all', inplace=True)
df_env = df_env[df_env.iloc[:, 0] != 'total']
df_env = df_env[df_env.iloc[:, 0] != 'secTOT']
df_env = df_env[df_env.iloc[:, 0] != 'secQ']
df_env.iloc[:, 0].astype(str, inplace=True)
df_env.iloc[:, 0].replace(to_replace='sec',
value='',
regex=True,
inplace=True)
df_env.set_index([df_env.columns[0]], inplace=True)
df_env.index.names = ['sector']
df_env = df_env.T
ikc_hh = 'FC_HH'
dl_env_hh[co] = df_env[ikc_hh]
del df_env[ikc_hh]
dl_env[co] = df_env
df_F = pd.concat(dl_env, axis=1)[ll_co]
df_FY = pd.concat(dl_env_hh, axis=1)[ll_co]
df_F.fillna(0, inplace=True)
df_FY.fillna(0, inplace=True)
df_F.columns.names = IDX_NAMES['F_col']
df_F.index.names = IDX_NAMES['F_row_single']
df_FY.columns.names = IDX_NAMES['Y_col1']
df_FY.index.names = IDX_NAMES['F_row_single']
# build the unit df
df_unit = pd.DataFrame(index=df_F.index, columns=['unit'])
_ss_unit = para['unit'].get('all', 'undef')
for ikr in df_unit.index:
df_unit.ix[ikr, 'unit'] = para['unit'].get(ikr, _ss_unit)
df_unit.columns.names = ['unit']
df_unit.index.names = ['stressor']
if pf_env.endswith('.zip'):
rf_zip.close()
return {'F': df_F,
'FY': df_FY,
'unit': df_unit
} | Parses the wiod environmental extension
Extension can either be given as original .zip files or as extracted
data in a folder with the same name as the corresponding zip file (without
the extension).
This function is based on the structure of the extensions from _may12.
Note
----
The function deletes 'secQ' which is not present in the economic tables.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the
extension data folder or zip file.
year : str or int
Year to return for the extension = valid sheetname for the xls file.
ll_co : list like
List of countries in WIOD - used for finding and matching
extension data in the given folder.
para : dict
Defining the parameters for reading the extension.
Returns
-------
dict with keys
F : pd.DataFrame with index 'stressor' and columns 'region', 'sector'
FY : pd.DataFrame with index 'stressor' and column 'region'
This data is for household stressors - must be applied to the right
final demand column afterwards.
unit : pd.DataFrame with index 'stressor' and column 'unit' | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L1176-L1334 |
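For reference, a sketch of the para dict this helper expects, mirroring one entry of dl_envext_para from parse_wiod above; the commented call shows roughly how parse_wiod invokes it, with path, year and country list as placeholders:
air_para = {
    'name': 'Air Emission Accounts',    # extension name stored in the IOSystem
    'start': 'AIR_',                    # prefix identifying the extension folder or zip file
    'ext': '.xls',                      # file extension of the per-country workbooks
    'unit': {'CO2': 'Gg', 'CH4': 't'},  # per-stressor units, key 'all' serves as fallback
}
# __get_WIOD_env_extension(root_path='/tmp/mrios/WIOD2013', year=2009,
#                          ll_co=['AUS', 'AUT', 'BEL'], para=air_para)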
konstantinstadler/pymrio | pymrio/tools/ioparser.py | __get_WIOD_SEA_extension | def __get_WIOD_SEA_extension(root_path, year, data_sheet='DATA'):
""" Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in path or in a subfolder
named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO
"""
sea_ext = '.xlsx'
sea_start = 'WIOD_SEA'
_SEA_folder = os.path.join(root_path, 'SEA')
if not os.path.exists(_SEA_folder):
_SEA_folder = root_path
sea_folder_content = [ff for ff in os.listdir(_SEA_folder)
if os.path.splitext(ff)[-1] == sea_ext and
ff[:8] == sea_start]
if sea_folder_content:
# read data
sea_file = os.path.join(_SEA_folder, sorted(sea_folder_content)[0])
df_sea = pd.read_excel(sea_file,
sheet_name=data_sheet,
header=0,
index_col=[0, 1, 2, 3])
# fix years
ic_sea = df_sea.columns.tolist()
ic_sea = [yystr.lstrip('_') for yystr in ic_sea]
df_sea.columns = ic_sea
try:
ds_sea = df_sea[str(year)]
except KeyError:
warnings.warn(
'SEA extension does not include data for the '
'year {} - SEA-Extension not included'.format(year),
ParserWarning)
return None, None
# get useful data (employment)
mt_sea = ['EMP', 'EMPE', 'H_EMP', 'H_EMPE']
ds_use_sea = pd.concat(
[ds_sea.xs(key=vari, level='Variable', drop_level=False)
for vari in mt_sea])
ds_use_sea.drop(labels='TOT', level='Code', inplace=True)
ds_use_sea.reset_index('Description', drop=True, inplace=True)
# RoW not included in SEA but needed to get it consistent for
# all countries. Just add a dummy with 0 for all accounts.
if 'RoW' not in ds_use_sea.index.get_level_values('Country'):
ds_RoW = ds_use_sea.xs('USA',
level='Country', drop_level=False)
ds_RoW.ix[:] = 0
df_RoW = ds_RoW.reset_index()
df_RoW['Country'] = 'RoW'
ds_use_sea = pd.concat(
[ds_use_sea.reset_index(), df_RoW]).set_index(
['Country', 'Code', 'Variable'])
ds_use_sea.fillna(value=0, inplace=True)
df_use_sea = ds_use_sea.unstack(level=['Country', 'Code'])[str(year)]
df_use_sea.index.names = IDX_NAMES['VA_row_single']
df_use_sea.columns.names = IDX_NAMES['F_col']
df_use_sea = df_use_sea.astype('float')
df_unit = pd.DataFrame(
data=[ # this data must be in the same order as mt_sea
'thousand persons',
'thousand persons',
'mill hours',
'mill hours',
],
columns=['unit'],
index=df_use_sea.index)
return df_use_sea, df_unit
else:
warnings.warn(
'SEA extension raw data file not found - '
'SEA-Extension not included', ParserWarning)
return None, None | python | def __get_WIOD_SEA_extension(root_path, year, data_sheet='DATA'):
""" Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in path or in a subfolder
named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO
"""
sea_ext = '.xlsx'
sea_start = 'WIOD_SEA'
_SEA_folder = os.path.join(root_path, 'SEA')
if not os.path.exists(_SEA_folder):
_SEA_folder = root_path
sea_folder_content = [ff for ff in os.listdir(_SEA_folder)
if os.path.splitext(ff)[-1] == sea_ext and
ff[:8] == sea_start]
if sea_folder_content:
# read data
sea_file = os.path.join(_SEA_folder, sorted(sea_folder_content)[0])
df_sea = pd.read_excel(sea_file,
sheet_name=data_sheet,
header=0,
index_col=[0, 1, 2, 3])
# fix years
ic_sea = df_sea.columns.tolist()
ic_sea = [yystr.lstrip('_') for yystr in ic_sea]
df_sea.columns = ic_sea
try:
ds_sea = df_sea[str(year)]
except KeyError:
warnings.warn(
'SEA extension does not include data for the '
'year {} - SEA-Extension not included'.format(year),
ParserWarning)
return None, None
# get useful data (employment)
mt_sea = ['EMP', 'EMPE', 'H_EMP', 'H_EMPE']
ds_use_sea = pd.concat(
[ds_sea.xs(key=vari, level='Variable', drop_level=False)
for vari in mt_sea])
ds_use_sea.drop(labels='TOT', level='Code', inplace=True)
ds_use_sea.reset_index('Description', drop=True, inplace=True)
# RoW not included in SEA but needed to get it consistent for
# all countries. Just add a dummy with 0 for all accounts.
if 'RoW' not in ds_use_sea.index.get_level_values('Country'):
ds_RoW = ds_use_sea.xs('USA',
level='Country', drop_level=False)
ds_RoW.ix[:] = 0
df_RoW = ds_RoW.reset_index()
df_RoW['Country'] = 'RoW'
ds_use_sea = pd.concat(
[ds_use_sea.reset_index(), df_RoW]).set_index(
['Country', 'Code', 'Variable'])
ds_use_sea.fillna(value=0, inplace=True)
df_use_sea = ds_use_sea.unstack(level=['Country', 'Code'])[str(year)]
df_use_sea.index.names = IDX_NAMES['VA_row_single']
df_use_sea.columns.names = IDX_NAMES['F_col']
df_use_sea = df_use_sea.astype('float')
df_unit = pd.DataFrame(
data=[ # this data must be in the same order as mt_sea
'thousand persons',
'thousand persons',
'mill hours',
'mill hours',
],
columns=['unit'],
index=df_use_sea.index)
return df_use_sea, df_unit
else:
warnings.warn(
'SEA extension raw data file not found - '
'SEA-Extension not included', ParserWarning)
return None, None | Utility function to get the extension data from the SEA file in WIOD
This function is based on the structure in the WIOD_SEA_July14 file.
Missing values are set to zero.
The function works if the SEA file is either in path or in a subfolder
named 'SEA'.
Parameters
----------
root_path : string
Path to the WIOD data or the path with the SEA data.
year : str or int
Year to return for the extension
sea_data_sheet : string, optional
Worksheet with the SEA data in the excel file
Returns
-------
SEA data as extension for the WIOD MRIO | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L1337-L1434 |
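A sketch of how the return values of this private helper are consumed; it is normally called from parse_wiod, and the path and year below are placeholders:
from pymrio.tools import ioparser
# reach into the module since the helper name starts with a double underscore
get_sea = getattr(ioparser, '__get_WIOD_SEA_extension')
F_sea, F_sea_unit = get_sea(root_path='/tmp/mrios/WIOD2013', year=2009)
if F_sea is None:
    print('no WIOD_SEA file found - SEA extension skipped')
else:
    print(F_sea.shape)   # rows EMP, EMPE, H_EMP, H_EMPE; columns (region, sector)
    print(F_sea_unit)    # 'thousand persons' and 'mill hours' per row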
konstantinstadler/pymrio | pymrio/tools/ioparser.py | parse_eora26 | def parse_eora26(path, year=None, price='bp', country_names='eora'):
""" Parse the Eora26 database
Note
----
This parser deletes the statistical discrepancy columns from
the parsed Eora system (reports the amount of loss in the
meta records).
Eora does not provide any information on the unit of the
monetary values. Based on personal communication the unit
is set to Mill USD manually.
Parameters
----------
path : string or pathlib.Path
Path to the eora raw storage folder or a specific eora zip file to
parse. There are several options to specify the data for parsing:
1) Pass the name of eora zip file. In this case the parameters 'year'
and 'price' will not be used
2) Pass a folder which either contains eora zip files or unpacked eora
data. In that case, a year must be given
3) Pass a folder which contains subfolders in the format 'YYYY', e.g.
'1998'. This subfolder can either contain an Eora zip file or an
unpacked Eora system
year : int or str
4 digit year spec. This will not be used if a zip file
is specified in 'path'
price : str, optional
'bp' or 'pp'
country_names: str, optional
Which country names to use:
'eora' = Eora flavoured ISO 3 variant
'full' = Full country names as provided by Eora
Passing the first letter suffices.
"""
path = os.path.abspath(os.path.normpath(str(path)))
if country_names[0].lower() == 'e':
country_names = 'eora'
elif country_names[0].lower() == 'f':
country_names = 'full'
else:
raise ParserError('Parameter country_names must be eora or full')
row_name = 'ROW'
eora_zip_ext = '.zip'
is_zip = False
# determine which eora file to be parsed
if os.path.splitext(path)[1] == eora_zip_ext:
# case direct pass of eora zipfile
year = re.search(r'\d\d\d\d',
os.path.basename(path)).group(0)
price = re.search(r'bp|pp',
os.path.basename(path)).group(0)
eora_loc = path
root_path = os.path.split(path)[0]
is_zip = True
else:
root_path = path
if str(year) in os.listdir(path):
path = os.path.join(path, str(year))
eora_file_list = [fl for fl in os.listdir(path)
if os.path.splitext(fl)[1] == eora_zip_ext and
str(year) in fl and
str(price) in fl
]
if len(eora_file_list) > 1:
raise ParserError('Multiple files for a given year '
'found (specify a specific file in parameters)')
elif len(eora_file_list) == 1:
eora_loc = os.path.join(path, eora_file_list[0])
is_zip = True
else:
# Just a path was given, no zip file found,
# continue with only the path information - assumed an
# unpacked zip file
eora_loc = path
is_zip = False
meta_rec = MRIOMetaData(location=root_path)
# Eora file specs
eora_sep = '\t'
ZY_col = namedtuple('ZY', 'full eora system name')(0, 1, 2, 3)
eora_files = {
'Z': 'Eora26_{year}_{price}_T.txt'.format(
year=str(year), price=price),
'Q': 'Eora26_{year}_{price}_Q.txt'.format(
year=str(year), price=price),
'QY': 'Eora26_{year}_{price}_QY.txt'.format(
year=str(year), price=price),
'VA': 'Eora26_{year}_{price}_VA.txt'.format(
year=str(year), price=price),
'Y': 'Eora26_{year}_{price}_FD.txt'.format(
year=str(year), price=price),
'labels_Z': 'labels_T.txt',
'labels_Y': 'labels_FD.txt',
'labels_Q': 'labels_Q.txt',
'labels_VA': 'labels_VA.txt',
}
header = namedtuple('header', 'index columns index_names, column_names')
eora_header_spec = {
'Z': header(index='labels_Z',
columns='labels_Z',
index_names=IDX_NAMES['Z_row'],
column_names=IDX_NAMES['Z_col'],
),
'Q': header(index='labels_Q',
columns='labels_Z',
index_names=IDX_NAMES['F_row_src'],
column_names=IDX_NAMES['F_col']),
'QY': header(index='labels_Q',
columns='labels_Y',
index_names=IDX_NAMES['F_row_src'],
column_names=IDX_NAMES['Y_col2'],
),
'VA': header(index='labels_VA',
columns='labels_Z',
index_names=IDX_NAMES['VA_row_unit_cat'],
column_names=IDX_NAMES['F_col']
),
'Y': header(index='labels_Z',
columns='labels_Y',
index_names=IDX_NAMES['Y_row'],
column_names=IDX_NAMES['Y_col2'],
),
}
if is_zip:
zip_file = zipfile.ZipFile(eora_loc)
eora_data = {
key: pd.read_table(
zip_file.open(filename),
sep=eora_sep,
header=None
) for
key, filename in eora_files.items()}
zip_file.close()
else:
eora_data = {
key: pd.read_table(
os.path.join(eora_loc, filename),
sep=eora_sep,
header=None
) for
key, filename in eora_files.items()}
meta_rec._add_fileio(
'Eora26 for {year}-{price} data parsed from {loc}'.format(
year=year, price=price, loc=eora_loc))
eora_data['labels_Z'] = eora_data[
'labels_Z'].loc[:, [getattr(ZY_col, country_names), ZY_col.name]]
eora_data['labels_Y'] = eora_data[
'labels_Y'].loc[:, [getattr(ZY_col, country_names), ZY_col.name]]
eora_data['labels_VA'] = eora_data[
'labels_VA'].iloc[:, :len(eora_header_spec['VA'].column_names)]
labQ = eora_data[
'labels_Q'].iloc[:, :len(eora_header_spec['Q'].column_names)]
labQ.columns = IDX_NAMES['F_row_src']
Q_unit = labQ['stressor'].str.extract(r'\((.*)\)', expand=False)
Q_unit.columns = IDX_NAMES['unit']
labQ['stressor'] = labQ['stressor'].str.replace(r'\s\((.*)\)', '')
eora_data['labels_Q'] = labQ
for key in eora_header_spec.keys():
eora_data[key].columns = (
eora_data[eora_header_spec[key].columns].set_index(list(
eora_data[eora_header_spec[key].columns])).index)
eora_data[key].columns.names = eora_header_spec[key].column_names
eora_data[key].index = (
eora_data[eora_header_spec[key].index].set_index(list(
eora_data[eora_header_spec[key].index])).index)
eora_data[key].index.names = eora_header_spec[key].index_names
try:
meta_rec._add_modify(
'Remove Rest of the World ({name}) '
'row from {table} - losing {amount}'.format(
name=row_name,
table=key,
amount=eora_data[key].loc[:, row_name].sum().values[0]))
eora_data[key].drop(row_name, axis=1, inplace=True)
except KeyError:
pass
try:
meta_rec._add_modify(
'Remove Rest of the World ({name}) column '
'from {table} - losing {amount}'.format(
name=row_name,
table=key,
amount=eora_data[key].loc[row_name, :].sum().values[0]))
eora_data[key].drop(row_name, axis=0, inplace=True)
except KeyError:
pass
Q_unit.index = eora_data['Q'].index
meta_rec.note('Set Eora monetary units to Mill USD manually')
Z_unit = pd.DataFrame(data=['Mill USD'] * len(eora_data['Z'].index),
index=eora_data['Z'].index,
columns=['unit'])
VA_unit = pd.DataFrame(data=['Mill USD'] * len(eora_data['VA'].index),
index=eora_data['VA'].index,
columns=['unit'])
eora = IOSystem(
Z=eora_data['Z'],
Y=eora_data['Y'],
unit=Z_unit,
Q={
'name': 'Q',
'unit': Q_unit,
'F': eora_data['Q'],
'FY': eora_data['QY']
},
VA={
'name': 'VA',
'F': eora_data['VA'],
'unit': VA_unit,
},
meta=meta_rec)
return eora | python | def parse_eora26(path, year=None, price='bp', country_names='eora'):
""" Parse the Eora26 database
Note
----
This parser deletes the statistical discrepancy columns from
the parsed Eora system (reports the amount of loss in the
meta records).
Eora does not provide any information on the unit of the
monetary values. Based on personal communication the unit
is set to Mill USD manually.
Parameters
----------
path : string or pathlib.Path
Path to the eora raw storage folder or a specific eora zip file to
parse. There are several options to specify the data for parsing:
1) Pass the name of eora zip file. In this case the parameters 'year'
and 'price' will not be used
2) Pass a folder which either contains eora zip files or unpacked eora
data. In that case, a year must be given
3) Pass a folder which contains subfolders in the format 'YYYY', e.g.
'1998'. This subfolder can either contain an Eora zip file or an
unpacked Eora system
year : int or str
4 digit year spec. This will not be used if a zip file
is specified in 'path'
price : str, optional
'bp' or 'pp'
country_names: str, optional
Which country names to use:
'eora' = Eora flavoured ISO 3 variant
'full' = Full country names as provided by Eora
Passing the first letter suffices.
"""
path = os.path.abspath(os.path.normpath(str(path)))
if country_names[0].lower() == 'e':
country_names = 'eora'
elif country_names[0].lower() == 'f':
country_names = 'full'
else:
raise ParserError('Parameter country_names must be eora or full')
row_name = 'ROW'
eora_zip_ext = '.zip'
is_zip = False
# determine which eora file to be parsed
if os.path.splitext(path)[1] == eora_zip_ext:
# case direct pass of eora zipfile
year = re.search(r'\d\d\d\d',
os.path.basename(path)).group(0)
price = re.search(r'bp|pp',
os.path.basename(path)).group(0)
eora_loc = path
root_path = os.path.split(path)[0]
is_zip = True
else:
root_path = path
if str(year) in os.listdir(path):
path = os.path.join(path, str(year))
eora_file_list = [fl for fl in os.listdir(path)
if os.path.splitext(fl)[1] == eora_zip_ext and
str(year) in fl and
str(price) in fl
]
if len(eora_file_list) > 1:
raise ParserError('Multiple files for a given year '
'found (specify a specific file in parameters)')
elif len(eora_file_list) == 1:
eora_loc = os.path.join(path, eora_file_list[0])
is_zip = True
else:
# Just a path was given, no zip file found,
# continue with only the path information - assumed an
# unpacked zip file
eora_loc = path
is_zip = False
meta_rec = MRIOMetaData(location=root_path)
# Eora file specs
eora_sep = '\t'
ZY_col = namedtuple('ZY', 'full eora system name')(0, 1, 2, 3)
eora_files = {
'Z': 'Eora26_{year}_{price}_T.txt'.format(
year=str(year), price=price),
'Q': 'Eora26_{year}_{price}_Q.txt'.format(
year=str(year), price=price),
'QY': 'Eora26_{year}_{price}_QY.txt'.format(
year=str(year), price=price),
'VA': 'Eora26_{year}_{price}_VA.txt'.format(
year=str(year), price=price),
'Y': 'Eora26_{year}_{price}_FD.txt'.format(
year=str(year), price=price),
'labels_Z': 'labels_T.txt',
'labels_Y': 'labels_FD.txt',
'labels_Q': 'labels_Q.txt',
'labels_VA': 'labels_VA.txt',
}
header = namedtuple('header', 'index columns index_names, column_names')
eora_header_spec = {
'Z': header(index='labels_Z',
columns='labels_Z',
index_names=IDX_NAMES['Z_row'],
column_names=IDX_NAMES['Z_col'],
),
'Q': header(index='labels_Q',
columns='labels_Z',
index_names=IDX_NAMES['F_row_src'],
column_names=IDX_NAMES['F_col']),
'QY': header(index='labels_Q',
columns='labels_Y',
index_names=IDX_NAMES['F_row_src'],
column_names=IDX_NAMES['Y_col2'],
),
'VA': header(index='labels_VA',
columns='labels_Z',
index_names=IDX_NAMES['VA_row_unit_cat'],
column_names=IDX_NAMES['F_col']
),
'Y': header(index='labels_Z',
columns='labels_Y',
index_names=IDX_NAMES['Y_row'],
column_names=IDX_NAMES['Y_col2'],
),
}
if is_zip:
zip_file = zipfile.ZipFile(eora_loc)
eora_data = {
key: pd.read_table(
zip_file.open(filename),
sep=eora_sep,
header=None
) for
key, filename in eora_files.items()}
zip_file.close()
else:
eora_data = {
key: pd.read_table(
os.path.join(eora_loc, filename),
sep=eora_sep,
header=None
) for
key, filename in eora_files.items()}
meta_rec._add_fileio(
'Eora26 for {year}-{price} data parsed from {loc}'.format(
year=year, price=price, loc=eora_loc))
eora_data['labels_Z'] = eora_data[
'labels_Z'].loc[:, [getattr(ZY_col, country_names), ZY_col.name]]
eora_data['labels_Y'] = eora_data[
'labels_Y'].loc[:, [getattr(ZY_col, country_names), ZY_col.name]]
eora_data['labels_VA'] = eora_data[
'labels_VA'].iloc[:, :len(eora_header_spec['VA'].column_names)]
labQ = eora_data[
'labels_Q'].iloc[:, :len(eora_header_spec['Q'].column_names)]
labQ.columns = IDX_NAMES['F_row_src']
Q_unit = labQ['stressor'].str.extract(r'\((.*)\)', expand=False)
Q_unit.columns = IDX_NAMES['unit']
labQ['stressor'] = labQ['stressor'].str.replace(r'\s\((.*)\)', '')
eora_data['labels_Q'] = labQ
for key in eora_header_spec.keys():
eora_data[key].columns = (
eora_data[eora_header_spec[key].columns].set_index(list(
eora_data[eora_header_spec[key].columns])).index)
eora_data[key].columns.names = eora_header_spec[key].column_names
eora_data[key].index = (
eora_data[eora_header_spec[key].index].set_index(list(
eora_data[eora_header_spec[key].index])).index)
eora_data[key].index.names = eora_header_spec[key].index_names
try:
meta_rec._add_modify(
'Remove Rest of the World ({name}) '
'row from {table} - losing {amount}'.format(
name=row_name,
table=key,
amount=eora_data[key].loc[:, row_name].sum().values[0]))
eora_data[key].drop(row_name, axis=1, inplace=True)
except KeyError:
pass
try:
meta_rec._add_modify(
'Remove Rest of the World ({name}) column '
'from {table} - losing {amount}'.format(
name=row_name,
table=key,
amount=eora_data[key].loc[row_name, :].sum().values[0]))
eora_data[key].drop(row_name, axis=0, inplace=True)
except KeyError:
pass
Q_unit.index = eora_data['Q'].index
meta_rec.note('Set Eora monetary units to Mill USD manually')
Z_unit = pd.DataFrame(data=['Mill USD'] * len(eora_data['Z'].index),
index=eora_data['Z'].index,
columns=['unit'])
VA_unit = pd.DataFrame(data=['Mill USD'] * len(eora_data['VA'].index),
index=eora_data['VA'].index,
columns=['unit'])
eora = IOSystem(
Z=eora_data['Z'],
Y=eora_data['Y'],
unit=Z_unit,
Q={
'name': 'Q',
'unit': Q_unit,
'F': eora_data['Q'],
'FY': eora_data['QY']
},
VA={
'name': 'VA',
'F': eora_data['VA'],
'unit': VA_unit,
},
meta=meta_rec)
return eora | Parse the Eora26 database
Note
----
This parser deletes the statistical discrepancy columns from
the parsed Eora system (reports the amount of loss in the
meta records).
Eora does not provide any information on the unit of the
monetary values. Based on personal communication the unit
is set to Mill USD manually.
Parameters
----------
path : string or pathlib.Path
Path to the eora raw storage folder or a specific eora zip file to
parse. There are several options to specify the data for parsing:
1) Pass the name of eora zip file. In this case the parameters 'year'
and 'price' will not be used
2) Pass a folder which either contains eora zip files or unpacked eora
data. In that case, a year must be given
3) Pass a folder which contains subfolders in the format 'YYYY', e.g.
'1998'. This subfolder can either contain an Eora zip file or an
unpacked Eora system
year : int or str
4 digit year spec. This will not be used if a zip file
is specified in 'path'
price : str, optional
'bp' or 'pp'
country_names: str, optional
Which country names to use:
'eora' = Eora flavoured ISO 3 variant
'full' = Full country names as provided by Eora
Passing the first letter suffices. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioparser.py#L1437-L1677 |
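A minimal usage sketch for the parser above (not part of the repository record); the storage path and year are placeholder assumptions pointing at a local Eora26 download:
import pymrio
eora_storage = '/tmp/mrios/eora26'   # assumed folder containing the Eora26 zip archives
eora = pymrio.parse_eora26(path=eora_storage, year=1998, price='bp', country_names='eora')
eora.calc_all()
for ext in eora.get_extensions(data=True):
    print(ext.name, ext.D_cba.shape)   # footprint accounts (D_cba) computed by calc_all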