repository_name (stringlengths 5–67) | func_path_in_repository (stringlengths 4–234) | func_name (stringlengths 0–314) | whole_func_string (stringlengths 52–3.87M) | language (stringclasses 6 values) | func_code_string (stringlengths 52–3.87M) | func_documentation_string (stringlengths 1–47.2k) | func_code_url (stringlengths 85–339) |
---|---|---|---|---|---|---|---|
konstantinstadler/pymrio | pymrio/tools/iometadata.py | MRIOMetaData._add_history | def _add_history(self, entry_type, entry):
""" Generic method to add entry as entry_type to the history """
meta_string = "{time} - {etype} - {entry}".format(
time=self._time(),
etype=entry_type.upper(),
entry=entry)
self._content['history'].insert(0, meta_string)
self.logger(meta_string) | python | def _add_history(self, entry_type, entry):
""" Generic method to add entry as entry_type to the history """
meta_string = "{time} - {etype} - {entry}".format(
time=self._time(),
etype=entry_type.upper(),
entry=entry)
self._content['history'].insert(0, meta_string)
self.logger(meta_string) | Generic method to add entry as entry_type to the history | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iometadata.py#L184-L192 |
konstantinstadler/pymrio | pymrio/tools/iometadata.py | MRIOMetaData.change_meta | def change_meta(self, para, new_value, log=True):
""" Changes the meta data
This function does nothing if None is passed as new_value.
To set a certain value to None pass the str 'None'
Parameters
----------
para: str
Meta data entry to change
new_value: str
New value
log: boolean, optional
If True (default) records the meta data change
in the history
"""
if not new_value:
return
para = para.lower()
if para == 'history':
raise ValueError(
'History can only be extended - use method "note"')
old_value = self._content.get(para, None)
if new_value == old_value:
return
self._content[para] = new_value
if old_value and log:
self._add_history(entry_type="METADATA_CHANGE",
entry='Changed parameter "{para}" '
'from "{old}" to "{new}"'.format(
para=para,
old=old_value,
new=new_value)) | python | def change_meta(self, para, new_value, log=True):
""" Changes the meta data
This function does nothing if None is passed as new_value.
To set a certain value to None pass the str 'None'
Parameters
----------
para: str
Meta data entry to change
new_value: str
New value
log: boolean, optional
If True (default) records the meta data change
in the history
"""
if not new_value:
return
para = para.lower()
if para == 'history':
raise ValueError(
'History can only be extended - use method "note"')
old_value = self._content.get(para, None)
if new_value == old_value:
return
self._content[para] = new_value
if old_value and log:
self._add_history(entry_type="METADATA_CHANGE",
entry='Changed parameter "{para}" '
'from "{old}" to "{new}"'.format(
para=para,
old=old_value,
new=new_value)) | Changes the meta data
This function does nothing if None is passed as new_value.
To set a certain value to None pass the str 'None'
Parameters
----------
para: str
Meta data entry to change
new_value: str
New value
log: boolean, optional
If True (default) records the meta data change
in the history | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iometadata.py#L238-L273 |
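A minimal usage sketch of `change_meta` (hypothetical folder, name and version strings; the `MRIOMetaData` constructor keywords are the ones that also appear in the `download_wiod2013` row further down):

```python
# Hedged sketch - the folder and the metadata values are invented for illustration.
import os
from pymrio.tools.iometadata import MRIOMetaData

os.makedirs('/tmp/pymrio_example', exist_ok=True)
meta = MRIOMetaData(location='/tmp/pymrio_example',
                    description='example metadata',
                    name='example', system='ixi', version='v1')

meta.change_meta('version', 'v2')     # recorded in the history as a METADATA_CHANGE entry
meta.change_meta('version', None)     # passing None does nothing
meta.change_meta('version', 'None')   # pass the string 'None' to blank the field
```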
konstantinstadler/pymrio | pymrio/tools/iometadata.py | MRIOMetaData._read_content | def _read_content(self):
""" Reads metadata from location (and path_in_arc if archive)
This function is called during the init process and
should not be used in isolation: it overwrites
unsaved metadata.
"""
if self._path_in_arc:
with zipfile.ZipFile(file=str(self._metadata_file)) as zf:
self._content = json.loads(
zf.read(self._path_in_arc).decode('utf-8'),
object_pairs_hook=OrderedDict)
else:
with self._metadata_file.open('r') as mdf:
self._content = json.load(mdf,
object_pairs_hook=OrderedDict) | python | def _read_content(self):
""" Reads metadata from location (and path_in_arc if archive)
This function is called during the init process and
should not be used in isolation: it overwrites
unsaved metadata.
"""
if self._path_in_arc:
with zipfile.ZipFile(file=str(self._metadata_file)) as zf:
self._content = json.loads(
zf.read(self._path_in_arc).decode('utf-8'),
object_pairs_hook=OrderedDict)
else:
with self._metadata_file.open('r') as mdf:
self._content = json.load(mdf,
object_pairs_hook=OrderedDict) | Reads metadata from location (and path_in_arc if archive)
This function is called during the init process and
should not be used in isolation: it overwrites
unsaved metadata. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iometadata.py#L278-L293 |
konstantinstadler/pymrio | pymrio/tools/iometadata.py | MRIOMetaData.save | def save(self, location=None):
""" Saves the current status of the metadata
This saves the metadata at the location of the previously loaded
metadata or at the file/path given in location.
Specify a location if the metadata should be stored in a different
location or was never stored before. Subsequent saves will use the
location set here.
Parameters
----------
location: str, optional
Path or file for saving the metadata.
This can be the full file path or just the storage folder.
In the latter case, the filename defined in
DEFAULT_FILE_NAMES['metadata'] (currently 'metadata.json') is
assumed.
"""
if location:
location = Path(location)
if os.path.splitext(str(location))[1] == '':
self._metadata_file = location / DEFAULT_FILE_NAMES['metadata']
else:
self._metadata_file = location
if self._metadata_file:
with self._metadata_file.open(mode='w') as mdf:
json.dump(self._content, mdf, indent=4)
else:
logging.error("No metadata file given for storing the file") | python | def save(self, location=None):
""" Saves the current status of the metadata
This saves the metadata at the location of the previously loaded
metadata or at the file/path given in location.
Specify a location if the metadata should be stored in a different
location or was never stored before. Subsequent saves will use the
location set here.
Parameters
----------
location: str, optional
Path or file for saving the metadata.
This can be the full file path or just the storage folder.
In the latter case, the filename defined in
DEFAULT_FILE_NAMES['metadata'] (currently 'metadata.json') is
assumed.
"""
if location:
location = Path(location)
if os.path.splitext(str(location))[1] == '':
self._metadata_file = location / DEFAULT_FILE_NAMES['metadata']
else:
self._metadata_file = location
if self._metadata_file:
with self._metadata_file.open(mode='w') as mdf:
json.dump(self._content, mdf, indent=4)
else:
logging.error("No metadata file given for storing the file") | Saves the current status of the metadata
This saves the metadata at the location of the previously loaded
metadata or at the file/path given in location.
Specify a location if the metadata should be stored in a different
location or was never stored before. Subsequent saves will use the
location set here.
Parameters
----------
location: str, optional
Path or file for saving the metadata.
This can be the full file path or just the storage folder.
In the latter case, the filename defined in
DEFAULT_FILE_NAMES['metadata'] (currently 'metadata.json') is
assumed. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iometadata.py#L295-L325 |
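Continuing the hedged metadata sketch from above, `save` accepts either a folder (the default file name `metadata.json` is then appended) or a full file path:

```python
# Paths are hypothetical; `meta` is the MRIOMetaData object from the earlier sketch.
meta.save()                                             # write back to the original location
meta.save(location='/tmp/pymrio_example')               # folder -> /tmp/pymrio_example/metadata.json
meta.save(location='/tmp/pymrio_example/my_meta.json')  # explicit file name
```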
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_x | def calc_x(Z, Y):
""" Calculate the industry output x from the Z and Y matrix
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
Y : pandas.DataFrame or numpy.array
final demand with categories (1.order) for each country (2.order)
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of Z. If DataFrame index as Z
"""
x = np.reshape(np.sum(np.hstack((Z, Y)), 1), (-1, 1))
if type(Z) is pd.DataFrame:
x = pd.DataFrame(x, index=Z.index, columns=['indout'])
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x | python | def calc_x(Z, Y):
""" Calculate the industry output x from the Z and Y matrix
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
Y : pandas.DataFrame or numpy.array
final demand with categories (1.order) for each country (2.order)
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of Z. If DataFrame index as Z
"""
x = np.reshape(np.sum(np.hstack((Z, Y)), 1), (-1, 1))
if type(Z) is pd.DataFrame:
x = pd.DataFrame(x, index=Z.index, columns=['indout'])
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x | Calculate the industry output x from the Z and Y matrix
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
Y : pandas.DataFrame or numpy.array
final demand with categories (1.order) for each country (2.order)
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of Z. If DataFrame index as Z | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L18-L42 |
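A toy example (made-up 2-sector numbers) of `calc_x`: industry output is the row sum of the flow matrix Z plus final demand Y.

```python
import pandas as pd
from pymrio.tools.iomath import calc_x

# Hypothetical 2-sector flow matrix and one aggregated final demand column
Z = pd.DataFrame([[10., 20.], [30., 40.]],
                 index=['sec1', 'sec2'], columns=['sec1', 'sec2'])
Y = pd.DataFrame([[70.], [30.]], index=Z.index, columns=['final demand'])

x = calc_x(Z, Y)
print(x)   # DataFrame with column 'indout': sec1 100.0, sec2 100.0
```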
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_x_from_L | def calc_x_from_L(L, y):
""" Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of L. If DataFrame index as L
"""
x = L.dot(y)
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x | python | def calc_x_from_L(L, y):
""" Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of L. If DataFrame index as L
"""
x = L.dot(y)
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x | Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
The type is determined by the type of L. If DataFrame index as L | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L45-L67 |
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_Z | def calc_Z(A, x):
""" calculate the Z matrix (flows) from A and x
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (flows) Z
The type is determined by the type of A.
If DataFrame index/columns as A
"""
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
x = x.reshape((1, -1)) # use numpy broadcasting - much faster
# (but has to ensure that x is a row vector)
# old mathematical form:
# return A.dot(np.diagflat(x))
if type(A) is pd.DataFrame:
return pd.DataFrame(A.values * x, index=A.index, columns=A.columns)
else:
return A*x | python | def calc_Z(A, x):
""" calculate the Z matrix (flows) from A and x
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (flows) Z
The type is determined by the type of A.
If DataFrame index/columns as A
"""
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
x = x.reshape((1, -1)) # use numpy broadcasting - much faster
# (but has to ensure that x is a row vector)
# old mathematical form:
# return A.dot(np.diagflat(x))
if type(A) is pd.DataFrame:
return pd.DataFrame(A.values * x, index=A.index, columns=A.columns)
else:
return A*x | calculate the Z matrix (flows) from A and x
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (flows) Z
The type is determined by the type of A.
If DataFrame index/columns as A | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L70-L97 |
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_A | def calc_A(Z, x):
""" Calculate the A matrix (coefficients) from Z and x
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (coefficients) A
The type is determined by the type of Z.
If DataFrame index/columns as Z
"""
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
if (type(x) is not np.ndarray) and (x == 0):
recix = 0
else:
with warnings.catch_warnings():
# catch the divide by zero warning
# we deal with that by setting to 0 afterwards
warnings.simplefilter('ignore')
recix = 1/x
recix[recix == np.inf] = 0
recix = recix.reshape((1, -1))
# use numpy broadcasting - factor ten faster
# Mathematical form - slow
# return Z.dot(np.diagflat(recix))
if type(Z) is pd.DataFrame:
return pd.DataFrame(Z.values * recix, index=Z.index, columns=Z.columns)
else:
return Z*recix | python | def calc_A(Z, x):
""" Calculate the A matrix (coefficients) from Z and x
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (coefficients) A
The type is determined by the type of Z.
If DataFrame index/columns as Z
"""
if (type(x) is pd.DataFrame) or (type(x) is pd.Series):
x = x.values
if (type(x) is not np.ndarray) and (x == 0):
recix = 0
else:
with warnings.catch_warnings():
# catch the divide by zero warning
# we deal with that by setting to 0 afterwards
warnings.simplefilter('ignore')
recix = 1/x
recix[recix == np.inf] = 0
recix = recix.reshape((1, -1))
# use numpy broadcasting - factor ten faster
# Mathematical form - slow
# return Z.dot(np.diagflat(recix))
if type(Z) is pd.DataFrame:
return pd.DataFrame(Z.values * recix, index=Z.index, columns=Z.columns)
else:
return Z*recix | Calculate the A matrix (coefficients) from Z and x
Parameters
----------
Z : pandas.DataFrame or numpy.array
Symmetric input output table (flows)
x : pandas.DataFrame or numpy.array
Industry output column vector
Returns
-------
pandas.DataFrame or numpy.array
Symmetric input output table (coefficients) A
The type is determined by the type of Z.
If DataFrame index/columns as Z | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L100-L136 |
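A quick round trip with the same toy numbers shows that `calc_A` and `calc_Z` invert each other for a given output vector x:

```python
import numpy as np
from pymrio.tools.iomath import calc_A, calc_Z

Z = np.array([[10., 20.], [30., 40.]])   # hypothetical flows
x = np.array([[100.], [100.]])           # hypothetical industry output

A = calc_A(Z, x)                         # column j of Z divided by x_j
print(A)                                 # [[0.1 0.2] [0.3 0.4]]
print(np.allclose(calc_Z(A, x), Z))      # True: flows recovered from coefficients
```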
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_L | def calc_L(A):
""" Calculate the Leontief L from A
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
Returns
-------
pandas.DataFrame or numpy.array
Leontief input output table L
The type is determined by the type of A.
If DataFrame index/columns as A
"""
I = np.eye(A.shape[0]) # noqa
if type(A) is pd.DataFrame:
return pd.DataFrame(np.linalg.inv(I-A),
index=A.index, columns=A.columns)
else:
return np.linalg.inv(I-A) | python | def calc_L(A):
""" Calculate the Leontief L from A
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
Returns
-------
pandas.DataFrame or numpy.array
Leontief input output table L
The type is determined by the type of A.
If DataFrame index/columns as A
"""
I = np.eye(A.shape[0]) # noqa
if type(A) is pd.DataFrame:
return pd.DataFrame(np.linalg.inv(I-A),
index=A.index, columns=A.columns)
else:
return np.linalg.inv(I-A) | Calculate the Leontief L from A
Parameters
----------
A : pandas.DataFrame or numpy.array
Symmetric input output table (coefficients)
Returns
-------
pandas.DataFrame or numpy.array
Leontief input output table L
The type is determined by the type of A.
If DataFrame index/columns as A | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L139-L160 |
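Continuing the toy numbers, `calc_L` yields the Leontief inverse and `calc_x_from_L` reproduces the industry output from total final demand:

```python
import numpy as np
from pymrio.tools.iomath import calc_A, calc_L, calc_x_from_L

Z = np.array([[10., 20.], [30., 40.]])   # hypothetical flows
x = np.array([[100.], [100.]])           # hypothetical industry output
y = np.array([[70.], [30.]])             # total final demand per sector

L = calc_L(calc_A(Z, x))                 # (I - A)^-1
print(np.allclose(calc_x_from_L(L, y), x))   # True
```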
konstantinstadler/pymrio | pymrio/tools/iomath.py | recalc_M | def recalc_M(S, D_cba, Y, nr_sectors):
""" Calculate Multipliers based on footprints.
Parameters
----------
D_cba : pandas.DataFrame or numpy array
Footprint per sector and country
Y : pandas.DataFrame or numpy array
Final demand: aggregated across categories or just one category, one
column per country. This will be diagonalized per country block.
The diagonalized form must be invertible for this method to work.
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
pandas.DataFrame or numpy.array
Multipliers M
The type is determined by the type of D_cba.
If DataFrame index/columns as D_cba
"""
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
Y_inv = np.linalg.inv(Y_diag)
M = D_cba.dot(Y_inv)
if type(D_cba) is pd.DataFrame:
M.columns = D_cba.columns
M.index = D_cba.index
return M | python | def recalc_M(S, D_cba, Y, nr_sectors):
""" Calculate Multipliers based on footprints.
Parameters
----------
D_cba : pandas.DataFrame or numpy array
Footprint per sector and country
Y : pandas.DataFrame or numpy array
Final demand: aggregated across categories or just one category, one
column per country. This will be diagonalized per country block.
The diagonalized form must be invertible for this method to work.
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
pandas.DataFrame or numpy.array
Multipliers M
The type is determined by the type of D_cba.
If DataFrame index/columns as D_cba
"""
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
Y_inv = np.linalg.inv(Y_diag)
M = D_cba.dot(Y_inv)
if type(D_cba) is pd.DataFrame:
M.columns = D_cba.columns
M.index = D_cba.index
return M | Calculate Multipliers based on footprints.
Parameters
----------
D_cba : pandas.DataFrame or numpy array
Footprint per sector and country
Y : pandas.DataFrame or numpy array
Final demand: aggregated across categories or just one category, one
column per country. This will be diagonalized per country block.
The diagonalized form must be invertible for this method to work.
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
pandas.DataFrame or numpy.array
Multipliers M
The type is determined by the type of D_cba.
If DataFrame index/columns as D_cba | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L291-L323 |
konstantinstadler/pymrio | pymrio/tools/iomath.py | calc_accounts | def calc_accounts(S, L, Y, nr_sectors):
""" Calculate sector specific cba and pba based accounts, imp and exp accounts
The total industry output x for the calculation
is recalculated from L and y
Parameters
----------
L : pandas.DataFrame
Leontief input output table L
S : pandas.DataFrame
Direct impact coefficients
Y : pandas.DataFrame
Final demand: aggregated across categories or just one category, one
column per country
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
Tuple
(D_cba, D_pba, D_imp, D_exp)
Format: D_row x L_col (=nr_countries*nr_sectors)
- D_cba Footprint per sector and country
- D_pba Total factor use per sector and country
- D_imp Total global factor use to satisfy total final demand in
the country per sector
- D_exp Total factor use in one country to satisfy final demand
in all other countries (per sector)
"""
# diagonalize each sector block per country
# this results in a disaggregated y with final demand per country per
# sector in one column
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
x_diag = L.dot(Y_diag)
x_tot = x_diag.values.sum(1)
del Y_diag
D_cba = pd.DataFrame(S.values.dot(x_diag),
index=S.index,
columns=S.columns)
# D_pba = S.dot(np.diagflat(x_tot))
# faster broadcasted calculation:
D_pba = pd.DataFrame(S.values*x_tot.reshape((1, -1)),
index=S.index,
columns=S.columns)
# for the traded accounts set the domestic industry output to zero
dom_block = np.zeros((nr_sectors, nr_sectors))
x_trade = ioutil.set_block(x_diag.values, dom_block)
D_imp = pd.DataFrame(S.values.dot(x_trade),
index=S.index,
columns=S.columns)
x_exp = x_trade.sum(1)
# D_exp = S.dot(np.diagflat(x_exp))
# faster broadcasted version:
D_exp = pd.DataFrame(S.values * x_exp.reshape((1, -1)),
index=S.index,
columns=S.columns)
return (D_cba, D_pba, D_imp, D_exp) | python | def calc_accounts(S, L, Y, nr_sectors):
""" Calculate sector specific cba and pba based accounts, imp and exp accounts
The total industry output x for the calculation
is recalculated from L and y
Parameters
----------
L : pandas.DataFrame
Leontief input output table L
S : pandas.DataFrame
Direct impact coefficients
Y : pandas.DataFrame
Final demand: aggregated across categories or just one category, one
column per country
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
Tuple
(D_cba, D_pba, D_imp, D_exp)
Format: D_row x L_col (=nr_countries*nr_sectors)
- D_cba Footprint per sector and country
- D_pba Total factor use per sector and country
- D_imp Total global factor use to satisfy total final demand in
the country per sector
- D_exp Total factor use in one country to satisfy final demand
in all other countries (per sector)
"""
# diagonalize each sector block per country
# this results in a disaggregated y with final demand per country per
# sector in one column
Y_diag = ioutil.diagonalize_blocks(Y.values, blocksize=nr_sectors)
x_diag = L.dot(Y_diag)
x_tot = x_diag.values.sum(1)
del Y_diag
D_cba = pd.DataFrame(S.values.dot(x_diag),
index=S.index,
columns=S.columns)
# D_pba = S.dot(np.diagflat(x_tot))
# faster broadcasted calculation:
D_pba = pd.DataFrame(S.values*x_tot.reshape((1, -1)),
index=S.index,
columns=S.columns)
# for the traded accounts set the domestic industry output to zero
dom_block = np.zeros((nr_sectors, nr_sectors))
x_trade = ioutil.set_block(x_diag.values, dom_block)
D_imp = pd.DataFrame(S.values.dot(x_trade),
index=S.index,
columns=S.columns)
x_exp = x_trade.sum(1)
# D_exp = S.dot(np.diagflat(x_exp))
# faster broadcasted version:
D_exp = pd.DataFrame(S.values * x_exp.reshape((1, -1)),
index=S.index,
columns=S.columns)
return (D_cba, D_pba, D_imp, D_exp) | Calculate sector specific cba and pba based accounts, imp and exp accounts
The total industry output x for the calculation
is recalculated from L and y
Parameters
----------
L : pandas.DataFrame
Leontief input output table L
S : pandas.DataFrame
Direct impact coefficients
Y : pandas.DataFrame
Final demand: aggregated across categories or just one category, one
column per country
nr_sectors : int
Number of sectors in the MRIO
Returns
-------
Tuple
(D_cba, D_pba, D_imp, D_exp)
Format: D_row x L_col (=nr_countries*nr_sectors)
- D_cba Footprint per sector and country
- D_pba Total factor use per sector and country
- D_imp Total global factor use to satisfy total final demand in
the country per sector
- D_exp Total factor use in one country to satisfy final demand
in all other countries (per sector) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iomath.py#L326-L390 |
konstantinstadler/pymrio | pymrio/tools/iodownloader.py | _get_url_datafiles | def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls) | python | def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls) | Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iodownloader.py#L32-L66 |
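The core of `_get_url_datafiles` is a regex scan over the fetched page source; an offline, hedged illustration of that step (the HTML snippet, file names and base url are invented):

```python
import re
from collections import namedtuple

url_text = ('<a href="protected3/wiot95_row_apr12.xlsx">1995</a> '
            '<a href="protected3/wiot96_row_apr12.xlsx">1996</a>')   # stand-in page content
url_db_content = 'http://www.example.org/'                           # hypothetical base url

url_content = namedtuple('url_content', ['raw_text', 'data_urls'])
data_urls = [url_db_content + ff
             for ff in re.findall(r'protected.*?wiot\d\d.*?xlsx', url_text)]
print(url_content(raw_text=url_text, data_urls=data_urls).data_urls)
```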
konstantinstadler/pymrio | pymrio/tools/iodownloader.py | _download_urls | def _download_urls(url_list, storage_folder, overwrite_existing,
meta_handler, access_cookie=None):
""" Save url from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back
"""
for url in url_list:
filename = os.path.basename(url)
if not overwrite_existing and filename in os.listdir(storage_folder):
continue
storage_file = os.path.join(storage_folder, filename)
# Using requests here - tried with aiohttp but was actually slower
# Also don’t use shutil.copyfileobj - corrupts zips from Eora
req = requests.post(url, stream=True, cookies=access_cookie)
with open(storage_file, 'wb') as lf:
for chunk in req.iter_content(1024*5):
lf.write(chunk)
meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))
meta_handler.save()
return meta_handler | python | def _download_urls(url_list, storage_folder, overwrite_existing,
meta_handler, access_cookie=None):
""" Save url from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back
"""
for url in url_list:
filename = os.path.basename(url)
if not overwrite_existing and filename in os.listdir(storage_folder):
continue
storage_file = os.path.join(storage_folder, filename)
# Using requests here - tried with aiohttp but was actually slower
# Also don’t use shutil.copyfileobj - corrupts zips from Eora
req = requests.post(url, stream=True, cookies=access_cookie)
with open(storage_file, 'wb') as lf:
for chunk in req.iter_content(1024*5):
lf.write(chunk)
meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))
meta_handler.save()
return meta_handler | Save url from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iodownloader.py#L69-L112 |
konstantinstadler/pymrio | pymrio/tools/iodownloader.py | download_wiod2013 | def download_wiod2013(storage_folder, years=None, overwrite_existing=False,
satellite_urls=WIOD_CONFIG['satellite_urls']):
""" Downloads the 2013 wiod release
Note
----
Currently, pymrio only works with the 2013 release of the wiod tables. The
more recent 2016 release so far (October 2017) lacks the environmental and
social extensions.
Parameters
----------
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download of the specific file will be skipped.
years: list of int or str, optional
If years is given only downloads the specific years. This
only applies to the IO tables because extensions are stored
by country and not per year.
The years can be given in 2 or 4 digits.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
satellite_urls : list of str (urls), optional
Which satellite accounts to download. Default: satellite urls defined
in WIOD_CONFIG - list of all available urls. Remove items from this list
to only download a subset of extensions
"""
try:
os.makedirs(storage_folder)
except FileExistsError:
pass
if type(years) is int or type(years) is str:
years = [years]
years = years if years else range(1995, 2012)
years = [str(yy).zfill(2)[-2:] for yy in years]
wiod_web_content = _get_url_datafiles(
url_db_view=WIOD_CONFIG['url_db_view'],
url_db_content=WIOD_CONFIG['url_db_content'],
mrio_regex=r'protected.*?wiot\d\d.*?xlsx')
restricted_wiod_io_urls = [url for url in wiod_web_content.data_urls if
re.search(r"(wiot)(\d\d)",
os.path.basename(url)).group(2)
in years]
meta = MRIOMetaData(location=storage_folder,
description='WIOD metadata file for pymrio',
name='WIOD',
system='ixi',
version='data13')
meta = _download_urls(url_list=restricted_wiod_io_urls + satellite_urls,
storage_folder=storage_folder,
overwrite_existing=overwrite_existing,
meta_handler=meta)
meta.save()
return meta | python | def download_wiod2013(storage_folder, years=None, overwrite_existing=False,
satellite_urls=WIOD_CONFIG['satellite_urls']):
""" Downloads the 2013 wiod release
Note
----
Currently, pymrio only works with the 2013 release of the wiod tables. The
more recent 2016 release so far (October 2017) lacks the environmental and
social extensions.
Parameters
----------
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download of the specific file will be skipped.
years: list of int or str, optional
If years is given only downloads the specific years. This
only applies to the IO tables because extensions are stored
by country and not per year.
The years can be given in 2 or 4 digits.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
satellite_urls : list of str (urls), optional
Which satellite accounts to download. Default: satellite urls defined
in WIOD_CONFIG - list of all available urls. Remove items from this list
to only download a subset of extensions
"""
try:
os.makedirs(storage_folder)
except FileExistsError:
pass
if type(years) is int or type(years) is str:
years = [years]
years = years if years else range(1995, 2012)
years = [str(yy).zfill(2)[-2:] for yy in years]
wiod_web_content = _get_url_datafiles(
url_db_view=WIOD_CONFIG['url_db_view'],
url_db_content=WIOD_CONFIG['url_db_content'],
mrio_regex=r'protected.*?wiot\d\d.*?xlsx')
restricted_wiod_io_urls = [url for url in wiod_web_content.data_urls if
re.search(r"(wiot)(\d\d)",
os.path.basename(url)).group(2)
in years]
meta = MRIOMetaData(location=storage_folder,
description='WIOD metadata file for pymrio',
name='WIOD',
system='ixi',
version='data13')
meta = _download_urls(url_list=restricted_wiod_io_urls + satellite_urls,
storage_folder=storage_folder,
overwrite_existing=overwrite_existing,
meta_handler=meta)
meta.save()
return meta | Downloads the 2013 wiod release
Note
----
Currently, pymrio only works with the 2013 release of the wiod tables. The
more recent 2016 release so far (October 2017) lacks the environmental and
social extensions.
Parameters
----------
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download of the specific file will be skipped.
years: list of int or str, optional
If years is given only downloads the specific years. This
only applies to the IO tables because extensions are stored
by country and not per year.
The years can be given in 2 or 4 digits.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
satellite_urls : list of str (urls), optional
Which satellite accounts to download. Default: satellite urls defined
in WIOD_CONFIG - list of all available urls. Remove items from this list
to only download a subset of extensions | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/iodownloader.py#L115-L184 |
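One detail worth a worked example: the year filter accepts two- or four-digit years and normalizes them to the last two digits before matching the file names (numbers below are illustrative):

```python
# The normalization used above: str(yy).zfill(2)[-2:]
years = [1995, 2009, 11, '05']
print([str(yy).zfill(2)[-2:] for yy in years])   # ['95', '09', '11', '05']
```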
shichao-an/115wangpan | u115/utils.py | get_timestamp | def get_timestamp(length):
"""Get a timestamp of `length` in string"""
s = '%.6f' % time.time()
whole, frac = map(int, s.split('.'))
res = '%d%d' % (whole, frac)
return res[:length] | python | def get_timestamp(length):
"""Get a timestamp of `length` in string"""
s = '%.6f' % time.time()
whole, frac = map(int, s.split('.'))
res = '%d%d' % (whole, frac)
return res[:length] | Get a timestamp of `length` in string | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/utils.py#L25-L30 |
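Inlined, the construction looks like this (the digits printed depend on the current clock; purely illustrative):

```python
import time

# Integer seconds and the microsecond part concatenated, then truncated
# to the requested length (13 here).
s = '%.6f' % time.time()
whole, frac = map(int, s.split('.'))
print(('%d%d' % (whole, frac))[:13])
```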
shichao-an/115wangpan | u115/utils.py | mkdir_p | def mkdir_p(path):
"""mkdir -p path"""
if PY3:
return os.makedirs(path, exist_ok=True)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | python | def mkdir_p(path):
"""mkdir -p path"""
if PY3:
return os.makedirs(path, exist_ok=True)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise | mkdir -p path | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/utils.py#L79-L89 |
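Usage mirrors `mkdir -p`: repeated calls on the same path do not raise (assuming the `u115` package is importable; the path is hypothetical):

```python
from u115.utils import mkdir_p

mkdir_p('/tmp/u115_example/a/b')   # creates missing parent directories
mkdir_p('/tmp/u115_example/a/b')   # already exists -> silently ignored
```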
tmc/gevent-zeromq | gevent_zeromq/__init__.py | monkey_patch | def monkey_patch():
"""
Monkey patches `zmq.Context` and `zmq.Socket`
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well.
"""
ozmq = __import__('zmq')
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__('zmq.eventloop.ioloop')
ioloop.Poller = zmq.Poller | python | def monkey_patch():
"""
Monkey patches `zmq.Context` and `zmq.Socket`
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well.
"""
ozmq = __import__('zmq')
ozmq.Socket = zmq.Socket
ozmq.Context = zmq.Context
ozmq.Poller = zmq.Poller
ioloop = __import__('zmq.eventloop.ioloop')
ioloop.Poller = zmq.Poller | Monkey patches `zmq.Context` and `zmq.Socket`
If test_suite is True, the pyzmq test suite will be patched for
compatibility as well. | https://github.com/tmc/gevent-zeromq/blob/b15d50deedda3d2cdb701106d4b315c7a06353e3/gevent_zeromq/__init__.py#L34-L46 |
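Intended usage (assuming the `gevent_zeromq` package is installed): call it once before other modules import `zmq` directly:

```python
from gevent_zeromq import monkey_patch

monkey_patch()

import zmq  # zmq.Context, zmq.Socket and zmq.Poller now point at the gevent-aware classes
```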
konstantinstadler/pymrio | pymrio/core/mriosystem.py | concate_extension | def concate_extension(*extensions, name):
""" Concatenate extensions
Notes
----
The method assumes that the first index is the name of the
stressor/impact/input type. To provide a consistent naming this is renamed
to 'indicator' if they differ. All other index names ('compartments', ...)
are added to the concatenated extensions and set to NaN for missing values.
Notes
----
Attributes which are not DataFrames will be set to None if they differ
between the extensions
Parameters
----------
extensions : Extensions
The Extensions to concatenate as multiple parameters
name : string
Name of the new extension
Returns
-------
Concatenated extension
"""
if type(extensions[0]) is tuple or type(extensions[0]) is list:
extensions = extensions[0]
# check if a final demand extension (FY/SY) is present in one of the given extensions
FY_present = False
SY_present = False
SFY_columns = None
for ext in extensions:
if 'FY' in ext.get_DataFrame(data=False):
FY_present = True
SFY_columns = ext.FY.columns
if 'SY' in ext.get_DataFrame(data=False):
SY_present = True
SFY_columns = ext.SY.columns
# get the intersection of the available dataframes
set_dfs = [set(ext.get_DataFrame(data=False)) for ext in extensions]
df_dict = {key: None for key in set.intersection(*set_dfs)}
if FY_present:
df_dict['FY'] = None
if SY_present:
df_dict['SY'] = None
empty_df_dict = df_dict.copy()
attr_dict = {}
# get data from each extension
first_run = True
for ext in extensions:
# get corresponding attributes of all extensions
for key in ext.__dict__:
if type(ext.__dict__[key]) is not pd.DataFrame:
if attr_dict.get(key, -99) == -99:
attr_dict[key] = ext.__dict__[key]
elif attr_dict[key] == ext.__dict__[key]:
continue
else:
attr_dict[key] = None
# get DataFrame data
cur_dict = empty_df_dict.copy()
for df in cur_dict:
cur_dict[df] = getattr(ext, df)
# add zero final demand extension if final demand extension present in
# one extension
if FY_present:
# doesn't work with getattr b/c FY can be present as attribute but
# not as DataFrame
if 'FY' in ext.get_DataFrame(data=False):
cur_dict['FY'] = getattr(ext, 'FY')
else:
cur_dict['FY'] = pd.DataFrame(data=0,
index=ext.get_index(),
columns=SFY_columns)
if SY_present:
# doesn't work with getattr b/c SY can be present as attribute but
# not as DataFrame
if 'SY' in ext.get_DataFrame(data=False):
cur_dict['SY'] = getattr(ext, 'SY')
else:
cur_dict['SY'] = pd.DataFrame(data=0,
index=ext.get_index(),
columns=SFY_columns)
# append all df data
for key in cur_dict:
if not first_run:
if cur_dict[key].index.names != df_dict[key].index.names:
cur_ind_names = list(cur_dict[key].index.names)
df_ind_names = list(df_dict[key].index.names)
cur_ind_names[0] = 'indicator'
df_ind_names[0] = cur_ind_names[0]
cur_dict[key].index.set_names(cur_ind_names,
inplace=True)
df_dict[key].index.set_names(df_ind_names,
inplace=True)
for ind in cur_ind_names:
if ind not in df_ind_names:
df_dict[key] = (df_dict[key].
set_index(pd.DataFrame(
data=None,
index=df_dict[key].index,
columns=[ind])[ind],
append=True))
for ind in df_ind_names:
if ind not in cur_ind_names:
cur_dict[key] = (cur_dict[key].set_index(
pd.DataFrame(
data=None,
index=cur_dict[key].index,
columns=[ind])
[ind], append=True))
df_dict[key] = pd.concat([df_dict[key], cur_dict[key]])
first_run = False
all_dict = dict(list(attr_dict.items()) + list(df_dict.items()))
all_dict['name'] = name
return Extension(**all_dict) | python | def concate_extension(*extensions, name):
""" Concatenate extensions
Notes
----
The method assumes that the first index is the name of the
stressor/impact/input type. To provide a consistent naming this is renamed
to 'indicator' if they differ. All other index names ('compartments', ...)
are added to the concatenated extensions and set to NaN for missing values.
Notes
----
Attributes which are not DataFrames will be set to None if they differ
between the extensions
Parameters
----------
extensions : Extensions
The Extensions to concatenate as multiple parameters
name : string
Name of the new extension
Returns
-------
Concatenated extension
"""
if type(extensions[0]) is tuple or type(extensions[0]) is list:
extensions = extensions[0]
# check if a final demand extension (FY/SY) is present in one of the given extensions
FY_present = False
SY_present = False
SFY_columns = None
for ext in extensions:
if 'FY' in ext.get_DataFrame(data=False):
FY_present = True
SFY_columns = ext.FY.columns
if 'SY' in ext.get_DataFrame(data=False):
SY_present = True
SFY_columns = ext.SY.columns
# get the intersection of the available dataframes
set_dfs = [set(ext.get_DataFrame(data=False)) for ext in extensions]
df_dict = {key: None for key in set.intersection(*set_dfs)}
if FY_present:
df_dict['FY'] = None
if SY_present:
df_dict['SY'] = None
empty_df_dict = df_dict.copy()
attr_dict = {}
# get data from each extension
first_run = True
for ext in extensions:
# get corresponding attributes of all extensions
for key in ext.__dict__:
if type(ext.__dict__[key]) is not pd.DataFrame:
if attr_dict.get(key, -99) == -99:
attr_dict[key] = ext.__dict__[key]
elif attr_dict[key] == ext.__dict__[key]:
continue
else:
attr_dict[key] = None
# get DataFrame data
cur_dict = empty_df_dict.copy()
for df in cur_dict:
cur_dict[df] = getattr(ext, df)
# add zero final demand extension if final demand extension present in
# one extension
if FY_present:
# doesn't work with getattr b/c FY can be present as attribute but
# not as DataFrame
if 'FY' in ext.get_DataFrame(data=False):
cur_dict['FY'] = getattr(ext, 'FY')
else:
cur_dict['FY'] = pd.DataFrame(data=0,
index=ext.get_index(),
columns=SFY_columns)
if SY_present:
# doesn't work with getattr b/c SY can be present as attribute but
# not as DataFrame
if 'SY' in ext.get_DataFrame(data=False):
cur_dict['SY'] = getattr(ext, 'SY')
else:
cur_dict['SY'] = pd.DataFrame(data=0,
index=ext.get_index(),
columns=SFY_columns)
# append all df data
for key in cur_dict:
if not first_run:
if cur_dict[key].index.names != df_dict[key].index.names:
cur_ind_names = list(cur_dict[key].index.names)
df_ind_names = list(df_dict[key].index.names)
cur_ind_names[0] = 'indicator'
df_ind_names[0] = cur_ind_names[0]
cur_dict[key].index.set_names(cur_ind_names,
inplace=True)
df_dict[key].index.set_names(df_ind_names,
inplace=True)
for ind in cur_ind_names:
if ind not in df_ind_names:
df_dict[key] = (df_dict[key].
set_index(pd.DataFrame(
data=None,
index=df_dict[key].index,
columns=[ind])[ind],
append=True))
for ind in df_ind_names:
if ind not in cur_ind_names:
cur_dict[key] = (cur_dict[key].set_index(
pd.DataFrame(
data=None,
index=cur_dict[key].index,
columns=[ind])
[ind], append=True))
df_dict[key] = pd.concat([df_dict[key], cur_dict[key]])
first_run = False
all_dict = dict(list(attr_dict.items()) + list(df_dict.items()))
all_dict['name'] = name
return Extension(**all_dict) | Concatenate extensions
Notes
----
The method assumes that the first index is the name of the
stressor/impact/input type. To provide a consistent naming this is renamed
to 'indicator' if they differ. All other index names ('compartments', ...)
are added to the concatenated extensions and set to NaN for missing values.
Notes
----
Attributes which are not DataFrames will be set to None if they differ
between the extensions
Parameters
----------
extensions : Extensions
The Extensions to concatenate as multiple parameters
name : string
Name of the new extension
Returns
-------
Concatenated extension | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L2031-L2163 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.reset_full | def reset_full(self, force=False, _meta=None):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Attributes to keep must be defined in the init: __basic__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("Too few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self | python | def reset_full(self, force=False, _meta=None):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Attributes to keep must be defined in the init: __basic__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("Too few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self | Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L83-L124 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.reset_to_flows | def reset_to_flows(self, force=False, _meta=None):
""" Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Development note: The attributes which should be removed are
# defined in self.__non_agg_attributes__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("Too few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset to absolute flows")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None) for key in self.__non_agg_attributes__]
return self | python | def reset_to_flows(self, force=False, _meta=None):
""" Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Development note: The attributes which should be removed are
# defined in self.__non_agg_attributes__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("Too few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset to absolute flows")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None) for key in self.__non_agg_attributes__]
return self | Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L126-L166 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.reset_to_coefficients | def reset_to_coefficients(self):
""" Keeps only the coefficient.
This can be used to recalculate the IO tables for a new finald demand.
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
# Development note: The coefficient attributes are
# defined in self.__coefficients__
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__coefficients__]
return self | python | def reset_to_coefficients(self):
""" Keeps only the coefficient.
This can be used to recalculate the IO tables for a new final demand.
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
# Development note: The coefficient attributes are
# defined in self.__coefficients__
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__coefficients__]
return self | Keeps only the coefficient.
This can be used to recalculate the IO tables for a new final demand.
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L168-L189 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.copy | def copy(self, new_name=None):
""" Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy
"""
_tmp = copy.deepcopy(self)
if not new_name:
new_name = self.name + '_copy'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
_tmp.meta.note('IOSystem copy {new} based on {old}'.format(
new=new_name, old=self.meta.name))
_tmp.meta.change_meta('name', new_name, log=False)
else:
_tmp.name = new_name
return _tmp | python | def copy(self, new_name=None):
""" Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy
"""
_tmp = copy.deepcopy(self)
if not new_name:
new_name = self.name + '_copy'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
_tmp.meta.note('IOSystem copy {new} based on {old}'.format(
new=new_name, old=self.meta.name))
_tmp.meta.change_meta('name', new_name, log=False)
else:
_tmp.name = new_name
return _tmp | Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L191-L210 |
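A hedged end-to-end sketch combining `copy` with the reset methods above; it assumes pymrio's small bundled test system (`pymrio.load_test()`) and its `calc_all` method are available:

```python
import pymrio

io = pymrio.load_test().calc_all()       # small test MRIO with all accounts calculated
io2 = io.copy(new_name='test_copy')      # deep copy; the copy is noted in io2.meta
io2.reset_to_flows()                     # drops derived tables such as A and L, keeps Z, Y, F
print(io2.A is None, io.A is not None)   # True True: the original system is untouched
```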
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.get_Y_categories | def get_Y_categories(self, entries=None):
""" Returns names of y cat. of the IOSystem as unique names in order
Parameters
----------
entries : List, optional
If given, returns a list with None for all values not in entries.
Returns
-------
Index
List of categories, None if no attribute to determine
list is available
"""
possible_dataframes = ['Y', 'FY']
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
try:
ind = getattr(self, df).columns.get_level_values(
'category').unique()
except (AssertionError, KeyError):
ind = getattr(self, df).columns.get_level_values(
1).unique()
if entries:
if type(entries) is str:
entries = [entries]
ind = ind.tolist()
return [None if ee not in entries else ee for ee in ind]
else:
return ind
else:
logging.warn("No attributes available to get Y categories")
return None | python | def get_Y_categories(self, entries=None):
""" Returns names of y cat. of the IOSystem as unique names in order
Parameters
----------
entries : List, optional
If given, returns a list with None for all values not in entries.
Returns
-------
Index
List of categories, None if no attribute to determine
list is available
"""
possible_dataframes = ['Y', 'FY']
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
try:
ind = getattr(self, df).columns.get_level_values(
'category').unique()
except (AssertionError, KeyError):
ind = getattr(self, df).columns.get_level_values(
1).unique()
if entries:
if type(entries) is str:
entries = [entries]
ind = ind.tolist()
return [None if ee not in entries else ee for ee in ind]
else:
return ind
else:
logging.warn("No attributes available to get Y categories")
return None | Returns names of y cat. of the IOSystem as unique names in order
Parameters
----------
entries : List, optional
If given, returns a list with None for all values not in entries.
Returns
-------
Index
List of categories, None if no attribute to determine
list is available | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L212-L245 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.get_index | def get_index(self, as_dict=False, grouping_pattern=None):
""" Returns the index of the DataFrames in the system
Parameters
----------
as_dict: boolean, optional
If True, returns a 1:1 key-value matching for further processing
prior to groupby functions. Otherwise (default) the index
is returned as pandas index.
grouping_pattern: dict, optional
Dictionary with keys being regex patterns matching index and
values the name for the grouping. If the index is a pandas
multiindex, the keys must be tuples with as many entries as the
multiindex has levels, with a valid regex expression at each position.
Otherwise, the keys need to be strings.
Only relevant if as_dict is True.
"""
possible_dataframes = ['A', 'L', 'Z', 'Y', 'F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
orig_idx = getattr(self, df).index
break
else:
logging.warn("No attributes available to get index")
return None
if as_dict:
dd = {k: k for k in orig_idx}
if grouping_pattern:
for pattern, new_group in grouping_pattern.items():
if type(pattern) is str:
dd.update({k: new_group for k, v in dd.items() if
re.match(pattern, k)})
else:
dd.update({k: new_group for k, v in dd.items() if
all([re.match(pat, k[nr])
for nr, pat in enumerate(pattern)])})
return dd
else:
return orig_idx | python | def get_index(self, as_dict=False, grouping_pattern=None):
""" Returns the index of the DataFrames in the system
Parameters
----------
as_dict: boolean, optional
If True, returns a 1:1 key-value matching for further processing
prior to groupby functions. Otherwise (default) the index
is returned as pandas index.
grouping_pattern: dict, optional
Dictionary with keys being regex patterns matching index and
values the name for the grouping. If the index is a pandas
multiindex, the keys must be tuples of length levels in the
multiindex, with a valid regex expression at each position.
Otherwise, the keys need to be strings.
Only relevant if as_dict is True.
"""
possible_dataframes = ['A', 'L', 'Z', 'Y', 'F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
orig_idx = getattr(self, df).index
break
else:
logging.warn("No attributes available to get index")
return None
if as_dict:
dd = {k: k for k in orig_idx}
if grouping_pattern:
for pattern, new_group in grouping_pattern.items():
if type(pattern) is str:
dd.update({k: new_group for k, v in dd.items() if
re.match(pattern, k)})
else:
dd.update({k: new_group for k, v in dd.items() if
all([re.match(pat, k[nr])
for nr, pat in enumerate(pattern)])})
return dd
else:
return orig_idx | Returns the index of the DataFrames in the system
Parameters
----------
as_dict: boolean, optional
If True, returns a 1:1 key-value matching for further processing
prior to groupby functions. Otherwise (default) the index
is returned as pandas index.
grouping_pattern: dict, optional
Dictionary with keys being regex patterns matching index and
values the name for the grouping. If the index is a pandas
multiindex, the keys must be tuples of length levels in the
multiindex, with a valid regex expression at each position.
Otherwise, the keys need to be strings.
Only relevant if as_dict is True. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L247-L295 |
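A short sketch of get_index with a grouping pattern, assuming the bundled test system (regions named 'reg1' to 'reg6'); the pattern keys are tuples because the core index is a (region, sector) MultiIndex:

import pymrio

io = pymrio.load_test()
idx = io.get_index()                      # pandas (Multi)Index of the core tables
mapping = io.get_index(as_dict=True)      # 1:1 dict, ready for groupby preprocessing

# Map every sector of 'reg1' to one group, leave all other entries unchanged
grouped = io.get_index(as_dict=True,
                       grouping_pattern={('reg1', '.*'): 'reg1_total'})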
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.set_index | def set_index(self, index):
""" Sets the pd dataframe index of all dataframes in the system to index
"""
for df in self.get_DataFrame(data=True, with_population=False):
df.index = index | python | def set_index(self, index):
""" Sets the pd dataframe index of all dataframes in the system to index
"""
for df in self.get_DataFrame(data=True, with_population=False):
df.index = index | Sets the pd dataframe index of all dataframes in the system to index | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L297-L301 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.get_DataFrame | def get_DataFrame(self, data=False, with_unit=True, with_population=True):
""" Yields all panda.DataFrames or there names
Notes
-----
For IOSystem this does not include the DataFrames in the extensions.
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the DataFrames.
If False, returns a generator which
yields only the names of the DataFrames
with_unit: boolean, optional
If True, includes the 'unit' DataFrame
If False, does not include the 'unit' DataFrame.
The method then only yields the numerical data tables
with_population: boolean, optional
If True, includes the 'population' vector
If False, does not include the 'population' vector.
Returns
-------
DataFrames or string generator, depending on parameter data
"""
for key in self.__dict__:
if (key == 'unit') and not with_unit:
continue
if (key == 'population') and not with_population:
continue
if type(self.__dict__[key]) is pd.DataFrame:
if data:
yield getattr(self, key)
else:
yield key | python | def get_DataFrame(self, data=False, with_unit=True, with_population=True):
""" Yields all panda.DataFrames or there names
Notes
-----
For IOSystem this does not include the DataFrames in the extensions.
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the DataFrames.
If False, returns a generator which
yields only the names of the DataFrames
with_unit: boolean, optional
If True, includes the 'unit' DataFrame
If False, does not include the 'unit' DataFrame.
The method then only yields the numerical data tables
with_population: boolean, optional
If True, includes the 'population' vector
If False, does not include the 'population' vector.
Returns
-------
DataFrames or string generator, depending on parameter data
"""
for key in self.__dict__:
if (key == 'unit') and not with_unit:
continue
if (key == 'population') and not with_population:
continue
if type(self.__dict__[key]) is pd.DataFrame:
if data:
yield getattr(self, key)
else:
yield key | Yields all pandas.DataFrames or their names
Notes
-----
For IOSystem this does not include the DataFrames in the extensions.
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the DataFrames.
If False, returns a generator which
yields only the names of the DataFrames
with_unit: boolean, optional
If True, includes the 'unit' DataFrame
If False, does not include the 'unit' DataFrame.
The method then only yields the numerical data tables
with_population: boolean, optional
If True, includes the 'population' vector
If False, does not include the 'population' vector.
Returns
-------
DataFrames or string generator, depending on parameter data | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L384-L422 |
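A sketch iterating over the core tables via get_DataFrame, again assuming the bundled test system:

import pymrio

io = pymrio.load_test()
io.calc_all()

print(list(io.get_DataFrame()))           # names of all core DataFrames

# Pair names with data, skipping the 'unit' and 'population' tables
names = io.get_DataFrame(with_unit=False, with_population=False)
tables = io.get_DataFrame(data=True, with_unit=False, with_population=False)
for name, df in zip(names, tables):
    print(name, df.shape)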
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.save | def save(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saving the system to path
Parameters
----------
path : pathlib.Path or string
path for the saved data (will be created if necessary, data
within will be overwritten).
table_format : string
Format to save the DataFrames:
- 'pkl' : Binary pickle files,
alias: 'pickle', 'bin', 'binary'
- 'txt' : Text files (default), alias: 'text', 'csv'
table_ext : string, optional
File extension,
default depends on table_format(.pkl for pickle, .txt for text)
sep : string, optional
Field delimiter for the output file, only for txt files.
Default: tab ('\t')
float_format : string, optional
Format for saving the DataFrames,
default = '%.12g', only for txt files
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
para_file_path = path / DEFAULT_FILE_NAMES['filepara']
file_para = dict()
file_para['files'] = dict()
if table_format in ['text', 'csv', 'txt']:
table_format = 'txt'
elif table_format in ['pickle', 'bin', 'binary', 'pkl']:
table_format = 'pkl'
else:
raise ValueError('Unknown table format "{}" - '
'must be "txt" or "pkl"'.format(table_format))
if not table_ext:
if table_format == 'txt':
table_ext = '.txt'
if table_format == 'pkl':
table_ext = '.pkl'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
file_para['systemtype'] = GENERIC_NAMES['iosys']
elif str(type(self)) == "<class 'pymrio.core.mriosystem.Extension'>":
file_para['systemtype'] = GENERIC_NAMES['ext']
file_para['name'] = self.name
else:
logging.warn('Unknown system type {} - set to "undef"'.format(
str(type(self))))
file_para['systemtype'] = 'undef'
for df, df_name in zip(self.get_DataFrame(data=True),
self.get_DataFrame()):
if type(df.index) is pd.MultiIndex:
nr_index_col = len(df.index.levels)
else:
nr_index_col = 1
if type(df.columns) is pd.MultiIndex:
nr_header = len(df.columns.levels)
else:
nr_header = 1
save_file = df_name + table_ext
save_file_with_path = path / save_file
logging.info('Save file {}'.format(save_file_with_path))
if table_format == 'txt':
df.to_csv(save_file_with_path, sep=sep,
float_format=float_format)
else:
df.to_pickle(save_file_with_path)
file_para['files'][df_name] = dict()
file_para['files'][df_name]['name'] = save_file
file_para['files'][df_name]['nr_index_col'] = str(nr_index_col)
file_para['files'][df_name]['nr_header'] = str(nr_header)
with para_file_path.open(mode='w') as pf:
json.dump(file_para, pf, indent=4)
if file_para['systemtype'] == GENERIC_NAMES['iosys']:
if not self.meta:
self.meta = MRIOMetaData(name=self.name,
location=path)
self.meta._add_fileio("Saved {} to {}".format(self.name, path))
self.meta.save(location=path)
return self | python | def save(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saving the system to path
Parameters
----------
path : pathlib.Path or string
path for the saved data (will be created if necessary, data
within will be overwritten).
table_format : string
Format to save the DataFrames:
- 'pkl' : Binary pickle files,
alias: 'pickle', 'bin', 'binary'
- 'txt' : Text files (default), alias: 'text', 'csv'
table_ext : string, optional
File extension,
default depends on table_format(.pkl for pickle, .txt for text)
sep : string, optional
Field delimiter for the output file, only for txt files.
Default: tab ('\t')
float_format : string, optional
Format for saving the DataFrames,
default = '%.12g', only for txt files
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
para_file_path = path / DEFAULT_FILE_NAMES['filepara']
file_para = dict()
file_para['files'] = dict()
if table_format in ['text', 'csv', 'txt']:
table_format = 'txt'
elif table_format in ['pickle', 'bin', 'binary', 'pkl']:
table_format = 'pkl'
else:
raise ValueError('Unknown table format "{}" - '
'must be "txt" or "pkl"'.format(table_format))
if not table_ext:
if table_format == 'txt':
table_ext = '.txt'
if table_format == 'pkl':
table_ext = '.pkl'
if str(type(self)) == "<class 'pymrio.core.mriosystem.IOSystem'>":
file_para['systemtype'] = GENERIC_NAMES['iosys']
elif str(type(self)) == "<class 'pymrio.core.mriosystem.Extension'>":
file_para['systemtype'] = GENERIC_NAMES['ext']
file_para['name'] = self.name
else:
logging.warn('Unknown system type {} - set to "undef"'.format(
str(type(self))))
file_para['systemtype'] = 'undef'
for df, df_name in zip(self.get_DataFrame(data=True),
self.get_DataFrame()):
if type(df.index) is pd.MultiIndex:
nr_index_col = len(df.index.levels)
else:
nr_index_col = 1
if type(df.columns) is pd.MultiIndex:
nr_header = len(df.columns.levels)
else:
nr_header = 1
save_file = df_name + table_ext
save_file_with_path = path / save_file
logging.info('Save file {}'.format(save_file_with_path))
if table_format == 'txt':
df.to_csv(save_file_with_path, sep=sep,
float_format=float_format)
else:
df.to_pickle(save_file_with_path)
file_para['files'][df_name] = dict()
file_para['files'][df_name]['name'] = save_file
file_para['files'][df_name]['nr_index_col'] = str(nr_index_col)
file_para['files'][df_name]['nr_header'] = str(nr_header)
with para_file_path.open(mode='w') as pf:
json.dump(file_para, pf, indent=4)
if file_para['systemtype'] == GENERIC_NAMES['iosys']:
if not self.meta:
self.meta = MRIOMetaData(name=self.name,
location=path)
self.meta._add_fileio("Saved {} to {}".format(self.name, path))
self.meta.save(location=path)
return self | Saving the system to path
Parameters
----------
path : pathlib.Path or string
path for the saved data (will be created if necessary, data
within will be overwritten).
table_format : string
Format to save the DataFrames:
- 'pkl' : Binary pickle files,
alias: 'pickle', 'bin', 'binary'
- 'txt' : Text files (default), alias: 'text', 'csv'
table_ext : string, optional
File extension,
default depends on table_format(.pkl for pickle, .txt for text)
sep : string, optional
Field delimiter for the output file, only for txt files.
Default: tab ('\t')
float_format : string, optional
Format for saving the DataFrames,
default = '%.12g', only for txt files | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L424-L527 |
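A save round-trip sketch; the target directories are arbitrary, and save_all/load_all are assumed to be the matching extension-aware writer/reader in this pymrio version:

import pymrio

io = pymrio.load_test()
io.calc_all()

# Core system as tab-separated text files plus metadata/file parameters
io.save('/tmp/pymrio_txt', table_format='txt')

# Same data as binary pickles (faster to read back)
io.save('/tmp/pymrio_pkl', table_format='pkl')

# save_all/load_all additionally cover the satellite extensions
io.save_all('/tmp/pymrio_full')
io_restored = pymrio.load_all('/tmp/pymrio_full')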
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.rename_regions | def rename_regions(self, regions):
""" Sets new names for the regions
Parameters
----------
regions : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and complete
without repetition
"""
if type(regions) is list:
regions = {old: new for old, new in
zip(self.get_regions(), regions)}
for df in self.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
except:
pass
self.meta._add_modify("Changed country names")
return self | python | def rename_regions(self, regions):
""" Sets new names for the regions
Parameters
----------
regions : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and complete
without repetition
"""
if type(regions) is list:
regions = {old: new for old, new in
zip(self.get_regions(), regions)}
for df in self.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=regions, columns=regions, inplace=True)
except:
pass
self.meta._add_modify("Changed country names")
return self | Sets new names for the regions
Parameters
----------
regions : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and complete
without repetition | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L529-L557 |
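A sketch of renaming regions, assuming the test system's region labels; rename_sectors and rename_Y_categories below follow the same dict-or-list pattern:

import pymrio

io = pymrio.load_test()

# Dict form: only the listed regions are renamed
io.rename_regions({'reg1': 'Region A', 'reg2': 'Region B'})

# List form: a complete list of new names in the current order
new_names = ['R{}'.format(i) for i, _ in enumerate(io.get_regions(), 1)]
io.rename_regions(new_names)
print(io.get_regions())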
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.rename_sectors | def rename_sectors(self, sectors):
""" Sets new names for the sectors
Parameters
----------
sectors : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(sectors) is list:
sectors = {old: new for old, new in
zip(self.get_sectors(), sectors)}
for df in self.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
except:
pass
self.meta._add_modify("Changed sector names")
return self | python | def rename_sectors(self, sectors):
""" Sets new names for the sectors
Parameters
----------
sectors : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(sectors) is list:
sectors = {old: new for old, new in
zip(self.get_sectors(), sectors)}
for df in self.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=sectors, columns=sectors, inplace=True)
except:
pass
self.meta._add_modify("Changed sector names")
return self | Sets new names for the sectors
Parameters
----------
sectors : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L559-L586 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | CoreSystem.rename_Y_categories | def rename_Y_categories(self, Y_categories):
""" Sets new names for the Y_categories
Parameters
----------
Y_categories : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(Y_categories) is list:
Y_categories = {old: new for old, new in
zip(self.get_Y_categories(), Y_categories)}
for df in self.get_DataFrame(data=True):
df.rename(index=Y_categories, columns=Y_categories, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=Y_categories,
columns=Y_categories,
inplace=True)
except:
pass
self.meta._add_modify("Changed Y category names")
return self | python | def rename_Y_categories(self, Y_categories):
""" Sets new names for the Y_categories
Parameters
----------
Y_categories : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition
"""
if type(Y_categories) is list:
Y_categories = {old: new for old, new in
zip(self.get_Y_categories(), Y_categories)}
for df in self.get_DataFrame(data=True):
df.rename(index=Y_categories, columns=Y_categories, inplace=True)
try:
for ext in self.get_extensions(data=True):
for df in ext.get_DataFrame(data=True):
df.rename(index=Y_categories,
columns=Y_categories,
inplace=True)
except:
pass
self.meta._add_modify("Changed Y category names")
return self | Sets new names for the Y_categories
Parameters
----------
Y_categories : list or dict
In case of dict: {'old_name' : 'new_name'} with an
entry for each old_name which should be renamed
In case of list: List of new names in order and
complete without repetition | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L588-L618 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.calc_system | def calc_system(self, x, Y, Y_agg=None, L=None, population=None):
""" Calculates the missing part of the extension plus accounts
This method allows specifying an aggregated Y_agg for the
account calculation (see Y_agg below). However, the full Y needs
to be specified for the calculation of FY or SY.
Calculates:
- for each sector and country:
S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector,
D_exp_sector
- for each region:
D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg,
- for each region (if population vector is given):
D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap
Notes
-----
Only attributes which are not None are recalculated (for D_* this is
checked for each group (reg, cap, and w/o appendix)).
Parameters
----------
x : pandas.DataFrame or numpy.array
Industry output column vector
Y : pandas.DataFrame or numpy.array
Full final demand array
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
L : pandas.DataFrame or numpy.array, optional
Leontief input output table L. If this is not given,
the method recalculates M based on D_cba (must be present in
the extension).
population : pandas.DataFrame or np.array, optional
Row vector with population per region
"""
if Y_agg is None:
try:
Y_agg = Y.sum(level='region',
axis=1).reindex(self.get_regions(),
axis=1)
except (AssertionError, KeyError):
Y_agg = Y.sum(level=0,
axis=1,).reindex(self.get_regions(),
axis=1)
y_vec = Y.sum(axis=0)
if self.F is None:
self.F = calc_F(self.S, x)
logging.debug(
'{} - F calculated'.format(self.name))
if self.S is None:
self.S = calc_S(self.F, x)
logging.debug('{} - S calculated'.format(self.name))
if (self.FY is None) and (self.SY is not None):
self.FY = calc_FY(self.SY, y_vec)
logging.debug('{} - FY calculated'.format(self.name))
if (self.SY is None) and (self.FY is not None):
self.SY = calc_SY(self.FY, y_vec)
logging.debug('{} - SY calculated'.format(self.name))
if self.M is None:
if L is not None:
self.M = calc_M(self.S, L)
logging.debug('{} - M calculated based on L'.format(
self.name))
else:
try:
self.M = recalc_M(self.S, self.D_cba,
Y=Y_agg,
nr_sectors=self.get_sectors().size)
logging.debug(
'{} - M calculated based on '
'D_cba and Y'.format(self.name))
except Exception as ex:
logging.debug(
'Recalculation of M not possible - cause: {}'.
format(ex))
FY_agg = 0
if self.FY is not None:
# FY_agg = ioutil.agg_columns(
# ext['FY'], self.get_Y_categories().size)
try:
FY_agg = (self.FY.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
FY_agg = (self.FY.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
if ((self.D_cba is None) or
(self.D_pba is None) or
(self.D_imp is None) or
(self.D_exp is None)):
if L is None:
logging.debug(
'Not possible to calculate D accounts - L not present')
return
else:
self.D_cba, self.D_pba, self.D_imp, self.D_exp = (
calc_accounts(self.S, L, Y_agg, self.get_sectors().size))
logging.debug(
'{} - Accounts D calculated'.format(self.name))
# aggregate to country
if ((self.D_cba_reg is None) or (self.D_pba_reg is None) or
(self.D_imp_reg is None) or (self.D_exp_reg is None)):
try:
self.D_cba_reg = (
self.D_cba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_cba_reg = (
self.D_cba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_pba_reg = (
self.D_pba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_pba_reg = (
self.D_pba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_imp_reg = (
self.D_imp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_imp_reg = (
self.D_imp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
try:
self.D_exp_reg = (
self.D_exp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_exp_reg = (
self.D_exp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
logging.debug(
'{} - Accounts D for regions calculated'.format(self.name))
# calc accounts per capita if population data is available
if population is not None:
if type(population) is pd.DataFrame:
# check for right order:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
'Population regions are inconsistent with IO regions')
population = population.values
if ((self.D_cba_cap is None) or (self.D_pba_cap is None) or
(self.D_imp_cap is None) or (self.D_exp_cap is None)):
self.D_cba_cap = self.D_cba_reg.dot(
np.diagflat(1./population))
self.D_pba_cap = self.D_pba_reg.dot(
np.diagflat(1./population))
self.D_imp_cap = self.D_imp_reg.dot(
np.diagflat(1./population))
self.D_exp_cap = self.D_exp_reg.dot(
np.diagflat(1./population))
self.D_cba_cap.columns = self.D_cba_reg.columns
self.D_pba_cap.columns = self.D_pba_reg.columns
self.D_imp_cap.columns = self.D_imp_reg.columns
self.D_exp_cap.columns = self.D_exp_reg.columns
logging.debug(
'{} - Accounts D per capita calculated'.format(self.name))
return self | python | def calc_system(self, x, Y, Y_agg=None, L=None, population=None):
""" Calculates the missing part of the extension plus accounts
This method allows specifying an aggregated Y_agg for the
account calculation (see Y_agg below). However, the full Y needs
to be specified for the calculation of FY or SY.
Calculates:
- for each sector and country:
S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector,
D_exp_sector
- for each region:
D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg,
- for each region (if population vector is given):
D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap
Notes
-----
Only attributes which are not None are recalculated (for D_* this is
checked for each group (reg, cap, and w/o appendix)).
Parameters
----------
x : pandas.DataFrame or numpy.array
Industry output column vector
Y : pandas.DataFrame or numpy.array
Full final demand array
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
L : pandas.DataFrame or numpy.array, optional
Leontief input output table L. If this is not given,
the method recalculates M based on D_cba (must be present in
the extension).
population : pandas.DataFrame or np.array, optional
Row vector with population per region
"""
if Y_agg is None:
try:
Y_agg = Y.sum(level='region',
axis=1).reindex(self.get_regions(),
axis=1)
except (AssertionError, KeyError):
Y_agg = Y.sum(level=0,
axis=1,).reindex(self.get_regions(),
axis=1)
y_vec = Y.sum(axis=0)
if self.F is None:
self.F = calc_F(self.S, x)
logging.debug(
'{} - F calculated'.format(self.name))
if self.S is None:
self.S = calc_S(self.F, x)
logging.debug('{} - S calculated'.format(self.name))
if (self.FY is None) and (self.SY is not None):
self.FY = calc_FY(self.SY, y_vec)
logging.debug('{} - FY calculated'.format(self.name))
if (self.SY is None) and (self.FY is not None):
self.SY = calc_SY(self.FY, y_vec)
logging.debug('{} - SY calculated'.format(self.name))
if self.M is None:
if L is not None:
self.M = calc_M(self.S, L)
logging.debug('{} - M calculated based on L'.format(
self.name))
else:
try:
self.M = recalc_M(self.S, self.D_cba,
Y=Y_agg,
nr_sectors=self.get_sectors().size)
logging.debug(
'{} - M calculated based on '
'D_cba and Y'.format(self.name))
except Exception as ex:
logging.debug(
'Recalculation of M not possible - cause: {}'.
format(ex))
FY_agg = 0
if self.FY is not None:
# FY_agg = ioutil.agg_columns(
# ext['FY'], self.get_Y_categories().size)
try:
FY_agg = (self.FY.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
FY_agg = (self.FY.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
if ((self.D_cba is None) or
(self.D_pba is None) or
(self.D_imp is None) or
(self.D_exp is None)):
if L is None:
logging.debug(
'Not possible to calculate D accounts - L not present')
return
else:
self.D_cba, self.D_pba, self.D_imp, self.D_exp = (
calc_accounts(self.S, L, Y_agg, self.get_sectors().size))
logging.debug(
'{} - Accounts D calculated'.format(self.name))
# aggregate to country
if ((self.D_cba_reg is None) or (self.D_pba_reg is None) or
(self.D_imp_reg is None) or (self.D_exp_reg is None)):
try:
self.D_cba_reg = (
self.D_cba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_cba_reg = (
self.D_cba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_pba_reg = (
self.D_pba.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
except (AssertionError, KeyError):
self.D_pba_reg = (
self.D_pba.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1) + FY_agg)
try:
self.D_imp_reg = (
self.D_imp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_imp_reg = (
self.D_imp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
try:
self.D_exp_reg = (
self.D_exp.sum(level='region', axis=1).
reindex(self.get_regions(), axis=1))
except (AssertionError, KeyError):
self.D_exp_reg = (
self.D_exp.sum(level=0, axis=1).
reindex(self.get_regions(), axis=1))
logging.debug(
'{} - Accounts D for regions calculated'.format(self.name))
# calc accounts per capita if population data is available
if population is not None:
if type(population) is pd.DataFrame:
# check for right order:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
'Population regions are inconsistent with IO regions')
population = population.values
if ((self.D_cba_cap is None) or (self.D_pba_cap is None) or
(self.D_imp_cap is None) or (self.D_exp_cap is None)):
self.D_cba_cap = self.D_cba_reg.dot(
np.diagflat(1./population))
self.D_pba_cap = self.D_pba_reg.dot(
np.diagflat(1./population))
self.D_imp_cap = self.D_imp_reg.dot(
np.diagflat(1./population))
self.D_exp_cap = self.D_exp_reg.dot(
np.diagflat(1./population))
self.D_cba_cap.columns = self.D_cba_reg.columns
self.D_pba_cap.columns = self.D_pba_reg.columns
self.D_imp_cap.columns = self.D_imp_reg.columns
self.D_exp_cap.columns = self.D_exp_reg.columns
logging.debug(
'{} - Accounts D per capita calculated'.format(self.name))
return self | Calculates the missing part of the extension plus accounts
This method allows specifying an aggregated Y_agg for the
account calculation (see Y_agg below). However, the full Y needs
to be specified for the calculation of FY or SY.
Calculates:
- for each sector and country:
S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector,
D_exp_sector
- for each region:
D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg,
- for each region (if population vector is given):
D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap
Notes
-----
Only attributes which are not None are recalculated (for D_* this is
checked for each group (reg, cap, and w/o appendix)).
Parameters
----------
x : pandas.DataFrame or numpy.array
Industry output column vector
Y : pandas.DataFrame or numpy.array
Full final demand array
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
L : pandas.DataFrame or numpy.array, optional
Leontief input output table L. If this is not given,
the method recalculates M based on D_cba (must be present in
the extension).
population : pandas.DataFrame or np.array, optional
Row vector with population per region | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L727-L907 |
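A sketch of driving Extension.calc_system by hand (normally IOSystem.calc_all / calc_extensions does this internally); it assumes the bundled test system, whose satellite account is exposed as io.emissions:

import pymrio

io = pymrio.load_test()
io.calc_system()                      # core tables: x, A, L, Z

io.emissions.calc_system(x=io.x,
                         Y=io.Y,
                         L=io.L,
                         population=io.population)
print(io.emissions.D_cba_reg)         # regional footprint accounts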
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.plot_account | def plot_account(self, row, per_capita=False, sector=None,
file_name=False, file_dpi=600,
population=None, **kwargs):
""" Plots D_pba, D_cba, D_imp and D_exp for the specified row (account)
Plots either the total country accounts or those for a specific sector,
depending on the 'sector' parameter.
By default the accounts are plotted as bar charts.
However, any valid keyword for the pandas.DataFrame.plot
method can be passed.
Notes
-----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
row : string, tuple or int
A valid index for the row in the extension which
should be plotted (one(!) row - no list allowed)
per_capita : boolean, optional
Plot the per capita accounts instead of the absolute values
default is False
sector: string, optional
Plot the results for a specific sector of the IO table. If
None is given (default), the total regional accounts are plotted.
population : pandas.DataFrame or np.array, optional
Vector with population per region. This must be given if
values should be plotted per_capita for a specific sector since
these values are calculated on the fly.
file_name : path string, optional
If given, saves the plot to the given filename
file_dpi : int, optional
Dpi for saving the figure, default 600
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
Returns
-------
Axis as given by pandas.DataFrame.plot, None in case of errors
"""
# necessary if row is given for Multiindex without brackets
if type(per_capita) is not bool:
logging.error('per_capita parameter must be boolean')
return None
if type(row) is int:
row = self.D_cba.ix[row].name
name_row = (str(row).
replace('(', '').
replace(')', '').
replace("'", "").
replace('[', '').
replace(']', ''))
if sector:
graph_name = name_row + ' for sector ' + sector
else:
graph_name = name_row + ' total account'
if per_capita:
graph_name = graph_name + ' - per capita'
graph_name = self.name + ' - ' + graph_name
if self.unit is not None:
try:
# for multiindex the entry is given with header,
# for single index just the entry
y_label_name = (name_row +
' (' +
str(self.unit.ix[row, 'unit'].tolist()[0]) +
')')
except:
y_label_name = (name_row + ' (' +
str(self.unit.ix[row, 'unit']) + ')')
else:
y_label_name = name_row
if 'kind' not in kwargs:
kwargs['kind'] = 'bar'
if 'colormap' not in kwargs:
kwargs['colormap'] = 'Spectral'
accounts = collections.OrderedDict()
if sector:
accounts['Footprint'] = 'D_cba'
accounts['Territorial'] = 'D_pba'
accounts['Imports'] = 'D_imp'
accounts['Exports'] = 'D_exp'
else:
if per_capita:
accounts['Footprint'] = 'D_cba_cap'
accounts['Territorial'] = 'D_pba_cap'
accounts['Imports'] = 'D_imp_cap'
accounts['Exports'] = 'D_exp_cap'
else:
accounts['Footprint'] = 'D_cba_reg'
accounts['Territorial'] = 'D_pba_reg'
accounts['Imports'] = 'D_imp_reg'
accounts['Exports'] = 'D_exp_reg'
data_row = pd.DataFrame(columns=[key for key in accounts])
for key in accounts:
if sector:
try:
_data = pd.DataFrame(
getattr(self, accounts[key]).xs(
key=sector, axis=1, level='sector').ix[row].T)
except (AssertionError, KeyError):
_data = pd.DataFrame(
getattr(self, accounts[key]).xs(
key=sector, axis=1, level=1).ix[row].T)
if per_capita:
if population is not None:
if type(population) is pd.DataFrame:
# check for right order:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
'Population regions are inconsistent '
'with IO regions')
population = population.values
population = population.reshape((-1, 1))
_data = _data / population
else:
logging.error('Population must be given for sector '
'results per capita')
return
else:
_data = pd.DataFrame(getattr(self, accounts[key]).ix[row].T)
_data.columns = [key]
data_row[key] = _data[key]
if 'title' not in kwargs:
kwargs['title'] = graph_name
ax = data_row.plot(**kwargs)
plt.xlabel('Regions')
plt.ylabel(y_label_name)
plt.legend(loc='best')
try:
plt.tight_layout()
except:
pass
if file_name:
plt.savefig(file_name, dpi=file_dpi)
return ax | python | def plot_account(self, row, per_capita=False, sector=None,
file_name=False, file_dpi=600,
population=None, **kwargs):
""" Plots D_pba, D_cba, D_imp and D_exp for the specified row (account)
Plots either the total country accounts or those for a specific sector,
depending on the 'sector' parameter.
By default the accounts are plotted as bar charts.
However, any valid keyword for the pandas.DataFrame.plot
method can be passed.
Notes
-----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
row : string, tuple or int
A valid index for the row in the extension which
should be plotted (one(!) row - no list allowed)
per_capita : boolean, optional
Plot the per capita accounts instead of the absolute values
default is False
sector: string, optional
Plot the results for a specific sector of the IO table. If
None is given (default), the total regional accounts are plotted.
population : pandas.DataFrame or np.array, optional
Vector with population per region. This must be given if
values should be plotted per_capita for a specific sector since
these values are calculated on the fly.
file_name : path string, optional
If given, saves the plot to the given filename
file_dpi : int, optional
Dpi for saving the figure, default 600
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
Returns
-------
Axis as given by pandas.DataFrame.plot, None in case of errors
"""
# necessary if row is given for Multiindex without brackets
if type(per_capita) is not bool:
logging.error('per_capita parameter must be boolean')
return None
if type(row) is int:
row = self.D_cba.ix[row].name
name_row = (str(row).
replace('(', '').
replace(')', '').
replace("'", "").
replace('[', '').
replace(']', ''))
if sector:
graph_name = name_row + ' for sector ' + sector
else:
graph_name = name_row + ' total account'
if per_capita:
graph_name = graph_name + ' - per capita'
graph_name = self.name + ' - ' + graph_name
if self.unit is not None:
try:
# for multiindex the entry is given with header,
# for single index just the entry
y_label_name = (name_row +
' (' +
str(self.unit.ix[row, 'unit'].tolist()[0]) +
')')
except:
y_label_name = (name_row + ' (' +
str(self.unit.ix[row, 'unit']) + ')')
else:
y_label_name = name_row
if 'kind' not in kwargs:
kwargs['kind'] = 'bar'
if 'colormap' not in kwargs:
kwargs['colormap'] = 'Spectral'
accounts = collections.OrderedDict()
if sector:
accounts['Footprint'] = 'D_cba'
accounts['Territorial'] = 'D_pba'
accounts['Imports'] = 'D_imp'
accounts['Exports'] = 'D_exp'
else:
if per_capita:
accounts['Footprint'] = 'D_cba_cap'
accounts['Territorial'] = 'D_pba_cap'
accounts['Imports'] = 'D_imp_cap'
accounts['Exports'] = 'D_exp_cap'
else:
accounts['Footprint'] = 'D_cba_reg'
accounts['Territorial'] = 'D_pba_reg'
accounts['Imports'] = 'D_imp_reg'
accounts['Exports'] = 'D_exp_reg'
data_row = pd.DataFrame(columns=[key for key in accounts])
for key in accounts:
if sector:
try:
_data = pd.DataFrame(
getattr(self, accounts[key]).xs(
key=sector, axis=1, level='sector').ix[row].T)
except (AssertionError, KeyError):
_data = pd.DataFrame(
getattr(self, accounts[key]).xs(
key=sector, axis=1, level=1).ix[row].T)
if per_capita:
if population is not None:
if type(population) is pd.DataFrame:
# check for right order:
if (population.columns.tolist() !=
self.D_cba_reg.columns.tolist()):
logging.warning(
'Population regions are inconsistent '
'with IO regions')
population = population.values
population = population.reshape((-1, 1))
_data = _data / population
else:
logging.error('Population must be given for sector '
'results per capita')
return
else:
_data = pd.DataFrame(getattr(self, accounts[key]).ix[row].T)
_data.columns = [key]
data_row[key] = _data[key]
if 'title' not in kwargs:
kwargs['title'] = graph_name
ax = data_row.plot(**kwargs)
plt.xlabel('Regions')
plt.ylabel(y_label_name)
plt.legend(loc='best')
try:
plt.tight_layout()
except:
pass
if file_name:
plt.savefig(file_name, dpi=file_dpi)
return ax | Plots D_pba, D_cba, D_imp and D_exp for the specified row (account)
Plots either the total country accounts or those for a specific sector,
depending on the 'sector' parameter.
By default the accounts are plotted as bar charts.
However, any valid keyword for the pandas.DataFrame.plot
method can be passed.
Notes
-----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
row : string, tuple or int
A valid index for the row in the extension which
should be plotted (one(!) row - no list allowed)
per_capita : boolean, optional
Plot the per capita accounts instead of the absolute values
default is False
sector: string, optional
Plot the results for a specific sector of the IO table. If
None is given (default), the total regional accounts are plotted.
population : pandas.DataFrame or np.array, optional
Vector with population per region. This must be given if
values should be plotted per_capita for a specific sector since
these values are calculated on the fly.
file_name : path string, optional
If given, saves the plot to the given filename
file_dpi : int, optional
Dpi for saving the figure, default 600
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
Returns
-------
Axis as given by pandas.DataFrame.plot, None in case of errors | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L909-L1063 |
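A plotting sketch, assuming matplotlib is available and using the first stressor row of the test system's emissions extension:

import matplotlib.pyplot as plt
import pymrio

io = pymrio.load_test()
io.calc_all()

row = io.emissions.get_rows()[0]      # one(!) row of the extension
ax = io.emissions.plot_account(row, file_name='accounts.png')
plt.show()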
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.report_accounts | def report_accounts(self, path, per_region=True, per_capita=False,
pic_size=1000, format='rst', ffname=None, **kwargs):
""" Writes a report to the given path for the regional accounts
The report consists of a text file and a folder with the pics
(both names following parameter name)
Notes
----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
path : pathlib.Path or string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension; per_capita or per_region will be
attached) and folder names. If None is passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
"""
if not per_region and not per_capita:
return
_plt = plt.isinteractive()
_rcParams = mpl.rcParams.copy()
rcParams = {
'figure.figsize': (10, 5),
'figure.dpi': 350,
'axes.titlesize': 20,
'axes.labelsize': 20,
}
plt.ioff()
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
if ffname is None:
valid_char = string.ascii_letters + string.digits + '_'
ffname = self.name.replace(' ', '_')
ffname = "".join([r for r in ffname if r in valid_char])
rep_spec = collections.namedtuple('rep_spec',
['make',
'spec_string',
'is_per_capita'])
reports_to_write = {'per region accounts': rep_spec(
per_region, '_per_region', False),
'per capita accounts': rep_spec(
per_capita, '_per_capita', True)}
logging.info('Write report for {}'.format(self.name))
fig_name_list = []
for arep in reports_to_write:
if not reports_to_write[arep].make:
continue
report_txt = []
report_txt.append('###########')
report_txt.append('MRIO report')
report_txt.append('###########')
report_txt.append('\n')
_ext = 'Extension: ' + self.name + ' - ' + str(arep)
report_txt.append(_ext)
report_txt.append('='*len(_ext))
report_txt.append('.. contents::\n\n')
curr_ffname = ffname + reports_to_write[arep].spec_string
subfolder = path / curr_ffname
subfolder.mkdir(parents=True, exist_ok=True)
for row in self.get_rows():
name_row = (str(row).
replace('(', '').
replace(')', '').
replace("'", "").
replace(' ', '_').
replace(', ', '_').
replace('__', '_'))
graph_name = (self.name + ' - ' + str(row).
replace('(', '').
replace(')', '').
replace("'", ""))
# get valid file name
def clean(varStr):
return re.sub(r'\W|^(?=\d)', '_', varStr)
file_name = (clean(name_row +
reports_to_write[arep].spec_string))
# possibility of still having __ in there:
file_name = re.sub('_+', '_', file_name)
# restrict file length
file_name = file_name[:50]
def file_name_nr(a, c):
return a + '_' + str(c)
_loopco = 0
while file_name_nr(file_name, _loopco) in fig_name_list:
_loopco += 1
file_name = file_name_nr(file_name, _loopco)
fig_name_list.append(file_name)
file_name = file_name + '.png'
file_name = subfolder / file_name
file_name_rel = file_name.relative_to(path)
self.plot_account(row, file_name=file_name,
per_capita=reports_to_write[arep].
is_per_capita, **kwargs)
plt.close()
report_txt.append(graph_name)
report_txt.append('-'*len(graph_name) + '\n\n')
report_txt.append('.. image:: ' + file_name_rel)
report_txt.append(' :width: {} \n'.format(int(pic_size)))
# write report file and convert to given format
report_txt.append('\nReport written on ' +
time.strftime("%Y%m%d %H%M%S"))
fin_txt = '\n'.join(report_txt)
if format != 'rst':
try:
import docutils.core as dc
if format == 'tex':
format = 'latex'
fin_txt = dc.publish_string(
fin_txt, writer_name=format,
settings_overrides={'output_encoding': 'unicode'})
except:
logging.warn(
'Module docutils not available - write rst instead')
format = 'rst'
format_str = {'latex': 'tex',
'tex': 'tex',
'rst': 'txt',
'txt': 'txt',
'html': 'html'}
_repfile = curr_ffname + '.' + format_str.get(format, str(format))
with open(path / _repfile, 'w') as out_file:
out_file.write(fin_txt)
logging.info('Report for {what} written to {file_where}'.
format(what=arep, file_where=str(_repfile)))
# restore plot status
mpl.rcParams.update(_rcParams)
if _plt:
plt.ion() | python | def report_accounts(self, path, per_region=True, per_capita=False,
pic_size=1000, format='rst', ffname=None, **kwargs):
""" Writes a report to the given path for the regional accounts
The report consists of a text file and a folder with the pics
(both names following parameter name)
Notes
----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
path : pathlib.Path or string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension; per_capita or per_region will be
attached) and folder names. If None is passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
"""
if not per_region and not per_capita:
return
_plt = plt.isinteractive()
_rcParams = mpl.rcParams.copy()
rcParams = {
'figure.figsize': (10, 5),
'figure.dpi': 350,
'axes.titlesize': 20,
'axes.labelsize': 20,
}
plt.ioff()
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
if ffname is None:
valid_char = string.ascii_letters + string.digits + '_'
ffname = self.name.replace(' ', '_')
ffname = "".join([r for r in ffname if r in valid_char])
rep_spec = collections.namedtuple('rep_spec',
['make',
'spec_string',
'is_per_capita'])
reports_to_write = {'per region accounts': rep_spec(
per_region, '_per_region', False),
'per capita accounts': rep_spec(
per_capita, '_per_capita', True)}
logging.info('Write report for {}'.format(self.name))
fig_name_list = []
for arep in reports_to_write:
if not reports_to_write[arep].make:
continue
report_txt = []
report_txt.append('###########')
report_txt.append('MRIO report')
report_txt.append('###########')
report_txt.append('\n')
_ext = 'Extension: ' + self.name + ' - ' + str(arep)
report_txt.append(_ext)
report_txt.append('='*len(_ext))
report_txt.append('.. contents::\n\n')
curr_ffname = ffname + reports_to_write[arep].spec_string
subfolder = path / curr_ffname
subfolder.mkdir(parents=True, exist_ok=True)
for row in self.get_rows():
name_row = (str(row).
replace('(', '').
replace(')', '').
replace("'", "").
replace(' ', '_').
replace(', ', '_').
replace('__', '_'))
graph_name = (self.name + ' - ' + str(row).
replace('(', '').
replace(')', '').
replace("'", ""))
# get valid file name
def clean(varStr):
return re.sub(r'\W|^(?=\d)', '_', varStr)
file_name = (clean(name_row +
reports_to_write[arep].spec_string))
# possibility of still having __ in there:
file_name = re.sub('_+', '_', file_name)
# restrict file length
file_name = file_name[:50]
def file_name_nr(a, c):
return a + '_' + str(c)
_loopco = 0
while file_name_nr(file_name, _loopco) in fig_name_list:
_loopco += 1
file_name = file_name_nr(file_name, _loopco)
fig_name_list.append(file_name)
file_name = file_name + '.png'
file_name = subfolder / file_name
file_name_rel = file_name.relative_to(path)
self.plot_account(row, file_name=file_name,
per_capita=reports_to_write[arep].
is_per_capita, **kwargs)
plt.close()
report_txt.append(graph_name)
report_txt.append('-'*len(graph_name) + '\n\n')
report_txt.append('.. image:: ' + file_name_rel)
report_txt.append(' :width: {} \n'.format(int(pic_size)))
# write report file and convert to given format
report_txt.append('\nReport written on ' +
time.strftime("%Y%m%d %H%M%S"))
fin_txt = '\n'.join(report_txt)
if format != 'rst':
try:
import docutils.core as dc
if format == 'tex':
format = 'latex'
fin_txt = dc.publish_string(
fin_txt, writer_name=format,
settings_overrides={'output_encoding': 'unicode'})
except:
logging.warn(
'Module docutils not available - write rst instead')
format = 'rst'
format_str = {'latex': 'tex',
'tex': 'tex',
'rst': 'txt',
'txt': 'txt',
'html': 'html'}
_repfile = curr_ffname + '.' + format_str.get(format, str(format))
with open(path / _repfile, 'w') as out_file:
out_file.write(fin_txt)
logging.info('Report for {what} written to {file_where}'.
format(what=arep, file_where=str(_repfile)))
# restore plot status
mpl.rcParams.update(_rcParams)
if _plt:
plt.ion() | Writes a report to the given path for the regional accounts
The report consists of a text file and a folder with the pics
(both names following parameter name)
Notes
----
This looks prettier with the seaborn module
(import seaborn before calling this method)
Parameters
----------
path : pathlib.Path or string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension; per_capita or per_region will be
attached) and folder names. If None is passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1065-L1237 |
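A reporting sketch; the output path is arbitrary and the rst writer is used so no docutils install is required (per the format mapping above, the report file gets a .txt extension):

import pymrio

io = pymrio.load_test()
io.calc_all()

# Writes a report file plus a folder with the account figures below the path
io.emissions.report_accounts(path='/tmp/pymrio_report',
                             per_region=True,
                             per_capita=False,
                             format='rst')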
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.get_rows | def get_rows(self):
""" Returns the name of the rows of the extension"""
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
return None | python | def get_rows(self):
""" Returns the name of the rows of the extension"""
possible_dataframes = ['F', 'FY', 'M', 'S',
'D_cba', 'D_pba', 'D_imp', 'D_exp',
'D_cba_reg', 'D_pba_reg',
'D_imp_reg', 'D_exp_reg',
'D_cba_cap', 'D_pba_cap',
'D_imp_cap', 'D_exp_cap', ]
for df in possible_dataframes:
if (df in self.__dict__) and (getattr(self, df) is not None):
return getattr(self, df).index.get_values()
else:
logging.warn("No attributes available to get row names")
return None | Returns the name of the rows of the extension | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1239-L1252 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.get_row_data | def get_row_data(self, row, name=None):
""" Returns a dict with all available data for a row in the extension
Parameters
----------
row : tuple, list, string
A valid index for the extension DataFrames
name : string, optional
If given, adds a key 'name' with the given value to the dict. In
that case the dict can be
used directly to build a new extension.
Returns
-------
dict object with the data (pandas DataFrame) for the specific rows
"""
retdict = {}
for rowname, data in zip(self.get_DataFrame(),
self.get_DataFrame(data=True)):
retdict[rowname] = pd.DataFrame(data.ix[row])
if name:
retdict['name'] = name
return retdict | python | def get_row_data(self, row, name=None):
""" Returns a dict with all available data for a row in the extension
Parameters
----------
row : tuple, list, string
A valid index for the extension DataFrames
name : string, optional
If given, adds a key 'name' with the given value to the dict. In
that case the dict can be
used directly to build a new extension.
Returns
-------
dict object with the data (pandas DataFrame) for the specific rows
"""
retdict = {}
for rowname, data in zip(self.get_DataFrame(),
self.get_DataFrame(data=True)):
retdict[rowname] = pd.DataFrame(data.ix[row])
if name:
retdict['name'] = name
return retdict | Returns a dict with all available data for a row in the extension
Parameters
----------
row : tuple, list, string
A valid index for the extension DataFrames
name : string, optional
If given, adds a key 'name' with the given value to the dict. In
that case the dict can be
used directly to build a new extension.
Returns
-------
dict object with the data (pandas DataFrame) for the specific rows | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1254-L1276
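A sketch for get_row_data, again on the test system's emissions extension:

import pymrio

io = pymrio.load_test()
io.calc_all()

row = io.emissions.get_rows()[0]
data = io.emissions.get_row_data(row, name='single_stressor')
print(data.keys())          # one DataFrame per available table, plus 'name'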
konstantinstadler/pymrio | pymrio/core/mriosystem.py | Extension.diag_stressor | def diag_stressor(self, stressor, name=None):
""" Diagonalize one row of the stressor matrix for a flow analysis.
This method takes one row of the F matrix and diagonalizes it
to the full region/sector format. Footprint calculations based
on this matrix show the flow of embodied stressors from the source
region/sector (row index) to the final consumer (column index).
Note
----
Since this type of analysis is based on flows,
direct household emissions (FY) are not included.
Parameters
----------
stressor : str or int - valid index for one row of the F matrix
This must be a tuple for a multiindex, a string otherwise.
The stressor to diagonalize.
name : string (optional)
The new name for the extension,
if None (default): string based on the given stressor (row name)
Returns
-------
Extension
"""
if type(stressor) is int:
stressor = self.F.index[stressor]
if len(stressor) == 1:
stressor = stressor[0]
if not name:
if type(stressor) is str:
name = stressor
else:
name = '_'.join(stressor) + '_diag'
ext_diag = Extension(name)
ext_diag.F = pd.DataFrame(
index=self.F.columns,
columns=self.F.columns,
data=np.diag(self.F.loc[stressor, :])
)
try:
ext_diag.unit = pd.DataFrame(
index=ext_diag.F.index,
columns=self.unit.columns,
data=self.unit.loc[stressor].unit)
except AttributeError:
# If no unit in stressor, self.unit.columns break
ext_diag.unit = None
return ext_diag | python | def diag_stressor(self, stressor, name=None):
""" Diagonalize one row of the stressor matrix for a flow analysis.
This method takes one row of the F matrix and diagonalizes it
to the full region/sector format. Footprint calculations based
on this matrix show the flow of embodied stressors from the source
region/sector (row index) to the final consumer (column index).
Note
----
Since this type of analysis is based on flows,
direct household emissions (FY) are not included.
Parameters
----------
stressor : str or int - valid index for one row of the F matrix
This must be a tuple for a multiindex, a string otherwise.
The stressor to diagonalize.
name : string (optional)
The new name for the extension,
if None (default): string based on the given stressor (row name)
Returns
-------
Extension
"""
if type(stressor) is int:
stressor = self.F.index[stressor]
if len(stressor) == 1:
stressor = stressor[0]
if not name:
if type(stressor) is str:
name = stressor
else:
name = '_'.join(stressor) + '_diag'
ext_diag = Extension(name)
ext_diag.F = pd.DataFrame(
index=self.F.columns,
columns=self.F.columns,
data=np.diag(self.F.loc[stressor, :])
)
try:
ext_diag.unit = pd.DataFrame(
index=ext_diag.F.index,
columns=self.unit.columns,
data=self.unit.loc[stressor].unit)
except AttributeError:
# If no unit in stressor, self.unit.columns break
ext_diag.unit = None
return ext_diag | Diagonalize one row of the stressor matrix for a flow analysis.
This method takes one row of the F matrix and diagonalizes it
to the full region/sector format. Footprint calculations based
on this matrix show the flow of embodied stressors from the source
region/sector (row index) to the final consumer (column index).
Note
----
Since this type of analysis is based on flows,
direct household emissions (FY) are not included.
Parameters
----------
stressor : str or int - valid index for one row of the F matrix
This must be a tuple for a multiindex, a string otherwise.
The stressor to diagonalize.
name : string (optional)
The new name for the extension,
if None (default): string based on the given stressor (row name)
Returns
-------
Extension | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1278-L1333 |
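A minimal usage sketch of diag_stressor (hedged: it assumes the small test MRIO shipped with pymrio and its 'emissions' extension with a stressor row ('emission_type1', 'air'); the attribute name diag_emission is purely illustrative):

import pymrio

io = pymrio.load_test()
io.calc_all()
# diagonalize one stressor row and attach the result as a new extension
io.diag_emission = io.emissions.diag_stressor(('emission_type1', 'air'),
                                              name='emission type1 flows')
io.calc_all()   # computes the accounts of the new extension as well
print(io.diag_emission.D_cba.head())   # source region/sector -> consuming region/sector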
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.calc_system | def calc_system(self):
"""
Calculates the missing part of the core IOSystem
The method checks Z, x, A, L and calculates all which are None
"""
# Possible cases:
# 1) Z given, rest can be None and calculated
# 2) A and x given, rest can be calculated
# 3) A and Y , calc L (if not given) - calc x and the rest
# this catches case 3
if self.x is None and self.Z is None:
# in that case we need L or at least A to calculate it
if self.L is None:
self.L = calc_L(self.A)
logging.info('Leontief matrix L calculated')
self.x = calc_x_from_L(self.L, self.Y.sum(axis=1))
self.meta._add_modify('Industry Output x calculated')
# this chain of ifs catches cases 1 and 2
if self.Z is None:
self.Z = calc_Z(self.A, self.x)
self.meta._add_modify('Flow matrix Z calculated')
if self.x is None:
self.x = calc_x(self.Z, self.Y)
self.meta._add_modify('Industry output x calculated')
if self.A is None:
self.A = calc_A(self.Z, self.x)
self.meta._add_modify('Coefficient matrix A calculated')
if self.L is None:
self.L = calc_L(self.A)
self.meta._add_modify('Leontief matrix L calculated')
return self | python | def calc_system(self):
"""
Calculates the missing part of the core IOSystem
The method checks Z, x, A, L and calculates all which are None
"""
# Possible cases:
# 1) Z given, rest can be None and calculated
# 2) A and x given, rest can be calculated
# 3) A and Y , calc L (if not given) - calc x and the rest
# this catches case 3
if self.x is None and self.Z is None:
# in that case we need L or at least A to calculate it
if self.L is None:
self.L = calc_L(self.A)
logging.info('Leontief matrix L calculated')
self.x = calc_x_from_L(self.L, self.Y.sum(axis=1))
self.meta._add_modify('Industry Output x calculated')
# this chain of ifs catches cases 1 and 2
if self.Z is None:
self.Z = calc_Z(self.A, self.x)
self.meta._add_modify('Flow matrix Z calculated')
if self.x is None:
self.x = calc_x(self.Z, self.Y)
self.meta._add_modify('Industry output x calculated')
if self.A is None:
self.A = calc_A(self.Z, self.x)
self.meta._add_modify('Coefficient matrix A calculated')
if self.L is None:
self.L = calc_L(self.A)
self.meta._add_modify('Leontief matrix L calculated')
return self | Calculates the missing part of the core IOSystem
The method checks Z, x, A, L and calculates all which are None | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1454-L1492 |
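A hedged sketch of the coefficient-only case (case 3 in the comments above): keeping only A and Y and letting calc_system rebuild L, x and Z (the test MRIO is used for illustration):

import pymrio

io = pymrio.load_test()
io.calc_all()      # ensures A is available
io.Z = None
io.x = None
io.L = None        # now only A and Y remain in the core system
io.calc_system()   # recomputes L, then x, then Z
print(io.Z.shape, io.x.shape)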
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.calc_extensions | def calc_extensions(self, extensions=None, Y_agg=None):
""" Calculates the extension and their accounts
For the calculation, y is aggregated across specified y categories
The method calls .calc_system of each extension (or those given in the
extensions parameter)
Parameters
----------
extensions : list of strings, optional
A list of key names of extensions which shall be calculated.
Default: all dictionaries of IOSystem are assumed to be extensions
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
"""
ext_list = list(self.get_extensions(data=False))
extensions = extensions or ext_list
if type(extensions) == str:
extensions = [extensions]
for ext_name in extensions:
self.meta._add_modify(
'Calculating accounts for extension {}'.format(ext_name))
ext = getattr(self, ext_name)
ext.calc_system(x=self.x,
Y=self.Y,
L=self.L,
Y_agg=Y_agg,
population=self.population
)
return self | python | def calc_extensions(self, extensions=None, Y_agg=None):
""" Calculates the extension and their accounts
For the calculation, y is aggregated across specified y categories
The method calls .calc_system of each extension (or those given in the
extensions parameter)
Parameters
----------
extensions : list of strings, optional
A list of key names of extensions which shall be calculated.
Default: all dictionaries of IOSystem are assumed to be extensions
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories
"""
ext_list = list(self.get_extensions(data=False))
extensions = extensions or ext_list
if type(extensions) == str:
extensions = [extensions]
for ext_name in extensions:
self.meta._add_modify(
'Calculating accounts for extension {}'.format(ext_name))
ext = getattr(self, ext_name)
ext.calc_system(x=self.x,
Y=self.Y,
L=self.L,
Y_agg=Y_agg,
population=self.population
)
return self | Calculates the extension and their accounts
For the calculation, y is aggregated across specified y categories
The method calls .calc_system of each extension (or those given in the
extensions parameter)
Parameters
----------
extensions : list of strings, optional
A list of key names of extensions which shall be calculated.
Default: all dictionaries of IOSystem are assumed to be extensions
Y_agg : pandas.DataFrame or np.array, optional
The final demand aggregated (one category per country). Can be
used to restrict the calculation of CBA of a specific category
(e.g. households). Default: y is aggregated over all categories | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1494-L1527 |
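A hedged sketch restricting the extension accounts to a single final demand category (the 'emissions' extension name comes from the test MRIO; Y_agg is built by slicing one category per region):

import pymrio

io = pymrio.load_test()
io.calc_system()
one_cat = io.get_Y_categories()[0]                    # pick any Y category
y_agg = io.Y.xs(one_cat, level='category', axis=1)    # one column per region
io.calc_extensions(extensions='emissions', Y_agg=y_agg)
print(io.emissions.D_cba.head())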
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.report_accounts | def report_accounts(self, path, per_region=True,
per_capita=False, pic_size=1000,
format='rst', **kwargs):
""" Generates a report to the given path for all extension
This method calls .report_accounts for all extensions
Notes
-----
This looks prettier with the seaborn module (import seaborn before
calling this method)
Parameters
----------
path : string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension, per_capita or per_region will be
attached) and folder names. If None gets passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
"""
for ext in self.get_extensions(data=True):
ext.report_accounts(path=path,
per_region=per_region,
per_capita=per_capita,
pic_size=pic_size,
format=format,
**kwargs) | python | def report_accounts(self, path, per_region=True,
per_capita=False, pic_size=1000,
format='rst', **kwargs):
""" Generates a report to the given path for all extension
This method calls .report_accounts for all extensions
Notes
-----
This looks prettier with the seaborn module (import seaborn before
calling this method)
Parameters
----------
path : string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension, per_capita or per_region will be
attached) and folder names. If None gets passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method)
"""
for ext in self.get_extensions(data=True):
ext.report_accounts(path=path,
per_region=per_region,
per_capita=per_capita,
pic_size=pic_size,
format=format,
**kwargs) | Generates a report to the given path for all extensions
This method calls .report_accounts for all extensions
Notes
-----
This looks prettier with the seaborn module (import seaborn before
calling this method)
Parameters
----------
path : string
Root path for the report
per_region : boolean, optional
If true, reports the accounts per region
per_capita : boolean, optional
If true, reports the accounts per capita
If per_capita and per_region are False, nothing will be done
pic_size : int, optional
size for the figures in px, 1000 by default
format : string, optional
file format of the report:
'rst'(default), 'html', 'latex', ...
except for rst all depend on the module docutils (all writer_name
from docutils can be used as format)
ffname : string, optional
root file name (without extension, per_capita or per_region will be
attached) and folder names. If None gets passed (default), self.name
will be modified to get a valid name for the operating system
without blanks
**kwargs : key word arguments, optional
This will be passed directly to the pd.DataFrame.plot method
(through the self.plot_account method) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1529-L1574 |
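A hedged sketch (writes rst files and png figures below the given folder; matplotlib is required, seaborn only improves the styling):

import pymrio

io = pymrio.load_test()
io.calc_all()
io.report_accounts(path='./test_report', per_region=True,
                   per_capita=True, format='rst')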
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.get_extensions | def get_extensions(self, data=False):
""" Yields the extensions or their names
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the extensions.
If False, returns a generator which yields the names of
the extensions (default)
Returns
-------
Generator for Extension or string
"""
ext_list = [key for key in
self.__dict__ if type(self.__dict__[key]) is Extension]
for key in ext_list:
if data:
yield getattr(self, key)
else:
yield key | python | def get_extensions(self, data=False):
""" Yields the extensions or their names
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the extensions.
If False, returns a generator which yields the names of
the extensions (default)
Returns
-------
Generator for Extension or string
"""
ext_list = [key for key in
self.__dict__ if type(self.__dict__[key]) is Extension]
for key in ext_list:
if data:
yield getattr(self, key)
else:
yield key | Yields the extensions or their names
Parameters
----------
data : boolean, optional
If True, returns a generator which yields the extensions.
If False, returns a generator which yields the names of
the extensions (default)
Returns
-------
Generator for Extension or string | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1576-L1598 |
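A short sketch of both modes of the generator (test MRIO for illustration):

import pymrio

io = pymrio.load_test()
for name in io.get_extensions():            # attribute names, e.g. 'emissions'
    print(name)
for ext in io.get_extensions(data=True):    # Extension objects
    print(ext.name, ext.F.shape)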
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.reset_full | def reset_full(self, force=False):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
super().reset_full(force=force, _meta=self.meta)
return self | python | def reset_full(self, force=False):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
super().reset_full(force=force, _meta=self.meta)
return self | Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1600-L1611 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.reset_all_full | def reset_all_full(self, force=False):
""" Removes all accounts that can be recalculated (IOSystem and extensions)
This calls reset_full for the core system and all extension.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
self.reset_full()
[ee.reset_full() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset all calculated data")
return self | python | def reset_all_full(self, force=False):
""" Removes all accounts that can be recalculated (IOSystem and extensions)
This calls reset_full for the core system and all extension.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
self.reset_full()
[ee.reset_full() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset all calculated data")
return self | Removes all accounts that can be recalculated (IOSystem and extensions)
This calls reset_full for the core system and all extension.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1613-L1629 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.reset_to_flows | def reset_to_flows(self, force=False):
""" Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
super().reset_to_flows(force=force, _meta=self.meta)
return self | python | def reset_to_flows(self, force=False):
""" Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
super().reset_to_flows(force=force, _meta=self.meta)
return self | Keeps only the absolute values.
This removes all attributes which can not be aggregated and must be
recalculated after the aggregation.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1631-L1645 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.reset_all_to_flows | def reset_all_to_flows(self, force=False):
""" Resets the IOSystem and all extensions to absolute flows
This method calls reset_to_flows for the IOSystem and for
all Extensions in the system.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
self.reset_to_flows(force=force)
[ee.reset_to_flows(force=force)
for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to absolute flows")
return self | python | def reset_all_to_flows(self, force=False):
""" Resets the IOSystem and all extensions to absolute flows
This method calls reset_to_flows for the IOSystem and for
all Extensions in the system.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
"""
self.reset_to_flows(force=force)
[ee.reset_to_flows(force=force)
for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to absolute flows")
return self | Resets the IOSystem and all extensions to absolute flows
This method calls reset_to_flows for the IOSystem and for
all Extensions in the system.
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1647-L1665 |
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.reset_all_to_coefficients | def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self | python | def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self | Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1667-L1684 |
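A hedged sketch contrasting the reset variants (all modify the system in place and return it for chaining):

import pymrio

io = pymrio.load_test()
io.calc_all()
io.reset_all_full()             # drop everything derivable from Z, Y, F, FY
io.calc_all()                   # fully recoverable
io.reset_all_to_flows()         # keep absolute flows only
io.calc_all()                   # still recoverable
io.reset_all_to_coefficients()  # keep coefficients only - absolute data is gone
# a further calc_all() would require final demand Y to be supplied again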
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.save_all | def save_all(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saves the system and all extensions
Extensions are saved in separate folders (names based on extension)
Parameters are passed to the .save methods of the IOSystem and
Extensions. See parameters description there.
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
self.save(path=path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
for ext, ext_name in zip(self.get_extensions(data=True),
self.get_extensions()):
ext_path = path / ext_name
ext.save(path=ext_path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
return self | python | def save_all(self, path, table_format='txt', sep='\t',
table_ext=None, float_format='%.12g'):
""" Saves the system and all extensions
Extensions are saved in separate folders (names based on extension)
Parameters are passed to the .save methods of the IOSystem and
Extensions. See parameters description there.
"""
if type(path) is str:
path = path.rstrip('\\')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
self.save(path=path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
for ext, ext_name in zip(self.get_extensions(data=True),
self.get_extensions()):
ext_path = path / ext_name
ext.save(path=ext_path,
table_format=table_format,
sep=sep,
table_ext=table_ext,
float_format=float_format)
return self | Saves the system and all extensions
Extensions are saved in separate folders (names based on extension)
Parameters are passed to the .save methods of the IOSystem and
Extensions. See parameters description there. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1686-L1717 |
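A hedged round-trip sketch (the target folder name is arbitrary; each extension ends up in its own sub-folder):

import pymrio

io = pymrio.load_test()
io.calc_all()
io.save_all('./mrio_storage')
io_reloaded = pymrio.load_all('./mrio_storage')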
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.aggregate | def aggregate(self, region_agg=None, sector_agg=None,
region_names=None, sector_names=None,
inplace=True, pre_aggregation=False):
""" Aggregates the IO system.
Aggregation can be given as vector (use pymrio.build_agg_vec) or
aggregation matrix. In the case of a vector this must be of length
self.get_regions() / self.get_sectors() respectively with the new
position as integer or a string of the new name. In the case of
strings the final output order can be specified in region_dict and
sector_dict in the format {str1 = int_pos, str2 = int_pos, ...}.
If the sector / region concordance is given as matrix or numerical
vector, generic names will be used for the new sectors/regions. One
can define specific names by defining the aggregation as string
vector
Parameters
----------
region_agg : list, array or string, optional
The aggregation vector or matrix for the regions (np.ndarray or
list). If string: aggregates to one total region and names it
after the given string.
Pandas DataFrame with columns 'original' and 'aggregated'.
This is the output from the country_converter.agg_conc
sector_agg : list, arrays or string, optional
The aggregation vector or matrix for the sectors (np.ndarray or
list). If string: aggregates to one total sector and names it
after the given string.
region_names : list, optional
Names for the aggregated regions.
If concordance matrix - in order of rows in this matrix
If concordance vector - in order of num. values in this vector
If string based - same order as the passed string
Not considered if passing a DataFrame - in this case give the
names in the column 'aggregated'
sector_names : list, optional
Names for the aggregated sectors. Same behaviour as
'region_names'
inplace : boolean, optional
If True, aggregates the IOSystem in place (default),
otherwise aggregation happens on a copy of the IOSystem.
Regardless of the setting, the IOSystem is returned to
allow for chained operations.
Returns
-------
IOSystem
Aggregated IOSystem (if inplace is False)
"""
# Development note: This can not be put in the CoreSystem b/c
# then the recalculation of the extension coefficients would not
# work.
if not inplace:
self = self.copy()
try:
self.reset_to_flows()
except ResetError:
raise AggregationError("System under-defined for aggregation - "
"do a 'calc_all' before aggregation")
if type(region_names) is str:
region_names = [region_names]
if type(sector_names) is str:
sector_names = [sector_names]
if type(region_agg) is pd.DataFrame:
if (('original' not in region_agg.columns) or
('aggregated' not in region_agg.columns)):
raise ValueError('Passed DataFrame must include the columns '
'"original" and "aggregated"')
region_agg = (region_agg
.set_index('original')
.reindex(self.get_regions(),
fill_value=MISSING_AGG_ENTRY['region'])
.loc[:, 'aggregated'])
if type(sector_agg) is pd.DataFrame:
if (('original' not in sector_agg.columns) or
('aggregated' not in sector_agg.columns)):
raise ValueError('Passed DataFrame must include the columns '
'"original" and "aggregated"')
sector_agg = (sector_agg
.set_index('original')
.reindex(self.get_sectors(),
fill_value=MISSING_AGG_ENTRY['sector'])
.loc[:, 'aggregated'])
# fill the aggregation matrix with 1:1 mapping
# if input not given and get names if not given
_same_regions = False
_same_sectors = False
if region_agg is None:
region_agg = self.get_regions()
region_names = region_names or self.get_regions()
_same_regions = True
if sector_agg is None:
sector_agg = self.get_sectors()
sector_names = sector_names or self.get_sectors()
_same_sectors = True
# capture total aggregation case
if type(region_agg) is str:
region_agg = [region_agg] * len(self.get_regions())
if type(sector_agg) is str:
sector_agg = [sector_agg] * len(self.get_sectors())
if ioutil.is_vector(region_agg):
region_conc = ioutil.build_agg_matrix(region_agg)
else:
region_conc = region_agg
if ioutil.is_vector(sector_agg):
sector_conc = ioutil.build_agg_matrix(sector_agg)
else:
sector_conc = sector_agg
# build the new names
if (not _same_regions) and (not region_names):
if isinstance(region_agg, np.ndarray):
region_agg = region_agg.flatten().tolist()
if type(region_agg[0]) is str:
region_names = ioutil.unique_element(region_agg)
else:
# rows in the concordance matrix give the new number of
# regions
region_names = [GENERIC_NAMES['region'] +
str(nr) for nr in
range(region_conc.shape[0])]
if (not _same_sectors) and (not sector_names):
if isinstance(sector_agg, np.ndarray):
sector_agg = (sector_agg.flatten().tolist())
if type(sector_agg[0]) is str:
sector_names = ioutil.unique_element(sector_agg)
else:
sector_names = [GENERIC_NAMES['sector'] +
str(nr) for nr in
range(sector_conc.shape[0])]
# Assert right shapes
if not sector_conc.shape[1] == len(self.get_sectors()):
raise ValueError('Sector aggregation does not '
'correspond to the number of sectors.')
if not region_conc.shape[1] == len(self.get_regions()):
raise ValueError('Region aggregation does not '
'correspond to the number of regions.')
if not len(sector_names) == sector_conc.shape[0]:
raise ValueError('New sector names do not '
'match sector aggregation.')
if not len(region_names) == region_conc.shape[0]:
raise ValueError('New region names do not '
'match region aggregation.')
# build pandas.MultiIndex for the aggregated system
_reg_list_for_sec = [[r] * sector_conc.shape[0] for r in region_names]
_reg_list_for_sec = [entry for entrylist in
_reg_list_for_sec for entry in entrylist]
_reg_list_for_Ycat = [[r] * len(self.get_Y_categories()) for r in
region_names]
_reg_list_for_Ycat = [entry for entrylist in
_reg_list_for_Ycat for entry in entrylist]
_sec_list = list(sector_names) * region_conc.shape[0]
_Ycat_list = list(self.get_Y_categories()) * region_conc.shape[0]
mi_reg_sec = pd.MultiIndex.from_arrays(
[_reg_list_for_sec, _sec_list],
names=['region', 'sector'])
mi_reg_Ycat = pd.MultiIndex.from_arrays(
[_reg_list_for_Ycat, _Ycat_list],
names=['region', 'category'])
# arrange the whole concordance matrix
conc = np.kron(region_conc, sector_conc)
conc_y = np.kron(region_conc, np.eye(len(self.get_Y_categories())))
# Aggregate
self.meta._add_modify('Aggregate final demand y')
self.Y = pd.DataFrame(
data=conc.dot(self.Y).dot(conc_y.T),
index=mi_reg_sec,
columns=mi_reg_Ycat,
)
self.meta._add_modify('Aggregate transaction matrix Z')
self.Z = pd.DataFrame(
data=conc.dot(self.Z).dot(conc.T),
index=mi_reg_sec,
columns=mi_reg_sec,
)
if self.x is not None:
# x could also be obtained from the
# aggregated Z, but aggregate if available
self.x = pd.DataFrame(
data=conc.dot(self.x),
index=mi_reg_sec,
columns=self.x.columns,
)
self.meta._add_modify('Aggregate industry output x')
else:
self.x = calc_x(self.Z, self.Y)
if self.population is not None:
self.meta._add_modify('Aggregate population vector')
self.population = pd.DataFrame(
data=region_conc.dot(self.population.T).T,
columns=region_names,
index=self.population.index,
)
for extension in self.get_extensions(data=True):
self.meta._add_modify('Aggregate extensions...')
extension.reset_to_flows()
st_redo_unit = False
for ik_name, ik_df in zip(
extension.get_DataFrame(data=False, with_unit=False),
extension.get_DataFrame(data=True, with_unit=False)):
# Without unit - this is reset afterwards if necessary
if ik_df.index.names == ['region',
'sector'] == ik_df.columns.names:
# Full disaggregated extensions - aggregate both axes
# (this is the case if the extension shows the flows from
# pba to cba)
extension.__dict__[ik_name] = pd.DataFrame(
data=conc.dot(ik_df).dot(conc.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_sec
extension.__dict__[ik_name].index = mi_reg_sec
st_redo_unit = True
elif (ik_df.index.names == ['region', 'sector'] and
ik_df.columns.names == ['region', 'category']):
# Full disaggregated final demand satellite account.
# That's not implemented yet - but aggregation is in place
extension.__dict__[ik_name] = pd.DataFrame(
data=conc.dot(ik_df).dot(conc_y.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_Ycat
extension.__dict__[ik_name].index = mi_reg_sec
elif ik_df.columns.names == ['region', 'category']:
# Satellite account connected to final demand (e.g. FY)
extension.__dict__[ik_name] = pd.DataFrame(
data=ik_df.dot(conc_y.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_Ycat
extension.__dict__[ik_name].index = ik_df.index
else:
# Standard case - aggregated columns, keep stressor rows
extension.__dict__[ik_name] = pd.DataFrame(
data=ik_df.dot(conc.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_sec
extension.__dict__[ik_name].index = ik_df.index
if st_redo_unit:
try:
_value = extension.unit.iloc[0].tolist()[0]
extension.unit = pd.DataFrame(
index=mi_reg_sec,
columns=extension.unit.columns,
data=_value)
except AttributeError:
# could fail if no unit available
extension.unit = None
self.calc_extensions()
return self | python | def aggregate(self, region_agg=None, sector_agg=None,
region_names=None, sector_names=None,
inplace=True, pre_aggregation=False):
""" Aggregates the IO system.
Aggregation can be given as vector (use pymrio.build_agg_vec) or
aggregation matrix. In the case of a vector this must be of length
self.get_regions() / self.get_sectors() respectively with the new
position as integer or a string of the new name. In the case of
strings the final output order can be specified in region_dict and
sector_dict in the format {str1 = int_pos, str2 = int_pos, ...}.
If the sector / region concordance is given as matrix or numerical
vector, generic names will be used for the new sectors/regions. One
can define specific names by defining the aggregation as string
vector
Parameters
----------
region_agg : list, array or string, optional
The aggregation vector or matrix for the regions (np.ndarray or
list). If string: aggregates to one total region and names it
after the given string.
Pandas DataFrame with columns 'original' and 'aggregated'.
This is the output from the country_converter.agg_conc
sector_agg : list, arrays or string, optional
The aggregation vector or matrix for the sectors (np.ndarray or
list). If string: aggregates to one total sector and names it
after the given string.
region_names : list, optional
Names for the aggregated regions.
If concordance matrix - in order of rows in this matrix
If concordance vector - in order of num. values in this vector
If string based - same order as the passed string
Not considered if passing a DataFrame - in this case give the
names in the column 'aggregated'
sector_names : list, optional
Names for the aggregated sectors. Same behaviour as
'region_names'
inplace : boolean, optional
If True, aggregates the IOSystem in place (default),
otherwise aggregation happens on a copy of the IOSystem.
Regardless of the setting, the IOSystem is returned to
allow for chained operations.
Returns
-------
IOSystem
Aggregated IOSystem (if inplace is False)
"""
# Development note: This can not be put in the CoreSystem b/c
# then the recalculation of the extension coefficients would not
# work.
if not inplace:
self = self.copy()
try:
self.reset_to_flows()
except ResetError:
raise AggregationError("System under-defined for aggregation - "
"do a 'calc_all' before aggregation")
if type(region_names) is str:
region_names = [region_names]
if type(sector_names) is str:
sector_names = [sector_names]
if type(region_agg) is pd.DataFrame:
if (('original' not in region_agg.columns) or
('aggregated' not in region_agg.columns)):
raise ValueError('Passed DataFrame must include the columns '
'"original" and "aggregated"')
region_agg = (region_agg
.set_index('original')
.reindex(self.get_regions(),
fill_value=MISSING_AGG_ENTRY['region'])
.loc[:, 'aggregated'])
if type(sector_agg) is pd.DataFrame:
if (('original' not in sector_agg.columns) or
('aggregated' not in sector_agg.columns)):
raise ValueError('Passed DataFrame must include the columns '
'"original" and "aggregated"')
sector_agg = (sector_agg
.set_index('original')
.reindex(self.get_sectors(),
fill_value=MISSING_AGG_ENTRY['sector'])
.loc[:, 'aggregated'])
# fill the aggregation matrix with 1:1 mapping
# if input not given and get names if not given
_same_regions = False
_same_sectors = False
if region_agg is None:
region_agg = self.get_regions()
region_names = region_names or self.get_regions()
_same_regions = True
if sector_agg is None:
sector_agg = self.get_sectors()
sector_names = sector_names or self.get_sectors()
_same_sectors = True
# capture total aggregation case
if type(region_agg) is str:
region_agg = [region_agg] * len(self.get_regions())
if type(sector_agg) is str:
sector_agg = [sector_agg] * len(self.get_sectors())
if ioutil.is_vector(region_agg):
region_conc = ioutil.build_agg_matrix(region_agg)
else:
region_conc = region_agg
if ioutil.is_vector(sector_agg):
sector_conc = ioutil.build_agg_matrix(sector_agg)
else:
sector_conc = sector_agg
# build the new names
if (not _same_regions) and (not region_names):
if isinstance(region_agg, np.ndarray):
region_agg = region_agg.flatten().tolist()
if type(region_agg[0]) is str:
region_names = ioutil.unique_element(region_agg)
else:
# rows in the concordance matrix give the new number of
# regions
region_names = [GENERIC_NAMES['region'] +
str(nr) for nr in
range(region_conc.shape[0])]
if (not _same_sectors) and (not sector_names):
if isinstance(sector_agg, np.ndarray):
sector_agg = (sector_agg.flatten().tolist())
if type(sector_agg[0]) is str:
sector_names = ioutil.unique_element(sector_agg)
else:
sector_names = [GENERIC_NAMES['sector'] +
str(nr) for nr in
range(sector_conc.shape[0])]
# Assert right shapes
if not sector_conc.shape[1] == len(self.get_sectors()):
raise ValueError('Sector aggregation does not '
'correspond to the number of sectors.')
if not region_conc.shape[1] == len(self.get_regions()):
raise ValueError('Region aggregation does not '
'correspond to the number of regions.')
if not len(sector_names) == sector_conc.shape[0]:
raise ValueError('New sector names do not '
'match sector aggregation.')
if not len(region_names) == region_conc.shape[0]:
raise ValueError('New region names do not '
'match region aggregation.')
# build pandas.MultiIndex for the aggregated system
_reg_list_for_sec = [[r] * sector_conc.shape[0] for r in region_names]
_reg_list_for_sec = [entry for entrylist in
_reg_list_for_sec for entry in entrylist]
_reg_list_for_Ycat = [[r] * len(self.get_Y_categories()) for r in
region_names]
_reg_list_for_Ycat = [entry for entrylist in
_reg_list_for_Ycat for entry in entrylist]
_sec_list = list(sector_names) * region_conc.shape[0]
_Ycat_list = list(self.get_Y_categories()) * region_conc.shape[0]
mi_reg_sec = pd.MultiIndex.from_arrays(
[_reg_list_for_sec, _sec_list],
names=['region', 'sector'])
mi_reg_Ycat = pd.MultiIndex.from_arrays(
[_reg_list_for_Ycat, _Ycat_list],
names=['region', 'category'])
# arrange the whole concordance matrix
conc = np.kron(region_conc, sector_conc)
conc_y = np.kron(region_conc, np.eye(len(self.get_Y_categories())))
# Aggregate
self.meta._add_modify('Aggregate final demand y')
self.Y = pd.DataFrame(
data=conc.dot(self.Y).dot(conc_y.T),
index=mi_reg_sec,
columns=mi_reg_Ycat,
)
self.meta._add_modify('Aggregate transaction matrix Z')
self.Z = pd.DataFrame(
data=conc.dot(self.Z).dot(conc.T),
index=mi_reg_sec,
columns=mi_reg_sec,
)
if self.x is not None:
# x could also be obtained from the
# aggregated Z, but aggregate if available
self.x = pd.DataFrame(
data=conc.dot(self.x),
index=mi_reg_sec,
columns=self.x.columns,
)
self.meta._add_modify('Aggregate industry output x')
else:
self.x = calc_x(self.Z, self.Y)
if self.population is not None:
self.meta._add_modify('Aggregate population vector')
self.population = pd.DataFrame(
data=region_conc.dot(self.population.T).T,
columns=region_names,
index=self.population.index,
)
for extension in self.get_extensions(data=True):
self.meta._add_modify('Aggregate extensions...')
extension.reset_to_flows()
st_redo_unit = False
for ik_name, ik_df in zip(
extension.get_DataFrame(data=False, with_unit=False),
extension.get_DataFrame(data=True, with_unit=False)):
# Without unit - this is reset afterwards if necessary
if ik_df.index.names == ['region',
'sector'] == ik_df.columns.names:
# Full disaggregated extensions - aggregate both axes
# (this is the case if the extension shows the flows from
# pba to cba)
extension.__dict__[ik_name] = pd.DataFrame(
data=conc.dot(ik_df).dot(conc.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_sec
extension.__dict__[ik_name].index = mi_reg_sec
st_redo_unit = True
elif (ik_df.index.names == ['region', 'sector'] and
ik_df.columns.names == ['region', 'category']):
# Full disaggregated final demand satellite account.
# That's not implemented yet - but aggregation is in place
extension.__dict__[ik_name] = pd.DataFrame(
data=conc.dot(ik_df).dot(conc_y.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_Ycat
extension.__dict__[ik_name].index = mi_reg_sec
elif ik_df.columns.names == ['region', 'category']:
# Satellite account connected to final demand (e.g. FY)
extension.__dict__[ik_name] = pd.DataFrame(
data=ik_df.dot(conc_y.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_Ycat
extension.__dict__[ik_name].index = ik_df.index
else:
# Standard case - aggregated columns, keep stressor rows
extension.__dict__[ik_name] = pd.DataFrame(
data=ik_df.dot(conc.T))
# next step must be done afterwards due to unknown reasons
extension.__dict__[ik_name].columns = mi_reg_sec
extension.__dict__[ik_name].index = ik_df.index
if st_redo_unit:
try:
_value = extension.unit.iloc[0].tolist()[0]
extension.unit = pd.DataFrame(
index=mi_reg_sec,
columns=extension.unit.columns,
data=_value)
except AttributeError:
# could fail if no unit available
extension.unit = None
self.calc_extensions()
return self | Aggregates the IO system.
Aggregation can be given as vector (use pymrio.build_agg_vec) or
aggregation matrix. In the case of a vector this must be of length
self.get_regions() / self.get_sectors() respectively with the new
position as integer or a string of the new name. In the case of
strings the final output order can be specified in region_dict and
sector_dict in the format {str1 = int_pos, str2 = int_pos, ...}.
If the sector / region concordance is given as matrix or numerical
vector, generic names will be used for the new sectors/regions. One
can define specific names by defining the aggregation as string
vector
Parameters
----------
region_agg : list, array or string, optional
The aggregation vector or matrix for the regions (np.ndarray or
list). If string: aggregates to one total region and names it
after the given string.
Pandas DataFrame with columns 'original' and 'aggregated'.
This is the output from the country_converter.agg_conc
sector_agg : list, arrays or string, optional
The aggregation vector or matrix for the sectors (np.ndarray or
list). If string: aggregates to one total sector and names it
after the given string.
region_names : list, optional
Names for the aggregated regions.
If concordance matrix - in order of rows in this matrix
If concordance vector - in order of num. values in this vector
If string based - same order as the passed string
Not considered if passing a DataFrame - in this case give the
names in the column 'aggregated'
sector_names : list, optional
Names for the aggregated sectors. Same behaviour as
'region_names'
inplace : boolean, optional
If True, aggregates the IOSystem in place (default),
otherwise aggregation happens on a copy of the IOSystem.
Regardless of the setting, the IOSystem is returned to
allow for chained operations.
Returns
-------
IOSystem
Aggregated IOSystem (if inplace is False) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1719-L1995 |
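A hedged sketch of a string-vector aggregation (the six entries match the six regions of the test MRIO; for real databases use pymrio.build_agg_vec or a country_converter concordance):

import pymrio

io = pymrio.load_test()
io.calc_all()
io.aggregate(region_agg=['North', 'North', 'North',
                         'South', 'South', 'South'],
             sector_agg='total_sector')   # all sectors into one
io.calc_all()                             # recalculate accounts for the aggregated system
print(io.get_regions())
print(io.get_sectors())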
konstantinstadler/pymrio | pymrio/core/mriosystem.py | IOSystem.remove_extension | def remove_extension(self, ext=None):
""" Remove extension from IOSystem
For single Extensions the same can be achieved with del
IOSystem_name.Extension_name
Parameters
----------
ext : string or list, optional
The extension to remove, this can be given as the name of the
instance or of Extension.name (the latter will be checked if no
instance was found)
If ext is None (default) all Extensions will be removed
"""
if ext is None:
ext = list(self.get_extensions())
if type(ext) is str:
ext = [ext]
for ee in ext:
try:
del self.__dict__[ee]
except KeyError:
for exinstancename, exdata in zip(
self.get_extensions(data=False),
self.get_extensions(data=True)):
if exdata.name == ee:
del self.__dict__[exinstancename]
finally:
self.meta._add_modify("Removed extension {}".format(ee))
return self | python | def remove_extension(self, ext=None):
""" Remove extension from IOSystem
For single Extensions the same can be achieved with del
IOSystem_name.Extension_name
Parameters
----------
ext : string or list, optional
The extension to remove, this can be given as the name of the
instance or of Extension.name (the latter will be checked if no
instance was found)
If ext is None (default) all Extensions will be removed
"""
if ext is None:
ext = list(self.get_extensions())
if type(ext) is str:
ext = [ext]
for ee in ext:
try:
del self.__dict__[ee]
except KeyError:
for exinstancename, exdata in zip(
self.get_extensions(data=False),
self.get_extensions(data=True)):
if exdata.name == ee:
del self.__dict__[exinstancename]
finally:
self.meta._add_modify("Removed extension {}".format(ee))
return self | Remove extension from IOSystem
For single Extensions the same can be achieved with del
IOSystem_name.Extension_name
Parameters
----------
ext : string or list, optional
The extension to remove, this can be given as the name of the
instance or of Extension.name (the latter will be checked if no
instance was found)
If ext is None (default) all Extensions will be removed | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1997-L2028 |
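A short sketch (the extension names are those of the test MRIO; check get_extensions() for other databases):

import pymrio

io = pymrio.load_test()
print(list(io.get_extensions()))       # e.g. ['emissions', 'factor_inputs']
io.remove_extension('factor_inputs')
print(list(io.get_extensions()))       # only 'emissions' remains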
konstantinstadler/pymrio | pymrio/tools/ioutil.py | is_vector | def is_vector(inp):
""" Returns true if the input can be interpreted as a 'true' vector
Note
----
Does only check dimensions, not if type is numeric
Parameters
----------
inp : numpy.ndarray or something that can be converted into ndarray
Returns
-------
Boolean
True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1
False for all other arrays
"""
inp = np.asarray(inp)
nr_dim = np.ndim(inp)
if nr_dim == 1:
return True
elif (nr_dim == 2) and (1 in inp.shape):
return True
else:
return False | python | def is_vector(inp):
""" Returns true if the input can be interpreted as a 'true' vector
Note
----
Does only check dimensions, not if type is numeric
Parameters
----------
inp : numpy.ndarray or something that can be converted into ndarray
Returns
-------
Boolean
True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1
False for all other arrays
"""
inp = np.asarray(inp)
nr_dim = np.ndim(inp)
if nr_dim == 1:
return True
elif (nr_dim == 2) and (1 in inp.shape):
return True
else:
return False | Returns true if the input can be interpreted as a 'true' vector
Note
----
Does only check dimensions, not if type is numeric
Parameters
----------
inp : numpy.ndarray or something that can be converted into ndarray
Returns
-------
Boolean
True for vectors: ndim = 1 or ndim = 2 and shape of one axis = 1
False for all other arrays | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L19-L43 |
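A short sketch of the accepted shapes (import path follows the module location shown above):

import numpy as np
from pymrio.tools import ioutil

print(ioutil.is_vector([1, 2, 3]))          # True - one dimension
print(ioutil.is_vector(np.ones((4, 1))))    # True - 2d, but one axis has length 1
print(ioutil.is_vector(np.ones((4, 2))))    # False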
konstantinstadler/pymrio | pymrio/tools/ioutil.py | get_repo_content | def get_repo_content(path):
""" List of files in a repo (path or zip)
Parameters
----------
path: string or pathlib.Path
Returns
-------
Returns a namedtuple with .iszip and .filelist
The paths in filelist are pure strings.
"""
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(str(path)) as zz:
filelist = [info.filename for info in zz.infolist()]
iszip = True
else:
iszip = False
filelist = [str(f) for f in path.glob('**/*') if f.is_file()]
return namedtuple('repocontent', ['iszip', 'filelist'])(iszip, filelist) | python | def get_repo_content(path):
""" List of files in a repo (path or zip)
Parameters
----------
path: string or pathlib.Path
Returns
-------
Returns a namedtuple with .iszip and .filelist
The paths in filelist are pure strings.
"""
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(str(path)) as zz:
filelist = [info.filename for info in zz.infolist()]
iszip = True
else:
iszip = False
filelist = [str(f) for f in path.glob('**/*') if f.is_file()]
return namedtuple('repocontent', ['iszip', 'filelist'])(iszip, filelist) | List of files in a repo (path or zip)
Parameters
----------
path: string or pathlib.Path
Returns
-------
Returns a namedtuple with .iszip and .filelist
The paths in filelist are pure strings. | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L46-L71 |
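A short sketch (the folder name is arbitrary; a zip archive can be passed just as well):

from pymrio.tools import ioutil

repo = ioutil.get_repo_content('./mrio_storage')
print(repo.iszip)          # False for a plain folder, True for a zip archive
print(repo.filelist[:5])   # file names as plain strings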
konstantinstadler/pymrio | pymrio/tools/ioutil.py | get_file_para | def get_file_para(path, path_in_arc=''):
""" Generic method to read the file parameter file
Helper function to consistently read the file parameter file, which can
either be uncompressed or included in a zip archive. By default, the file
name is expected to be as set in DEFAULT_FILE_NAMES['filepara'] (currently
file_parameters.json), but can be defined otherwise by including the file
name of the parameter file in the parameter path.
Parameters
----------
path: pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
later case the parameter 'path_in_arc' needs to be specific to
further indicate the location of the data in the compressed file.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass ''
(default), for data in e.g. the folder 'emissions' pass 'emissions/'.
Only used if parameter 'path' points to a compressed zip file.
Returns
-------
Returns a namedtuple with
.folder: str with the absolute path containing the
file parameter file. In case of a zip the path
is relative to the root in the zip
.name: Filename without folder of the used parameter file.
.content: Dictionary with the content of the file parameter file
Raises
------
FileNotFoundError if parameter file not found
"""
if type(path) is str:
path = Path(path.rstrip('\\'))
if zipfile.is_zipfile(str(path)):
para_file_folder = str(path_in_arc)
with zipfile.ZipFile(file=str(path)) as zf:
files = zf.namelist()
else:
para_file_folder = str(path)
files = [str(f) for f in path.glob('**/*')]
if para_file_folder not in files:
para_file_full_path = os.path.join(
para_file_folder, DEFAULT_FILE_NAMES['filepara'])
else:
para_file_full_path = para_file_folder
para_file_folder = os.path.dirname(para_file_full_path)
if para_file_full_path not in files:
raise FileNotFoundError(
'File parameter file {} not found'.format(
para_file_full_path))
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path)) as zf:
para_file_content = json.loads(
zf.read(para_file_full_path).decode('utf-8'))
else:
with open(para_file_full_path, 'r') as pf:
para_file_content = json.load(pf)
return namedtuple('file_parameter',
['folder', 'name', 'content'])(
para_file_folder,
os.path.basename(para_file_full_path),
para_file_content) | python | def get_file_para(path, path_in_arc=''):
""" Generic method to read the file parameter file
Helper function to consistently read the file parameter file, which can
either be uncompressed or included in a zip archive. By default, the file
name is expected to be as set in DEFAULT_FILE_NAMES['filepara'] (currently
file_parameters.json), but can be defined otherwise by including the file
name of the parameter file in the parameter path.
Parameters
----------
path: pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass ''
(default), for data in e.g. the folder 'emissions' pass 'emissions/'.
Only used if parameter 'path' points to a compressed zip file.
Returns
-------
Returns a namedtuple with
.folder: str with the absolute path containing the
file parameter file. In case of a zip the path
is relative to the root in the zip
.name: Filename without folder of the used parameter file.
.content: Dictionary with the content of the file parameter file
Raises
------
FileNotFoundError if parameter file not found
"""
if type(path) is str:
path = Path(path.rstrip('\\'))
if zipfile.is_zipfile(str(path)):
para_file_folder = str(path_in_arc)
with zipfile.ZipFile(file=str(path)) as zf:
files = zf.namelist()
else:
para_file_folder = str(path)
files = [str(f) for f in path.glob('**/*')]
if para_file_folder not in files:
para_file_full_path = os.path.join(
para_file_folder, DEFAULT_FILE_NAMES['filepara'])
else:
para_file_full_path = para_file_folder
para_file_folder = os.path.dirname(para_file_full_path)
if para_file_full_path not in files:
raise FileNotFoundError(
'File parameter file {} not found'.format(
para_file_full_path))
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path)) as zf:
para_file_content = json.loads(
zf.read(para_file_full_path).decode('utf-8'))
else:
with open(para_file_full_path, 'r') as pf:
para_file_content = json.load(pf)
return namedtuple('file_parameter',
['folder', 'name', 'content'])(
para_file_folder,
os.path.basename(para_file_full_path),
para_file_content) | Generic method to read the file parameter file
Helper function to consistently read the file parameter file, which can
either be uncompressed or included in a zip archive. By default, the file
name is expected to be as set in DEFAULT_FILE_NAMES['filepara'] (currently
file_parameters.json), but can be defined otherwise by including the file
name of the parameter file in the parameter path.
Parameters
----------
path: pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
latter case the parameter 'path_in_arc' needs to be specified to
further indicate the location of the data in the compressed file.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass ''
(default), for data in e.g. the folder 'emissions' pass 'emissions/'.
Only used if parameter 'path' points to a compressed zip file.
Returns
-------
Returns a namedtuple with
.folder: str with the absolute path containing the
file parameter file. In case of a zip the path
is relative to the root in the zip
.name: Filename without folder of the used parameter file.
.content: Dictionary with the content of the file parameter file
Raises
------
FileNotFoundError if parameter file not found | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L74-L151 |
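A hedged sketch (assumes a folder - or zip - that contains a file_parameters.json, e.g. one written by IOSystem.save_all):

from pymrio.tools import ioutil

para = ioutil.get_file_para('./mrio_storage')
print(para.folder, para.name)   # location and name of the parameter file
print(para.content)             # parsed json content as a dictionary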
konstantinstadler/pymrio | pymrio/tools/ioutil.py | build_agg_matrix | def build_agg_matrix(agg_vector, pos_dict=None):
""" Agg. matrix based on mapping given in input as numerical or str vector.
The aggregation matrix has the form n x m with
-n new classification
-m old classification
Parameters
----------
agg_vector : list or vector like numpy ndarray
This can be row or column vector.
Length m with position given for n and -1 if values
should not be included
or
length m with id_string for the aggregation
pos_dict : dictionary
(only possible if agg_vector is given as string)
output order for the new matrix
must be given as dict with
'string in agg_vector' = pos
(as int, -1 if value should not be included in the aggregation)
Example 1:
input vector: np.array([0, 1, 1, 2]) or ['a', 'b', 'b', 'c']
agg matrix:
m0 m1 m2 m3
n0 1 0 0 0
n1 0 1 1 0
n2 0 0 0 1
Example 2:
input vector: np.array([1, 0, 0, 2]) or
(['b', 'a', 'a', 'c'], dict(a=0,b=1,c=2))
agg matrix:
m0 m1 m2 m3
n0 0 1 1 0
n1 1 0 0 0
n2 0 0 0 1
"""
if isinstance(agg_vector, np.ndarray):
agg_vector = agg_vector.flatten().tolist()
if type(agg_vector[0]) == str:
str_vector = agg_vector
agg_vector = np.zeros(len(str_vector))
if pos_dict:
if len(pos_dict.keys()) != len(set(str_vector)):
raise ValueError(
'Position elements inconsistent with aggregation vector')
seen = pos_dict
else:
seen = {}
counter = 0
for ind, item in enumerate(str_vector):
if item not in seen:
seen[item] = counter
counter += 1
agg_vector[ind] = seen[item]
agg_vector = np.array(agg_vector, dtype=int)
agg_vector = agg_vector.reshape((1, -1))
row_corr = agg_vector
col_corr = np.arange(agg_vector.size)
agg_matrix = np.zeros((row_corr.max()+1, col_corr.max()+1))
agg_matrix[row_corr, col_corr] = 1
# set columns with -1 value to 0
agg_matrix[np.tile(agg_vector == -1, (np.shape(agg_matrix)[0], 1))] = 0
return agg_matrix | python | def build_agg_matrix(agg_vector, pos_dict=None):
""" Agg. matrix based on mapping given in input as numerical or str vector.
The aggregation matrix has the form n x m with
-n new classification
-m old classification
Parameters
----------
agg_vector : list or vector like numpy ndarray
This can be row or column vector.
Length m with position given for n and -1 if values
should not be included
or
length m with id_string for the aggregation
pos_dict : dictionary
(only possible if agg_vector is given as string)
output order for the new matrix
must be given as dict with
'string in agg_vector' = pos
(as int, -1 if value should not be included in the aggregation)
Example 1:
input vector: np.array([0, 1, 1, 2]) or ['a', 'b', 'b', 'c']
agg matrix:
m0 m1 m2 m3
n0 1 0 0 0
n1 0 1 1 0
n2 0 0 0 1
Example 2:
input vector: np.array([1, 0, 0, 2]) or
(['b', 'a', 'a', 'c'], dict(a=0,b=1,c=2))
agg matrix:
m0 m1 m2 m3
n0 0 1 1 0
n1 1 0 0 0
n2 0 0 0 1
"""
if isinstance(agg_vector, np.ndarray):
agg_vector = agg_vector.flatten().tolist()
if type(agg_vector[0]) == str:
str_vector = agg_vector
agg_vector = np.zeros(len(str_vector))
if pos_dict:
if len(pos_dict.keys()) != len(set(str_vector)):
raise ValueError(
'Position elements inconsistent with aggregation vector')
seen = pos_dict
else:
seen = {}
counter = 0
for ind, item in enumerate(str_vector):
if item not in seen:
seen[item] = counter
counter += 1
agg_vector[ind] = seen[item]
agg_vector = np.array(agg_vector, dtype=int)
agg_vector = agg_vector.reshape((1, -1))
row_corr = agg_vector
col_corr = np.arange(agg_vector.size)
agg_matrix = np.zeros((row_corr.max()+1, col_corr.max()+1))
agg_matrix[row_corr, col_corr] = 1
# set columns with -1 value to 0
agg_matrix[np.tile(agg_vector == -1, (np.shape(agg_matrix)[0], 1))] = 0
return agg_matrix | Agg. matrix based on mapping given in input as numerical or str vector.
The aggregation matrix has the form n x m with
-n new classification
-m old classification
Parameters
----------
agg_vector : list or vector like numpy ndarray
This can be row or column vector.
Length m with position given for n and -1 if values
should not be included
or
length m with id_string for the aggregation
pos_dict : dictionary
(only possible if agg_vector is given as string)
output order for the new matrix
must be given as dict with
'string in agg_vector' = pos
(as int, -1 if value should not be included in the aggregation)
Example 1:
input vector: np.array([0, 1, 1, 2]) or ['a', 'b', 'b', 'c']
agg matrix:
m0 m1 m2 m3
n0 1 0 0 0
n1 0 1 1 0
n2 0 0 0 1
Example 2:
input vector: np.array([1, 0, 0, 2]) or
(['b', 'a', 'a', 'c'], dict(a=0,b=1,c=2))
agg matrix:
m0 m1 m2 m3
n0 0 1 1 0
n1 1 0 0 0
n2 0 0 0 1 | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L154-L230 |
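A short sketch reproducing the two docstring examples:

import numpy as np
from pymrio.tools import ioutil

print(ioutil.build_agg_matrix(['a', 'b', 'b', 'c']))
# [[1. 0. 0. 0.]
#  [0. 1. 1. 0.]
#  [0. 0. 0. 1.]]
print(ioutil.build_agg_matrix(np.array([1, 0, 0, 2])))
# [[0. 1. 1. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]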
konstantinstadler/pymrio | pymrio/tools/ioutil.py | diagonalize_blocks | def diagonalize_blocks(arr, blocksize):
""" Diagonalize sections of columns of an array for the whole array
Parameters
----------
arr : numpy array
Input array
blocksize : int
number of rows/columns forming one block
Returns
-------
numpy ndarray with shape (columns 'arr' * blocksize,
columns 'arr' * blocksize)
Example
--------
arr: output: (blocksize = 3)
3 1 3 0 0 1 0 0
4 2 0 4 0 0 2 0
5 3 0 0 5 0 0 3
6 9 6 0 0 9 0 0
7 6 0 7 0 0 6 0
8 4 0 0 8 0 0 4
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
if np.mod(nr_row, blocksize):
raise ValueError(
'Number of rows of input array must be a multiple of blocksize')
arr_diag = np.zeros((nr_row, blocksize*nr_col))
for col_ind, col_val in enumerate(arr.T):
col_start = col_ind*blocksize
col_end = blocksize + col_ind*blocksize
for _ind in range(int(nr_row/blocksize)):
row_start = _ind*blocksize
row_end = blocksize + _ind * blocksize
arr_diag[row_start:row_end,
col_start:col_end] = np.diag(col_val[row_start:row_end])
return arr_diag | python | def diagonalize_blocks(arr, blocksize):
""" Diagonalize sections of columns of an array for the whole array
Parameters
----------
arr : numpy array
Input array
blocksize : int
number of rows/columns forming one block
Returns
-------
numpy ndarray with shape (columns 'arr' * blocksize,
columns 'arr' * blocksize)
Example
--------
arr: output: (blocksize = 3)
3 1 3 0 0 1 0 0
4 2 0 4 0 0 2 0
5 3 0 0 5 0 0 3
6 9 6 0 0 9 0 0
7 6 0 7 0 0 6 0
8 4 0 0 8 0 0 4
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
if np.mod(nr_row, blocksize):
raise ValueError(
'Number of rows of input array must be a multiple of blocksize')
arr_diag = np.zeros((nr_row, blocksize*nr_col))
for col_ind, col_val in enumerate(arr.T):
col_start = col_ind*blocksize
col_end = blocksize + col_ind*blocksize
for _ind in range(int(nr_row/blocksize)):
row_start = _ind*blocksize
row_end = blocksize + _ind * blocksize
arr_diag[row_start:row_end,
col_start:col_end] = np.diag(col_val[row_start:row_end])
return arr_diag | Diagonalize sections of columns of an array for the whole array
Parameters
----------
arr : numpy array
Input array
blocksize : int
number of rows/columns forming one block
Returns
-------
numpy ndarray with shape (columns 'arr' * blocksize,
columns 'arr' * blocksize)
Example
--------
arr: output: (blocksize = 3)
3 1 3 0 0 1 0 0
4 2 0 4 0 0 2 0
5 3 0 0 5 0 0 3
6 9 6 0 0 9 0 0
7 6 0 7 0 0 6 0
8 4 0 0 8 0 0 4 | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L233-L280 |
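A short sketch of diagonalize_blocks on the docstring's 6x2 example (import path assumed as above): each column is spread over blocksize diagonal positions, block by block.
import numpy as np
from pymrio.tools.ioutil import diagonalize_blocks

arr = np.array([[3, 1],
                [4, 2],
                [5, 3],
                [6, 9],
                [7, 6],
                [8, 4]])

diag = diagonalize_blocks(arr, blocksize=3)
print(diag.shape)    # (6, 6)
print(diag[:3, :3])  # diag([3, 4, 5]) - first block of the first column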
konstantinstadler/pymrio | pymrio/tools/ioutil.py | set_block | def set_block(arr, arr_block):
""" Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
block_arr : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array)
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
nr_col_block = arr_block.shape[1]
nr_row_block = arr_block.shape[0]
if np.mod(nr_row, nr_row_block) or np.mod(nr_col, nr_col_block):
raise ValueError('Number of rows/columns of the input array '
'must be a multiple of block shape')
if nr_row/nr_row_block != nr_col/nr_col_block:
raise ValueError('Block array can not be filled as '
'diagonal blocks in the given array')
arr_out = arr.copy()
for row_ind in range(int(nr_row/nr_row_block)):
row_start = row_ind*nr_row_block
row_end = nr_row_block+nr_row_block*row_ind
col_start = row_ind*nr_col_block
col_end = nr_col_block+nr_col_block*row_ind
arr_out[row_start:row_end, col_start:col_end] = arr_block
return arr_out | python | def set_block(arr, arr_block):
""" Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
block_arr : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array)
"""
nr_col = arr.shape[1]
nr_row = arr.shape[0]
nr_col_block = arr_block.shape[1]
nr_row_block = arr_block.shape[0]
if np.mod(nr_row, nr_row_block) or np.mod(nr_col, nr_col_block):
raise ValueError('Number of rows/columns of the input array '
'must be a multiple of block shape')
if nr_row/nr_row_block != nr_col/nr_col_block:
raise ValueError('Block array can not be filled as '
'diagonal blocks in the given array')
arr_out = arr.copy()
for row_ind in range(int(nr_row/nr_row_block)):
row_start = row_ind*nr_row_block
row_end = nr_row_block+nr_row_block*row_ind
col_start = row_ind*nr_col_block
col_end = nr_col_block+nr_col_block*row_ind
arr_out[row_start:row_end, col_start:col_end] = arr_block
return arr_out | Sets the diagonal blocks of an array to a given array
Parameters
----------
arr : numpy ndarray
the original array
block_arr : numpy ndarray
the block array for the new diagonal
Returns
-------
numpy ndarray (the modified array) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L283-L321 |
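A minimal sketch for set_block (import path assumed): a 2x2 block is written onto both diagonal positions of a 4x4 array, while the off-diagonal entries keep their original values.
import numpy as np
from pymrio.tools.ioutil import set_block

arr = np.arange(16).reshape(4, 4)
block = np.ones((2, 2))

out = set_block(arr, block)
print(out)  # rows/cols 0-1 and 2-3 now hold the ones-block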
konstantinstadler/pymrio | pymrio/tools/ioutil.py | unique_element | def unique_element(ll):
""" returns unique elements from a list preserving the original order """
seen = {}
result = []
for item in ll:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result | python | def unique_element(ll):
""" returns unique elements from a list preserving the original order """
seen = {}
result = []
for item in ll:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result | returns unique elements from a list preserving the original order | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L324-L333 |
konstantinstadler/pymrio | pymrio/tools/ioutil.py | build_agg_vec | def build_agg_vec(agg_vec, **source):
""" Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
>>> other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and than
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warn('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out | python | def build_agg_vec(agg_vec, **source):
""" Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
>>> other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warn('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out | Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
>>> other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector) | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L336-L444 |
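A hedged sketch of build_agg_vec with the aggregation vectors passed directly as keyword arguments, so no concordance files are needed; the vector contents and the 'RoW' miss value are made up for illustration.
from pymrio.tools.ioutil import build_agg_vec  # import path assumed from the records above

agg = build_agg_vec(
    ['EU', 'OECD'],
    EU=['EU', 'EU', 'EU', None, None, None],
    OECD=['OECD', None, 'OECD', 'OECD', None, None],
    miss='RoW')
print(agg)  # ['EU', 'EU', 'EU', 'OECD', 'RoW', 'RoW'] - 'EU' wins where both vectors match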
konstantinstadler/pymrio | pymrio/tools/ioutil.py | find_first_number | def find_first_number(ll):
""" Returns nr of first entry parseable to float in ll, None otherwise"""
for nr, entry in enumerate(ll):
try:
float(entry)
except (ValueError, TypeError) as e:
pass
else:
return nr
return None | python | def find_first_number(ll):
""" Returns nr of first entry parseable to float in ll, None otherwise"""
for nr, entry in enumerate(ll):
try:
float(entry)
except (ValueError, TypeError) as e:
pass
else:
return nr
return None | Returns nr of first entry parseable to float in ll, None otherwise | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L447-L456 |
konstantinstadler/pymrio | pymrio/tools/ioutil.py | sniff_csv_format | def sniff_csv_format(csv_file,
potential_sep=['\t', ',', ';', '|', '-', '_'],
max_test_lines=10,
zip_file=None):
""" Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file
"""
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
line = filehandle.readline()
if line == '':
break
try:
line = line.decode('utf-8')
except AttributeError:
pass
lines.append(line[:-1])
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
sep_aly_lines = [sorted([(line.count(sep), sep)
for sep in potential_sep if line.count(sep) > 0],
key=lambda x: x[0], reverse=True) for line in test_lines]
for nr, (count, sep) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break
else:
sep = None
if sep:
break
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for nr_header_row, line in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break
return dict(sep=sep,
nr_header_row=nr_header_row,
nr_index_col=nr_index_col) | python | def sniff_csv_format(csv_file,
potential_sep=['\t', ',', ';', '|', '-', '_'],
max_test_lines=10,
zip_file=None):
""" Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file
"""
def read_first_lines(filehandle):
lines = []
for i in range(max_test_lines):
line = filehandle.readline()
if line == '':
break
try:
line = line.decode('utf-8')
except AttributeError:
pass
lines.append(line[:-1])
return lines
if zip_file:
with zipfile.ZipFile(zip_file, 'r') as zz:
with zz.open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
else:
with open(csv_file, 'r') as ff:
test_lines = read_first_lines(ff)
sep_aly_lines = [sorted([(line.count(sep), sep)
for sep in potential_sep if line.count(sep) > 0],
key=lambda x: x[0], reverse=True) for line in test_lines]
for nr, (count, sep) in enumerate(sep_aly_lines[0]):
for line in sep_aly_lines:
if line[nr][0] == count:
break
else:
sep = None
if sep:
break
nr_header_row = None
nr_index_col = None
if sep:
nr_index_col = find_first_number(test_lines[-1].split(sep))
if nr_index_col:
for nr_header_row, line in enumerate(test_lines):
if find_first_number(line.split(sep)) == nr_index_col:
break
return dict(sep=sep,
nr_header_row=nr_header_row,
nr_index_col=nr_index_col) | Tries to get the separator, nr of index cols and header rows in a csv file
Parameters
----------
csv_file: str
Path to a csv file
potential_sep: list, optional
List of potential separators (delimiters) to test.
Default: '\t', ',', ';', '|', '-', '_'
max_test_lines: int, optional
How many lines to test, default: 10 or available lines in csv_file
zip_file: str, optional
Path to a zip file containing the csv file (if any, default: None).
If a zip file is given, the path given at 'csv_file' is assumed
to be the path to the file within the zip_file.
Returns
-------
dict with
sep: string (separator)
nr_index_col: int
nr_header_row: int
Entries are set to None if inconsistent information in the file | https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/tools/ioutil.py#L459-L540 |
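A small self-contained sketch of sniff_csv_format (import path assumed, file name arbitrary): a tab-separated file with one header row and two text index columns is written and then sniffed.
from pymrio.tools.ioutil import sniff_csv_format

with open('example.csv', 'w') as fh:
    fh.write('region\tsector\tvalue\n')
    fh.write('reg1\tsec1\t1.0\n')
    fh.write('reg1\tsec2\t2.0\n')

print(sniff_csv_format('example.csv'))
# {'sep': '\t', 'nr_header_row': 1, 'nr_index_col': 2}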
tmc/gevent-zeromq | gevent_zeromq/poll.py | GreenPoller._get_descriptors | def _get_descriptors(self):
"""Returns three elements tuple with socket descriptors ready
for gevent.select.select
"""
rlist = []
wlist = []
xlist = []
for socket, flags in self.sockets.items():
if isinstance(socket, zmq.Socket):
rlist.append(socket.getsockopt(zmq.FD))
continue
elif isinstance(socket, int):
fd = socket
elif hasattr(socket, 'fileno'):
try:
fd = int(socket.fileno())
except:
raise ValueError('fileno() must return a valid integer fd')
else:
raise TypeError('Socket must be a 0MQ socket, an integer fd '
'or have a fileno() method: %r' % socket)
if flags & zmq.POLLIN:
rlist.append(fd)
if flags & zmq.POLLOUT:
wlist.append(fd)
if flags & zmq.POLLERR:
xlist.append(fd)
return (rlist, wlist, xlist) | python | def _get_descriptors(self):
"""Returns three elements tuple with socket descriptors ready
for gevent.select.select
"""
rlist = []
wlist = []
xlist = []
for socket, flags in self.sockets.items():
if isinstance(socket, zmq.Socket):
rlist.append(socket.getsockopt(zmq.FD))
continue
elif isinstance(socket, int):
fd = socket
elif hasattr(socket, 'fileno'):
try:
fd = int(socket.fileno())
except:
raise ValueError('fileno() must return a valid integer fd')
else:
raise TypeError('Socket must be a 0MQ socket, an integer fd '
'or have a fileno() method: %r' % socket)
if flags & zmq.POLLIN:
rlist.append(fd)
if flags & zmq.POLLOUT:
wlist.append(fd)
if flags & zmq.POLLERR:
xlist.append(fd)
return (rlist, wlist, xlist) | Returns three elements tuple with socket descriptors ready
for gevent.select.select | https://github.com/tmc/gevent-zeromq/blob/b15d50deedda3d2cdb701106d4b315c7a06353e3/gevent_zeromq/poll.py#L14-L44 |
tmc/gevent-zeromq | gevent_zeromq/poll.py | GreenPoller.poll | def poll(self, timeout=-1):
"""Overridden method to ensure that the green version of
Poller is used.
Behaves the same as :meth:`zmq.core.Poller.poll`
"""
if timeout is None:
timeout = -1
if timeout < 0:
timeout = -1
rlist = None
wlist = None
xlist = None
if timeout > 0:
tout = gevent.Timeout.start_new(timeout/1000.0)
try:
# Loop until timeout or events available
rlist, wlist, xlist = self._get_descriptors()
while True:
events = super(GreenPoller, self).poll(0)
if events or timeout == 0:
return events
# wait for activity on sockets in a green way
select.select(rlist, wlist, xlist)
except gevent.Timeout, t:
if t is not tout:
raise
return []
finally:
if timeout > 0:
tout.cancel() | python | def poll(self, timeout=-1):
"""Overridden method to ensure that the green version of
Poller is used.
Behaves the same as :meth:`zmq.core.Poller.poll`
"""
if timeout is None:
timeout = -1
if timeout < 0:
timeout = -1
rlist = None
wlist = None
xlist = None
if timeout > 0:
tout = gevent.Timeout.start_new(timeout/1000.0)
try:
# Loop until timeout or events available
rlist, wlist, xlist = self._get_descriptors()
while True:
events = super(GreenPoller, self).poll(0)
if events or timeout == 0:
return events
# wait for activity on sockets in a green way
select.select(rlist, wlist, xlist)
except gevent.Timeout, t:
if t is not tout:
raise
return []
finally:
if timeout > 0:
tout.cancel() | Overridden method to ensure that the green version of
Poller is used.
Behaves the same as :meth:`zmq.core.Poller.poll` | https://github.com/tmc/gevent-zeromq/blob/b15d50deedda3d2cdb701106d4b315c7a06353e3/gevent_zeromq/poll.py#L46-L83 |
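A hedged sketch of cooperative polling with GreenPoller; the imports follow the gevent-zeromq convention (from gevent_zeromq import zmq) and the endpoint is a placeholder. poll() yields to the gevent hub instead of blocking the whole process, and the timeout is in milliseconds, as with zmq.core.Poller.poll.
from gevent_zeromq import zmq
from gevent_zeromq.poll import GreenPoller

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.bind('tcp://127.0.0.1:5555')

poller = GreenPoller()
poller.register(socket, zmq.POLLIN)

events = poller.poll(timeout=1000)  # other greenlets keep running while we wait
if events:
    print(socket.recv())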
shichao-an/115wangpan | u115/api.py | _instantiate_task | def _instantiate_task(api, kwargs):
"""Create a Task object from raw kwargs"""
file_id = kwargs['file_id']
kwargs['file_id'] = file_id if str(file_id).strip() else None
kwargs['cid'] = kwargs['file_id'] or None
kwargs['rate_download'] = kwargs['rateDownload']
kwargs['percent_done'] = kwargs['percentDone']
kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
if is_transferred:
kwargs['pid'] = api.downloads_directory.cid
else:
kwargs['pid'] = None
del kwargs['rateDownload']
del kwargs['percentDone']
if 'url' in kwargs:
if not kwargs['url']:
kwargs['url'] = None
else:
kwargs['url'] = None
task = Task(api, **kwargs)
if is_transferred:
task._parent = api.downloads_directory
return task | python | def _instantiate_task(api, kwargs):
"""Create a Task object from raw kwargs"""
file_id = kwargs['file_id']
kwargs['file_id'] = file_id if str(file_id).strip() else None
kwargs['cid'] = kwargs['file_id'] or None
kwargs['rate_download'] = kwargs['rateDownload']
kwargs['percent_done'] = kwargs['percentDone']
kwargs['add_time'] = get_utcdatetime(kwargs['add_time'])
kwargs['last_update'] = get_utcdatetime(kwargs['last_update'])
is_transferred = (kwargs['status'] == 2 and kwargs['move'] == 1)
if is_transferred:
kwargs['pid'] = api.downloads_directory.cid
else:
kwargs['pid'] = None
del kwargs['rateDownload']
del kwargs['percentDone']
if 'url' in kwargs:
if not kwargs['url']:
kwargs['url'] = None
else:
kwargs['url'] = None
task = Task(api, **kwargs)
if is_transferred:
task._parent = api.downloads_directory
return task | Create a Task object from raw kwargs | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1740-L1764 |
shichao-an/115wangpan | u115/api.py | RequestHandler.get | def get(self, url, params=None):
"""
Initiate a GET request
"""
r = self.session.get(url, params=params)
return self._response_parser(r, expect_json=False) | python | def get(self, url, params=None):
"""
Initiate a GET request
"""
r = self.session.get(url, params=params)
return self._response_parser(r, expect_json=False) | Initiate a GET request | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L56-L61 |
shichao-an/115wangpan | u115/api.py | RequestHandler.post | def post(self, url, data, params=None):
"""
Initiate a POST request
"""
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False) | python | def post(self, url, data, params=None):
"""
Initiate a POST request
"""
r = self.session.post(url, data=data, params=params)
return self._response_parser(r, expect_json=False) | Initiate a POST request | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L63-L68 |
shichao-an/115wangpan | u115/api.py | RequestHandler.send | def send(self, request, expect_json=True, ignore_content=False):
"""
Send a formatted API request
:param request: a formatted request object
:type request: :class:`.Request`
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
"""
r = self.session.request(method=request.method,
url=request.url,
params=request.params,
data=request.data,
files=request.files,
headers=request.headers)
return self._response_parser(r, expect_json, ignore_content) | python | def send(self, request, expect_json=True, ignore_content=False):
"""
Send a formatted API request
:param request: a formatted request object
:type request: :class:`.Request`
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
"""
r = self.session.request(method=request.method,
url=request.url,
params=request.params,
data=request.data,
files=request.files,
headers=request.headers)
return self._response_parser(r, expect_json, ignore_content) | Send a formatted API request
:param request: a formatted request object
:type request: :class:`.Request`
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L70-L87 |
shichao-an/115wangpan | u115/api.py | RequestHandler._response_parser | def _response_parser(self, r, expect_json=True, ignore_content=False):
"""
:param :class:`requests.Response` r: a response object of the Requests
library
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
"""
if r.ok:
try:
j = r.json()
return Response(j.get('state'), j)
except ValueError:
# No JSON-encoded data returned
if expect_json:
logger = logging.getLogger(conf.LOGGING_API_LOGGER)
logger.debug(r.text)
raise InvalidAPIAccess('Invalid API access.')
# Raw response
if ignore_content:
res = Response(True, None)
else:
res = Response(True, r.text)
return res
else:
r.raise_for_status() | python | def _response_parser(self, r, expect_json=True, ignore_content=False):
"""
:param :class:`requests.Response` r: a response object of the Requests
library
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object
"""
if r.ok:
try:
j = r.json()
return Response(j.get('state'), j)
except ValueError:
# No JSON-encoded data returned
if expect_json:
logger = logging.getLogger(conf.LOGGING_API_LOGGER)
logger.debug(r.text)
raise InvalidAPIAccess('Invalid API access.')
# Raw response
if ignore_content:
res = Response(True, None)
else:
res = Response(True, r.text)
return res
else:
r.raise_for_status() | :param :class:`requests.Response` r: a response object of the Requests
library
:param bool expect_json: if True, raise :class:`.InvalidAPIAccess` if
response is not in JSON format
:param bool ignore_content: whether to ignore setting content of the
Response object | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L89-L115 |
shichao-an/115wangpan | u115/api.py | API.load_cookies | def load_cookies(self, ignore_discard=True, ignore_expires=True):
"""Load cookies from the file :attr:`.API.cookies_filename`"""
self._init_cookies()
if os.path.exists(self.cookies.filename):
self.cookies.load(ignore_discard=ignore_discard,
ignore_expires=ignore_expires)
self._reset_cache() | python | def load_cookies(self, ignore_discard=True, ignore_expires=True):
"""Load cookies from the file :attr:`.API.cookies_filename`"""
self._init_cookies()
if os.path.exists(self.cookies.filename):
self.cookies.load(ignore_discard=ignore_discard,
ignore_expires=ignore_expires)
self._reset_cache() | Load cookies from the file :attr:`.API.cookies_filename` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L250-L256 |
shichao-an/115wangpan | u115/api.py | API.save_cookies | def save_cookies(self, ignore_discard=True, ignore_expires=True):
"""Save cookies to the file :attr:`.API.cookies_filename`"""
if not isinstance(self.cookies, cookielib.FileCookieJar):
m = 'Cookies must be a cookielib.FileCookieJar object to be saved.'
raise APIError(m)
self.cookies.save(ignore_discard=ignore_discard,
ignore_expires=ignore_expires) | python | def save_cookies(self, ignore_discard=True, ignore_expires=True):
"""Save cookies to the file :attr:`.API.cookies_filename`"""
if not isinstance(self.cookies, cookielib.FileCookieJar):
m = 'Cookies must be a cookielib.FileCookieJar object to be saved.'
raise APIError(m)
self.cookies.save(ignore_discard=ignore_discard,
ignore_expires=ignore_expires) | Save cookies to the file :attr:`.API.cookies_filename` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L258-L264 |
shichao-an/115wangpan | u115/api.py | API.login | def login(self, username=None, password=None,
section='default'):
"""
Create the passport with ``username`` and ``password`` and log in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login
"""
if self.has_logged_in:
return True
if username is None or password is None:
credential = conf.get_credential(section)
username = credential['username']
password = credential['password']
passport = Passport(username, password)
r = self.http.post(LOGIN_URL, passport.form)
if r.state is True:
# Bind this passport to API
self.passport = passport
passport.data = r.content['data']
self._user_id = r.content['data']['USER_ID']
return True
else:
msg = None
if 'err_name' in r.content:
if r.content['err_name'] == 'account':
msg = 'Account does not exist.'
elif r.content['err_name'] == 'passwd':
msg = 'Password is incorrect.'
raise AuthenticationError(msg) | python | def login(self, username=None, password=None,
section='default'):
"""
Create the passport with ``username`` and ``password`` and log in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login
"""
if self.has_logged_in:
return True
if username is None or password is None:
credential = conf.get_credential(section)
username = credential['username']
password = credential['password']
passport = Passport(username, password)
r = self.http.post(LOGIN_URL, passport.form)
if r.state is True:
# Bind this passport to API
self.passport = passport
passport.data = r.content['data']
self._user_id = r.content['data']['USER_ID']
return True
else:
msg = None
if 'err_name' in r.content:
if r.content['err_name'] == 'account':
msg = 'Account does not exist.'
elif r.content['err_name'] == 'passwd':
msg = 'Password is incorrect.'
raise AuthenticationError(msg) | Create the passport with ``username`` and ``password`` and log in.
If either ``username`` or ``password`` is None or omitted, the
credentials file will be parsed.
:param str username: username to login (email, phone number or user ID)
:param str password: password
:param str section: section name in the credential file
:raise: raises :class:`.AuthenticationError` if failed to login | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L280-L315 |
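A minimal login sketch; the package name u115 follows the file path in the records above and the credentials are placeholders. Without explicit arguments, login() falls back to the credential file section.
from u115 import API  # top-level import assumed

api = API()
api.login(username='user@example.com', password='secret')
print(api.username, api.user_id)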
shichao-an/115wangpan | u115/api.py | API.user_id | def user_id(self):
"""
User id of the current API user
"""
if self._user_id is None:
if self.has_logged_in:
self._user_id = self._req_get_user_aq()['data']['uid']
else:
raise AuthenticationError('Not logged in.')
return self._user_id | python | def user_id(self):
"""
User id of the current API user
"""
if self._user_id is None:
if self.has_logged_in:
self._user_id = self._req_get_user_aq()['data']['uid']
else:
raise AuthenticationError('Not logged in.')
return self._user_id | User id of the current API user | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L328-L337 |
shichao-an/115wangpan | u115/api.py | API.username | def username(self):
"""
Username of the current API user
"""
if self._username is None:
if self.has_logged_in:
self._username = self._get_username()
else:
raise AuthenticationError('Not logged in.')
return self._username | python | def username(self):
"""
Username of the current API user
"""
if self._username is None:
if self.has_logged_in:
self._username = self._get_username()
else:
raise AuthenticationError('Not logged in.')
return self._username | Username of the current API user | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L340-L349 |
shichao-an/115wangpan | u115/api.py | API.has_logged_in | def has_logged_in(self):
"""Check whether the API has logged in"""
r = self.http.get(CHECKPOINT_URL)
if r.state is False:
return True
# If logged out, flush cache
self._reset_cache()
return False | python | def has_logged_in(self):
"""Check whether the API has logged in"""
r = self.http.get(CHECKPOINT_URL)
if r.state is False:
return True
# If logged out, flush cache
self._reset_cache()
return False | Check whether the API has logged in | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L352-L359 |
shichao-an/115wangpan | u115/api.py | API.receiver_directory | def receiver_directory(self):
"""Parent directory of the downloads directory"""
if self._receiver_directory is None:
self._receiver_directory = self.downloads_directory.parent
return self._receiver_directory | python | def receiver_directory(self):
"""Parent directory of the downloads directory"""
if self._receiver_directory is None:
self._receiver_directory = self.downloads_directory.parent
return self._receiver_directory | Parent directory of the downloads directory | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L382-L386 |
shichao-an/115wangpan | u115/api.py | API.add_task_bt | def add_task_bt(self, filename, select=False):
"""
Add a new BT task
:param str filename: path to torrent file to upload
:param bool select: whether to select files in the torrent.
* True: it returns the opened torrent (:class:`.Torrent`) and
can then iterate files in :attr:`.Torrent.files` and
select/unselect them before calling :meth:`.Torrent.submit`
* False: it will submit the torrent with default selected files
"""
filename = eval_path(filename)
u = self.upload(filename, self.torrents_directory)
t = self._load_torrent(u)
if select:
return t
return t.submit() | python | def add_task_bt(self, filename, select=False):
"""
Add a new BT task
:param str filename: path to torrent file to upload
:param bool select: whether to select files in the torrent.
* True: it returns the opened torrent (:class:`.Torrent`) and
can then iterate files in :attr:`.Torrent.files` and
select/unselect them before calling :meth:`.Torrent.submit`
* False: it will submit the torrent with default selected files
"""
filename = eval_path(filename)
u = self.upload(filename, self.torrents_directory)
t = self._load_torrent(u)
if select:
return t
return t.submit() | Add a new BT task
:param str filename: path to torrent file to upload
:param bool select: whether to select files in the torrent.
* True: it returns the opened torrent (:class:`.Torrent`) and
can then iterate files in :attr:`.Torrent.files` and
select/unselect them before calling :meth:`.Torrent.submit`
* False: it will submit the torrent with default selected files | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L421-L439 |
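A hedged sketch of submitting a torrent as a BT task; the torrent path is a placeholder. With select=True the returned Torrent exposes its files before submission, as described above.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')  # credentials read from the credential file

torrent = api.add_task_bt('ubuntu.torrent', select=True)
for f in torrent.files:
    print(f)          # inspect (and select/unselect) files here
torrent.submit()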
shichao-an/115wangpan | u115/api.py | API.get_storage_info | def get_storage_info(self, human=False):
"""
Get storage info
:param bool human: whether to return human-readable size
:return: total and used storage
:rtype: dict
"""
res = self._req_get_storage_info()
if human:
res['total'] = humanize.naturalsize(res['total'], binary=True)
res['used'] = humanize.naturalsize(res['used'], binary=True)
return res | python | def get_storage_info(self, human=False):
"""
Get storage info
:param bool human: whether to return human-readable size
:return: total and used storage
:rtype: dict
"""
res = self._req_get_storage_info()
if human:
res['total'] = humanize.naturalsize(res['total'], binary=True)
res['used'] = humanize.naturalsize(res['used'], binary=True)
return res | Get storage info
:param bool human: whether to return human-readable size
:return: total and used storage
:rtype: dict | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L450-L463 |
shichao-an/115wangpan | u115/api.py | API.upload | def upload(self, filename, directory=None):
"""
Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
"""
filename = eval_path(filename)
if directory is None:
directory = self.downloads_directory
# First request
res1 = self._req_upload(filename, directory)
data1 = res1['data']
file_id = data1['file_id']
# Second request
res2 = self._req_file(file_id)
data2 = res2['data'][0]
data2.update(**data1)
return _instantiate_uploaded_file(self, data2) | python | def upload(self, filename, directory=None):
"""
Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File`
"""
filename = eval_path(filename)
if directory is None:
directory = self.downloads_directory
# First request
res1 = self._req_upload(filename, directory)
data1 = res1['data']
file_id = data1['file_id']
# Second request
res2 = self._req_file(file_id)
data2 = res2['data'][0]
data2.update(**data1)
return _instantiate_uploaded_file(self, data2) | Upload a file ``filename`` to ``directory``
:param str filename: path to the file to upload
:param directory: destination :class:`.Directory`, defaults to
:attribute:`.API.downloads_directory` if None
:return: the uploaded file
:rtype: :class:`.File` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L465-L488 |
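A hedged upload sketch; the file name is a placeholder and the destination defaults to the downloads directory when no Directory is given.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')

uploaded = api.upload('report.pdf')  # lands in api.downloads_directory
print(uploaded)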
shichao-an/115wangpan | u115/api.py | API.download | def download(self, obj, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""
Download a file
:param obj: :class:`.File` object
:param str path: local path
:param bool show_progress: whether to show download progress
:param bool resume: whether to resume on unfinished downloads
identified by filename
:param bool auto_retry: whether to retry automatically upon closed
transfer until the file's download is finished
:param bool proapi: whether to use pro API
"""
url = obj.get_download_url(proapi)
download(url, path=path, session=self.http.session,
show_progress=show_progress, resume=resume,
auto_retry=auto_retry) | python | def download(self, obj, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""
Download a file
:param obj: :class:`.File` object
:param str path: local path
:param bool show_progress: whether to show download progress
:param bool resume: whether to resume on unfinished downloads
identified by filename
:param bool auto_retry: whether to retry automatically upon closed
transfer until the file's download is finished
:param bool proapi: whether to use pro API
"""
url = obj.get_download_url(proapi)
download(url, path=path, session=self.http.session,
show_progress=show_progress, resume=resume,
auto_retry=auto_retry) | Download a file
:param obj: :class:`.File` object
:param str path: local path
:param bool show_progress: whether to show download progress
:param bool resume: whether to resume on unfinished downloads
identified by filename
:param bool auto_retry: whether to retry automatically upon closed
transfer until the file's download is finished
:param bool proapi: whether to use pro API | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L490-L507 |
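A hedged download sketch; the Directory.list() call and the assumption that the first entry is a File rather than a sub-directory are taken on faith here.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')

entries = api.downloads_directory.list()
api.download(entries[0], show_progress=True, resume=True)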
shichao-an/115wangpan | u115/api.py | API.search | def search(self, keyword, count=30):
"""
Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed
"""
kwargs = {}
kwargs['search_value'] = keyword
root = self.root_directory
entries = root._load_entries(func=self._req_files_search,
count=count, page=1, **kwargs)
res = []
for entry in entries:
if 'pid' in entry:
res.append(_instantiate_directory(self, entry))
else:
res.append(_instantiate_file(self, entry))
return res | python | def search(self, keyword, count=30):
"""
Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed
"""
kwargs = {}
kwargs['search_value'] = keyword
root = self.root_directory
entries = root._load_entries(func=self._req_files_search,
count=count, page=1, **kwargs)
res = []
for entry in entries:
if 'pid' in entry:
res.append(_instantiate_directory(self, entry))
else:
res.append(_instantiate_file(self, entry))
return res | Search files or directories
:param str keyword: keyword
:param int count: number of entries to be listed | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L509-L528 |
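A short search sketch; the keyword is arbitrary. Results mix File and Directory objects, mirroring the instantiation logic above.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')

for entry in api.search('linux iso', count=10):
    print(type(entry).__name__, entry)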
shichao-an/115wangpan | u115/api.py | API.move | def move(self, entries, directory):
"""
Move one or more entries (file or directory) to the destination
directory
:param list entries: a list of source entries (:class:`.BaseFile`
object)
:param directory: destination directory
:return: whether the action is successful
:raise: :class:`.APIError` if something bad happened
"""
fcids = []
for entry in entries:
if isinstance(entry, File):
fcid = entry.fid
elif isinstance(entry, Directory):
fcid = entry.cid
else:
raise APIError('Invalid BaseFile instance for an entry.')
fcids.append(fcid)
if not isinstance(directory, Directory):
raise APIError('Invalid destination directory.')
if self._req_files_move(directory.cid, fcids):
for entry in entries:
if isinstance(entry, File):
entry.cid = directory.cid
entry.reload()
return True
else:
raise APIError('Error moving entries.') | python | def move(self, entries, directory):
"""
Move one or more entries (file or directory) to the destination
directory
:param list entries: a list of source entries (:class:`.BaseFile`
object)
:param directory: destination directory
:return: whether the action is successful
:raise: :class:`.APIError` if something bad happened
"""
fcids = []
for entry in entries:
if isinstance(entry, File):
fcid = entry.fid
elif isinstance(entry, Directory):
fcid = entry.cid
else:
raise APIError('Invalid BaseFile instance for an entry.')
fcids.append(fcid)
if not isinstance(directory, Directory):
raise APIError('Invalid destination directory.')
if self._req_files_move(directory.cid, fcids):
for entry in entries:
if isinstance(entry, File):
entry.cid = directory.cid
entry.reload()
return True
else:
raise APIError('Error moving entries.') | Move one or more entries (file or directory) to the destination
directory
:param list entries: a list of source entries (:class:`.BaseFile`
object)
:param directory: destination directory
:return: whether the action is successful
:raise: :class:`.APIError` if something bad happened | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L530-L559 |
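A hedged sketch of moving search results into a freshly created directory; the directory name and keyword are placeholders, and mkdir is the method documented further below.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')

target = api.mkdir(api.root_directory, 'archive')
results = api.search('old report')
api.move(results, target)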
shichao-an/115wangpan | u115/api.py | API.edit | def edit(self, entry, name, mark=False):
"""
Edit an entry (file or directory)
:param entry: :class:`.BaseFile` object
:param str name: new name for the entry
:param bool mark: whether to bookmark the entry
"""
fcid = None
if isinstance(entry, File):
fcid = entry.fid
elif isinstance(entry, Directory):
fcid = entry.cid
else:
raise APIError('Invalid BaseFile instance for an entry.')
is_mark = 0
if mark is True:
is_mark = 1
if self._req_files_edit(fcid, name, is_mark):
entry.reload()
return True
else:
raise APIError('Error editing the entry.') | python | def edit(self, entry, name, mark=False):
"""
Edit an entry (file or directory)
:param entry: :class:`.BaseFile` object
:param str name: new name for the entry
:param bool mark: whether to bookmark the entry
"""
fcid = None
if isinstance(entry, File):
fcid = entry.fid
elif isinstance(entry, Directory):
fcid = entry.cid
else:
raise APIError('Invalid BaseFile instance for an entry.')
is_mark = 0
if mark is True:
is_mark = 1
if self._req_files_edit(fcid, name, is_mark):
entry.reload()
return True
else:
raise APIError('Error editing the entry.') | Edit an entry (file or directory)
:param entry: :class:`.BaseFile` object
:param str name: new name for the entry
:param bool mark: whether to bookmark the entry | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L561-L583 |
shichao-an/115wangpan | u115/api.py | API.mkdir | def mkdir(self, parent, name):
"""
Create a directory
:param parent: the parent directory
:param str name: the name of the new directory
:return: the new directory
:rtype: :class:`.Directory`
"""
pid = None
cid = None
if isinstance(parent, Directory):
pid = parent.cid
else:
raise APIError('Invalid Directory instance.')
cid = self._req_files_add(pid, name)['cid']
return self._load_directory(cid) | python | def mkdir(self, parent, name):
"""
Create a directory
:param parent: the parent directory
:param str name: the name of the new directory
:return: the new directory
:rtype: :class:`.Directory`
"""
pid = None
cid = None
if isinstance(parent, Directory):
pid = parent.cid
else:
raise APIError('Invalid Directory instance.')
cid = self._req_files_add(pid, name)['cid']
return self._load_directory(cid) | Create a directory
:param parent: the parent directory
:param str name: the name of the new directory
:return: the new directory
:rtype: :class:`.Directory` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L585-L602 |
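A hedged sketch combining mkdir and edit: create a directory under the downloads directory, then rename and bookmark it; the names are placeholders.
from u115 import API  # top-level import assumed

api = API()
api.login(section='default')

new_dir = api.mkdir(api.downloads_directory, 'incoming')
api.edit(new_dir, 'incoming-2024', mark=True)  # rename and bookmark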
shichao-an/115wangpan | u115/api.py | API._req_offline_space | def _req_offline_space(self):
"""Required before accessing lixian tasks"""
url = 'http://115.com/'
params = {
'ct': 'offline',
'ac': 'space',
'_': get_timestamp(13)
}
_sign = os.environ.get('U115_BROWSER_SIGN')
if _sign is not None:
_time = os.environ.get('U115_BROWSER_TIME')
if _time is None:
msg = 'U115_BROWSER_TIME is required given U115_BROWSER_SIGN.'
raise APIError(msg)
params['sign'] = _sign
params['time'] = _time
params['uid'] = self.user_id
req = Request(url=url, params=params)
r = self.http.send(req)
if r.state:
self._signatures['offline_space'] = r.content['sign']
self._lixian_timestamp = r.content['time']
else:
msg = 'Failed to retrieve signatures.'
raise RequestFailure(msg) | python | def _req_offline_space(self):
"""Required before accessing lixian tasks"""
url = 'http://115.com/'
params = {
'ct': 'offline',
'ac': 'space',
'_': get_timestamp(13)
}
_sign = os.environ.get('U115_BROWSER_SIGN')
if _sign is not None:
_time = os.environ.get('U115_BROWSER_TIME')
if _time is None:
msg = 'U115_BROWSER_TIME is required given U115_BROWSER_SIGN.'
raise APIError(msg)
params['sign'] = _sign
params['time'] = _time
params['uid'] = self.user_id
req = Request(url=url, params=params)
r = self.http.send(req)
if r.state:
self._signatures['offline_space'] = r.content['sign']
self._lixian_timestamp = r.content['time']
else:
msg = 'Failed to retrieve signatures.'
raise RequestFailure(msg) | Required before accessing lixian tasks | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L604-L628 |
shichao-an/115wangpan | u115/api.py | API._req_lixian_task_lists | def _req_lixian_task_lists(self, page=1):
"""
This request will cause the system to create a default downloads
directory if it does not exist
"""
url = 'http://115.com/lixian/'
params = {'ct': 'lixian', 'ac': 'task_lists'}
self._load_signatures()
data = {
'page': page,
'uid': self.user_id,
'sign': self._signatures['offline_space'],
'time': self._lixian_timestamp,
}
req = Request(method='POST', url=url, params=params, data=data)
res = self.http.send(req)
if res.state:
self._task_count = res.content['count']
self._task_quota = res.content['quota']
return res.content['tasks']
else:
msg = 'Failed to get tasks.'
raise RequestFailure(msg) | python | def _req_lixian_task_lists(self, page=1):
"""
This request will cause the system to create a default downloads
directory if it does not exist
"""
url = 'http://115.com/lixian/'
params = {'ct': 'lixian', 'ac': 'task_lists'}
self._load_signatures()
data = {
'page': page,
'uid': self.user_id,
'sign': self._signatures['offline_space'],
'time': self._lixian_timestamp,
}
req = Request(method='POST', url=url, params=params, data=data)
res = self.http.send(req)
if res.state:
self._task_count = res.content['count']
self._task_quota = res.content['quota']
return res.content['tasks']
else:
msg = 'Failed to get tasks.'
raise RequestFailure(msg) | This request will cause the system to create a default downloads
directory if it does not exist | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L630-L652 |
shichao-an/115wangpan | u115/api.py | API._req_lixian_get_id | def _req_lixian_get_id(self, torrent=False):
"""Get `cid` of lixian space directory"""
url = 'http://115.com/'
params = {
'ct': 'lixian',
'ac': 'get_id',
'torrent': 1 if torrent else None,
'_': get_timestamp(13)
}
req = Request(method='GET', url=url, params=params)
res = self.http.send(req)
return res.content | python | def _req_lixian_get_id(self, torrent=False):
"""Get `cid` of lixian space directory"""
url = 'http://115.com/'
params = {
'ct': 'lixian',
'ac': 'get_id',
'torrent': 1 if torrent else None,
'_': get_timestamp(13)
}
req = Request(method='GET', url=url, params=params)
res = self.http.send(req)
return res.content | Get `cid` of lixian space directory | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L654-L665 |
shichao-an/115wangpan | u115/api.py | API._req_lixian_torrent | def _req_lixian_torrent(self, u):
"""
:param u: uploaded torrent file
"""
self._load_signatures()
url = 'http://115.com/lixian/'
params = {
'ct': 'lixian',
'ac': 'torrent',
}
data = {
'pickcode': u.pickcode,
'sha1': u.sha,
'uid': self.user_id,
'sign': self._signatures['offline_space'],
'time': self._lixian_timestamp,
}
req = Request(method='POST', url=url, params=params, data=data)
res = self.http.send(req)
if res.state:
return res.content
else:
msg = res.content.get('error_msg')
self.logger.error(msg)
raise RequestFailure('Failed to open torrent.') | python | def _req_lixian_torrent(self, u):
"""
:param u: uploaded torrent file
"""
self._load_signatures()
url = 'http://115.com/lixian/'
params = {
'ct': 'lixian',
'ac': 'torrent',
}
data = {
'pickcode': u.pickcode,
'sha1': u.sha,
'uid': self.user_id,
'sign': self._signatures['offline_space'],
'time': self._lixian_timestamp,
}
req = Request(method='POST', url=url, params=params, data=data)
res = self.http.send(req)
if res.state:
return res.content
else:
msg = res.content.get('error_msg')
self.logger.error(msg)
raise RequestFailure('Failed to open torrent.') | :param u: uploaded torrent file | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L667-L692 |
shichao-an/115wangpan | u115/api.py | API._req_aps_natsort_files | def _req_aps_natsort_files(self, cid, offset, limit, o='file_name',
asc=1, aid=1, show_dir=1, code=None, scid=None,
snap=0, natsort=1, source=None, type=0,
format='json', star=None, is_share=None):
"""
When :meth:`.API._req_files` is called with `o='filename'` and
`natsort=1`, API access will fail
and :meth:`.API._req_aps_natsort_files` is subsequently called with
the same kwargs. Refer to the implementation in
:meth:`.Directory.list`
"""
params = locals()
del params['self']
req = Request(method='GET', url=self.aps_natsort_url, params=params)
res = self.http.send(req)
if res.state:
return res.content
else:
raise RequestFailure('Failed to access files API.') | python | def _req_aps_natsort_files(self, cid, offset, limit, o='file_name',
asc=1, aid=1, show_dir=1, code=None, scid=None,
snap=0, natsort=1, source=None, type=0,
format='json', star=None, is_share=None):
"""
When :meth:`.API._req_files` is called with `o='filename'` and
`natsort=1`, API access will fail
and :meth:`.API._req_aps_natsort_files` is subsequently called with
the same kwargs. Refer to the implementation in
:meth:`.Directory.list`
"""
params = locals()
del params['self']
req = Request(method='GET', url=self.aps_natsort_url, params=params)
res = self.http.send(req)
if res.state:
return res.content
else:
raise RequestFailure('Failed to access files API.') | When :meth:`.API._req_files` is called with `o='filename'` and
`natsort=1`, API access will fail
and :meth:`.API._req_aps_natsort_files` is subsequently called with
the same kwargs. Refer to the implementation in
:meth:`.Directory.list` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L770-L788 |
shichao-an/115wangpan | u115/api.py | API._req_files_edit | def _req_files_edit(self, fid, file_name=None, is_mark=0):
"""Edit a file or directory"""
url = self.web_api_url + '/edit'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') | python | def _req_files_edit(self, fid, file_name=None, is_mark=0):
"""Edit a file or directory"""
url = self.web_api_url + '/edit'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') | Edit a file or directory | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L830-L840 |
shichao-an/115wangpan | u115/api.py | API._req_files_add | def _req_files_add(self, pid, cname):
"""
Add a directory
:param str pid: parent directory id
:param str cname: directory name
"""
url = self.web_api_url + '/add'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return res.content
else:
raise RequestFailure('Failed to access files API.') | python | def _req_files_add(self, pid, cname):
"""
Add a directory
:param str pid: parent directory id
:param str cname: directory name
"""
url = self.web_api_url + '/add'
data = locals()
del data['self']
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return res.content
else:
raise RequestFailure('Failed to access files API.') | Add a directory
:param str pid: parent directory id
:param str cname: directory name | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L842-L856 |
shichao-an/115wangpan | u115/api.py | API._req_files_move | def _req_files_move(self, pid, fids):
"""
Move files or directories
:param str pid: destination directory id
:param list fids: a list of ids of files or directories to be moved
"""
url = self.web_api_url + '/move'
data = {}
data['pid'] = pid
for i, fid in enumerate(fids):
data['fid[%d]' % i] = fid
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') | python | def _req_files_move(self, pid, fids):
"""
Move files or directories
:param str pid: destination directory id
:param list fids: a list of ids of files or directories to be moved
"""
url = self.web_api_url + '/move'
data = {}
data['pid'] = pid
for i, fid in enumerate(fids):
data['fid[%d]' % i] = fid
req = Request(method='POST', url=url, data=data)
res = self.http.send(req)
if res.state:
return True
else:
raise RequestFailure('Failed to access files API.') | Move files or directories
:param str pid: destination directory id
:param list fids: a list of ids of files or directories to be moved | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L858-L874 |
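A stand-alone illustration of the indexed fid[n] form encoding built by _req_files_move above; the ids below are made up.

def build_move_payload(pid, fids):
    # mirrors the payload construction in _req_files_move
    data = {'pid': pid}
    for i, fid in enumerate(fids):
        data['fid[%d]' % i] = fid
    return data

print(build_move_payload('1000000', ['111', '222']))
# -> {'pid': '1000000', 'fid[0]': '111', 'fid[1]': '222'}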
shichao-an/115wangpan | u115/api.py | API._req_directory | def _req_directory(self, cid):
"""Return name and pid of by cid"""
res = self._req_files(cid=cid, offset=0, limit=1, show_dir=1)
path = res['path']
count = res['count']
for d in path:
if str(d['cid']) == str(cid):
res = {
'cid': d['cid'],
'name': d['name'],
'pid': d['pid'],
'count': count,
}
return res
else:
raise RequestFailure('No directory found.') | python | def _req_directory(self, cid):
"""Return name and pid of by cid"""
res = self._req_files(cid=cid, offset=0, limit=1, show_dir=1)
path = res['path']
count = res['count']
for d in path:
if str(d['cid']) == str(cid):
res = {
'cid': d['cid'],
'name': d['name'],
'pid': d['pid'],
'count': count,
}
return res
else:
raise RequestFailure('No directory found.') | Return name and pid of directory by cid | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L886-L901
shichao-an/115wangpan | u115/api.py | API._req_upload | def _req_upload(self, filename, directory):
"""Raw request to upload a file ``filename``"""
self._upload_url = self._load_upload_url()
self.http.get('http://upload.115.com/crossdomain.xml')
b = os.path.basename(filename)
target = 'U_1_' + str(directory.cid)
files = {
'Filename': ('', quote(b), ''),
'target': ('', target, ''),
'Filedata': (quote(b), open(filename, 'rb'), ''),
'Upload': ('', 'Submit Query', ''),
}
req = Request(method='POST', url=self._upload_url, files=files)
res = self.http.send(req)
if res.state:
return res.content
else:
msg = None
if res.content['code'] == 990002:
msg = 'Invalid parameter.'
elif res.content['code'] == 1001:
msg = 'Torrent upload failed. Please try again later.'
raise RequestFailure(msg) | python | def _req_upload(self, filename, directory):
"""Raw request to upload a file ``filename``"""
self._upload_url = self._load_upload_url()
self.http.get('http://upload.115.com/crossdomain.xml')
b = os.path.basename(filename)
target = 'U_1_' + str(directory.cid)
files = {
'Filename': ('', quote(b), ''),
'target': ('', target, ''),
'Filedata': (quote(b), open(filename, 'rb'), ''),
'Upload': ('', 'Submit Query', ''),
}
req = Request(method='POST', url=self._upload_url, files=files)
res = self.http.send(req)
if res.state:
return res.content
else:
msg = None
if res.content['code'] == 990002:
msg = 'Invalid parameter.'
elif res.content['code'] == 1001:
msg = 'Torrent upload failed. Please try again later.'
raise RequestFailure(msg) | Raw request to upload a file ``filename`` | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L938-L960 |
shichao-an/115wangpan | u115/api.py | API._load_root_directory | def _load_root_directory(self):
"""
Load root directory, which has a cid of 0
"""
kwargs = self._req_directory(0)
self._root_directory = Directory(api=self, **kwargs) | python | def _load_root_directory(self):
"""
Load root directory, which has a cid of 0
"""
kwargs = self._req_directory(0)
self._root_directory = Directory(api=self, **kwargs) | Load root directory, which has a cid of 0 | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1016-L1021 |
shichao-an/115wangpan | u115/api.py | API._load_torrents_directory | def _load_torrents_directory(self):
"""
Load torrents directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=True)
self._torrents_directory = self._load_directory(r['cid']) | python | def _load_torrents_directory(self):
"""
Load torrents directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=True)
self._torrents_directory = self._load_directory(r['cid']) | Load torrents directory
If it does not exist yet, this request will cause the system to create
one | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1023-L1031 |
shichao-an/115wangpan | u115/api.py | API._load_downloads_directory | def _load_downloads_directory(self):
"""
Load downloads directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=False)
self._downloads_directory = self._load_directory(r['cid']) | python | def _load_downloads_directory(self):
"""
Load downloads directory
If it does not exist yet, this request will cause the system to create
one
"""
r = self._req_lixian_get_id(torrent=False)
self._downloads_directory = self._load_directory(r['cid']) | Load downloads directory
If it does not exist yet, this request will cause the system to create
one | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1033-L1041 |
shichao-an/115wangpan | u115/api.py | API._parse_src_js_var | def _parse_src_js_var(self, variable):
"""Parse JavaScript variables in the source page"""
src_url = 'http://115.com'
r = self.http.get(src_url)
soup = BeautifulSoup(r.content)
scripts = [script.text for script in soup.find_all('script')]
text = '\n'.join(scripts)
pattern = "%s\s*=\s*(.*);" % (variable.upper())
m = re.search(pattern, text)
if not m:
msg = 'Cannot parse source JavaScript for %s.' % variable
raise APIError(msg)
return json.loads(m.group(1).strip()) | python | def _parse_src_js_var(self, variable):
"""Parse JavaScript variables in the source page"""
src_url = 'http://115.com'
r = self.http.get(src_url)
soup = BeautifulSoup(r.content)
scripts = [script.text for script in soup.find_all('script')]
text = '\n'.join(scripts)
pattern = "%s\s*=\s*(.*);" % (variable.upper())
m = re.search(pattern, text)
if not m:
msg = 'Cannot parse source JavaScript for %s.' % variable
raise APIError(msg)
return json.loads(m.group(1).strip()) | Parse JavaScript variables in the source page | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1051-L1064 |
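The regex-and-JSON technique used by _parse_src_js_var can be demonstrated on a made-up script snippet; the variable name and the page text below are hypothetical, not the real 115.com source.

import json
import re

# Stand-alone demo: find "<NAME> = <json>;" in concatenated script text
# and decode the right-hand side, as _parse_src_js_var does.
text = 'var foo = 1;\nUPLOAD_CONFIG = {"url": "http://example.com/up"};'
variable = 'upload_config'  # hypothetical variable name
pattern = r"%s\s*=\s*(.*);" % variable.upper()
m = re.search(pattern, text)
if m:
    print(json.loads(m.group(1).strip()))  # {'url': 'http://example.com/up'}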
shichao-an/115wangpan | u115/api.py | BaseFile.delete | def delete(self):
"""
Delete this file or directory
:return: whether deletion is successful
:raise: :class:`.APIError` if this file or directory is already deleted
"""
fcid = None
pid = None
if isinstance(self, File):
fcid = self.fid
pid = self.cid
elif isinstance(self, Directory):
fcid = self.cid
pid = self.pid
else:
raise APIError('Invalid BaseFile instance.')
if not self._deleted:
if self.api._req_rb_delete(fcid, pid):
self._deleted = True
return True
else:
raise APIError('This file or directory is already deleted.') | python | def delete(self):
"""
Delete this file or directory
:return: whether deletion is successful
:raise: :class:`.APIError` if this file or directory is already deleted
"""
fcid = None
pid = None
if isinstance(self, File):
fcid = self.fid
pid = self.cid
elif isinstance(self, Directory):
fcid = self.cid
pid = self.pid
else:
raise APIError('Invalid BaseFile instance.')
if not self._deleted:
if self.api._req_rb_delete(fcid, pid):
self._deleted = True
return True
else:
raise APIError('This file or directory is already deleted.') | Delete this file or directory
:return: whether deletion is successful
:raise: :class:`.APIError` if this file or directory is already deleted | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1158-L1183 |
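A hedged usage sketch for delete(); the entry point and listing call follow the package's documented surface, and every concrete value below is hypothetical.

from u115 import API  # assumed public entry point of the package

api = API()
api.login('user@example.com', 'password')   # hypothetical credentials
entries = api.downloads_directory.list()    # File and Directory instances
if entries:
    entries[0].delete()   # True on success; a second call raises APIError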
shichao-an/115wangpan | u115/api.py | BaseFile.edit | def edit(self, name, mark=False):
"""
Edit this file or directory
:param str name: new name for this entry
:param bool mark: whether to bookmark this entry
"""
self.api.edit(self, name, mark) | python | def edit(self, name, mark=False):
"""
Edit this file or directory
:param str name: new name for this entry
:param bool mark: whether to bookmark this entry
"""
self.api.edit(self, name, mark) | Edit this file or directory
:param str name: new name for this entry
:param bool mark: whether to bookmark this entry | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1195-L1202 |
shichao-an/115wangpan | u115/api.py | File.directory | def directory(self):
"""Directory that holds this file"""
if self._directory is None:
self._directory = self.api._load_directory(self.cid)
return self._directory | python | def directory(self):
"""Directory that holds this file"""
if self._directory is None:
self._directory = self.api._load_directory(self.cid)
return self._directory | Directory that holds this file | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1258-L1262 |
shichao-an/115wangpan | u115/api.py | File.get_download_url | def get_download_url(self, proapi=False):
"""
Get this file's download URL
:param bool proapi: whether to use pro API
"""
if self._download_url is None:
self._download_url = \
self.api._req_files_download_url(self.pickcode, proapi)
return self._download_url | python | def get_download_url(self, proapi=False):
"""
Get this file's download URL
:param bool proapi: whether to use pro API
"""
if self._download_url is None:
self._download_url = \
self.api._req_files_download_url(self.pickcode, proapi)
return self._download_url | Get this file's download URL
:param bool proapi: whether to use pro API | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1264-L1274 |
shichao-an/115wangpan | u115/api.py | File.download | def download(self, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""Download this file"""
self.api.download(self, path, show_progress, resume, auto_retry,
proapi) | python | def download(self, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""Download this file"""
self.api.download(self, path, show_progress, resume, auto_retry,
proapi) | Download this file | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1281-L1285 |
shichao-an/115wangpan | u115/api.py | File.reload | def reload(self):
"""
Reload file info and metadata
* name
* sha
* pickcode
"""
res = self.api._req_file(self.fid)
data = res['data'][0]
self.name = data['file_name']
self.sha = data['sha1']
self.pickcode = data['pick_code'] | python | def reload(self):
"""
Reload file info and metadata
* name
* sha
* pickcode
"""
res = self.api._req_file(self.fid)
data = res['data'][0]
self.name = data['file_name']
self.sha = data['sha1']
self.pickcode = data['pick_code'] | Reload file info and metadata
* name
* sha
* pickcode | https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L1302-L1315 |