Code | Summary
---|---|
Please provide a description of the function:def decision_function(self, X, method='most-wins'):
X = _check_2d_inp(X, reshape = True)
if method == 'most-wins':
return self._decision_function_winners(X)
elif method == 'goodness':
return self._decision_function_goodness(X)
else:
raise ValueError("method must be one of 'most-wins' or 'goodness'.") | [
"\n Calculate a 'goodness' distribution over labels\n \n Note\n ----\n Predictions can be calculated either by counting which class wins the most\n pairwise comparisons (as in [1] and [2]), or - for classifiers with a 'predict_proba'\n method - by taking into account also the margins of the prediction difference\n for one class over the other for each comparison.\n \n If passing method = 'most-wins', this 'decision_function' will output the proportion\n of comparisons that each class won. If passing method = 'goodness', it sums the\n outputs from 'predict_proba' from each pairwise comparison and divides it by the\n number of comparisons.\n \n Using method = 'goodness' requires the base classifier to have a 'predict_proba' method.\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict the cost of each label.\n method : str, either 'most-wins' or 'goodness':\n How to decide the best label (see Note)\n \n Returns\n -------\n pred : array (n_samples, n_classes)\n A goodness score (more is better) for each label and observation.\n If passing method='most-wins', it counts the proportion of comparisons\n that each class won.\n If passing method='goodness', it sums the outputs from 'predict_proba' from\n each pairwise comparison and divides it by the number of comparisons.\n \n References\n ----------\n [1] Beygelzimer, A., Dani, V., Hayes, T., Langford, J., & Zadrozny, B. (2005)\n Error limiting reductions between classification tasks.\n [2] Beygelzimer, A., Langford, J., & Zadrozny, B. (2008).\n Machine learning techniques-reductions between prediction quality metrics.\n "
]
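A minimal usage sketch of the two scoring modes. The class and package names below (`WeightedAllPairs` from `costsensitive`) are assumptions about where this method lives, not confirmed by the snippet itself; any scikit-learn classifier with a 'predict_proba' method works as the base learner.

import numpy as np
from sklearn.linear_model import LogisticRegression
from costsensitive import WeightedAllPairs  # assumed host class of this method

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
C = rng.random(size=(200, 4))  # cost of predicting each of 4 labels per observation

model = WeightedAllPairs(LogisticRegression()).fit(X, C)
wins = model.decision_function(X, method='most-wins')  # share of pairwise comparisons won
good = model.decision_function(X, method='goodness')   # averaged 'predict_proba' margins
y_hat = np.argmax(good, axis=1)                         # label with the highest goodness score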
|
Please provide a description of the function:def predict(self, X, method = 'most-wins'):
X = _check_2d_inp(X, reshape = True)
if method == 'most-wins':
return self._predict_winners(X)
elif method == 'goodness':
goodness = self._decision_function_goodness(X)
if (len(goodness.shape) == 1) or (goodness.shape[0] == 1):
return np.argmax(goodness)
else:
return np.argmax(goodness, axis=1)
else:
raise ValueError("method must be one of 'most-wins' or 'goodness'.") | [
"\n Predict the less costly class for a given observation\n \n Note\n ----\n Predictions can be calculated either by counting which class wins the most\n pairwise comparisons (as in [1] and [2]), or - for classifiers with a 'predict_proba'\n method - by taking into account also the margins of the prediction difference\n for one class over the other for each comparison.\n \n Using method = 'goodness' requires the base classifier to have a 'predict_proba' method.\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict minimum cost label.\n method : str, either 'most-wins' or 'goodness':\n How to decide the best label (see Note)\n \n Returns\n -------\n y_hat : array (n_samples,)\n Label with expected minimum cost for each observation.\n \n References\n ----------\n [1] Beygelzimer, A., Dani, V., Hayes, T., Langford, J., & Zadrozny, B. (2005)\n Error limiting reductions between classification tasks.\n [2] Beygelzimer, A., Langford, J., & Zadrozny, B. (2008).\n Machine learning techniques-reductions between prediction quality metrics.\n "
]
|
Please provide a description of the function:def fit(self, X, C):
X,C = _check_fit_input(X,C)
C = np.asfortranarray(C)
nclasses=C.shape[1]
self.tree=_BinTree(nclasses)
self.classifiers=[deepcopy(self.base_classifier) for c in range(nclasses-1)]
classifier_queue=self.tree.is_at_bottom
next_round=list()
already_fitted=set()
labels_take=-np.ones((X.shape[0],len(self.classifiers)))
while True:
for c in classifier_queue:
if c in already_fitted or (c is None):
continue
child1, child2 = self.tree.childs[c]
if (child1>0) and (child1 not in already_fitted):
continue
if (child2>0) and (child2 not in already_fitted):
continue
if child1<=0:
class1=-np.repeat(child1,X.shape[0]).astype("int64")
else:
class1=labels_take[:, child1].astype("int64")
if child2<=0:
class2=-np.repeat(child2,X.shape[0]).astype("int64")
else:
class2=labels_take[:, child2].astype("int64")
cost1=C[np.arange(X.shape[0]),np.clip(class1,a_min=0,a_max=None)]
cost2=C[np.arange(X.shape[0]),np.clip(class2,a_min=0,a_max=None)]
y=(cost1<cost2).astype('uint8')
w=np.abs(cost1-cost2)
valid_obs=w>0
if child1>0:
valid_obs=valid_obs&(labels_take[:,child1]>=0)
if child2>0:
valid_obs=valid_obs&(labels_take[:,child2]>=0)
X_take=X[valid_obs,:]
y_take=y[valid_obs]
w_take=w[valid_obs]
w_take=_standardize_weights(w_take)
self.classifiers[c].fit(X_take,y_take,sample_weight=w_take)
labels_arr=np.c_[class1,class2].astype("int64")
labels_take[valid_obs,c]=labels_arr[np.repeat(0,X_take.shape[0]),\
self.classifiers[c].predict(X_take).reshape(-1).astype('uint8')]
already_fitted.add(c)
next_round.append(self.tree.parents[c])
if c==0 or (len(classifier_queue)==0):
break
classifier_queue=list(set(next_round))
next_round=list()
if (len(classifier_queue)==0):
break
return self | [
"\n Fit a filter tree classifier\n \n Note\n ----\n Shifting the order of the classes within the cost array will produce different\n results, as it will build a different binary tree comparing different classes\n at each node.\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n The data on which to fit a cost-sensitive classifier.\n C : array (n_samples, n_classes)\n The cost of predicting each label for each observation (more means worse).\n "
]
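A short, hedged sketch of how this fit routine would be called. `FilterTree` and the `costsensitive` package are assumptions about the surrounding class; the essential input is the cost matrix C with one column per class, and each internal tree node fits a binary classifier weighted by the absolute cost difference of its two candidate labels.

import numpy as np
from sklearn.linear_model import LogisticRegression
from costsensitive import FilterTree  # assumed host class of this `fit`

rng = np.random.default_rng(1)
X = rng.normal(size=(500, 10))
C = rng.random(size=(500, 4))  # cost of each of 4 labels per observation, higher is worse

tree = FilterTree(LogisticRegression()).fit(X, C)
y_hat = tree.predict(X)  # label with the lowest expected cost for each row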
|
Please provide a description of the function:def predict(self, X):
X = _check_2d_inp(X, reshape = True)
if X.shape[0] == 1:
return self._predict(X)
else:
shape_single = list(X.shape)
shape_single[0] = 1
pred = np.empty(X.shape[0], dtype = "int64")
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._predict)(row, pred, shape_single, X) for row in range(X.shape[0]))
return pred | [
"\n Predict the less costly class for a given observation\n \n Note\n ----\n The implementation here happens in a Python loop rather than in some\n NumPy array operations, thus it will be slower than the other algorithms\n here, even though in theory it implies fewer comparisons.\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict minimum cost label.\n method : str, either 'most-wins' or 'goodness':\n How to decide the best label (see Note)\n \n Returns\n -------\n y_hat : array (n_samples,)\n Label with expected minimum cost for each observation.\n "
]
|
Please provide a description of the function:def fit(self, X, y, sample_weight=None):
assert self.extra_rej_const >= 0
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
else:
if isinstance(sample_weight, list):
sample_weight = np.array(sample_weight)
if len(sample_weight.shape):
sample_weight = sample_weight.reshape(-1)
assert sample_weight.shape[0] == X.shape[0]
assert sample_weight.min() > 0
Z = sample_weight.max() + self.extra_rej_const
sample_weight = sample_weight / Z # sample weight is now acceptance prob
self.classifiers = [deepcopy(self.base_classifier) for c in range(self.n_samples)]
### Note: don't parallelize random number generation, as it's not always thread-safe
take_all = np.random.random(size = (self.n_samples, X.shape[0]))
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._fit)(c, take_all, X, y, sample_weight) for c in range(self.n_samples))
return self | [
"\n Fit a binary classifier with sample weights to data.\n \n Note\n ----\n Examples at each sample are accepted with probability = weight/Z,\n where Z = max(weight) + extra_rej_const.\n Larger values for extra_rej_const ensure that no example gets selected in\n every single sample, but results in smaller sample sizes as more examples are rejected.\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data on which to fit the model.\n y : array (n_samples,) or (n_samples, 1)\n Class of each observation.\n sample_weight : array (n_samples,) or (n_samples, 1)\n Weights indicating how important is each observation in the loss function.\n "
]
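The core of this method is the rejection-sampling step: each example is kept with probability weight/Z, where Z = max(weight) + extra_rej_const. A self-contained sketch of just that rule (the names here are illustrative, not the library's API):

import numpy as np

def accept_mask(sample_weight, extra_rej_const, rng):
    """Return a boolean mask of examples accepted with probability weight / Z."""
    Z = sample_weight.max() + extra_rej_const
    accept_prob = sample_weight / Z
    return rng.random(sample_weight.shape[0]) < accept_prob

rng = np.random.default_rng(2)
w = np.array([0.5, 1.0, 2.0, 4.0])
mask = accept_mask(w, extra_rej_const=0.1, rng=rng)  # heavier examples are kept more often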
|
Please provide a description of the function:def decision_function(self, X, aggregation = 'raw'):
if aggregation == 'weighted':
if 'predict_proba' not in dir(self.classifiers[0]):
raise Exception("'aggregation='weighted'' is only available for classifiers with 'predict_proba' method.")
preds = np.empty((X.shape[0], self.n_samples), dtype = "float64")
if aggregation == "raw":
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function_raw)(c, preds, X) for c in range(self.n_samples))
elif aggregation == "weighted":
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function_weighted)(c, preds, X) for c in range(self.n_samples))
else:
raise ValueError("'aggregation' must be one of 'raw' or 'weighted'.")
return preds.mean(axis = 1).reshape(-1) | [
"\n Calculate how preferred is positive class according to classifiers\n \n Note\n ----\n If passing aggregation = 'raw', it will output the proportion of the classifiers\n that voted for the positive class.\n If passing aggregation = 'weighted', it will output the average predicted probability\n for the positive class for each classifier.\n \n Calculating it with aggregation = 'weighted' requires the base classifier to have a\n 'predict_proba' method.\n \n Parameters\n ----------\n X : array (n_samples, n_features):\n Observations for which to determine class likelihood.\n aggregation : str, either 'raw' or 'weighted'\n How to compute the 'goodness' of the positive class (see Note)\n \n Returns\n -------\n pred : array (n_samples,)\n Score for the positive class (see Note)\n "
]
|
Please provide a description of the function:def fit(self, X, C):
X, C = _check_fit_input(X, C)
C = np.asfortranarray(C)
self.nclasses = C.shape[1]
self.classifiers = [deepcopy(self.base_classifier) for i in range(self.nclasses)]
if not self.weight_simple_diff:
C = WeightedAllPairs._calculate_v(self, C)
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._fit)(c, X, C) for c in range(self.nclasses))
return self | [
"\n Fit one weighted classifier per class\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n The data on which to fit a cost-sensitive classifier.\n C : array (n_samples, n_classes)\n The cost of predicting each label for each observation (more means worse).\n "
]
|
Please provide a description of the function:def decision_function(self, X):
X = _check_2d_inp(X)
preds = np.empty((X.shape[0], self.nclasses))
available_methods = dir(self.classifiers[0])
if "decision_function" in available_methods:
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function_decision_function)(c, preds, X) for c in range(self.nclasses))
apply_softmax = True
elif "predict_proba" in available_methods:
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function_predict_proba)(c, preds, X) for c in range(self.nclasses))
apply_softmax = False
elif "predict" in available_methods:
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function_predict)(c, preds, X) for c in range(self.nclasses))
apply_softmax = False
else:
raise ValueError("'base_classifier' must have at least one of 'decision_function', 'predict_proba', 'predict'.")
if apply_softmax:
preds = np.exp(preds - preds.max(axis=1).reshape((-1, 1)))
preds = preds / preds.sum(axis=1).reshape((-1, 1))
return preds | [
"\n Calculate a 'goodness' distribution over labels\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict the cost of each label.\n \n Returns\n -------\n pred : array (n_samples, n_classes)\n A goodness score (more is better) for each label and observation.\n If passing apply_softmax=True, these are standardized to sum up to 1 (per row).\n "
]
|
Please provide a description of the function:def predict(self, X):
X = _check_2d_inp(X)
return np.argmax(self.decision_function(X), axis=1) | [
"\n Predict the less costly class for a given observation\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict minimum cost label.\n \n Returns\n -------\n y_hat : array (n_samples,)\n Label with expected minimum cost for each observation.\n "
]
|
Please provide a description of the function:def fit(self, X, C):
X, C = _check_fit_input(X, C)
C = np.asfortranarray(C)
self.nclasses = C.shape[1]
self.regressors = [deepcopy(self.base_regressor) for i in range(self.nclasses)]
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._fit)(c, X, C) for c in range(self.nclasses))
return self | [
"\n Fit one regressor per class\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n The data on which to fit a cost-sensitive classifier.\n C : array (n_samples, n_classes)\n The cost of predicting each label for each observation (more means worse).\n "
]
|
Please provide a description of the function:def decision_function(self, X, apply_softmax = True):
X = _check_2d_inp(X, reshape = True)
preds = np.empty((X.shape[0], self.nclasses), dtype = "float64")
Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._decision_function)(c, preds, X) for c in range(self.nclasses))
if not apply_softmax:
return preds
else:
preds = np.exp(preds - preds.max(axis=1).reshape((-1, 1)))
preds = preds/ preds.sum(axis=1).reshape((-1, 1))
return 1 - preds | [
"\n Get cost estimates for each observation\n \n Note\n ----\n If called with apply_softmax = False, this will output the predicted\n COST rather than the 'goodness' - meaning, more is worse.\n \n If called with apply_softmax = True, it will output one minus the softmax on the costs,\n producing a distribution over the choices summing up to 1 where more is better.\n \n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict the cost of each label.\n apply_softmax : bool\n Whether to apply a softmax transform to the costs (see Note).\n \n Returns\n -------\n pred : array (n_samples, n_classes)\n Either predicted cost or a distribution of 'goodness' over the choices,\n according to the apply_softmax argument.\n "
]
|
Please provide a description of the function:def predict(self, X):
X = _check_2d_inp(X)
return np.argmin(self.decision_function(X, False), axis=1) | [
"\n Predict the less costly class for a given observation\n \n Parameters\n ----------\n X : array (n_samples, n_features)\n Data for which to predict minimum cost labels.\n \n Returns\n -------\n y_hat : array (n_samples,)\n Label with expected minimum cost for each observation.\n "
]
|
Please provide a description of the function:def read_ix(ix, **kwargs):
if not isinstance(ix, ixmp.TimeSeries):
error = 'not recognized as valid ixmp class: {}'.format(ix)
raise ValueError(error)
df = ix.timeseries(iamc=False, **kwargs)
df['model'] = ix.model
df['scenario'] = ix.scenario
return df, 'year', [] | [
"Read timeseries data from an ixmp object\n\n Parameters\n ----------\n ix: ixmp.TimeSeries or ixmp.Scenario\n this option requires the ixmp package as a dependency\n kwargs: arguments passed to ixmp.TimeSeries.timeseries()\n "
]
|
Please provide a description of the function:def requires_package(pkg, msg, error_type=ImportError):
def _requires_package(func):
def wrapper(*args, **kwargs):
if pkg is None:
raise error_type(msg)
return func(*args, **kwargs)
return wrapper
return _requires_package | [
"Decorator when a function requires an optional dependency\n\n Parameters\n ----------\n pkg : imported package object\n msg : string\n Message to show to user with error_type\n error_type : python error class\n "
]
|
Please provide a description of the function:def write_sheet(writer, name, df, index=False):
if index:
df = df.reset_index()
df.to_excel(writer, name, index=False)
worksheet = writer.sheets[name]
for i, col in enumerate(df.columns):
if df.dtypes[col].name.startswith(('float', 'int')):
width = len(str(col)) + 2
else:
width = max([df[col].map(lambda x: len(str(x or 'None'))).max(),
len(col)]) + 2
xls_col = '{c}:{c}'.format(c=NUMERIC_TO_STR[i])
worksheet.set_column(xls_col, width) | [
"Write a pandas DataFrame to an ExcelWriter,\n auto-formatting column width depending on maxwidth of data and colum header\n\n Parameters\n ----------\n writer: pandas.ExcelWriter\n an instance of a pandas ExcelWriter\n name: string\n name of the sheet to be written\n df: pandas.DataFrame\n a pandas DataFrame to be written to the sheet\n index: boolean, default False\n flag whether index should be written to the sheet\n "
]
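A hedged usage sketch: `worksheet.set_column` is an xlsxwriter call, so the writer is assumed to use the xlsxwriter engine, and the import location of `write_sheet` is an assumption.

import pandas as pd
from pyam.utils import write_sheet  # assumed location of this helper

df = pd.DataFrame({'scenario': ['a', 'b'], 'value': [1.0, 2.0]})
with pd.ExcelWriter('output.xlsx', engine='xlsxwriter') as writer:
    write_sheet(writer, 'data', df)  # column widths auto-sized to data and headers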
|
Please provide a description of the function:def read_pandas(fname, *args, **kwargs):
if not os.path.exists(fname):
raise ValueError('no data file `{}` found!'.format(fname))
if fname.endswith('csv'):
df = pd.read_csv(fname, *args, **kwargs)
else:
xl = pd.ExcelFile(fname)
if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs:
kwargs['sheet_name'] = 'data'
df = pd.read_excel(fname, *args, **kwargs)
return df | [
"Read a file and return a pd.DataFrame"
]
|
Please provide a description of the function:def read_file(fname, *args, **kwargs):
if not isstr(fname):
raise ValueError('reading multiple files not supported, '
'please use `pyam.IamDataFrame.append()`')
logger().info('Reading `{}`'.format(fname))
format_kwargs = {}
# extract kwargs that are intended for `format_data`
for c in [i for i in IAMC_IDX + ['year', 'time', 'value'] if i in kwargs]:
format_kwargs[c] = kwargs.pop(c)
return format_data(read_pandas(fname, *args, **kwargs), **format_kwargs) | [
"Read data from a file saved in the standard IAMC format\n or a table with year/value columns\n "
]
|
Please provide a description of the function:def format_data(df, **kwargs):
if isinstance(df, pd.Series):
df = df.to_frame()
# Check for R-style year columns, converting where necessary
def convert_r_columns(c):
try:
first = c[0]
second = c[1:]
if first == 'X':
try:
# bingo! was X2015 R-style, return the integer
return int(second)
except:
# nope, not an int, fall down to final return statement
pass
except:
# not a string/iterable/etc, fall down to final return statement
pass
return c
df.columns = df.columns.map(convert_r_columns)
# if `value` is given but not `variable`,
# melt value columns and use column name as `variable`
if 'value' in kwargs and 'variable' not in kwargs:
value = kwargs.pop('value')
value = value if islistable(value) else [value]
_df = df.set_index(list(set(df.columns) - set(value)))
dfs = []
for v in value:
if v not in df.columns:
raise ValueError('column `{}` does not exist!'.format(v))
vdf = _df[v].to_frame().rename(columns={v: 'value'})
vdf['variable'] = v
dfs.append(vdf.reset_index())
df = pd.concat(dfs).reset_index(drop=True)
# otherwise, rename columns or concat to IAMC-style or do a fill-by-value
for col, value in kwargs.items():
if col in df:
raise ValueError('conflict of kwarg with column `{}` in dataframe!'
.format(col))
if isstr(value) and value in df:
df.rename(columns={value: col}, inplace=True)
elif islistable(value) and all([c in df.columns for c in value]):
df[col] = df.apply(lambda x: concat_with_pipe(x, value), axis=1)
df.drop(value, axis=1, inplace=True)
elif isstr(value):
df[col] = value
else:
raise ValueError('invalid argument for casting `{}: {}`'
.format(col, value))
# all lower case
str_cols = [c for c in df.columns if isstr(c)]
df.rename(columns={c: str(c).lower() for c in str_cols}, inplace=True)
if 'notes' in df.columns: # this came from the database
logger().info('Ignoring notes column in dataframe')
df.drop(columns='notes', inplace=True)
col = df.columns[0] # first column has database copyright notice
df = df[~df[col].str.contains('database', case=False)]
if 'scenario' in df.columns and 'model' not in df.columns:
# model and scenario are jammed together in RCP data
scen = df['scenario']
df['model'] = scen.apply(lambda s: s.split('-')[0].strip())
df['scenario'] = scen.apply(
lambda s: '-'.join(s.split('-')[1:]).strip())
# reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# format columns to lower-case and check that all required columns exist
if not set(IAMC_IDX).issubset(set(df.columns)):
missing = list(set(IAMC_IDX) - set(df.columns))
raise ValueError("missing required columns `{}`!".format(missing))
# check whether data in wide format (IAMC) or long format (`value` column)
if 'value' in df.columns:
# check if time column is given as `year` (int) or `time` (datetime)
cols = df.columns
if 'year' in cols:
time_col = 'year'
elif 'time' in cols:
time_col = 'time'
else:
msg = 'invalid time format, must have either `year` or `time`!'
raise ValueError(msg)
extra_cols = list(set(cols) - set(IAMC_IDX + [time_col, 'value']))
else:
# if in wide format, check if columns are years (int) or datetime
cols = set(df.columns) - set(IAMC_IDX)
year_cols, time_cols, extra_cols = [], [], []
for i in cols:
try:
int(i) # this is a year
year_cols.append(i)
except (ValueError, TypeError):
try:
dateutil.parser.parse(str(i)) # this is datetime
time_cols.append(i)
except ValueError:
extra_cols.append(i) # some other string
if year_cols and not time_cols:
time_col = 'year'
melt_cols = year_cols
elif not year_cols and time_cols:
time_col = 'time'
melt_cols = time_cols
else:
msg = 'invalid column format, must be either years or `datetime`!'
raise ValueError(msg)
df = pd.melt(df, id_vars=IAMC_IDX + extra_cols, var_name=time_col,
value_vars=sorted(melt_cols), value_name='value')
# cast value columns to numeric, drop NaN's, sort data
df['value'] = df['value'].astype('float64')
df.dropna(inplace=True)
# check for duplicates and return sorted data
idx_cols = IAMC_IDX + [time_col] + extra_cols
if any(df[idx_cols].duplicated()):
raise ValueError('duplicate rows in `data`!')
return sort_data(df, idx_cols), time_col, extra_cols | [
"Convert a `pd.Dataframe` or `pd.Series` to the required format"
]
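A hedged example of the accepted layouts, assuming this helper sits behind the `pyam.IamDataFrame` constructor (which is how it is typically invoked): a wide IAMC-style table with year columns is melted into the long format with a `value` column.

import pandas as pd
import pyam  # format_data is assumed to be called internally by the IamDataFrame constructor

wide = pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Primary Energy', 'EJ/yr', 1.0, 6.0]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2005, 2010],
)
df = pyam.IamDataFrame(wide)  # year columns melted into a long 'value' column
print(df.data)                # columns: model, scenario, region, variable, unit, year, value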
|
Please provide a description of the function:def sort_data(data, cols):
return data.sort_values(cols)[cols + ['value']].reset_index(drop=True) | [
"Sort `data` rows and order columns"
]
|
Please provide a description of the function:def find_depth(data, s='', level=None):
# remove wildcard as last character from string, escape regex characters
_s = re.compile('^' + _escape_regexp(s.rstrip('*')))
_p = re.compile('\\|')
# find depth
def _count_pipes(val):
return len(_p.findall(re.sub(_s, '', val))) if _s.match(val) else None
n_pipes = map(_count_pipes, data)
# if no level test is specified, return the depth as int
if level is None:
return list(n_pipes)
# if `level` is given, set function for finding depth level =, >=, <= |s
if not isstr(level):
test = lambda x: level == x if x is not None else False
elif level[-1] == '-':
level = int(level[:-1])
test = lambda x: level >= x if x is not None else False
elif level[-1] == '+':
level = int(level[:-1])
test = lambda x: level <= x if x is not None else False
else:
raise ValueError('Unknown level type: `{}`'.format(level))
return list(map(test, n_pipes)) | [
"\n return or assert the depth (number of `|`) of variables\n\n Parameters\n ----------\n data : pd.Series of strings\n IAMC-style variables\n s : str, default ''\n remove leading `s` from any variable in `data`\n level : int or str, default None\n if None, return depth (number of `|`); else, return list of booleans\n whether depth satisfies the condition (equality if `level` is int,\n >= if `.+`, <= if `.-`)\n "
]
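A short illustration of the depth counting and the level tests; the import location of this helper is an assumption.

import pandas as pd
from pyam.utils import find_depth  # assumed location of this helper

variables = pd.Series(['Emissions', 'Emissions|CO2', 'Emissions|CO2|Energy'])
print(find_depth(variables))              # [0, 1, 2] -- number of '|' per variable
print(find_depth(variables, level=1))     # [False, True, False] -- depth equal to 1
print(find_depth(variables, level='1-'))  # [True, True, False] -- depth of at most 1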
|
Please provide a description of the function:def pattern_match(data, values, level=None, regexp=False, has_nan=True):
matches = np.array([False] * len(data))
if not isinstance(values, collections.abc.Iterable) or isstr(values):
values = [values]
# issue (#40) with string-to-nan comparison, replace nan by empty string
_data = data.copy()
if has_nan:
_data.loc[[np.isnan(i) if not isstr(i) else False for i in _data]] = ''
for s in values:
if isstr(s):
pattern = re.compile(_escape_regexp(s) + '$' if not regexp else s)
subset = filter(pattern.match, _data)
depth = True if level is None else find_depth(_data, s, level)
matches |= (_data.isin(subset) & depth)
else:
matches |= data == s
return matches | [
"\n matching of model/scenario names, variables, regions, and meta columns to\n pseudo-regex (if `regexp == False`) for filtering (str, int, bool)\n "
]
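A hedged illustration of the pseudo-regex behaviour (only `*` acts as a wildcard, everything else is matched literally); the import location of this helper is an assumption.

import pandas as pd
from pyam.utils import pattern_match  # assumed location of this helper

regions = pd.Series(['World', 'R5ASIA', 'R5LAM'])
print(pattern_match(regions, 'R5*'))    # [False, True, True] -- '*' expands to '.*'
print(pattern_match(regions, 'World'))  # [True, False, False] -- exact full-string match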
|
Please provide a description of the function:def _escape_regexp(s):
return (
str(s)
.replace('|', '\\|')
.replace('.', '\\.')  # `.` has to be replaced before `*`
.replace('*', '.*')
.replace('+', '\\+')
.replace('(', '\\(')
.replace(')', '\\)')
.replace('$', '\\$')
)
"escape characters with specific regexp use"
]
|
Please provide a description of the function:def years_match(data, years):
years = [years] if isinstance(years, int) else years
dt = datetime.datetime
if isinstance(years, dt) or isinstance(years[0], dt):
error_msg = "`year` can only be filtered with ints or lists of ints"
raise TypeError(error_msg)
return data.isin(years) | [
"\n matching of year columns for data filtering\n "
]
|
Please provide a description of the function:def hour_match(data, hours):
hours = [hours] if isinstance(hours, int) else hours
return data.isin(hours) | [
"\n matching of days in time columns for data filtering\n "
]
|
Please provide a description of the function:def datetime_match(data, dts):
dts = dts if islistable(dts) else [dts]
if any([not isinstance(i, datetime.datetime) for i in dts]):
error_msg = (
"`time` can only be filtered by datetimes"
)
raise TypeError(error_msg)
return data.isin(dts) | [
"\n matching of datetimes in time columns for data filtering\n "
]
|
Please provide a description of the function:def to_int(x, index=False):
_x = x.index if index else x
cols = list(map(int, _x))
error = _x[cols != _x]
if not error.empty:
raise ValueError('invalid values `{}`'.format(list(error)))
if index:
x.index = cols
return x
else:
return _x | [
"Formatting series or timeseries columns to int and checking validity.\n If `index=False`, the function works on the `pd.Series x`; else,\n the function casts the index of `x` to int and returns x with a new index.\n "
]
|
Please provide a description of the function:def concat_with_pipe(x, cols=None):
cols = cols or x.index
return '|'.join([x[i] for i in cols if x[i] not in [None, np.nan]]) | [
"Concatenate a `pd.Series` separated by `|`, drop `None` or `np.nan`"
]
|
Please provide a description of the function:def reduce_hierarchy(x, depth):
_x = x.split('|')
depth = len(_x) + depth - 1 if depth < 0 else depth
return '|'.join(_x[0:(depth + 1)]) | [
"Reduce the hierarchy (depth by `|`) string to the specified level"
]
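A quick illustration (the import location is an assumption):

from pyam.utils import reduce_hierarchy  # assumed location of this helper

v = 'Emissions|CO2|Energy|Supply'
print(reduce_hierarchy(v, 0))   # 'Emissions'
print(reduce_hierarchy(v, 1))   # 'Emissions|CO2'
print(reduce_hierarchy(v, -1))  # 'Emissions|CO2|Energy' -- negative depth counts from the end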
|
Please provide a description of the function:def _aggregate(df, by):
by = [by] if isstr(by) else by
cols = [c for c in list(df.columns) if c not in ['value'] + by]
return df.groupby(cols).sum()['value'] | [
"Aggregate `df` by specified column(s), return indexed `pd.Series`"
]
|
Please provide a description of the function:def _check_rows(rows, check, in_range=True, return_test='any'):
valid_checks = set(['up', 'lo', 'year'])
if not set(check.keys()).issubset(valid_checks):
msg = 'Unknown checking type: {}'
raise ValueError(msg.format(check.keys() - valid_checks))
where_idx = set(rows.index[rows['year'] == check['year']]) \
if 'year' in check else set(rows.index)
rows = rows.loc[list(where_idx)]
up_op = rows['value'].__le__ if in_range else rows['value'].__gt__
lo_op = rows['value'].__ge__ if in_range else rows['value'].__lt__
check_idx = []
for (bd, op) in [('up', up_op), ('lo', lo_op)]:
if bd in check:
check_idx.append(set(rows.index[op(check[bd])]))
if return_test == 'any':
ret = where_idx & set.union(*check_idx)
elif return_test == 'all':
ret = where_idx if where_idx == set.intersection(*check_idx) else set()
else:
raise ValueError('Unknown return test: {}'.format(return_test))
return ret | [
"Check all rows to be in/out of a certain range and provide testing on\n return values based on provided conditions\n\n Parameters\n ----------\n rows: pd.DataFrame\n data rows\n check: dict\n dictionary with possible values of 'up', 'lo', and 'year'\n in_range: bool, optional\n check if values are inside or outside of provided range\n return_test: str, optional\n possible values:\n - 'any': default, return scenarios where check passes for any entry\n - 'all': test if all values match checks, if not, return empty set\n "
]
|
Please provide a description of the function:def _apply_criteria(df, criteria, **kwargs):
idxs = []
for var, check in criteria.items():
_df = df[df['variable'] == var]
for group in _df.groupby(META_IDX):
grp_idxs = _check_rows(group[-1], check, **kwargs)
idxs.append(grp_idxs)
df = df.loc[itertools.chain(*idxs)]
return df | [
"Apply criteria individually to every model/scenario instance"
]
|
Please provide a description of the function:def _make_index(df, cols=META_IDX):
return pd.MultiIndex.from_tuples(
pd.unique(list(zip(*[df[col] for col in cols]))), names=tuple(cols)) | [
"Create an index from the columns of a dataframe"
]
|
Please provide a description of the function:def validate(df, criteria={}, exclude_on_fail=False, **kwargs):
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.validate(criteria=criteria, exclude_on_fail=exclude_on_fail)
df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded
return vdf | [
"Validate scenarios using criteria on timeseries values\n\n Parameters\n ----------\n df: IamDataFrame instance\n args: see `IamDataFrame.validate()` for details\n kwargs: passed to `df.filter()`\n "
]
|
Please provide a description of the function:def require_variable(df, variable, unit=None, year=None, exclude_on_fail=False,
**kwargs):
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.require_variable(variable=variable, unit=unit, year=year,
exclude_on_fail=exclude_on_fail)
df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded
return vdf | [
"Check whether all scenarios have a required variable\n\n Parameters\n ----------\n df: IamDataFrame instance\n args: see `IamDataFrame.require_variable()` for details\n kwargs: passed to `df.filter()`\n "
]
|
Please provide a description of the function:def categorize(df, name, value, criteria,
color=None, marker=None, linestyle=None, **kwargs):
fdf = df.filter(**kwargs)
fdf.categorize(name=name, value=value, criteria=criteria, color=color,
marker=marker, linestyle=linestyle)
# update metadata
if name in df.meta:
df.meta[name].update(fdf.meta[name])
else:
df.meta[name] = fdf.meta[name] | [
"Assign scenarios to a category according to specific criteria\n or display the category assignment\n\n Parameters\n ----------\n df: IamDataFrame instance\n args: see `IamDataFrame.categorize()` for details\n kwargs: passed to `df.filter()`\n "
]
|
Please provide a description of the function:def check_aggregate(df, variable, components=None, exclude_on_fail=False,
multiplier=1, **kwargs):
fdf = df.filter(**kwargs)
if len(fdf.data) > 0:
vdf = fdf.check_aggregate(variable=variable, components=components,
exclude_on_fail=exclude_on_fail,
multiplier=multiplier)
df.meta['exclude'] |= fdf.meta['exclude'] # update if any excluded
return vdf | [
"Check whether the timeseries values match the aggregation\n of sub-categories\n\n Parameters\n ----------\n df: IamDataFrame instance\n args: see IamDataFrame.check_aggregate() for details\n kwargs: passed to `df.filter()`\n "
]
|
Please provide a description of the function:def filter_by_meta(data, df, join_meta=False, **kwargs):
if not set(META_IDX).issubset(data.index.names + list(data.columns)):
raise ValueError('missing required index dimensions or columns!')
meta = pd.DataFrame(df.meta[list(set(kwargs) - set(META_IDX))].copy())
# filter meta by columns
keep = np.array([True] * len(meta))
apply_filter = False
for col, values in kwargs.items():
if col in META_IDX and values is not None:
_col = meta.index.get_level_values(0 if col == 'model' else 1)
keep &= pattern_match(_col, values, has_nan=False)
apply_filter = True
elif values is not None:
keep &= pattern_match(meta[col], values)
apply_filter |= values is not None
meta = meta[keep]
# set the data index to META_IDX and apply filtered meta index
data = data.copy()
idx = list(data.index.names) if not data.index.names == [None] else None
data = data.reset_index().set_index(META_IDX)
meta = meta.loc[meta.index.intersection(data.index)]
meta.index.names = META_IDX
if apply_filter:
data = data.loc[meta.index]
data.index.names = META_IDX
# join meta (optional), reset index to format as input arg
data = data.join(meta) if join_meta else data
data = data.reset_index().set_index(idx or 'index')
if idx is None:
data.index.name = None
return data | [
"Filter by and join meta columns from an IamDataFrame to a pd.DataFrame\n\n Parameters\n ----------\n data: pd.DataFrame instance\n DataFrame to which meta columns are to be joined,\n index or columns must include `['model', 'scenario']`\n df: IamDataFrame instance\n IamDataFrame from which meta columns are filtered and joined (optional)\n join_meta: bool, default False\n join selected columns from `df.meta` on `data`\n kwargs:\n meta columns to be filtered/joined, where `col=...` applies filters\n by the given arguments (using `utils.pattern_match()`) and `col=None`\n joins the column without filtering (setting col to `np.nan`\n if `(model, scenario) not in df.meta.index`)\n "
]
|
Please provide a description of the function:def compare(left, right, left_label='left', right_label='right',
drop_close=True, **kwargs):
ret = pd.concat({right_label: right.data.set_index(right._LONG_IDX),
left_label: left.data.set_index(left._LONG_IDX)}, axis=1)
ret.columns = ret.columns.droplevel(1)
if drop_close:
ret = ret[~np.isclose(ret[left_label], ret[right_label], **kwargs)]
return ret[[right_label, left_label]] | [
"Compare the data in two IamDataFrames and return a pd.DataFrame\n\n Parameters\n ----------\n left, right: IamDataFrames\n the IamDataFrames to be compared\n left_label, right_label: str, default `left`, `right`\n column names of the returned dataframe\n drop_close: bool, default True\n remove all data where `left` and `right` are close\n kwargs: passed to `np.isclose()`\n "
]
|
Please provide a description of the function:def concat(dfs):
if isstr(dfs) or not hasattr(dfs, '__iter__'):
msg = 'Argument must be a non-string iterable (e.g., list or tuple)'
raise TypeError(msg)
_df = None
for df in dfs:
df = df if isinstance(df, IamDataFrame) else IamDataFrame(df)
if _df is None:
_df = copy.deepcopy(df)
else:
_df.append(df, inplace=True)
return _df | [
"Concatenate a series of `pyam.IamDataFrame`-like objects together"
]
|
Please provide a description of the function:def variables(self, include_units=False):
if include_units:
return self.data[['variable', 'unit']].drop_duplicates()\
.reset_index(drop=True).sort_values('variable')
else:
return pd.Series(self.data.variable.unique(), name='variable') | [
"Get a list of variables\n\n Parameters\n ----------\n include_units: boolean, default False\n include the units\n "
]
|
Please provide a description of the function:def append(self, other, ignore_meta_conflict=False, inplace=False,
**kwargs):
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
if self.time_col != other.time_col:
raise ValueError('incompatible time format (years vs. datetime)!')
ret = copy.deepcopy(self) if not inplace else self
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(
other.meta.loc[intersect, cols]):
conflict_idx = (
pd.concat([ret.meta.loc[intersect, cols],
other.meta.loc[intersect, cols]]
).drop_duplicates()
.index.drop_duplicates()
)
msg = 'conflict in `meta` for scenarios {}'.format(
[i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg)
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer',
left_index=True, right_index=True)
# join other.meta for new scenarios
if not diff.empty:
# sorting not supported by `pd.append()` prior to pandas version 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)
# append other.data (verify integrity for no duplicates)
_data = ret.data.set_index(ret._LONG_IDX).append(
other.data.set_index(other._LONG_IDX), verify_integrity=True)
# merge extra columns in `data` and set `LONG_IDX`
ret.extra_cols += [i for i in other.extra_cols
if i not in ret.extra_cols]
ret._LONG_IDX = IAMC_IDX + [ret.time_col] + ret.extra_cols
ret.data = sort_data(_data.reset_index(), ret._LONG_IDX)
if not inplace:
return ret | [
"Append any castable object to this IamDataFrame.\n Columns in `other.meta` that are not in `self.meta` are always merged,\n duplicate region-variable-unit-year rows raise a ValueError.\n\n Parameters\n ----------\n other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,\n pd.DataFrame or data file\n An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),\n pandas.DataFrame or data file with IAMC-format data columns\n ignore_meta_conflict : bool, default False\n If False and `other` is an IamDataFrame, raise an error if\n any meta columns present in `self` and `other` are not identical.\n inplace : bool, default False\n If True, do operation inplace and return None\n kwargs are passed through to `IamDataFrame(other, **kwargs)`\n "
]
|
Please provide a description of the function:def pivot_table(self, index, columns, values='value',
aggfunc='count', fill_value=None, style=None):
index = [index] if isstr(index) else index
columns = [columns] if isstr(columns) else columns
df = self.data
# allow 'aggfunc' to be passed as string for easier user interface
if isstr(aggfunc):
if aggfunc == 'count':
df = self.data.groupby(index + columns, as_index=False).count()
fill_value = 0
elif aggfunc == 'mean':
df = self.data.groupby(index + columns, as_index=False).mean()\
.round(2)
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
elif aggfunc == 'sum':
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
df = df.pivot_table(values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value)
return df | [
"Returns a pivot table\n\n Parameters\n ----------\n index: str or list of strings\n rows for Pivot table\n columns: str or list of strings\n columns for Pivot table\n values: str, default 'value'\n dataframe column to aggregate or count\n aggfunc: str or function, default 'count'\n function used for aggregation,\n accepts 'count', 'mean', and 'sum'\n fill_value: scalar, default None\n value to replace missing values with\n style: str, default None\n output style for pivot table formatting\n accepts 'highlight_not_max', 'heatmap'\n "
]
|
Please provide a description of the function:def interpolate(self, year):
df = self.pivot_table(index=IAMC_IDX, columns=['year'],
values='value', aggfunc=np.sum)
# drop year-rows where values are already defined
if year in df.columns:
df = df[np.isnan(df[year])]
fill_values = df.apply(fill_series,
raw=False, axis=1, year=year)
fill_values = fill_values.dropna().reset_index()
fill_values = fill_values.rename(columns={0: "value"})
fill_values['year'] = year
self.data = self.data.append(fill_values, ignore_index=True) | [
"Interpolate missing values in timeseries (linear interpolation)\n\n Parameters\n ----------\n year: int\n year to be interpolated\n "
]
|
Please provide a description of the function:def as_pandas(self, with_metadata=False):
if with_metadata:
cols = self._discover_meta_cols(**with_metadata) \
if isinstance(with_metadata, dict) else self.meta.columns
return (
self.data
.set_index(META_IDX)
.join(self.meta[cols])
.reset_index()
)
else:
return self.data.copy() | [
"Return this as a pd.DataFrame\n\n Parameters\n ----------\n with_metadata : bool, default False or dict\n if True, join data with all meta columns; if a dict, discover\n meaningful meta columns from values (in key-value)\n "
]
|
Please provide a description of the function:def _discover_meta_cols(self, **kwargs):
cols = set(['exclude'])
for arg, value in kwargs.items():
if isstr(value) and value in self.meta.columns:
cols.add(value)
return list(cols) | [
"Return the subset of `kwargs` values (not keys!) matching\n a `meta` column name"
]
|
Please provide a description of the function:def timeseries(self, iamc_index=False):
index = IAMC_IDX if iamc_index else IAMC_IDX + self.extra_cols
df = (
self.data
.pivot_table(index=index, columns=self.time_col)
.value # column name
.rename_axis(None, axis=1)
)
if df.index.has_duplicates:
raise ValueError('timeseries object has duplicates in index, '
'use `iamc_index=False`')
return df | [
"Returns a pd.DataFrame in wide format (years or timedate as columns)\n\n Parameters\n ----------\n iamc_index: bool, default False\n if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;\n else, use all `data` columns\n "
]
|
Please provide a description of the function:def set_meta(self, meta, name=None, index=None):
# check that name is valid and doesn't conflict with data columns
if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
raise ValueError('Must pass a name or use a named pd.Series')
name = name or meta.name
if name in self.data.columns:
raise ValueError('`{}` already exists in `data`!'.format(name))
# check if meta has a valid index and use it for further workflow
if hasattr(meta, 'index') and hasattr(meta.index, 'names') \
and set(META_IDX).issubset(meta.index.names):
index = meta.index
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index
if not isinstance(index, pd.MultiIndex):
raise ValueError('index cannot be coerced to pd.MultiIndex')
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("non-unique ['model', 'scenario'] index!")
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
# reduce index dimensions to model-scenario only
meta = (
meta
.reset_index()
.reindex(columns=META_IDX + [name])
.set_index(META_IDX)
)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
error = "adding metadata for non-existing scenarios '{}'!"
raise ValueError(error.format(diff))
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name]) | [
"Add metadata columns as pd.Series, list or value (int/float/str)\n\n Parameters\n ----------\n meta: pd.Series, list, int, float or str\n column to be added to metadata\n (by `['model', 'scenario']` index if possible)\n name: str, optional\n meta column name (defaults to meta pd.Series.name);\n either a meta.name or the name kwarg must be defined\n index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional\n index to be used for setting meta column (`['model', 'scenario']`)\n "
]
|
Please provide a description of the function:def categorize(self, name, value, criteria,
color=None, marker=None, linestyle=None):
# add plotting run control
for kind, arg in [('color', color), ('marker', marker),
('linestyle', linestyle)]:
if arg:
run_control().update({kind: {name: {value: arg}}})
# find all data that matches categorization
rows = _apply_criteria(self.data, criteria,
in_range=True, return_test='all')
idx = _meta_idx(rows)
if len(idx) == 0:
logger().info("No scenarios satisfy the criteria")
return # EXIT FUNCTION
# update metadata dataframe
self._new_meta_column(name)
self.meta.loc[idx, name] = value
msg = '{} scenario{} categorized as `{}: {}`'
logger().info(msg.format(len(idx), '' if len(idx) == 1 else 's',
name, value)) | [
"Assign scenarios to a category according to specific criteria\n or display the category assignment\n\n Parameters\n ----------\n name: str\n category column name\n value: str\n category identifier\n criteria: dict\n dictionary with variables mapped to applicable checks\n ('up' and 'lo' for respective bounds, 'year' for years - optional)\n color: str\n assign a color to this category for plotting\n marker: str\n assign a marker to this category for plotting\n linestyle: str\n assign a linestyle to this category for plotting\n "
]
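A hedged, self-contained sketch of a criteria dictionary in use; the model, scenario, and variable names below are made up for illustration.

import pandas as pd
import pyam

data = pd.DataFrame(
    [['model_a', 'scen_a', 'World', 'Temperature', 'C', 1.8],
     ['model_a', 'scen_b', 'World', 'Temperature', 'C', 2.6]],
    columns=['model', 'scenario', 'region', 'variable', 'unit', 2100],
)
df = pyam.IamDataFrame(data)
df.categorize(name='warming', value='below 2C',
              criteria={'Temperature': {'up': 2.0, 'year': 2100}})
print(df.meta)  # only scen_a is assigned 'below 2C'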
|
Please provide a description of the function:def _new_meta_column(self, name):
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name))
if name not in self.meta:
self.meta[name] = np.nan | [
"Add a column to meta if it doesn't exist, set to value `np.nan`"
]
|
Please provide a description of the function:def require_variable(self, variable, unit=None, year=None,
exclude_on_fail=False):
criteria = {'variable': variable}
if unit:
criteria.update({'unit': unit})
if year:
criteria.update({'year': year})
keep = self._apply_filters(**criteria)
idx = self.meta.index.difference(_meta_idx(self.data[keep]))
n = len(idx)
if n == 0:
logger().info('All scenarios have the required variable `{}`'
.format(variable))
return
msg = '{} scenario does not include required variable `{}`' if n == 1 \
else '{} scenarios do not include required variable `{}`'
if exclude_on_fail:
self.meta.loc[idx, 'exclude'] = True
msg += ', marked as `exclude: True` in metadata'
logger().info(msg.format(n, variable))
return pd.DataFrame(index=idx).reset_index() | [
"Check whether all scenarios have a required variable\n\n Parameters\n ----------\n variable: str\n required variable\n unit: str, default None\n name of unit (optional)\n year: int or list, default None\n years (optional)\n exclude_on_fail: bool, default False\n flag scenarios missing the required variables as `exclude: True`\n "
]
|
Please provide a description of the function:def validate(self, criteria={}, exclude_on_fail=False):
df = _apply_criteria(self.data, criteria, in_range=False)
if not df.empty:
msg = '{} of {} data points do not satisfy the criteria'
logger().info(msg.format(len(df), len(self.data)))
if exclude_on_fail and len(df) > 0:
self._exclude_on_fail(df)
return df | [
"Validate scenarios using criteria on timeseries values\n\n Parameters\n ----------\n criteria: dict\n dictionary with variable keys and check values\n ('up' and 'lo' for respective bounds, 'year' for years)\n exclude_on_fail: bool, default False\n flag scenarios failing validation as `exclude: True`\n "
]
|
Please provide a description of the function:def rename(self, mapping=None, inplace=False, append=False,
check_duplicates=True, **kwargs):
# combine `mapping` arg and mapping kwargs, ensure no rename conflicts
mapping = mapping or {}
duplicate = set(mapping).intersection(kwargs)
if duplicate:
msg = 'conflicting rename args for columns `{}`'.format(duplicate)
raise ValueError(msg)
mapping.update(kwargs)
# determine columns that are not `model` or `scenario`
data_cols = set(self._LONG_IDX) - set(META_IDX)
# changing index and data columns can cause model-scenario mismatch
if any(i in mapping for i in META_IDX)\
and any(i in mapping for i in data_cols):
msg = 'Renaming index and data cols simultaneously not supported!'
raise ValueError(msg)
# translate rename mapping to `filter()` arguments
filters = {col: _from.keys() for col, _from in mapping.items()}
# if append is True, downselect and append renamed data
if append:
df = self.filter(filters)
# note that `append(other, inplace=True)` returns None
return self.append(df.rename(mapping), inplace=inplace)
# if append is False, iterate over rename mapping and do groupby
ret = copy.deepcopy(self) if not inplace else self
# renaming is only applied where a filter matches for all given columns
rows = ret._apply_filters(**filters)
idx = ret.meta.index.isin(_make_index(ret.data[rows]))
# if `check_duplicates`, do the rename on a copy until after the check
_data = ret.data.copy() if check_duplicates else ret.data
# apply renaming changes
for col, _mapping in mapping.items():
if col in META_IDX:
_index = pd.DataFrame(index=ret.meta.index).reset_index()
_index.loc[idx, col] = _index.loc[idx, col].replace(_mapping)
if _index.duplicated().any():
raise ValueError('Renaming to non-unique `{}` index!'
.format(col))
ret.meta.index = _index.set_index(META_IDX).index
elif col not in data_cols:
raise ValueError('Renaming by `{}` not supported!'.format(col))
_data.loc[rows, col] = _data.loc[rows, col].replace(_mapping)
# check if duplicates exist between the renamed and not-renamed data
if check_duplicates:
merged = (
_data.loc[rows, self._LONG_IDX].drop_duplicates().append(
_data.loc[~rows, self._LONG_IDX].drop_duplicates())
)
if any(merged.duplicated()):
msg = 'Duplicated rows between original and renamed data!\n{}'
conflict_rows = merged.loc[merged.duplicated(), self._LONG_IDX]
raise ValueError(msg.format(conflict_rows.drop_duplicates()))
# merge using `groupby().sum()`
ret.data = _data.groupby(ret._LONG_IDX).sum().reset_index()
if not inplace:
return ret | [
"Rename and aggregate column entries using `groupby.sum()` on values.\n When renaming models or scenarios, the uniqueness of the index must be\n maintained, and the function will raise an error otherwise.\n\n Renaming is only applied to any data where a filter matches for all\n columns given in `mapping`. Renaming can only be applied to the `model`\n and `scenario` columns or to other data columns simultaneously.\n\n Parameters\n ----------\n mapping: dict or kwargs\n mapping of column name to rename-dictionary of that column\n >> {<column_name>: {<current_name_1>: <target_name_1>,\n >> <current_name_2>: <target_name_2>}}\n or kwargs as `column_name={<current_name_1>: <target_name_1>, ...}`\n inplace: bool, default False\n if True, do operation inplace and return None\n append: bool, default False\n if True, append renamed timeseries to IamDataFrame\n check_duplicates: bool, default True\n check whether conflict between existing and renamed data exists.\n If True, raise ValueError; if False, rename and merge\n with `groupby().sum()`.\n "
]
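A hedged sketch of the mapping format, assuming `df` is an existing `IamDataFrame`; the variable names are illustrative.

# rename one variable; rows that collide after renaming are merged by groupby().sum()
df.rename(variable={'Primary Energy|Oil': 'Primary Energy|Fossil'}, inplace=True)
# equivalent call using the explicit `mapping` argument
df.rename(mapping={'variable': {'Primary Energy|Oil': 'Primary Energy|Fossil'}}, inplace=True)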
|
Please provide a description of the function:def convert_unit(self, conversion_mapping, inplace=False):
ret = copy.deepcopy(self) if not inplace else self
for current_unit, (new_unit, factor) in conversion_mapping.items():
factor = pd.to_numeric(factor)
where = ret.data['unit'] == current_unit
ret.data.loc[where, 'value'] *= factor
ret.data.loc[where, 'unit'] = new_unit
if not inplace:
return ret | [
"Converts units based on provided unit conversion factors\n\n Parameters\n ----------\n conversion_mapping: dict\n for each unit for which a conversion should be carried out,\n provide current unit and target unit and conversion factor\n {<current unit>: [<target unit>, <conversion factor>]}\n inplace: bool, default False\n if True, do operation inplace and return None\n "
]
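A hedged one-line sketch of the conversion mapping, assuming `df` is an existing `IamDataFrame`; the units are illustrative.

# convert all values reported in 'Mt CO2/yr' to 'Gt CO2/yr'
df.convert_unit({'Mt CO2/yr': ['Gt CO2/yr', 0.001]}, inplace=True)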
|
Please provide a description of the function:def normalize(self, inplace=False, **kwargs):
if len(kwargs) > 1 or self.time_col not in kwargs:
raise ValueError('Only time(year)-based normalization supported')
ret = copy.deepcopy(self) if not inplace else self
df = ret.data
# change all below if supporting more in the future
cols = self.time_col
value = kwargs[self.time_col]
x = df.set_index(IAMC_IDX)
x['value'] /= x[x[cols] == value]['value']
ret.data = x.reset_index()
if not inplace:
return ret | [
"Normalize data to a given value. Currently only supports normalizing\n to a specific time.\n\n Parameters\n ----------\n inplace: bool, default False\n if True, do operation inplace and return None\n kwargs: the values on which to normalize (e.g., `year=2005`)\n "
]
|
Please provide a description of the function:def aggregate(self, variable, components=None, append=False):
# default components to all variables one level below `variable`
components = components or self._variable_components(variable)
if not len(components):
msg = 'cannot aggregate variable `{}` because it has no components'
logger().info(msg.format(variable))
return
rows = self._apply_filters(variable=components)
_data = _aggregate(self.data[rows], 'variable')
if append is True:
self.append(_data, variable=variable, inplace=True)
else:
return _data | [
"Compute the aggregate of timeseries components or sub-categories\n\n Parameters\n ----------\n variable: str\n variable for which the aggregate should be computed\n components: list of str, default None\n list of variables, defaults to all sub-categories of `variable`\n append: bool, default False\n append the aggregate timeseries to `data` and return None,\n else return aggregate timeseries\n "
]
|
Please provide a description of the function:def check_aggregate(self, variable, components=None, exclude_on_fail=False,
multiplier=1, **kwargs):
# compute aggregate from components, return None if no components
df_components = self.aggregate(variable, components)
if df_components is None:
return
# filter and groupby data, use `pd.Series.align` for matching index
rows = self._apply_filters(variable=variable)
df_variable, df_components = (
_aggregate(self.data[rows], 'variable').align(df_components)
)
# use `np.isclose` for checking match
diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
**kwargs)]
if len(diff):
msg = '`{}` - {} of {} rows are not aggregates of components'
logger().info(msg.format(variable, len(diff), len(df_variable)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3, 4]))
return IamDataFrame(diff, variable=variable).timeseries() | [
"Check whether a timeseries matches the aggregation of its components\n\n Parameters\n ----------\n variable: str\n variable to be checked for matching aggregation of sub-categories\n components: list of str, default None\n list of variables, defaults to all sub-categories of `variable`\n exclude_on_fail: boolean, default False\n flag scenarios failing validation as `exclude: True`\n multiplier: number, default 1\n factor when comparing variable and sum of components\n kwargs: passed to `np.isclose()`\n "
]
|
Please provide a description of the function:def aggregate_region(self, variable, region='World', subregions=None,
components=None, append=False):
# default subregions to all regions other than `region`
if subregions is None:
rows = self._apply_filters(variable=variable)
subregions = set(self.data[rows].region) - set([region])
if not len(subregions):
msg = 'cannot aggregate variable `{}` to `{}` because it does not'\
' exist in any subregion'
logger().info(msg.format(variable, region))
return
# compute aggregate over all subregions
subregion_df = self.filter(region=subregions)
cols = ['region', 'variable']
_data = _aggregate(subregion_df.filter(variable=variable).data, cols)
# add components at the `region` level, defaults to all variables one
# level below `variable` that are only present in `region`
region_df = self.filter(region=region)
components = components or (
set(region_df._variable_components(variable)).difference(
subregion_df._variable_components(variable)))
if len(components):
rows = region_df._apply_filters(variable=components)
_data = _data.add(_aggregate(region_df.data[rows], cols),
fill_value=0)
if append is True:
self.append(_data, region=region, variable=variable, inplace=True)
else:
return _data | [
"Compute the aggregate of timeseries over a number of regions\n including variable components only defined at the `region` level\n\n Parameters\n ----------\n variable: str\n variable for which the aggregate should be computed\n region: str, default 'World'\n dimension\n subregions: list of str\n list of subregions, defaults to all regions other than `region`\n components: list of str\n list of variables, defaults to all sub-categories of `variable`\n included in `region` but not in any of `subregions`\n append: bool, default False\n append the aggregate timeseries to `data` and return None,\n else return aggregate timeseries\n "
]
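A hedged sketch, assuming `df` is an existing `IamDataFrame` with regional 'Primary Energy' data:

# sum 'Primary Energy' over all subregions and return an indexed pd.Series of aggregated values
world_total = df.aggregate_region('Primary Energy', region='World')
# or append the aggregate directly to the data
df.aggregate_region('Primary Energy', region='World', append=True)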
|
Please provide a description of the function:def check_aggregate_region(self, variable, region='World', subregions=None,
components=None, exclude_on_fail=False,
**kwargs):
# compute aggregate from subregions, return None if no subregions
df_subregions = self.aggregate_region(variable, region, subregions,
components)
if df_subregions is None:
return
# filter and groupby data, use `pd.Series.align` for matching index
rows = self._apply_filters(region=region, variable=variable)
df_region, df_subregions = (
_aggregate(self.data[rows], ['region', 'variable'])
.align(df_subregions)
)
# use `np.isclose` for checking match
diff = df_region[~np.isclose(df_region, df_subregions, **kwargs)]
if len(diff):
msg = (
'`{}` - {} of {} rows are not aggregates of subregions'
)
logger().info(msg.format(variable, len(diff), len(df_region)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
col_args = dict(region=region, variable=variable)
return IamDataFrame(diff, **col_args).timeseries() | [
"Check whether the region timeseries data match the aggregation\n of components\n\n Parameters\n ----------\n variable: str\n variable to be checked for matching aggregation of subregions\n region: str, default 'World'\n region to be checked for matching aggregation of subregions\n subregions: list of str\n list of subregions, defaults to all regions other than `region`\n components: list of str, default None\n list of variables, defaults to all sub-categories of `variable`\n included in `region` but not in any of `subregions`\n exclude_on_fail: boolean, default False\n flag scenarios failing validation as `exclude: True`\n kwargs: passed to `np.isclose()`\n "
]
|
Please provide a description of the function:def _variable_components(self, variable):
var_list = pd.Series(self.data.variable.unique())
return var_list[pattern_match(var_list, '{}|*'.format(variable), 0)] | [
"Get all components (sub-categories) of a variable\n\n For `variable='foo'`, return `['foo|bar']`, but don't include\n `'foo|bar|baz'`, which is a sub-sub-category"
]
|
Please provide a description of the function:def check_internal_consistency(self, **kwargs):
inconsistent_vars = {}
for variable in self.variables():
diff_agg = self.check_aggregate(variable, **kwargs)
if diff_agg is not None:
inconsistent_vars[variable + "-aggregate"] = diff_agg
diff_regional = self.check_aggregate_region(variable, **kwargs)
if diff_regional is not None:
inconsistent_vars[variable + "-regional"] = diff_regional
return inconsistent_vars if inconsistent_vars else None | [
"Check whether the database is internally consistent\n\n We check that all variables are equal to the sum of their sectoral\n components and that all the regions add up to the World total. If\n the check is passed, None is returned, otherwise a dictionary of\n inconsistent variables is returned.\n\n Note: at the moment, this method's regional checking is limited to\n checking that all the regions sum to the World region. We cannot\n make this more automatic unless we start to store how the regions\n relate, see\n [this issue](https://github.com/IAMconsortium/pyam/issues/106).\n\n Parameters\n ----------\n kwargs: passed to `np.isclose()`\n "
]
|
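A sketch of running the full consistency check, assuming a populated `IamDataFrame` named `df`; keyword arguments are forwarded to `np.isclose()`:

issues = df.check_internal_consistency(rtol=1e-3)
if issues is None:
    print('database is internally consistent')
else:
    # keys look like '<variable>-aggregate' or '<variable>-regional'
    for key, rows in issues.items():
        print(key, len(rows), 'inconsistent rows')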
Please provide a description of the function:def _exclude_on_fail(self, df):
idx = df if isinstance(df, pd.MultiIndex) else _meta_idx(df)
self.meta.loc[idx, 'exclude'] = True
logger().info('{} non-valid scenario{} will be excluded'
.format(len(idx), '' if len(idx) == 1 else 's')) | [
"Assign a selection of scenarios as `exclude: True` in meta"
]
|
Please provide a description of the function:def filter(self, filters=None, keep=True, inplace=False, **kwargs):
if filters is not None:
msg = '`filters` keyword argument in `filter()` is deprecated ' + \
'and will be removed in the next release'
warnings.warn(msg)
kwargs.update(filters)
_keep = self._apply_filters(**kwargs)
_keep = _keep if keep else ~_keep
ret = copy.deepcopy(self) if not inplace else self
ret.data = ret.data[_keep]
idx = _make_index(ret.data)
if len(idx) == 0:
logger().warning('Filtered IamDataFrame is empty!')
ret.meta = ret.meta.loc[idx]
if not inplace:
return ret | [
"Return a filtered IamDataFrame (i.e., a subset of current data)\n\n Parameters\n ----------\n keep: bool, default True\n keep all scenarios satisfying the filters (if True) or the inverse\n inplace: bool, default False\n if True, do operation inplace and return None\n filters by kwargs or dict (deprecated):\n The following columns are available for filtering:\n - metadata columns: filter by category assignment\n - 'model', 'scenario', 'region', 'variable', 'unit':\n string or list of strings, where `*` can be used as a wildcard\n - 'level': the maximum \"depth\" of IAM variables (number of '|')\n (excluding the strings given in the 'variable' argument)\n - 'year': takes an integer, a list of integers or a range\n note that the last year of a range is not included,\n so `range(2010, 2015)` is interpreted as `[2010, ..., 2014]`\n - arguments for filtering by `datetime.datetime`\n ('month', 'hour', 'time')\n - 'regexp=True' disables pseudo-regexp syntax in `pattern_match()`\n "
]
|
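A filtering sketch assuming `df` is an `IamDataFrame`; the model, scenario, and variable names are illustrative:

# keep one scenario family, all `Emissions|*` variables, and the years 2030-2050
sub = df.filter(scenario='SSP2*', variable='Emissions|*', year=range(2030, 2051))

# invert a filter: drop all rows reported for the `World` region
no_world = df.filter(region='World', keep=False)

# filter in place instead of returning a new IamDataFrame
df.filter(model='MESSAGE*', inplace=True)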
Please provide a description of the function:def _apply_filters(self, **filters):
regexp = filters.pop('regexp', False)
keep = np.array([True] * len(self.data))
# filter by columns and list of values
for col, values in filters.items():
# treat `_apply_filters(col=None)` as no filter applied
if values is None:
continue
if col in self.meta.columns:
matches = pattern_match(self.meta[col], values, regexp=regexp)
cat_idx = self.meta[matches].index
keep_col = (self.data[META_IDX].set_index(META_IDX)
.index.isin(cat_idx))
elif col == 'variable':
level = filters['level'] if 'level' in filters else None
keep_col = pattern_match(self.data[col], values, level, regexp)
elif col == 'year':
_data = self.data[col] if self.time_col != 'time' \
else self.data['time'].apply(lambda x: x.year)
keep_col = years_match(_data, values)
elif col == 'month' and self.time_col == 'time':
keep_col = month_match(self.data['time']
.apply(lambda x: x.month),
values)
elif col == 'day' and self.time_col == 'time':
if isinstance(values, str):
wday = True
elif isinstance(values, list) and isinstance(values[0], str):
wday = True
else:
wday = False
if wday:
days = self.data['time'].apply(lambda x: x.weekday())
else: # ints or list of ints
days = self.data['time'].apply(lambda x: x.day)
keep_col = day_match(days, values)
elif col == 'hour' and self.time_col == 'time':
keep_col = hour_match(self.data['time']
.apply(lambda x: x.hour),
values)
elif col == 'time' and self.time_col == 'time':
keep_col = datetime_match(self.data[col], values)
elif col == 'level':
if 'variable' not in filters.keys():
keep_col = find_depth(self.data['variable'], level=values)
else:
continue
elif col in self.data.columns:
keep_col = pattern_match(self.data[col], values, regexp=regexp)
else:
_raise_filter_error(col)
keep &= keep_col
return keep | [
"Determine rows to keep in data for given set of filters\n\n Parameters\n ----------\n filters: dict\n dictionary of filters ({col: values}}); uses a pseudo-regexp syntax\n by default, but accepts `regexp: True` to use regexp directly\n "
]
|
Please provide a description of the function:def col_apply(self, col, func, *args, **kwargs):
if col in self.data:
self.data[col] = self.data[col].apply(func, *args, **kwargs)
else:
self.meta[col] = self.meta[col].apply(func, *args, **kwargs) | [
"Apply a function to a column\n\n Parameters\n ----------\n col: string\n column in either data or metadata\n func: functional\n function to apply\n "
]
|
Please provide a description of the function:def _to_file_format(self, iamc_index):
df = self.timeseries(iamc_index=iamc_index).reset_index()
df = df.rename(columns={c: str(c).title() for c in df.columns})
return df | [
"Return a dataframe suitable for writing to a file"
]
|
Please provide a description of the function:def to_csv(self, path, iamc_index=False, **kwargs):
self._to_file_format(iamc_index).to_csv(path, index=False, **kwargs) | [
"Write timeseries data to a csv file\n\n Parameters\n ----------\n path: string\n file path\n iamc_index: bool, default False\n if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;\n else, use all `data` columns\n "
]
|
Please provide a description of the function:def to_excel(self, excel_writer, sheet_name='data',
iamc_index=False, **kwargs):
close = False
if not isinstance(excel_writer, pd.ExcelWriter):
close = True
excel_writer = pd.ExcelWriter(excel_writer)
self._to_file_format(iamc_index)\
.to_excel(excel_writer, sheet_name=sheet_name, index=False,
**kwargs)
if close:
excel_writer.close() | [
"Write timeseries data to Excel format\n\n Parameters\n ----------\n excel_writer: string or ExcelWriter object\n file path or existing ExcelWriter\n sheet_name: string, default 'data'\n name of sheet which will contain `IamDataFrame.timeseries()` data\n iamc_index: bool, default False\n if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;\n else, use all `data` columns\n "
]
|
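A sketch of both calling conventions, assuming `df` is an `IamDataFrame`; the file name is arbitrary:

# pass a path: the ExcelWriter is created and closed internally
df.to_excel('output.xlsx')

# pass an existing writer to collect several sheets in one workbook
with pd.ExcelWriter('output.xlsx') as writer:
    df.to_excel(writer, sheet_name='data', iamc_index=True)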
Please provide a description of the function:def export_metadata(self, path):
writer = pd.ExcelWriter(path)
write_sheet(writer, 'meta', self.meta, index=True)
writer.save() | [
"Export metadata to Excel\n\n Parameters\n ----------\n path: string\n path/filename for xlsx file of metadata export\n "
]
|
Please provide a description of the function:def load_metadata(self, path, *args, **kwargs):
if not os.path.exists(path):
raise ValueError("no metadata file '" + path + "' found!")
if path.endswith('csv'):
df = pd.read_csv(path, *args, **kwargs)
else:
xl = pd.ExcelFile(path)
if len(xl.sheet_names) > 1 and 'sheet_name' not in kwargs:
kwargs['sheet_name'] = 'meta'
df = pd.read_excel(path, *args, **kwargs)
req_cols = ['model', 'scenario', 'exclude']
if not set(req_cols).issubset(set(df.columns)):
e = 'File `{}` does not have required columns ({})!'
raise ValueError(e.format(path, req_cols))
# set index, filter to relevant scenarios from imported metadata file
df.set_index(META_IDX, inplace=True)
idx = self.meta.index.intersection(df.index)
n_invalid = len(df) - len(idx)
if n_invalid > 0:
msg = 'Ignoring {} scenario{} from imported metadata'
logger().info(msg.format(n_invalid, 's' if n_invalid > 1 else ''))
if idx.empty:
raise ValueError('No valid scenarios in imported metadata file!')
df = df.loc[idx]
# Merge in imported metadata
msg = 'Importing metadata for {} scenario{} (for total of {})'
logger().info(msg.format(len(df), 's' if len(df) > 1 else '',
len(self.meta)))
for col in df.columns:
self._new_meta_column(col)
self.meta[col] = df[col].combine_first(self.meta[col])
# set column `exclude` to bool
self.meta.exclude = self.meta.exclude.astype('bool') | [
"Load metadata exported from `pyam.IamDataFrame` instance\n\n Parameters\n ----------\n path: string\n xlsx file with metadata exported from `pyam.IamDataFrame` instance\n "
]
|
Please provide a description of the function:def line_plot(self, x='year', y='value', **kwargs):
df = self.as_pandas(with_metadata=kwargs)
# pivot data if asked for explicit variable name
variables = df['variable'].unique()
if x in variables or y in variables:
keep_vars = set([x, y]) & set(variables)
df = df[df['variable'].isin(keep_vars)]
idx = list(set(df.columns) - set(['value']))
df = (df
.reset_index()
.set_index(idx)
.value # df -> series
.unstack(level='variable') # keep_vars are columns
.rename_axis(None, axis=1) # rm column index name
.reset_index()
.set_index(META_IDX)
)
if x != 'year' and y != 'year':
df = df.drop('year', axis=1) # years causes NaNs
ax, handles, labels = plotting.line_plot(
df.dropna(), x=x, y=y, **kwargs)
return ax | [
"Plot timeseries lines of existing data\n\n see pyam.plotting.line_plot() for all available options\n "
]
|
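A plotting sketch assuming `df` is an `IamDataFrame` with the illustrative variables below; styling keywords are forwarded to `pyam.plotting.line_plot()`:

import matplotlib.pyplot as plt

ax = df.filter(variable='Emissions|CO2', region='World')\
       .line_plot(color='scenario', legend=True)
plt.show()

# passing variable names as axes pivots the data to plot one variable against another
ax = df.line_plot(x='Primary Energy', y='Emissions|CO2')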
Please provide a description of the function:def stack_plot(self, *args, **kwargs):
df = self.as_pandas(with_metadata=True)
ax = plotting.stack_plot(df, *args, **kwargs)
return ax | [
"Plot timeseries stacks of existing data\n\n see pyam.plotting.stack_plot() for all available options\n "
]
|
Please provide a description of the function:def scatter(self, x, y, **kwargs):
variables = self.data['variable'].unique()
xisvar = x in variables
yisvar = y in variables
if not xisvar and not yisvar:
cols = [x, y] + self._discover_meta_cols(**kwargs)
df = self.meta[cols].reset_index()
elif xisvar and yisvar:
# filter pivot both and rename
dfx = (
self
.filter(variable=x)
.as_pandas(with_metadata=kwargs)
.rename(columns={'value': x, 'unit': 'xunit'})
.set_index(YEAR_IDX)
.drop('variable', axis=1)
)
dfy = (
self
.filter(variable=y)
.as_pandas(with_metadata=kwargs)
.rename(columns={'value': y, 'unit': 'yunit'})
.set_index(YEAR_IDX)
.drop('variable', axis=1)
)
df = dfx.join(dfy, lsuffix='_left', rsuffix='').reset_index()
else:
# filter, merge with meta, and rename value column to match var
var = x if xisvar else y
df = (
self
.filter(variable=var)
.as_pandas(with_metadata=kwargs)
.rename(columns={'value': var})
)
ax = plotting.scatter(df.dropna(), x, y, **kwargs)
return ax | [
"Plot a scatter chart using metadata columns\n\n see pyam.plotting.scatter() for all available options\n "
]
|
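A sketch of the supported input combinations; the variable names and the meta columns are hypothetical:

# both axes are timeseries variables: the data are pivoted and joined by year
ax = df.scatter(x='Primary Energy', y='Emissions|CO2', color='scenario')

# both axes are meta columns: one point per model/scenario
ax = df.scatter(x='category', y='cumulative CO2')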
Please provide a description of the function:def map_regions(self, map_col, agg=None, copy_col=None, fname=None,
region_col=None, remove_duplicates=False, inplace=False):
models = self.meta.index.get_level_values('model').unique()
fname = fname or run_control()['region_mapping']['default']
mapping = read_pandas(fname).rename(str.lower, axis='columns')
map_col = map_col.lower()
ret = copy.deepcopy(self) if not inplace else self
_df = ret.data
columns_ordered = _df.columns
# merge data
dfs = []
for model in models:
df = _df[_df['model'] == model]
_col = region_col or '{}.REGION'.format(model)
_map = mapping.rename(columns={_col.lower(): 'region'})
_map = _map[['region', map_col]].dropna().drop_duplicates()
_map = _map[_map['region'].isin(_df['region'])]
if remove_duplicates and _map['region'].duplicated().any():
# find duplicates
where_dup = _map['region'].duplicated(keep=False)
dups = _map[where_dup]
logger().warning('Duplicate entries found for the following regions. Mapping will occur only for the most common instance. {}'.format(dups['region'].unique()))
# get non duplicates
_map = _map[~where_dup]
# order duplicates by the count frequency
dups = (dups
.groupby(['region', map_col])
.size()
.reset_index(name='count')
.sort_values(by='count', ascending=False)
.drop('count', axis=1))
# take top occurrence
dups = dups[~dups['region'].duplicated(keep='first')]
# combine them back
_map = pd.concat([_map, dups])
if copy_col is not None:
df[copy_col] = df['region']
df = (df
.merge(_map, on='region')
.drop('region', axis=1)
.rename(columns={map_col: 'region'})
)
dfs.append(df)
df = pd.concat(dfs)
# perform aggregations
if agg == 'sum':
df = df.groupby(self._LONG_IDX).sum().reset_index()
ret.data = (df
.reindex(columns=columns_ordered)
.sort_values(SORT_IDX)
.reset_index(drop=True)
)
if not inplace:
return ret | [
"Plot regional data for a single model, scenario, variable, and year\n\n see pyam.plotting.region_plot() for all available options\n\n Parameters\n ----------\n map_col: string\n The column used to map new regions to. Common examples include\n iso and 5_region.\n agg: string, optional\n Perform a data aggregation. Options include: sum.\n copy_col: string, optional\n Copy the existing region data into a new column for later use.\n fname: string, optional\n Use a non-default region mapping file\n region_col: string, optional\n Use a non-default column name for regions to map from.\n remove_duplicates: bool, optional, default: False\n If there are duplicates in the mapping from one regional level to\n another, then remove these duplicates by counting the most common\n mapped value.\n This option is most useful when mapping from high resolution\n (e.g., model regions) to low resolution (e.g., 5_region).\n inplace : bool, default False\n if True, do operation inplace and return None\n ",
"\n Duplicate entries found for the following regions.\n Mapping will occur only for the most common instance.\n {}"
]
|
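A sketch of mapping native model regions to ISO codes (`iso` and `5_region` are the examples named in the docstring above); the default mapping file is taken from the run control configuration:

# rename regions, resolving duplicate mappings by the most common instance
mapped = df.map_regions('iso', remove_duplicates=True)

# additionally sum rows that now share the same target region
mapped = df.map_regions('iso', agg='sum', remove_duplicates=True)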
Please provide a description of the function:def region_plot(self, **kwargs):
df = self.as_pandas(with_metadata=True)
ax = plotting.region_plot(df, **kwargs)
return ax | [
"Plot regional data for a single model, scenario, variable, and year\n\n see pyam.plotting.region_plot() for all available options\n "
]
|
Please provide a description of the function:def update(self, rc):
rc = self._load_yaml(rc)
self.store = _recursive_update(self.store, rc) | [
"Add additional run control parameters\n\n Parameters\n ----------\n rc : string, file, dictionary, optional\n a path to a YAML file, a file handle for a YAML file, or a\n dictionary describing run control configuration\n "
]
|
Please provide a description of the function:def recursive_update(self, k, d):
u = self.__getitem__(k)
self.store[k] = _recursive_update(u, d) | [
"Recursively update a top-level option in the run control\n\n Parameters\n ----------\n k : string\n the top-level key\n d : dictionary or similar\n the dictionary to use for updating\n "
]
|
Please provide a description of the function:def fill_series(x, year):
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
return np.nan | [
"Returns the value of a timeseries (indexed over years) for a year\n by linear interpolation.\n\n Parameters\n ----------\n x: pandas.Series\n a timeseries to be interpolated\n year: int\n year of interpolation\n "
]
|
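A worked example of the interpolation helper; the series values are arbitrary:

import pandas as pd

x = pd.Series([1.0, 3.0], index=[2010, 2020])
fill_series(x, 2015)  # -> 2.0, linear interpolation between 2010 and 2020
fill_series(x, 2010)  # -> 1.0, an exact index value is returned as-is
fill_series(x, 2030)  # -> nan, no extrapolation beyond the index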
Please provide a description of the function:def cumulative(x, first_year, last_year):
# if the timeseries does not cover the range `[first_year, last_year]`,
# return nan to avoid erroneous aggregation
if min(x.index) > first_year:
logger().warning('the timeseries `{}` does not start by {}'.format(
x.name or x, first_year))
return np.nan
if max(x.index) < last_year:
logger().warning('the timeseries `{}` does not extend until {}'
.format(x.name or x, last_year))
return np.nan
# make sure we're using integers
to_int(x, index=True)
x[first_year] = fill_series(x, first_year)
x[last_year] = fill_series(x, last_year)
years = [i for i in x.index if i >= first_year and i <= last_year
and ~np.isnan(x[i])]
years.sort()
# loop over years
if not np.isnan(x[first_year]) and not np.isnan(x[last_year]):
value = 0
for (i, yr) in enumerate(years[:-1]):
next_yr = years[i + 1]
# the summation is shifted to include the first year fully in sum,
# otherwise, would return a weighted average of `yr` and `next_yr`
value += ((next_yr - yr - 1) * x[next_yr] +
(next_yr - yr + 1) * x[yr]) / 2
# the loop above does not include the last element in range
# (`last_year`), therefore added explicitly
value += x[last_year]
return value | [
"Returns the cumulative sum of a timeseries (indexed over years),\n implements linear interpolation between years, ignores nan's in the range.\n The function includes the last-year value of the series, and\n raises a warning if start_year or last_year is outside of\n the timeseries range and returns nan\n\n Parameters\n ----------\n x: pandas.Series\n a timeseries to be summed over time\n first_year: int\n first year of the sum\n last_year: int\n last year of the sum (inclusive)\n "
]
|
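A worked example: with values 1, 2, 3 in 2010, 2015, 2020, the function sums the annually interpolated series:

import pandas as pd

x = pd.Series([1.0, 2.0, 3.0], index=[2010, 2015, 2020])
cumulative(x, first_year=2010, last_year=2020)  # -> 22.0
cumulative(x, first_year=2000, last_year=2020)  # -> nan, with a warning in the log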
Please provide a description of the function:def cross_threshold(x, threshold=0, direction=['from above', 'from below']):
prev_yr, prev_val = None, None
years = []
direction = [direction] if isstr(direction) else list(direction)
if not set(direction).issubset(set(['from above', 'from below'])):
raise ValueError('invalid direction `{}`'.format(direction))
for yr, val in zip(x.index, x.values):
if np.isnan(val): # ignore nans in the timeseries
continue
if prev_val is None:
prev_yr, prev_val = yr, val
continue
if not np.sign(prev_val - threshold) == np.sign(val - threshold):
if ('from above' in direction and prev_val > val) \
or ('from below' in direction and prev_val < val):
change = (val - prev_val) / (yr - prev_yr)
# add one because int() rounds down
cross_yr = prev_yr + int((threshold - prev_val) / change) + 1
years.append(cross_yr)
prev_yr, prev_val = yr, val
return years | [
"Returns a list of the years in which a timeseries (indexed over years)\n crosses a given threshold\n\n Parameters\n ----------\n x: pandas.Series\n a timeseries indexed over years\n threshold: float, default 0\n the threshold that the timeseries is checked against\n direction: str, optional, default `['from above', 'from below']`\n whether to return all years where the threshold is crossed\n or only where threshold is crossed in a specific direction\n "
]
|
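A worked example; note that the crossing year is rounded up to the first full year past the threshold:

import pandas as pd

x = pd.Series([1.0, -1.0, 1.0], index=[2010, 2020, 2030])
cross_threshold(x)                          # -> [2016, 2026]
cross_threshold(x, direction='from above')  # -> [2016]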
Please provide a description of the function:def read_iiasa(name, meta=False, **kwargs):
conn = Connection(name)
# data
df = conn.query(**kwargs)
df = IamDataFrame(df)
# metadata
if meta:
mdf = conn.metadata()
# only data for models/scenarios in df
mdf = mdf[mdf.model.isin(df['model'].unique()) &
mdf.scenario.isin(df['scenario'].unique())]
# get subset of data if meta is a list
if islistable(meta):
mdf = mdf[['model', 'scenario'] + meta]
mdf = mdf.set_index(['model', 'scenario'])
# we have to loop here because `set_meta()` can only take series
for col in mdf:
df.set_meta(mdf[col])
return df | [
"\n Query an IIASA database. See Connection.query() for more documentation\n\n Parameters\n ----------\n name : str\n A valid IIASA database name, see pyam.iiasa.valid_connection_names()\n meta : bool or list of strings\n If not False, also include metadata indicators (or subset if provided).\n kwargs :\n Arguments for pyam.iiasa.Connection.query()\n "
]
|
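A connection sketch; the database name and filter arguments are illustrative and are passed through to `Connection.query()`:

df = read_iiasa('iamc15', model='MESSAGE*', variable='Emissions|CO2',
                region='World', meta=['category'])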
Please provide a description of the function:def scenario_list(self, default=True):
default = 'true' if default else 'false'
add_url = 'runs?getOnlyDefaultRuns={}'
url = self.base_url + add_url.format(default)
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
return pd.read_json(r.content, orient='records') | [
"\n Metadata regarding the list of scenarios (e.g., models, scenarios,\n run identifier, etc.) in the connected data source.\n\n Parameter\n ---------\n default : bool, optional, default True\n Return *only* the default version of each Scenario.\n Any (`model`, `scenario`) without a default version is omitted.\n If :obj:`False`, return all versions.\n "
]
|
Please provide a description of the function:def available_metadata(self):
url = self.base_url + 'metadata/types'
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
return pd.read_json(r.content, orient='records')['name'] | [
"\n List all scenario metadata indicators available in the connected\n data source\n "
]
|
Please provide a description of the function:def metadata(self, default=True):
# at present this reads in all data for all scenarios, it could be sped
# up in the future to try to query a subset
default = 'true' if default else 'false'
add_url = 'runs?getOnlyDefaultRuns={}&includeMetadata=true'
url = self.base_url + add_url.format(default)
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
df = pd.read_json(r.content, orient='records')
def extract(row):
return (
pd.concat([row[['model', 'scenario']],
pd.Series(row.metadata)])
.to_frame()
.T
.set_index(['model', 'scenario'])
)
return pd.concat([extract(row) for idx, row in df.iterrows()],
sort=False).reset_index() | [
"\n Metadata of scenarios in the connected data source\n\n Parameter\n ---------\n default : bool, optional, default True\n Return *only* the default version of each Scenario.\n Any (`model`, `scenario`) without a default version is omitted.\n If :obj:`False`, return all versions.\n "
]
|
Please provide a description of the function:def variables(self):
url = self.base_url + 'ts'
headers = {'Authorization': 'Bearer {}'.format(self.auth())}
r = requests.get(url, headers=headers)
df = pd.read_json(r.content, orient='records')
return pd.Series(df['variable'].unique(), name='variable') | [
"All variables in the connected data source"
]
|
Please provide a description of the function:def format_rows(row, center, fullrange=None, interquartile=None,
custom_format='{:.2f}'):
if (fullrange or 0) + (interquartile or 0) == 1:
legend = '{} ({})'.format(center, 'max, min' if fullrange is True
else 'interquartile range')
index = row.index.droplevel(2).drop_duplicates()
count_arg = dict(tuples=[('count', '')], names=[None, legend])
else:
msg = 'displaying multiple range formats simultaneously not supported'
raise NotImplementedError(msg)
ret = pd.Series(index=pd.MultiIndex.from_tuples(**count_arg).append(index))
row = row.sort_index()
center = '50%' if center == 'median' else center
# get maximum of `count` and write to first entry of return series
count = max([i for i in row.loc[(slice(None), slice(None), 'count')]
if not np.isnan(i)])
ret.loc[('count', '')] = ('{:.0f}'.format(count)) if count > 1 else ''
# set upper and lower for the range
upper, lower = ('max', 'min') if fullrange is True else ('75%', '25%')
# format `describe()` columns to string output
for i in index:
x = row.loc[i]
_count = x['count']
if np.isnan(_count) or _count == 0:
s = ''
elif _count > 1:
s = '{f} ({f}, {f})'.format(f=custom_format)\
.format(x[center], x[upper], x[lower])
elif _count == 1:
s = '{f}'.format(f=custom_format).format(x['50%'])
# add count of this section as `[]` if different from count_max
if 0 < _count < count:
s += ' [{:.0f}]'.format(_count)
ret.loc[i] = s
return ret | [
"Format a row with `describe()` columns to a concise string"
]
|
Please provide a description of the function:def add(self, data, header, row=None, subheader=None):
# verify validity of specifications
if self.rows is not None and row is None:
raise ValueError('row specification required')
if self.rows is None and row is not None:
raise ValueError('row arg illegal for this `Statistics` instance')
if isinstance(data, pd.Series):
if subheader is not None:
data.name = subheader
elif data.name is None:
msg = '`data` must be named `pd.Series` or provide `subheader`'
raise ValueError(msg)
data = pd.DataFrame(data)
if self.rows is not None and row not in self.rows:
self.rows.append(row)
_stats = None
# describe with groupby feature
if self.groupby is not None:
filter_args = dict(data=data, df=self.df, join_meta=True)
filter_args.update(self.groupby)
_stats = (
filter_by_meta(**filter_args).groupby(self.col)
.describe(percentiles=self.percentiles)
)
_stats = pd.concat([_stats], keys=[self.col], names=[''], axis=0)
if self.rows:
_stats['row'] = row
_stats.set_index('row', append=True, inplace=True)
_stats.index.names = [''] * 3 if self.rows else [''] * 2
# describe with filter feature
for (idx, _filter) in self.filters:
filter_args = dict(data=data, df=self.df)
filter_args.update(_filter)
_stats_f = (
filter_by_meta(**filter_args)
.describe(percentiles=self.percentiles)
)
_stats_f = pd.DataFrame(_stats_f.unstack()).T
if self.idx_depth == 1:
levels = [[idx]]
else:
levels = [[idx[0]], [idx[1]]]
lvls, lbls = (levels, [[0]] * self.idx_depth) if not self.rows \
else (levels + [[row]], [[0]] * (self.idx_depth + 1))
_stats_f.index = pd.MultiIndex(levels=lvls, labels=lbls)
_stats = _stats_f if _stats is None else _stats.append(_stats_f)
# add header
_stats = pd.concat([_stats], keys=[header], names=[''], axis=1)
subheader = _stats.columns.get_level_values(1).unique()
self._add_to_header(header, subheader)
# set statistics
if self.stats is None:
self.stats = _stats
else:
self.stats = _stats.combine_first(self.stats) | [
"Filter `data` by arguments of this SummaryStats instance,\n then apply `pd.describe()` and format the statistics\n\n Parameters\n ----------\n data : pd.DataFrame or pd.Series\n data for which summary statistics should be computed\n header : str\n column name for descriptive statistics\n row : str\n row name for descriptive statistics\n (required if `pyam.Statistics(rows=True)`)\n subheader : str, optional\n column name (level=1) if data is a unnamed `pd.Series`\n "
]
|
Please provide a description of the function:def reindex(self, copy=True):
ret = deepcopy(self) if copy else self
ret.stats = ret.stats.reindex(index=ret._idx, level=0)
if ret.idx_depth == 2:
ret.stats = ret.stats.reindex(index=ret._sub_idx, level=1)
if ret.rows is not None:
ret.stats = ret.stats.reindex(index=ret.rows, level=ret.idx_depth)
ret.stats = ret.stats.reindex(columns=ret._headers, level=0)
ret.stats = ret.stats.reindex(columns=ret._subheaders, level=1)
ret.stats = ret.stats.reindex(columns=ret._describe_cols, level=2)
if copy:
return ret | [
"Reindex the summary statistics dataframe"
]
|
Please provide a description of the function:def summarize(self, center='mean', fullrange=None, interquartile=None,
custom_format='{:.2f}'):
# call `reindex()` to reorder index and columns
self.reindex(copy=False)
center = 'median' if center == '50%' else center
if fullrange is None and interquartile is None:
fullrange = True
return self.stats.apply(format_rows, center=center,
fullrange=fullrange,
interquartile=interquartile,
custom_format=custom_format,
axis=1, raw=False) | [
"Format the compiled statistics to a concise string output\n\n Parameter\n ---------\n center : str, default `mean`\n what to return as 'center' of the summary: `mean`, `50%`, `median`\n fullrange : bool, default None\n return full range of data if True or `fullrange`, `interquartile`\n and `format_spec` are None\n interquartile : bool, default None\n return interquartile range if True\n custom_format : formatting specifications\n "
]
|
Please provide a description of the function:def reset_default_props(**kwargs):
global _DEFAULT_PROPS
pcycle = plt.rcParams['axes.prop_cycle']
_DEFAULT_PROPS = {
'color': itertools.cycle(_get_standard_colors(**kwargs))
if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]),
'marker': itertools.cycle(['o', 'x', '.', '+', '*']),
'linestyle': itertools.cycle(['-', '--', '-.', ':']),
} | [
"Reset properties to initial cycle point"
]
|
Please provide a description of the function:def default_props(reset=False, **kwargs):
global _DEFAULT_PROPS
if _DEFAULT_PROPS is None or reset:
reset_default_props(**kwargs)
return _DEFAULT_PROPS | [
"Return current default properties\n\n Parameters\n ----------\n reset : bool\n if True, reset properties and return\n default: False\n "
]
|
Please provide a description of the function:def assign_style_props(df, color=None, marker=None, linestyle=None,
cmap=None):
if color is None and cmap is not None:
raise ValueError('`cmap` must be provided with the `color` argument')
# determine color, marker, and linestyle for each line
n = len(df[color].unique()) if color in df.columns else \
len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())
defaults = default_props(reset=True, num_colors=n, colormap=cmap)
props = {}
rc = run_control()
kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]
for kind, var in kinds:
rc_has_kind = kind in rc
if var in df.columns:
rc_has_var = rc_has_kind and var in rc[kind]
props_for_kind = {}
for val in df[var].unique():
if rc_has_var and val in rc[kind][var]:
props_for_kind[val] = rc[kind][var][val]
# cycle any way to keep defaults the same
next(defaults[kind])
else:
props_for_kind[val] = next(defaults[kind])
props[kind] = props_for_kind
# update for special properties only if they exist in props
if 'color' in props:
d = props['color']
values = list(d.values())
# find if any colors in our properties corresponds with special colors
# we know about
overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))
if overlap_idx.any(): # some exist in our special set
keys = np.array(list(d.keys()))[overlap_idx]
values = np.array(values)[overlap_idx]
# translate each from pyam name, like AR6-SSP2-45 to proper color
# designation
for k, v in zip(keys, values):
d[k] = PYAM_COLORS[v]
# replace props with updated dict without special colors
props['color'] = d
return props | [
"Assign the style properties for a plot\n\n Parameters\n ----------\n df : pd.DataFrame\n data to be used for style properties\n "
]
|
Please provide a description of the function:def reshape_line_plot(df, x, y):
idx = list(df.columns.drop(y))
if df.duplicated(idx).any():
warnings.warn('Duplicated index found.')
df = df.drop_duplicates(idx, keep='last')
df = df.set_index(idx)[y].unstack(x).T
return df | [
"Reshape data from long form to \"line plot form\".\n\n Line plot form has x value as the index with one column for each line.\n Each column has data points as values and all metadata as column headers.\n "
]
|
Please provide a description of the function:def reshape_bar_plot(df, x, y, bars):
idx = [bars, x]
if df.duplicated(idx).any():
warnings.warn('Duplicated index found.')
df = df.drop_duplicates(idx, keep='last')
df = df.set_index(idx)[y].unstack(x).T
return df | [
"Reshape data from long form to \"bar plot form\".\n\n Bar plot form has x value as the index with one column for bar grouping.\n Table values come from y values.\n "
]
|
Please provide a description of the function:def read_shapefile(fname, region_col=None, **kwargs):
gdf = gpd.read_file(fname, **kwargs)
if region_col is not None:
gdf = gdf.rename(columns={region_col: 'region'})
if 'region' not in gdf.columns:
raise IOError('Must provide a region column')
gdf['region'] = gdf['region'].str.upper()
return gdf | [
"Read a shapefile for use in regional plots. Shapefiles must have a\n column denoted as \"region\".\n\n Parameters\n ----------\n fname : string\n path to shapefile to be read by geopandas\n region_col : string, default None\n if provided, rename a column in the shapefile to \"region\"\n "
]
|
Please provide a description of the function:def region_plot(df, column='value', ax=None, crs=None, gdf=None,
add_features=True, vmin=None, vmax=None, cmap=None,
cbar=True, legend=False, title=True):
for col in ['model', 'scenario', 'year', 'variable']:
if len(df[col].unique()) > 1:
msg = 'Can not plot multiple {}s in region_plot'
raise ValueError(msg.format(col))
crs = crs or cartopy.crs.PlateCarree()
if ax is None:
fig, ax = plt.subplots(subplot_kw=dict(projection=crs))
elif not isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot):
msg = 'Must provide a cartopy axes object, not: {}'
raise ValueError(msg.format(type(ax)))
gdf = gdf if gdf is not None else read_shapefile(
    gpd.datasets.get_path('naturalearth_lowres'), region_col='iso_a3')
data = gdf.merge(df, on='region', how='inner').to_crs(crs.proj4_init)
if data.empty: # help users with iso codes
df['region'] = df['region'].str.upper()
data = gdf.merge(df, on='region', how='inner').to_crs(crs.proj4_init)
if data.empty:
raise ValueError('No data to plot')
if add_features:
ax.add_feature(cartopy.feature.LAND)
ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.COASTLINE)
ax.add_feature(cartopy.feature.BORDERS)
vmin = vmin if vmin is not None else data['value'].min()
vmax = vmax if vmax is not None else data['value'].max()
norm = colors.Normalize(vmin=vmin, vmax=vmax)
cmap = plt.get_cmap(cmap)
scalar_map = cmx.ScalarMappable(norm=norm, cmap=cmap)
labels = []
handles = []
for _, row in data.iterrows():
label = row['label'] if 'label' in row else row['region']
color = scalar_map.to_rgba(row['value'])
ax.add_geometries(
[row['geometry']],
crs,
facecolor=color,
label=label,
)
if label not in labels:
labels.append(label)
handle = mpatches.Rectangle((0, 0), 5, 5, facecolor=color)
handles.append(handle)
if cbar:
scalar_map._A = [] # for some reason you have to clear this
if cbar is True: # use some defaults
cbar = dict(
fraction=0.022, # these are magic numbers
pad=0.02, # that just seem to "work"
)
plt.colorbar(scalar_map, ax=ax, **cbar)
if legend is not False:
if legend is True: # use some defaults
legend = dict(
bbox_to_anchor=(1.32, 0.5) if cbar else (1.2, 0.5),
loc='right',
)
_add_legend(ax, handles, labels, legend)
if title:
var = df['variable'].unique()[0]
unit = df['unit'].unique()[0]
year = df['year'].unique()[0]
default_title = '{} ({}) in {}'.format(var, unit, year)
title = default_title if title is True else title
ax.set_title(title)
return ax | [
"Plot data on a map.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n column : string, optional, default: 'value'\n The column to use for plotting values\n ax : matplotlib.Axes, optional\n crs : cartopy.crs, optional\n The crs to plot, PlateCarree is used by default.\n gdf : geopandas.GeoDataFrame, optional\n The geometries to plot. The gdf must have a \"region\" column.\n add_features : bool, optional, default: True\n If true, add land, ocean, coastline, and border features.\n vmin : numeric, optional\n The minimum value to plot.\n vmax : numeric, optional\n The maximum value to plot.\n cmap : string, optional\n The colormap to use.\n cbar : bool or dictionary, optional, default: True\n Add a colorbar. If a dictionary is provided, it will be used as keyword\n arguments in creating the colorbar.\n legend : bool or dictionary, optional, default: False\n Add a legend. If a dictionary is provided, it will be used as keyword\n arguments in creating the legend.\n title : bool or string, optional\n Display a default or custom title.\n "
]
|
Please provide a description of the function:def pie_plot(df, value='value', category='variable',
ax=None, legend=False, title=True, cmap=None,
**kwargs):
for col in set(SORT_IDX) - set([category]):
if len(df[col].unique()) > 1:
msg = 'Can not plot multiple {}s in pie_plot with value={},' +\
' category={}'
raise ValueError(msg.format(col, value, category))
if ax is None:
fig, ax = plt.subplots()
# get data, set negative values to explode
_df = df.groupby(category)[value].sum()
where = _df > 0
explode = tuple(0 if _ else 0.2 for _ in where)
_df = _df.abs()
# explicitly get colors
defaults = default_props(reset=True, num_colors=len(_df.index),
colormap=cmap)['color']
rc = run_control()
color = []
for key, c in zip(_df.index, defaults):
if 'color' in rc and \
category in rc['color'] and \
key in rc['color'][category]:
c = rc['color'][category][key]
color.append(c)
# plot data
_df.plot(kind='pie', colors=color, ax=ax, explode=explode, **kwargs)
# add legend
ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), labels=_df.index)
if not legend:
ax.legend_.remove()
# remove label
ax.set_ylabel('')
return ax | [
"Plot data as a bar chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n value : string, optional\n The column to use for data values\n default: value\n category : string, optional\n The column to use for labels\n default: variable\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n "
]
|
Please provide a description of the function:def stack_plot(df, x='year', y='value', stack='variable',
ax=None, legend=True, title=True, cmap=None, total=None,
**kwargs):
for col in set(SORT_IDX) - set([x, stack]):
if len(df[col].unique()) > 1:
msg = 'Can not plot multiple {}s in stack_plot with x={}, stack={}'
raise ValueError(msg.format(col, x, stack))
if ax is None:
fig, ax = plt.subplots()
# long form to one column per bar group
_df = reshape_bar_plot(df, x, y, stack)
# Line below is for interpolation. On datetimes I think you'd downcast to
# seconds first and then cast back to datetime at the end..?
_df.index = _df.index.astype(float)
time_original = _df.index.values
first_zero_times = pd.DataFrame(index=["first_zero_time"])
both_positive_and_negative = _df.apply(
lambda x: (x >= 0).any() and (x < 0).any()
)
for col in _df.loc[:, both_positive_and_negative]:
values = _df[col].dropna().values
positive = (values >= 0)
negative = (values < 0)
pos_to_neg = positive[:-1] & negative[1:]
neg_to_pos = positive[1:] & negative[:-1]
crosses = np.argwhere(pos_to_neg | neg_to_pos)
for i, cross in enumerate(crosses):
cross = cross[0] # get location
x_1 = time_original[cross]
x_2 = time_original[cross + 1]
y_1 = values[cross]
y_2 = values[cross + 1]
zero_time = x_1 - y_1 * (x_2 - x_1) / (y_2 - y_1)
if i == 0:
first_zero_times.loc[:, col] = zero_time
if zero_time not in _df.index.values:
_df.loc[zero_time, :] = np.nan
first_zero_times = first_zero_times.sort_values(
by="first_zero_time",
axis=1,
)
_df = _df.reindex(sorted(_df.index)).interpolate(method="values")
# Sort lines so that negative timeseries are on the right, positive
# timeseries are on the left and timeseries which go from positive to
# negative are ordered such that the timeseries which goes negative first
# is on the right (case of timeseries which go from negative to positive
# is an edge case we haven't thought about as it's unlikely to apply to
# us).
pos_cols = [c for c in _df if (_df[c] > 0).all()]
cross_cols = first_zero_times.columns[::-1].tolist()
neg_cols = [c for c in _df if (_df[c] < 0).all()]
col_order = pos_cols + cross_cols + neg_cols
_df = _df[col_order]
# explicitly get colors
defaults = default_props(reset=True, num_colors=len(_df.columns),
colormap=cmap)['color']
rc = run_control()
colors = {}
for key in _df.columns:
c = next(defaults)
c_in_rc = 'color' in rc
if c_in_rc and stack in rc['color'] and key in rc['color'][stack]:
c = rc['color'][stack][key]
colors[key] = c
# plot stacks, starting from the top and working our way down to the bottom
negative_only_cumulative = _df.applymap(
lambda x: x if x < 0 else 0
).cumsum(axis=1)
positive_only_cumulative = _df.applymap(lambda x: x if x >= 0 else 0)[
col_order[::-1]
].cumsum(axis=1)[
col_order
]
time = _df.index.values
upper = positive_only_cumulative.iloc[:, 0].values
for j, col in enumerate(_df):
noc_tr = negative_only_cumulative.iloc[:, j].values
try:
poc_nr = positive_only_cumulative.iloc[:, j + 1].values
except IndexError:
poc_nr = np.zeros_like(upper)
lower = poc_nr.copy()
if (noc_tr < 0).any():
lower[np.where(poc_nr == 0)] = noc_tr[np.where(poc_nr == 0)]
ax.fill_between(time, lower, upper, label=col,
color=colors[col], **kwargs)
upper = lower.copy()
# add total
if (total is not None) and total: # cover case where total=False
if isinstance(total, bool): # can now assume total=True
total = {}
total.setdefault("label", "Total")
total.setdefault("color", "black")
total.setdefault("lw", 4.0)
ax.plot(time, _df.sum(axis=1), **total)
# add legend
ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
if not legend:
ax.legend_.remove()
# add default labels if possible
ax.set_xlabel(x.capitalize())
units = df['unit'].unique()
if len(units) == 1:
ax.set_ylabel(units[0])
# build a default title if possible
_title = []
for var in ['model', 'scenario', 'region', 'variable']:
values = df[var].unique()
if len(values) == 1:
_title.append('{}: {}'.format(var, values[0]))
if title and _title:
title = ' '.join(_title) if title is True else title
ax.set_title(title)
return ax | [
"Plot data as a stack chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : string, optional\n The column to use for x-axis values\n default: year\n y : string, optional\n The column to use for y-axis values\n default: value\n stack: string, optional\n The column to use for stack groupings\n default: variable\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n total : bool or dict, optional\n If True, plot a total line with default pyam settings. If a dict, then\n plot the total line using the dict key-value pairs as keyword arguments\n to ax.plot(). If None, do not plot the total line.\n default : None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n "
]
|
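A usage sketch via the `IamDataFrame.stack_plot()` wrapper defined earlier, which forwards to this function; the model, scenario, and variable names are illustrative:

import matplotlib.pyplot as plt

ax = df.filter(model='model_a', scenario='scen_a', region='World',
               variable='Emissions|CO2|*')\
       .stack_plot(total=True)  # adds a black total line with default settings
plt.show()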
Please provide a description of the function:def bar_plot(df, x='year', y='value', bars='variable',
ax=None, orient='v', legend=True, title=True, cmap=None,
**kwargs):
for col in set(SORT_IDX) - set([x, bars]):
if len(df[col].unique()) > 1:
msg = 'Can not plot multiple {}s in bar_plot with x={}, bars={}'
raise ValueError(msg.format(col, x, bars))
if ax is None:
fig, ax = plt.subplots()
# long form to one column per bar group
_df = reshape_bar_plot(df, x, y, bars)
# explicitly get colors
defaults = default_props(reset=True, num_colors=len(_df.columns),
colormap=cmap)['color']
rc = run_control()
color = []
for key in _df.columns:
c = next(defaults)
if 'color' in rc and bars in rc['color'] and key in rc['color'][bars]:
c = rc['color'][bars][key]
color.append(c)
# plot data
kind = 'bar' if orient.startswith('v') else 'barh'
_df.plot(kind=kind, color=color, ax=ax, **kwargs)
# add legend
ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
if not legend:
ax.legend_.remove()
# add default labels if possible
if orient == 'v':
ax.set_xlabel(x.capitalize())
else:
ax.set_ylabel(x.capitalize())
units = df['unit'].unique()
if len(units) == 1 and y == 'value':
if orient == 'v':
ax.set_ylabel(units[0])
else:
ax.set_xlabel(units[0])
# build a default title if possible
_title = []
for var in ['model', 'scenario', 'region', 'variable']:
values = df[var].unique()
if len(values) == 1:
_title.append('{}: {}'.format(var, values[0]))
if title and _title:
title = ' '.join(_title) if title is True else title
ax.set_title(title)
return ax | [
"Plot data as a bar chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : string, optional\n The column to use for x-axis values\n default: year\n y : string, optional\n The column to use for y-axis values\n default: value\n bars: string, optional\n The column to use for bar groupings\n default: variable\n ax : matplotlib.Axes, optional\n orient : string, optional\n Vertical or horizontal orientation.\n default: variable\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n "
]
|
Please provide a description of the function:def add_net_values_to_bar_plot(axs, color='k'):
axs = axs if isinstance(axs, Iterable) else [axs]
for ax in axs:
box_args = _get_boxes(ax)
for x, args in box_args.items():
rect = mpatches.Rectangle(*args, color=color)
ax.add_patch(rect) | [
"Add net values next to an existing vertical stacked bar chart\n\n Parameters\n ----------\n axs : matplotlib.Axes or list thereof\n color : str, optional, default: black\n the color of the bars to add\n "
]
|
Please provide a description of the function:def scatter(df, x, y, ax=None, legend=None, title=None,
color=None, marker='o', linestyle=None, cmap=None,
groupby=['model', 'scenario'], with_lines=False, **kwargs):
if ax is None:
fig, ax = plt.subplots()
# assign styling properties
props = assign_style_props(df, color=color, marker=marker,
linestyle=linestyle, cmap=cmap)
# group data
groups = df.groupby(groupby)
# loop over grouped dataframe, plot data
legend_data = []
for name, group in groups:
pargs = {}
labels = []
for key, kind, var in [('c', 'color', color),
('marker', 'marker', marker),
('linestyle', 'linestyle', linestyle)]:
if kind in props:
label = group[var].values[0]
pargs[key] = props[kind][group[var].values[0]]
labels.append(repr(label).lstrip("u'").strip("'"))
else:
pargs[key] = var
if len(labels) > 0:
legend_data.append(' '.join(labels))
else:
legend_data.append(' '.join(name))
kwargs.update(pargs)
if with_lines:
ax.plot(group[x], group[y], **kwargs)
else:
kwargs.pop('linestyle') # scatter() can't take a linestyle
ax.scatter(group[x], group[y], **kwargs)
# build legend handles and labels
handles, labels = ax.get_legend_handles_labels()
if legend_data != [''] * len(legend_data):
labels = sorted(list(set(tuple(legend_data))))
idxs = [legend_data.index(d) for d in labels]
handles = [handles[i] for i in idxs]
if legend is not False and (legend is not None or len(labels) < 13):
_add_legend(ax, handles, labels, legend)
# add labels and title
ax.set_xlabel(x)
ax.set_ylabel(y)
if title:
ax.set_title(title)
return ax | [
"Plot data as a scatter chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : str\n column to be plotted on the x-axis\n y : str\n column to be plotted on the y-axis\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend (`None` displays legend only if less than 13 entries)\n default: None\n title : bool or string, optional\n Display a custom title.\n color : string, optional\n A valid matplotlib color or column name. If a column name, common\n values will be provided the same color.\n default: None\n marker : string\n A valid matplotlib marker or column name. If a column name, common\n values will be provided the same marker.\n default: 'o'\n linestyle : string, optional\n A valid matplotlib linestyle or column name. If a column name, common\n values will be provided the same linestyle.\n default: None\n cmap : string, optional\n A colormap to use.\n default: None\n groupby : list-like, optional\n Data grouping for plotting.\n default: ['model', 'scenario']\n with_lines : bool, optional\n Make the scatter plot with lines connecting common data.\n default: False\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n "
]
|