code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def _get_label(self, axis):
"""Return plot label for column for the given axis."""
if axis == 'x':
colname = self.x_col
else: # y
colname = self.y_col
if colname == self._idxname:
label = 'Index'
else:
col = self.tab[colname]
if col.unit:
label = '{0} ({1})'.format(col.name, col.unit)
else:
label = col.name
return label | Return plot label for column for the given axis. | Below is the instruction that describes the task:
### Input:
Return plot label for column for the given axis.
### Response:
def _get_label(self, axis):
"""Return plot label for column for the given axis."""
if axis == 'x':
colname = self.x_col
else: # y
colname = self.y_col
if colname == self._idxname:
label = 'Index'
else:
col = self.tab[colname]
if col.unit:
label = '{0} ({1})'.format(col.name, col.unit)
else:
label = col.name
return label |
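The unit-aware labeling above boils down to one formatting rule; a minimal standalone sketch (the column name and unit are invented values, not from the original table):

def format_label(name, unit=None):
    # Mirrors the '{0} ({1})' convention used by _get_label above.
    return '{0} ({1})'.format(name, unit) if unit else name

print(format_label('flux', 'Jy'))  # -> 'flux (Jy)'
print(format_label('index'))       # -> 'index'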
def pick_and_display_buffer(self, i):
"""
pick i-th buffer from list and display it
:param i: int
:return: None
"""
if len(self.buffers) == 1:
# we don't need to display anything
# listing is already displayed
return
else:
try:
self.display_buffer(self.buffers[i])
except IndexError:
# i >= len(self.buffers)
self.display_buffer(self.buffers[0]) | pick i-th buffer from list and display it
:param i: int
:return: None | Below is the instruction that describes the task:
### Input:
pick i-th buffer from list and display it
:param i: int
:return: None
### Response:
def pick_and_display_buffer(self, i):
"""
pick i-th buffer from list and display it
:param i: int
:return: None
"""
if len(self.buffers) == 1:
# we don't need to display anything
# listing is already displayed
return
else:
try:
self.display_buffer(self.buffers[i])
except IndexError:
# i >= len(self.buffers)
self.display_buffer(self.buffers[0]) |
def varimp_plot(self, num_of_features=None, server=False):
"""
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if True, set up matplotlib for server-side rendering and do not display the plot.
:returns: None.
"""
assert_is_type(num_of_features, None, int)
assert_is_type(server, bool)
plt = _get_matplotlib_pyplot(server)
if not plt: return
# get the variable importances as a list of tuples, do not use pandas dataframe
importances = self.varimp(use_pandas=False)
# feature labels correspond to the first value of each tuple in the importances list
feature_labels = [tup[0] for tup in importances]
# relative (scaled) importances correspond to the third value of each tuple in the importances list
scaled_importances = [tup[2] for tup in importances]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = scaled_importances
# # check that num_of_features is an integer
# if num_of_features is None:
# num_of_features = len(val)
# default to 10 or less features if num_of_features is not specified
if num_of_features is None:
num_of_features = min(len(val), 10)
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features == 1
if num_of_features == 1:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])
# ax.margins(y=0.5)
# check which algorithm was used to select right plot title
if self._model_json["algo"] == "gbm":
plt.title("Variable Importance: H2O GBM", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "drf":
plt.title("Variable Importance: H2O DRF", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "xgboost":
plt.title("Variable Importance: H2O XGBoost", fontsize=20)
if not server: plt.show()
# if H2ODeepLearningEstimator has variable_importances == True
elif self._model_json["algo"] == "deeplearning":
plt.title("Variable Importance: H2O Deep Learning", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "glm":
plt.title("Variable Importance: H2O GLM", fontsize=20)
if not server: plt.show()
else:
raise H2OValueError("A variable importances plot is not implemented for this type of model") | Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if True, set up matplotlib for server-side rendering and do not display the plot.
:returns: None. | Below is the instruction that describes the task:
### Input:
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if True, set up matplotlib for server-side rendering and do not display the plot.
:returns: None.
### Response:
def varimp_plot(self, num_of_features=None, server=False):
"""
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if True, set up matplotlib for server-side rendering and do not display the plot.
:returns: None.
"""
assert_is_type(num_of_features, None, int)
assert_is_type(server, bool)
plt = _get_matplotlib_pyplot(server)
if not plt: return
# get the variable importances as a list of tuples, do not use pandas dataframe
importances = self.varimp(use_pandas=False)
# feature labels correspond to the first value of each tuple in the importances list
feature_labels = [tup[0] for tup in importances]
# relative (scaled) importances correspond to the third value of each tuple in the importances list
scaled_importances = [tup[2] for tup in importances]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = scaled_importances
# # check that num_of_features is an integer
# if num_of_features is None:
# num_of_features = len(val)
# default to 10 or less features if num_of_features is not specified
if num_of_features is None:
num_of_features = min(len(val), 10)
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features == 1
if num_of_features == 1:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])
# ax.margins(y=0.5)
# check which algorithm was used to select right plot title
if self._model_json["algo"] == "gbm":
plt.title("Variable Importance: H2O GBM", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "drf":
plt.title("Variable Importance: H2O DRF", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "xgboost":
plt.title("Variable Importance: H2O XGBoost", fontsize=20)
if not server: plt.show()
# if H2ODeepLearningEstimator has variable_importances == True
elif self._model_json["algo"] == "deeplearning":
plt.title("Variable Importance: H2O Deep Learning", fontsize=20)
if not server: plt.show()
elif self._model_json["algo"] == "glm":
plt.title("Variable Importance: H2O GLM", fontsize=20)
if not server: plt.show()
else:
raise H2OValueError("A variable importances plot is not implemented for this type of model") |
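A hedged usage sketch for varimp_plot (assumes a live H2O cluster; the file path, column choices, and estimator settings are illustrative, not from the original):

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
frame = h2o.import_file('train.csv')  # placeholder path
model = H2OGradientBoostingEstimator(ntrees=50)
model.train(x=frame.columns[:-1], y=frame.columns[-1], training_frame=frame)
# Plot the ten most important features; server=True would skip plt.show().
model.varimp_plot(num_of_features=10)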
def find_birthdays(request):
"""Return information on user birthdays."""
today = date.today()
custom = False
yr_inc = 0
if "birthday_month" in request.GET and "birthday_day" in request.GET:
try:
mon = int(request.GET["birthday_month"])
day = int(request.GET["birthday_day"])
yr = today.year
""" If searching a date that already happened this year, skip to the next year. """
if mon < today.month or (mon == today.month and day < today.day):
yr += 1
yr_inc = 1
real_today = today
today = date(yr, mon, day)
if today:
custom = True
else:
today = real_today
except ValueError:
pass
key = "birthdays:{}".format(today)
cached = cache.get(key)
if cached:
logger.debug("Birthdays on {} loaded " "from cache.".format(today))
logger.debug(cached)
return cached
else:
logger.debug("Loading and caching birthday info for {}".format(today))
tomorrow = today + timedelta(days=1)
try:
data = {
"custom": custom,
"today": {
"date": today,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age + yr_inc) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} if u else {} for u in User.objects.users_with_birthday(today.month, today.day)],
"inc": 0,
},
"tomorrow": {
"date": tomorrow,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age - 1) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} for u in User.objects.users_with_birthday(tomorrow.month, tomorrow.day)],
"inc": 1,
},
} # yapf: disable
except AttributeError:
return None
else:
cache.set(key, data, timeout=60 * 60 * 6)
return data | Return information on user birthdays. | Below is the instruction that describes the task:
### Input:
Return information on user birthdays.
### Response:
def find_birthdays(request):
"""Return information on user birthdays."""
today = date.today()
custom = False
yr_inc = 0
if "birthday_month" in request.GET and "birthday_day" in request.GET:
try:
mon = int(request.GET["birthday_month"])
day = int(request.GET["birthday_day"])
yr = today.year
""" If searching a date that already happened this year, skip to the next year. """
if mon < today.month or (mon == today.month and day < today.day):
yr += 1
yr_inc = 1
real_today = today
today = date(yr, mon, day)
if today:
custom = True
else:
today = real_today
except ValueError:
pass
key = "birthdays:{}".format(today)
cached = cache.get(key)
if cached:
logger.debug("Birthdays on {} loaded " "from cache.".format(today))
logger.debug(cached)
return cached
else:
logger.debug("Loading and caching birthday info for {}".format(today))
tomorrow = today + timedelta(days=1)
try:
data = {
"custom": custom,
"today": {
"date": today,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age + yr_inc) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} if u else {} for u in User.objects.users_with_birthday(today.month, today.day)],
"inc": 0,
},
"tomorrow": {
"date": tomorrow,
"users": [{
"id": u.id,
"full_name": u.full_name,
"grade": {
"name": u.grade.name
},
"age": (u.age - 1) if u.age is not None else -1,
"public": u.properties.attribute_is_public("show_birthday")
} for u in User.objects.users_with_birthday(tomorrow.month, tomorrow.day)],
"inc": 1,
},
} # yapf: disable
except AttributeError:
return None
else:
cache.set(key, data, timeout=60 * 60 * 6)
return data |
def to_pinyin(s, delimiter=' ', all_readings=False, container='[]',
accented=True):
"""Convert a string's Chinese characters to Pinyin readings.
*s* is a string containing Chinese characters. *accented* is a
boolean value indicating whether to return accented or numbered Pinyin
readings.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
"""
hanzi = s
pinyin = ''
# Process the given string.
while hanzi:
# Get the next match in the given string.
match = re.search('[^%s%s]+' % (delimiter, zhon.hanzi.punctuation),
hanzi)
# There are no more matches, but the string isn't finished yet.
if match is None and hanzi:
pinyin += hanzi
break
match_start, match_end = match.span()
# Process the punctuation marks that occur before the match.
if match_start > 0:
pinyin += hanzi[0:match_start]
# Get the Chinese word/character readings.
readings = _hanzi_to_pinyin(match.group())
# Process the returned word readings.
if match.group() in _WORDS:
if all_readings:
reading = _enclose_readings(container,
_READING_SEPARATOR.join(readings))
else:
reading = readings[0]
pinyin += reading
# Process the returned character readings.
else:
# Process each character individually.
for character in readings:
# Don't touch unrecognized characters.
if isinstance(character, str):
pinyin += character
# Format multiple readings.
elif isinstance(character, list) and all_readings:
pinyin += _enclose_readings(
container, _READING_SEPARATOR.join(character))
# Select and format the most common reading.
elif isinstance(character, list) and not all_readings:
# Add an apostrophe to separate syllables.
if (pinyin and character[0][0] in zhon.pinyin.vowels and
pinyin[-1] in zhon.pinyin.lowercase):
pinyin += "'"
pinyin += character[0]
# Move ahead in the given string.
hanzi = hanzi[match_end:]
if accented:
return pinyin
else:
return accented_to_numbered(pinyin) | Convert a string's Chinese characters to Pinyin readings.
*s* is a string containing Chinese characters. *accented* is a
boolean value indicating whether to return accented or numbered Pinyin
readings.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched. | Below is the instruction that describes the task:
### Input:
Convert a string's Chinese characters to Pinyin readings.
*s* is a string containing Chinese characters. *accented* is a
boolean value indicating whether to return accented or numbered Pinyin
readings.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
### Response:
def to_pinyin(s, delimiter=' ', all_readings=False, container='[]',
accented=True):
"""Convert a string's Chinese characters to Pinyin readings.
*s* is a string containing Chinese characters. *accented* is a
boolean value indicating whether to return accented or numbered Pinyin
readings.
*delimiter* is the character used to indicate word boundaries in *s*.
This is used to differentiate between words and characters so that a more
accurate reading can be returned.
*all_readings* is a boolean value indicating whether or not to return all
possible readings in the case of words/characters that have multiple
readings. *container* is a two character string that is used to
enclose words/characters if *all_readings* is ``True``. The default
``'[]'`` is used like this: ``'[READING1/READING2]'``.
Characters not recognized as Chinese are left untouched.
"""
hanzi = s
pinyin = ''
# Process the given string.
while hanzi:
# Get the next match in the given string.
match = re.search('[^%s%s]+' % (delimiter, zhon.hanzi.punctuation),
hanzi)
# There are no more matches, but the string isn't finished yet.
if match is None and hanzi:
pinyin += hanzi
break
match_start, match_end = match.span()
# Process the punctuation marks that occur before the match.
if match_start > 0:
pinyin += hanzi[0:match_start]
# Get the Chinese word/character readings.
readings = _hanzi_to_pinyin(match.group())
# Process the returned word readings.
if match.group() in _WORDS:
if all_readings:
reading = _enclose_readings(container,
_READING_SEPARATOR.join(readings))
else:
reading = readings[0]
pinyin += reading
# Process the returned character readings.
else:
# Process each character individually.
for character in readings:
# Don't touch unrecognized characters.
if isinstance(character, str):
pinyin += character
# Format multiple readings.
elif isinstance(character, list) and all_readings:
pinyin += _enclose_readings(
container, _READING_SEPARATOR.join(character))
# Select and format the most common reading.
elif isinstance(character, list) and not all_readings:
# Add an apostrophe to separate syllables.
if (pinyin and character[0][0] in zhon.pinyin.vowels and
pinyin[-1] in zhon.pinyin.lowercase):
pinyin += "'"
pinyin += character[0]
# Move ahead in the given string.
hanzi = hanzi[match_end:]
if accented:
return pinyin
else:
return accented_to_numbered(pinyin) |
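A brief usage sketch of the converter above; the dragonmapper.hanzi module path and the sample outputs are assumptions about the surrounding package:

from dragonmapper import hanzi  # assumed home of to_pinyin

print(hanzi.to_pinyin('你好'))                   # e.g. 'nǐhǎo'
print(hanzi.to_pinyin('你好', accented=False))   # e.g. 'ni3hao3'
print(hanzi.to_pinyin('了', all_readings=True))  # multiple readings in the container, e.g. '[le/liǎo]'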
def get_alert(self, id, **kwargs): # noqa: E501
"""Get a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_alert_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_alert_with_http_info(id, **kwargs) # noqa: E501
return data | Get a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
### Response:
def get_alert(self, id, **kwargs): # noqa: E501
"""Get a specific alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_alert_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_alert_with_http_info(id, **kwargs) # noqa: E501
return data |
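Both call patterns from the docstring above, spelled out; the client wiring and the alert id are illustrative assumptions:

# Assumed SDK wiring, e.g. api = wavefront_api_client.AlertApi(client).
# Synchronous (default): returns a ResponseContainerAlert directly.
alert = api.get_alert('1459375928549')  # hypothetical alert id

# Asynchronous: returns the request thread; block on .get() for the result.
thread = api.get_alert('1459375928549', async_req=True)
alert = thread.get()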
def reinforce_grid(self):
""" Performs grid reinforcement measures for all MV and LV grids
Args:
Returns:
"""
# TODO: Finish method and enable LV case
for grid_district in self.mv_grid_districts():
# reinforce MV grid
grid_district.mv_grid.reinforce_grid()
# reinforce LV grids
for lv_load_area in grid_district.lv_load_areas():
if not lv_load_area.is_aggregated:
for lv_grid_district in lv_load_area.lv_grid_districts():
lv_grid_district.lv_grid.reinforce_grid() | Performs grid reinforcement measures for all MV and LV grids
Args:
Returns: | Below is the instruction that describes the task:
### Input:
Performs grid reinforcement measures for all MV and LV grids
Args:
Returns:
### Response:
def reinforce_grid(self):
""" Performs grid reinforcement measures for all MV and LV grids
Args:
Returns:
"""
# TODO: Finish method and enable LV case
for grid_district in self.mv_grid_districts():
# reinforce MV grid
grid_district.mv_grid.reinforce_grid()
# reinforce LV grids
for lv_load_area in grid_district.lv_load_areas():
if not lv_load_area.is_aggregated:
for lv_grid_district in lv_load_area.lv_grid_districts():
lv_grid_district.lv_grid.reinforce_grid() |
def run(self, params=None, return_columns=None, return_timestamps=None,
initial_condition='original', reload=False):
""" Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions
"""
if reload:
self.reload()
if params:
self.set_components(params)
self.set_initial_condition(initial_condition)
return_timestamps = self._format_return_timestamps(return_timestamps)
t_series = self._build_euler_timeseries(return_timestamps)
if return_columns is None:
return_columns = self._default_return_columns()
self.time.stage = 'Run'
self.clear_caches()
capture_elements, return_addresses = utils.get_return_elements(
return_columns, self.components._namespace, self.components._subscript_dict)
res = self._integrate(t_series, capture_elements, return_timestamps)
return_df = utils.make_flat_df(res, return_addresses)
return_df.index = return_timestamps
return return_df | Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions | Below is the instruction that describes the task:
### Input:
Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions
### Response:
def run(self, params=None, return_columns=None, return_timestamps=None,
initial_condition='original', reload=False):
""" Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions
"""
if reload:
self.reload()
if params:
self.set_components(params)
self.set_initial_condition(initial_condition)
return_timestamps = self._format_return_timestamps(return_timestamps)
t_series = self._build_euler_timeseries(return_timestamps)
if return_columns is None:
return_columns = self._default_return_columns()
self.time.stage = 'Run'
self.clear_caches()
capture_elements, return_addresses = utils.get_return_elements(
return_columns, self.components._namespace, self.components._subscript_dict)
res = self._integrate(t_series, capture_elements, return_timestamps)
return_df = utils.make_flat_df(res, return_addresses)
return_df.index = return_timestamps
return return_df |
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile, one line per account, in the format:
timestamp screen_name follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1 | Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile, one line per account, in the format:
timestamp screen_name follower_id_1 follower_id_2 ... | Below is the instruction that describes the task:
### Input:
Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile, one line per account, in the format:
timestamp screen_name follower_id_1 follower_id_2 ...
### Response:
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile, one line per account, in the format:
timestamp screen_name follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1 |
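The output-file rotation at the bottom of the loop above is easy to misread; a standalone sketch of just that naming scheme (the filename is illustrative):

def rotate_name(outfile, niters):
    # First pass appends '.1'; later passes swap the numeric suffix.
    if niters == 1:
        return '%s.%d' % (outfile, niters)
    return outfile[:outfile.rindex('.')] + '.%d' % niters

name = 'followers.gz'
for i in range(1, 4):
    name = rotate_name(name, i)
    print(name)  # followers.gz.1, followers.gz.2, followers.gz.3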
def flatten_args(shapes):
r"""
Decorator to flatten structured arguments to a function.
Examples
--------
>>> @flatten_args([(5,), ()])
... def f(w, lambda_):
... return .5 * lambda_ * w.T.dot(w)
>>> np.isclose(f(np.array([2., .5, .6, -.2, .9, .2])), .546)
True
>>> w = np.array([2., .5, .6, -.2, .9])
>>> lambda_ = .2
>>> np.isclose(.5 * lambda_ * w.T.dot(w), .546)
True
Some other curious applications
>>> from operator import mul
>>> flatten_args_dec = flatten_args([(), (3,)])
>>> func = flatten_args_dec(mul)
>>> func(np.array([3.1, .6, 1.71, -1.2]))
array([ 1.86 , 5.301, -3.72 ])
>>> 3.1 * np.array([.6, 1.71, -1.2])
array([ 1.86 , 5.301, -3.72 ])
>>> flatten_args_dec = flatten_args([(9,), (15,)])
>>> func = flatten_args_dec(np.meshgrid)
>>> x, y = func(np.arange(-5, 7, .5)) # (7 - (-5)) / 0.5 = 24 = 9 + 15
>>> x.shape
(15, 9)
>>> x[0, :]
array([-5. , -4.5, -4. , -3.5, -3. , -2.5, -2. , -1.5, -1. ])
>>> y.shape
(15, 9)
>>> y[:, 0]
array([-0.5, 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5,
5. , 5.5, 6. , 6.5])
"""
def flatten_args_dec(func):
@wraps(func)
def new_func(array1d, *args, **kwargs):
args = tuple(unflatten(array1d, shapes)) + args
return func(*args, **kwargs)
return new_func
return flatten_args_dec | r"""
Decorator to flatten structured arguments to a function.
Examples
--------
>>> @flatten_args([(5,), ()])
... def f(w, lambda_):
... return .5 * lambda_ * w.T.dot(w)
>>> np.isclose(f(np.array([2., .5, .6, -.2, .9, .2])), .546)
True
>>> w = np.array([2., .5, .6, -.2, .9])
>>> lambda_ = .2
>>> np.isclose(.5 * lambda_ * w.T.dot(w), .546)
True
Some other curious applications
>>> from operator import mul
>>> flatten_args_dec = flatten_args([(), (3,)])
>>> func = flatten_args_dec(mul)
>>> func(np.array([3.1, .6, 1.71, -1.2]))
array([ 1.86 , 5.301, -3.72 ])
>>> 3.1 * np.array([.6, 1.71, -1.2])
array([ 1.86 , 5.301, -3.72 ])
>>> flatten_args_dec = flatten_args([(9,), (15,)])
>>> func = flatten_args_dec(np.meshgrid)
>>> x, y = func(np.arange(-5, 7, .5)) # (7 - (-5)) / 0.5 = 24 = 9 + 15
>>> x.shape
(15, 9)
>>> x[0, :]
array([-5. , -4.5, -4. , -3.5, -3. , -2.5, -2. , -1.5, -1. ])
>>> y.shape
(15, 9)
>>> y[:, 0]
array([-0.5, 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5,
5. , 5.5, 6. , 6.5]) | Below is the instruction that describes the task:
### Input:
r"""
Decorator to flatten structured arguments to a function.
Examples
--------
>>> @flatten_args([(5,), ()])
... def f(w, lambda_):
... return .5 * lambda_ * w.T.dot(w)
>>> np.isclose(f(np.array([2., .5, .6, -.2, .9, .2])), .546)
True
>>> w = np.array([2., .5, .6, -.2, .9])
>>> lambda_ = .2
>>> np.isclose(.5 * lambda_ * w.T.dot(w), .546)
True
Some other curious applications
>>> from operator import mul
>>> flatten_args_dec = flatten_args([(), (3,)])
>>> func = flatten_args_dec(mul)
>>> func(np.array([3.1, .6, 1.71, -1.2]))
array([ 1.86 , 5.301, -3.72 ])
>>> 3.1 * np.array([.6, 1.71, -1.2])
array([ 1.86 , 5.301, -3.72 ])
>>> flatten_args_dec = flatten_args([(9,), (15,)])
>>> func = flatten_args_dec(np.meshgrid)
>>> x, y = func(np.arange(-5, 7, .5)) # (7 - (-5)) / 0.5 = 24 = 9 + 15
>>> x.shape
(15, 9)
>>> x[0, :]
array([-5. , -4.5, -4. , -3.5, -3. , -2.5, -2. , -1.5, -1. ])
>>> y.shape
(15, 9)
>>> y[:, 0]
array([-0.5, 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5,
5. , 5.5, 6. , 6.5])
### Response:
def flatten_args(shapes):
r"""
Decorator to flatten structured arguments to a function.
Examples
--------
>>> @flatten_args([(5,), ()])
... def f(w, lambda_):
... return .5 * lambda_ * w.T.dot(w)
>>> np.isclose(f(np.array([2., .5, .6, -.2, .9, .2])), .546)
True
>>> w = np.array([2., .5, .6, -.2, .9])
>>> lambda_ = .2
>>> np.isclose(.5 * lambda_ * w.T.dot(w), .546)
True
Some other curious applications
>>> from operator import mul
>>> flatten_args_dec = flatten_args([(), (3,)])
>>> func = flatten_args_dec(mul)
>>> func(np.array([3.1, .6, 1.71, -1.2]))
array([ 1.86 , 5.301, -3.72 ])
>>> 3.1 * np.array([.6, 1.71, -1.2])
array([ 1.86 , 5.301, -3.72 ])
>>> flatten_args_dec = flatten_args([(9,), (15,)])
>>> func = flatten_args_dec(np.meshgrid)
>>> x, y = func(np.arange(-5, 7, .5)) # (7 - (-5)) / 0.5 = 24 = 9 + 15
>>> x.shape
(15, 9)
>>> x[0, :]
array([-5. , -4.5, -4. , -3.5, -3. , -2.5, -2. , -1.5, -1. ])
>>> y.shape
(15, 9)
>>> y[:, 0]
array([-0.5, 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5,
5. , 5.5, 6. , 6.5])
"""
def flatten_args_dec(func):
@wraps(func)
def new_func(array1d, *args, **kwargs):
args = tuple(unflatten(array1d, shapes)) + args
return func(*args, **kwargs)
return new_func
return flatten_args_dec |
def events_for_unlock_lock(
initiator_state: InitiatorTransferState,
channel_state: NettingChannelState,
secret: Secret,
secrethash: SecretHash,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Unlocks the lock offchain, and emits the events for the successful payment. """
# next hop learned the secret, unlock the token locally and send the
# lock claim message to next hop
transfer_description = initiator_state.transfer_description
message_identifier = message_identifier_from_prng(pseudo_random_generator)
unlock_lock = channel.send_unlock(
channel_state=channel_state,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
secret=secret,
secrethash=secrethash,
)
payment_sent_success = EventPaymentSentSuccess(
payment_network_identifier=channel_state.payment_network_identifier,
token_network_identifier=TokenNetworkID(channel_state.token_network_identifier),
identifier=transfer_description.payment_identifier,
amount=transfer_description.amount,
target=transfer_description.target,
secret=secret,
)
unlock_success = EventUnlockSuccess(
transfer_description.payment_identifier,
transfer_description.secrethash,
)
return [unlock_lock, payment_sent_success, unlock_success] | Unlocks the lock offchain, and emits the events for the successful payment. | Below is the instruction that describes the task:
### Input:
Unlocks the lock offchain, and emits the events for the successful payment.
### Response:
def events_for_unlock_lock(
initiator_state: InitiatorTransferState,
channel_state: NettingChannelState,
secret: Secret,
secrethash: SecretHash,
pseudo_random_generator: random.Random,
) -> List[Event]:
""" Unlocks the lock offchain, and emits the events for the successful payment. """
# next hop learned the secret, unlock the token locally and send the
# lock claim message to next hop
transfer_description = initiator_state.transfer_description
message_identifier = message_identifier_from_prng(pseudo_random_generator)
unlock_lock = channel.send_unlock(
channel_state=channel_state,
message_identifier=message_identifier,
payment_identifier=transfer_description.payment_identifier,
secret=secret,
secrethash=secrethash,
)
payment_sent_success = EventPaymentSentSuccess(
payment_network_identifier=channel_state.payment_network_identifier,
token_network_identifier=TokenNetworkID(channel_state.token_network_identifier),
identifier=transfer_description.payment_identifier,
amount=transfer_description.amount,
target=transfer_description.target,
secret=secret,
)
unlock_success = EventUnlockSuccess(
transfer_description.payment_identifier,
transfer_description.secrethash,
)
return [unlock_lock, payment_sent_success, unlock_success] |
def make_data():
"""creates example data set"""
I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand
J,M,f = multidict({1:[500,1000], 2:[500,1000], 3:[500,1000]}) # capacity, fixed costs
c = {(1,1):4, (1,2):6, (1,3):9, # transportation costs
(2,1):5, (2,2):4, (2,3):7,
(3,1):6, (3,2):3, (3,3):4,
(4,1):8, (4,2):5, (4,3):3,
(5,1):10, (5,2):8, (5,3):4,
}
return I,J,d,M,f,c | creates example data set | Below is the instruction that describes the task:
### Input:
creates example data set
### Response:
def make_data():
"""creates example data set"""
I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand
J,M,f = multidict({1:[500,1000], 2:[500,1000], 3:[500,1000]}) # capacity, fixed costs
c = {(1,1):4, (1,2):6, (1,3):9, # transportation costs
(2,1):5, (2,2):4, (2,3):7,
(3,1):6, (3,2):3, (3,3):4,
(4,1):8, (4,2):5, (4,3):3,
(5,1):10, (5,2):8, (5,3):4,
}
return I,J,d,M,f,c |
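multidict splits a dict of lists into the key list plus one dict per value position; a minimal stand-in (assuming the helper matches the gurobipy-style multidict) shows what the calls above unpack to:

def multidict(data):
    # Return the list of keys, then one dict per value position.
    keys = list(data)
    first = data[keys[0]]
    if not isinstance(first, (list, tuple)):
        return keys, dict(data)
    return (keys, *({k: data[k][i] for k in keys} for i in range(len(first))))

J, M, f = multidict({1: [500, 1000], 2: [500, 1000], 3: [500, 1000]})
print(J)  # [1, 2, 3]
print(M)  # {1: 500, 2: 500, 3: 500}     (capacities)
print(f)  # {1: 1000, 2: 1000, 3: 1000}  (fixed costs)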
def is_in(self, *items):
"""Asserts that val is equal to one of the given items."""
if len(items) == 0:
raise ValueError('one or more args must be given')
else:
for i in items:
if self.val == i:
return self
self._err('Expected <%s> to be in %s, but was not.' % (self.val, self._fmt_items(items))) | Asserts that val is equal to one of the given items. | Below is the instruction that describes the task:
### Input:
Asserts that val is equal to one of the given items.
### Response:
def is_in(self, *items):
"""Asserts that val is equal to one of the given items."""
if len(items) == 0:
raise ValueError('one or more args must be given')
else:
for i in items:
if self.val == i:
return self
self._err('Expected <%s> to be in %s, but was not.' % (self.val, self._fmt_items(items))) |
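Typical fluent usage of the assertion above; the assert_that entry point is an assumption about the surrounding library:

from assertpy import assert_that  # assumed entry point for the class above

assert_that('banana').is_in('apple', 'banana', 'cherry')  # passes and returns self for chaining
assert_that(4).is_in(1, 2, 3)  # raises AssertionError via self._err(...)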
def en_last(self):
""" Report the energies from the last SCF present in the output.
Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`.
Any energy value not relevant to the parsed
output is assigned as |None|.
Returns
-------
last_ens
|dict| of |npfloat_|--
Energies from the last SCF present in the output.
"""
# Initialize the return dict
last_ens = dict()
# Iterate and store
for (k,l) in self.en.items():
last_ens.update({ k : l[-1] if l != [] else None })
##next (k,l)
# Should be ready to return?
return last_ens | Report the energies from the last SCF present in the output.
Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`.
Any energy value not relevant to the parsed
output is assigned as |None|.
Returns
-------
last_ens
|dict| of |npfloat_|--
Energies from the last SCF present in the output. | Below is the the instruction that describes the task:
### Input:
Report the energies from the last SCF present in the output.
Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`.
Any energy value not relevant to the parsed
output is assigned as |None|.
Returns
-------
last_ens
|dict| of |npfloat_|--
Energies from the last SCF present in the output.
### Response:
def en_last(self):
""" Report the energies from the last SCF present in the output.
Returns a |dict| providing the various energy values from the
last SCF cycle performed in the output. Keys are those of
:attr:`~opan.output.OrcaOutput.p_en`.
Any energy value not relevant to the parsed
output is assigned as |None|.
Returns
-------
last_ens
|dict| of |npfloat_|--
Energies from the last SCF present in the output.
"""
# Initialize the return dict
last_ens = dict()
# Iterate and store
for (k,l) in self.en.items():
last_ens.update({ k : l[-1] if l != [] else None })
##next (k,l)
# Should be ready to return?
return last_ens |
def add_padding(self, name,
left = 0, right = 0, top = 0, bottom = 0,
value = 0,
input_name = 'data', output_name = 'out',
padding_type = 'constant'):
"""
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
"""
# Constant, reflection, and replication padding types are supported.
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.padding
# Set the parameters
if padding_type == 'constant':
spec_layer_params.constant.value = value
elif padding_type == 'reflection':
spec_layer_params.reflection.MergeFromString(b'')
elif padding_type == 'replication':
spec_layer_params.replication.MergeFromString(b'')
else:
raise ValueError("Unknown padding_type %s" %(padding_type))
height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right | Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling | Below is the instruction that describes the task:
### Input:
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
### Response:
def add_padding(self, name,
left = 0, right = 0, top = 0, bottom = 0,
value = 0,
input_name = 'data', output_name = 'out',
padding_type = 'constant'):
"""
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
"""
# Constant, reflection, and replication padding types are supported.
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.padding
# Set the parameters
if padding_type == 'constant':
spec_layer_params.constant.value = value
elif padding_type == 'reflection':
spec_layer_params.reflection.MergeFromString(b'')
elif padding_type == 'replication':
spec_layer_params.replication.MergeFromString(b'')
else:
raise ValueError("Unknown padding_type %s" %(padding_type))
height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right |
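A hedged call sketch for the layer builder above; the builder instance and blob names are illustrative:

# 'builder' is assumed to be a coremltools NeuralNetworkBuilder instance.
builder.add_padding(name='pad_1',
                    left=2, right=2, top=2, bottom=2,
                    value=0.0,
                    input_name='conv_out', output_name='pad_out',
                    padding_type='constant')
# Reflection and replication padding ignore `value`:
builder.add_padding(name='pad_2', left=1, right=1,
                    input_name='pad_out', output_name='pad2_out',
                    padding_type='reflection')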
def get_family_form(self, *args, **kwargs):
"""Pass through to provider FamilyAdminSession.get_family_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'family_record_types' in kwargs:
return self.get_family_form_for_create(*args, **kwargs)
else:
return self.get_family_form_for_update(*args, **kwargs) | Pass through to provider FamilyAdminSession.get_family_form_for_update | Below is the instruction that describes the task:
### Input:
Pass through to provider FamilyAdminSession.get_family_form_for_update
### Response:
def get_family_form(self, *args, **kwargs):
"""Pass through to provider FamilyAdminSession.get_family_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'family_record_types' in kwargs:
return self.get_family_form_for_create(*args, **kwargs)
else:
return self.get_family_form_for_update(*args, **kwargs) |
def index():
"""Show a list of available libraries, and resource files"""
kwdb = current_app.kwdb
libraries = get_collections(kwdb, libtype="library")
resource_files = get_collections(kwdb, libtype="resource")
return flask.render_template("libraryNames.html",
data={"libraries": libraries,
"version": __version__,
"resource_files": resource_files
}) | Show a list of available libraries, and resource files | Below is the instruction that describes the task:
### Input:
Show a list of available libraries, and resource files
### Response:
def index():
"""Show a list of available libraries, and resource files"""
kwdb = current_app.kwdb
libraries = get_collections(kwdb, libtype="library")
resource_files = get_collections(kwdb, libtype="resource")
return flask.render_template("libraryNames.html",
data={"libraries": libraries,
"version": __version__,
"resource_files": resource_files
}) |
def _is_field_serializable(self, field_name):
"""Return True if the field can be serialized into a JSON doc."""
return (
self._meta.get_field(field_name).get_internal_type()
in self.SIMPLE_UPDATE_FIELD_TYPES
) | Return True if the field can be serialized into a JSON doc. | Below is the instruction that describes the task:
### Input:
Return True if the field can be serialized into a JSON doc.
### Response:
def _is_field_serializable(self, field_name):
"""Return True if the field can be serialized into a JSON doc."""
return (
self._meta.get_field(field_name).get_internal_type()
in self.SIMPLE_UPDATE_FIELD_TYPES
) |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._uuid is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._merchant_reference is not None:
return False
if self._description is not None:
return False
if self._status is not None:
return False
if self._amount_total is not None:
return False
if self._amount_paid is not None:
return False
if self._qr_code_token is not None:
return False
if self._tab_url is not None:
return False
if self._visibility is not None:
return False
if self._minimum_age is not None:
return False
if self._require_address is not None:
return False
if self._redirect_url is not None:
return False
if self._expiration is not None:
return False
if self._alias is not None:
return False
if self._cash_register_location is not None:
return False
if self._tab_item is not None:
return False
if self._tab_attachment is not None:
return False
return True | :rtype: bool | Below is the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._uuid is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._merchant_reference is not None:
return False
if self._description is not None:
return False
if self._status is not None:
return False
if self._amount_total is not None:
return False
if self._amount_paid is not None:
return False
if self._qr_code_token is not None:
return False
if self._tab_url is not None:
return False
if self._visibility is not None:
return False
if self._minimum_age is not None:
return False
if self._require_address is not None:
return False
if self._redirect_url is not None:
return False
if self._expiration is not None:
return False
if self._alias is not None:
return False
if self._cash_register_location is not None:
return False
if self._tab_item is not None:
return False
if self._tab_attachment is not None:
return False
return True |
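The if-chain above reduces to a single all() over the same fields; a behavior-preserving sketch (field list copied verbatim from the method):

def is_all_field_none(self):
    return all(field is None for field in (
        self._uuid, self._created, self._updated, self._merchant_reference,
        self._description, self._status, self._amount_total, self._amount_paid,
        self._qr_code_token, self._tab_url, self._visibility, self._minimum_age,
        self._require_address, self._redirect_url, self._expiration, self._alias,
        self._cash_register_location, self._tab_item, self._tab_attachment,
    ))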
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ""
for contract in self.contracts:
print('Contract {}'.format(contract.name))
for function in contract.functions:
if function.contract == contract:
print('\tFunction {}'.format(function.full_name))
for node in function.nodes:
if node.expression:
print('\t\tExpression: {}'.format(node.expression))
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
elif node.irs:
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
for modifier in contract.modifiers:
if modifier.contract == contract:
print('\tModifier {}'.format(modifier.full_name))
for node in modifier.nodes:
print(node)
if node.expression:
print('\t\tExpression: {}'.format(node.expression))
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
self.info(txt) | _filename is not used
Args:
_filename(string) | Below is the instruction that describes the task:
### Input:
_filename is not used
Args:
_filename(string)
### Response:
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ""
for contract in self.contracts:
print('Contract {}'.format(contract.name))
for function in contract.functions:
if function.contract == contract:
print('\tFunction {}'.format(function.full_name))
for node in function.nodes:
if node.expression:
print('\t\tExpression: {}'.format(node.expression))
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
elif node.irs:
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
for modifier in contract.modifiers:
if modifier.contract == contract:
print('\tModifier {}'.format(modifier.full_name))
for node in modifier.nodes:
print(node)
if node.expression:
print('\t\tExpression: {}'.format(node.expression))
print('\t\tIRs:')
for ir in node.irs:
print('\t\t\t{}'.format(ir))
self.info(txt) |
def close_all(self):
"""
Closes all editors
"""
if self._try_close_dirty_tabs():
while self.count():
widget = self.widget(0)
self.remove_tab(0)
self.tab_closed.emit(widget)
return True
return False | Closes all editors | Below is the instruction that describes the task:
### Input:
Closes all editors
### Response:
def close_all(self):
"""
Closes all editors
"""
if self._try_close_dirty_tabs():
while self.count():
widget = self.widget(0)
self.remove_tab(0)
self.tab_closed.emit(widget)
return True
return False |
def match(select, tag, namespaces=None, flags=0, **kwargs):
"""Match node."""
return compile(select, namespaces, flags, **kwargs).match(tag) | Match node. | Below is the instruction that describes the task:
### Input:
Match node.
### Response:
def match(select, tag, namespaces=None, flags=0, **kwargs):
"""Match node."""
return compile(select, namespaces, flags, **kwargs).match(tag) |
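A usage sketch for the convenience wrapper above; the soupsieve module name and the BeautifulSoup setup are assumptions:

from bs4 import BeautifulSoup
import soupsieve as sv  # assumed home of match()/compile()

soup = BeautifulSoup('<div><p class="intro">hi</p></div>', 'html.parser')
tag = soup.p
print(sv.match('p.intro', tag))     # True
print(sv.match('div > span', tag))  # False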
def do_cli(ctx, template, semantic_version):
"""Publish the application based on command line inputs."""
try:
template_data = get_template_data(template)
except ValueError as ex:
click.secho("Publish Failed", fg='red')
raise UserException(str(ex))
# Override SemanticVersion in template metadata when provided in command input
if semantic_version and SERVERLESS_REPO_APPLICATION in template_data.get(METADATA, {}):
template_data.get(METADATA).get(SERVERLESS_REPO_APPLICATION)[SEMANTIC_VERSION] = semantic_version
try:
publish_output = publish_application(template_data)
click.secho("Publish Succeeded", fg="green")
click.secho(_gen_success_message(publish_output))
except InvalidS3UriError:
click.secho("Publish Failed", fg='red')
raise UserException(
"Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application "
"artifacts to S3 by packaging the template. See more details in {}".format(SAM_PACKAGE_DOC))
except ServerlessRepoError as ex:
click.secho("Publish Failed", fg='red')
LOG.debug("Failed to publish application to serverlessrepo", exc_info=True)
error_msg = '{}\nPlease follow the instructions in {}'.format(str(ex), SAM_PUBLISH_DOC)
raise UserException(error_msg)
application_id = publish_output.get('application_id')
_print_console_link(ctx.region, application_id) | Publish the application based on command line inputs. | Below is the instruction that describes the task:
### Input:
Publish the application based on command line inputs.
### Response:
def do_cli(ctx, template, semantic_version):
"""Publish the application based on command line inputs."""
try:
template_data = get_template_data(template)
except ValueError as ex:
click.secho("Publish Failed", fg='red')
raise UserException(str(ex))
# Override SemanticVersion in template metadata when provided in command input
if semantic_version and SERVERLESS_REPO_APPLICATION in template_data.get(METADATA, {}):
template_data.get(METADATA).get(SERVERLESS_REPO_APPLICATION)[SEMANTIC_VERSION] = semantic_version
try:
publish_output = publish_application(template_data)
click.secho("Publish Succeeded", fg="green")
click.secho(_gen_success_message(publish_output))
except InvalidS3UriError:
click.secho("Publish Failed", fg='red')
raise UserException(
"Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application "
"artifacts to S3 by packaging the template. See more details in {}".format(SAM_PACKAGE_DOC))
except ServerlessRepoError as ex:
click.secho("Publish Failed", fg='red')
LOG.debug("Failed to publish application to serverlessrepo", exc_info=True)
error_msg = '{}\nPlease follow the instructions in {}'.format(str(ex), SAM_PUBLISH_DOC)
raise UserException(error_msg)
application_id = publish_output.get('application_id')
_print_console_link(ctx.region, application_id) |
def toggle_reciprocal(self):
"""Flip my ``reciprocal_portal`` boolean, and draw (or stop drawing)
an extra arrow on the appropriate button to indicate the
fact.
"""
self.screen.boardview.reciprocal_portal = not self.screen.boardview.reciprocal_portal
if self.screen.boardview.reciprocal_portal:
assert(self.revarrow is None)
self.revarrow = ArrowWidget(
board=self.screen.boardview.board,
origin=self.ids.emptyright,
destination=self.ids.emptyleft
)
self.ids.portaladdbut.add_widget(self.revarrow)
else:
if hasattr(self, 'revarrow'):
self.ids.portaladdbut.remove_widget(self.revarrow)
self.revarrow = None | Flip my ``reciprocal_portal`` boolean, and draw (or stop drawing)
an extra arrow on the appropriate button to indicate the
fact. | Below is the instruction that describes the task:
### Input:
Flip my ``reciprocal_portal`` boolean, and draw (or stop drawing)
an extra arrow on the appropriate button to indicate the
fact.
### Response:
def toggle_reciprocal(self):
"""Flip my ``reciprocal_portal`` boolean, and draw (or stop drawing)
an extra arrow on the appropriate button to indicate the
fact.
"""
self.screen.boardview.reciprocal_portal = not self.screen.boardview.reciprocal_portal
if self.screen.boardview.reciprocal_portal:
assert(self.revarrow is None)
self.revarrow = ArrowWidget(
board=self.screen.boardview.board,
origin=self.ids.emptyright,
destination=self.ids.emptyleft
)
self.ids.portaladdbut.add_widget(self.revarrow)
else:
if hasattr(self, 'revarrow'):
self.ids.portaladdbut.remove_widget(self.revarrow)
self.revarrow = None |
def compiler_info(compiler):
"""Determine the name + version of the compiler"""
(out, err) = subprocess.Popen(
['/bin/sh', '-c', '{0} -v'.format(compiler)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
for line in (out + err).split('\n'):
mtch = gcc_clang.search(line)
if mtch:
return mtch.group(1) + ' ' + mtch.group(2)
return compiler | Determine the name + version of the compiler | Below is the instruction that describes the task:
### Input:
Determine the name + version of the compiler
### Response:
def compiler_info(compiler):
"""Determine the name + version of the compiler"""
(out, err) = subprocess.Popen(
['/bin/sh', '-c', '{0} -v'.format(compiler)],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate('')
gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
for line in (out + err).split('\n'):
mtch = gcc_clang.search(line)
if mtch:
return mtch.group(1) + ' ' + mtch.group(2)
return compiler |
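The version regex above is easy to sanity-check in isolation; the sample line below is an illustrative `gcc -v` output line, not captured from a real run.

# Standalone check of the version regex (sample line is illustrative).
import re

gcc_clang = re.compile('(gcc|clang) version ([0-9]+(\\.[0-9]+)*)')
line = 'gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1)'
mtch = gcc_clang.search(line)
print(mtch.group(1) + ' ' + mtch.group(2))  # gcc 9.4.0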
def Uri(self):
""" Constructs the connection URI from name, noSsl and port instance variables. """
return ("%s://%s%s" % (("https", "http")[self._noSsl == True], self._name, (":" + str(self._port), "")[
(((self._noSsl == False) and (self._port == 80)) or ((self._noSsl == True) and (self._port == 443)))])) | Constructs the connection URI from name, noSsl and port instance variables. | Below is the instruction that describes the task:
### Input:
Constructs the connection URI from name, noSsl and port instance variables.
### Response:
def Uri(self):
""" Constructs the connection URI from name, noSsl and port instance variables. """
return ("%s://%s%s" % (("https", "http")[self._noSsl == True], self._name, (":" + str(self._port), "")[
(((self._noSsl == False) and (self._port == 80)) or ((self._noSsl == True) and (self._port == 443)))])) |
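The tuple-indexing in Uri is terse; a standalone sketch with plain conditionals (illustrative names, and using the conventional default ports of 80 for http and 443 for https) makes the intent easier to follow:

# Readable equivalent of the tuple-indexing idiom (illustrative, standalone).
def build_uri(name, no_ssl, port):
    scheme = "http" if no_ssl else "https"
    # Drop the port only when it is the conventional default for the scheme.
    is_default = (no_ssl and port == 80) or (not no_ssl and port == 443)
    return "%s://%s%s" % (scheme, name, "" if is_default else ":%d" % port)

print(build_uri("example.com", False, 443))   # https://example.com
print(build_uri("example.com", True, 8080))   # http://example.com:8080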
def isNonPairTag(self, isnonpair=None):
"""
True if element is listed in nonpair tag table (``br`` for example) or
if it ends with ``/>`` (``<hr />`` for example).
You can also change state from pair to nonpair if you use this as
setter.
Args:
isnonpair (bool, default None): If set, internal nonpair state is
changed.
Returns:
bool: True if tag is nonpair.
"""
if isnonpair is None:
return self._isnonpairtag
if not self._istag:
return
if isnonpair:
self.endtag = None
self.childs = []
self._isnonpairtag = isnonpair | True if element is listed in nonpair tag table (``br`` for example) or
if it ends with ``/>`` (``<hr />`` for example).
You can also change state from pair to nonpair if you use this as
setter.
Args:
isnonpair (bool, default None): If set, internal nonpair state is
changed.
Returns:
bool: True if tag is nonpair. | Below is the instruction that describes the task:
### Input:
True if element is listed in nonpair tag table (``br`` for example) or
if it ends with ``/>`` (``<hr />`` for example).
You can also change state from pair to nonpair if you use this as
setter.
Args:
isnonpair (bool, default None): If set, internal nonpair state is
changed.
Returns:
bool: True if tag is nonpair.
### Response:
def isNonPairTag(self, isnonpair=None):
"""
True if element is listed in nonpair tag table (``br`` for example) or
if it ends with ``/>`` (``<hr />`` for example).
You can also change state from pair to nonpair if you use this as
setter.
Args:
isnonpair (bool, default None): If set, internal nonpair state is
changed.
Returns:
bool: True if tag is nonpair.
"""
if isnonpair is None:
return self._isnonpairtag
if not self._istag:
return
if isnonpair:
self.endtag = None
self.childs = []
self._isnonpairtag = isnonpair |
def at_line(self, line: FileLine) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements located at a given line.
"""
num = line.num
for stmt in self.in_file(line.filename):
if stmt.location.start.line == num:
yield stmt | Returns an iterator over all of the statements located at a given line. | Below is the instruction that describes the task:
### Input:
Returns an iterator over all of the statements located at a given line.
### Response:
def at_line(self, line: FileLine) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements located at a given line.
"""
num = line.num
for stmt in self.in_file(line.filename):
if stmt.location.start.line == num:
yield stmt |
def accumulate_from_superclasses(cls, propname):
''' Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
name (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__``
'''
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename] | Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
name (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__`` | Below is the instruction that describes the task:
### Input:
Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
name (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__``
### Response:
def accumulate_from_superclasses(cls, propname):
''' Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
name (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__``
'''
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename] |
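The `cls.__dict__` check (rather than `hasattr`) is the load-bearing detail here; a toy sketch, unrelated to Bokeh's real classes, shows why `hasattr` would let a subclass silently reuse its parent's cache:

# Toy demonstration of why the cache lookup uses cls.__dict__ (illustrative).
class Base(object):
    pass

class Child(Base):
    pass

Base._cached = {'from_base'}            # cache stored on Base only

print(hasattr(Child, '_cached'))        # True  -- found via inheritance
print('_cached' in Child.__dict__)      # False -- Child has no cache of its own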
def sort_flavor_list(request, flavors, with_menu_label=True):
"""Utility method to sort a list of flavors.
By default, returns the available flavors, sorted by RAM usage (ascending).
Override these behaviours with a ``CREATE_INSTANCE_FLAVOR_SORT`` dict
in ``local_settings.py``.
"""
def get_key(flavor, sort_key):
try:
return getattr(flavor, sort_key)
except AttributeError:
LOG.warning('Could not find sort key "%s". Using the default '
'"ram" instead.', sort_key)
return getattr(flavor, 'ram')
try:
flavor_sort = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
sort_key = flavor_sort.get('key', 'ram')
rev = flavor_sort.get('reverse', False)
if not callable(sort_key):
def key(flavor):
return get_key(flavor, sort_key)
else:
key = sort_key
if with_menu_label:
flavor_list = [(flavor.id, '%s' % flavor.name)
for flavor in sorted(flavors, key=key, reverse=rev)]
else:
flavor_list = sorted(flavors, key=key, reverse=rev)
return flavor_list
except Exception:
exceptions.handle(request,
_('Unable to sort instance flavors.'))
return [] | Utility method to sort a list of flavors.
By default, returns the available flavors, sorted by RAM usage (ascending).
Override these behaviours with a ``CREATE_INSTANCE_FLAVOR_SORT`` dict
in ``local_settings.py``. | Below is the instruction that describes the task:
### Input:
Utility method to sort a list of flavors.
By default, returns the available flavors, sorted by RAM usage (ascending).
Override these behaviours with a ``CREATE_INSTANCE_FLAVOR_SORT`` dict
in ``local_settings.py``.
### Response:
def sort_flavor_list(request, flavors, with_menu_label=True):
"""Utility method to sort a list of flavors.
By default, returns the available flavors, sorted by RAM usage (ascending).
Override these behaviours with a ``CREATE_INSTANCE_FLAVOR_SORT`` dict
in ``local_settings.py``.
"""
def get_key(flavor, sort_key):
try:
return getattr(flavor, sort_key)
except AttributeError:
LOG.warning('Could not find sort key "%s". Using the default '
'"ram" instead.', sort_key)
return getattr(flavor, 'ram')
try:
flavor_sort = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
sort_key = flavor_sort.get('key', 'ram')
rev = flavor_sort.get('reverse', False)
if not callable(sort_key):
def key(flavor):
return get_key(flavor, sort_key)
else:
key = sort_key
if with_menu_label:
flavor_list = [(flavor.id, '%s' % flavor.name)
for flavor in sorted(flavors, key=key, reverse=rev)]
else:
flavor_list = sorted(flavors, key=key, reverse=rev)
return flavor_list
except Exception:
exceptions.handle(request,
_('Unable to sort instance flavors.'))
return [] |
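For reference, a ``local_settings.py`` stanza like the following (key names taken from the code above; the values are examples only) changes the ordering:

# Example CREATE_INSTANCE_FLAVOR_SORT stanzas (values are illustrative).
CREATE_INSTANCE_FLAVOR_SORT = {
    'key': 'vcpus',    # any flavor attribute; falls back to 'ram' if absent
    'reverse': True,   # largest first
}

# 'key' may also be a callable:
# CREATE_INSTANCE_FLAVOR_SORT = {'key': lambda flavor: flavor.ram / flavor.vcpus}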
def recalculate(self):
"""
Recalculates the slider scene for this widget.
"""
# recalculate the scene geometry
scene = self.scene()
w = self.calculateSceneWidth()
scene.setSceneRect(0, 0, w, self.height())
# recalculate the item layout
spacing = self.spacing()
x = self.width() / 4.0
y = self.height() / 2.0
for item in self.items():
pmap = item.pixmap()
item.setPos(x, y - pmap.height() / 1.5)
x += pmap.size().width() + spacing | Recalculates the slider scene for this widget. | Below is the instruction that describes the task:
### Input:
Recalculates the slider scene for this widget.
### Response:
def recalculate(self):
"""
Recalculates the slider scene for this widget.
"""
# recalculate the scene geometry
scene = self.scene()
w = self.calculateSceneWidth()
scene.setSceneRect(0, 0, w, self.height())
# recalculate the item layout
spacing = self.spacing()
x = self.width() / 4.0
y = self.height() / 2.0
for item in self.items():
pmap = item.pixmap()
item.setPos(x, y - pmap.height() / 1.5)
x += pmap.size().width() + spacing |
def get_value(self):
"""
Evaluate self.expr to get the parameter's value
"""
if (self._value is None) and (self.expr is not None):
self._value = self.expr.get_value()
return self._value | Evaluate self.expr to get the parameter's value | Below is the instruction that describes the task:
### Input:
Evaluate self.expr to get the parameter's value
### Response:
def get_value(self):
"""
Evaluate self.expr to get the parameter's value
"""
if (self._value is None) and (self.expr is not None):
self._value = self.expr.get_value()
return self._value |
def update_book(self, book_form):
"""Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originate from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.update_bin_template
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=book_form)
collection = JSONClientValidated('commenting',
collection='Book',
runtime=self._runtime)
if not isinstance(book_form, ABCBookForm):
raise errors.InvalidArgument('argument type is not an BookForm')
if not book_form.is_for_update():
raise errors.InvalidArgument('the BookForm is for update only, not create')
try:
if self._forms[book_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('book_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('book_form did not originate from this session')
if not book_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(book_form._my_map) # save is deprecated - change to replace_one
self._forms[book_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned
return objects.Book(osid_object_map=book_form._my_map, runtime=self._runtime, proxy=self._proxy) | Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originate from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originate from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
### Response:
def update_book(self, book_form):
"""Updates an existing book.
arg: book_form (osid.commenting.BookForm): the form
containing the elements to be updated
raise: IllegalState - ``book_form`` already used in an update
transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``book_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``book_form`` did not originate from
``get_book_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.update_bin_template
if self._catalog_session is not None:
return self._catalog_session.update_catalog(catalog_form=book_form)
collection = JSONClientValidated('commenting',
collection='Book',
runtime=self._runtime)
if not isinstance(book_form, ABCBookForm):
raise errors.InvalidArgument('argument type is not an BookForm')
if not book_form.is_for_update():
raise errors.InvalidArgument('the BookForm is for update only, not create')
try:
if self._forms[book_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('book_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('book_form did not originate from this session')
if not book_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(book_form._my_map) # save is deprecated - change to replace_one
self._forms[book_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned
return objects.Book(osid_object_map=book_form._my_map, runtime=self._runtime, proxy=self._proxy) |
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'
logging.info('Ready to return response')
return response | Download the query results as csv. | Below is the instruction that describes the task:
### Input:
Download the query results as csv.
### Response:
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'
logging.info('Ready to return response')
return response |
def execute_once(self):
"""One step of execution."""
symbol = self.tape.get(self.head, self.EMPTY_SYMBOL)
index = self.alphabet.index(symbol)
rule = self.states[self.state][index]
if rule is None:
raise RuntimeError('Unexpected symbol: ' + symbol)
self.tape[self.head] = rule[0]
if rule[1] == 'L':
self.head -= 1
elif rule[1] == 'R':
self.head += 1
self.state = rule[2] | One step of execution. | Below is the instruction that describes the task:
### Input:
One step of execution.
### Response:
def execute_once(self):
"""One step of execution."""
symbol = self.tape.get(self.head, self.EMPTY_SYMBOL)
index = self.alphabet.index(symbol)
rule = self.states[self.state][index]
if rule is None:
raise RuntimeError('Unexpected symbol: ' + symbol)
self.tape[self.head] = rule[0]
if rule[1] == 'L':
self.head -= 1
elif rule[1] == 'R':
self.head += 1
self.state = rule[2] |
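The data layout is implicit in the lookups above: self.states maps a state name to a list indexed by the symbol's position in self.alphabet, each entry a (write, move, next_state) triple. A self-contained toy step function (invented machine, same rule shape) illustrates:

# Toy machine in the shape execute_once() expects (all data illustrative).
EMPTY = '0'
alphabet = ['0', '1']
states = {'flip': [('1', 'R', 'flip'),    # on '0': write '1', move right
                   ('0', 'R', 'flip')]}   # on '1': write '0', move right

def step(tape, head, state):
    symbol = tape.get(head, EMPTY)
    write, move, nxt = states[state][alphabet.index(symbol)]
    tape[head] = write
    return head + (-1 if move == 'L' else 1), nxt

tape, head, state = {0: '0', 1: '1'}, 0, 'flip'
for _ in range(2):
    head, state = step(tape, head, state)
print(tape)  # {0: '1', 1: '0'}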
def setProperty(self, full_path, protect, dummy = 7046):
"""Set property of a file.
:param full_path: The full path to get the file or directory property.
:param protect: 'Y' or 'N', marks the file as important
:return: ``True`` when the property is set successfully, otherwise ``False``
"""
data = {'orgresource': full_path,
'protect': protect,
'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
}
s, metadata = self.POST('setProperty', data)
if s is True:
return True
else:
return False | Set property of a file.
:param full_path: The full path to get the file or directory property.
:param protect: 'Y' or 'N', marks the file as important
:return: ``True`` when the property is set successfully, otherwise ``False`` | Below is the instruction that describes the task:
### Input:
Set property of a file.
:param full_path: The full path to get the file or directory property.
:param protect: 'Y' or 'N', marks the file as important
:return: ``True`` when the property is set successfully, otherwise ``False``
### Response:
def setProperty(self, full_path, protect, dummy = 7046):
"""Set property of a file.
:param full_path: The full path to get the file or directory property.
:param protect: 'Y' or 'N', marks the file as important
:return: ``True`` when the property is set successfully, otherwise ``False``
"""
data = {'orgresource': full_path,
'protect': protect,
'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
}
s, metadata = self.POST('setProperty', data)
if s is True:
return True
else:
return False |
def head(self, item):
"""Makes a HEAD request on a specific item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._head(uri) | Makes a HEAD request on a specific item. | Below is the instruction that describes the task:
### Input:
Makes a HEAD request on a specific item.
### Response:
def head(self, item):
"""Makes a HEAD request on a specific item."""
uri = "/%s/%s" % (self.uri_base, utils.get_id(item))
return self._head(uri) |
def utilisation(self):
"""Getter for various Utilisation variables"""
if self._utilisation is None:
api = "SYNO.Core.System.Utilization"
url = "%s/entry.cgi?api=%s&version=1&method=get" % (
self.base_url,
api)
self._utilisation = SynoUtilization(self._get_url(url))
return self._utilisation | Getter for various Utilisation variables | Below is the instruction that describes the task:
### Input:
Getter for various Utilisation variables
### Response:
def utilisation(self):
"""Getter for various Utilisation variables"""
if self._utilisation is None:
api = "SYNO.Core.System.Utilization"
url = "%s/entry.cgi?api=%s&version=1&method=get" % (
self.base_url,
api)
self._utilisation = SynoUtilization(self._get_url(url))
return self._utilisation |
def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) | Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True. | Below is the the instruction that describes the task:
### Input:
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
### Response:
def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
"""
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) |
def set_api_key(config, key):
"""Configure a new API key."""
if not key and "X-Api-Key" in config.api_key:
del config.api_key["X-Api-Key"]
else:
config.api_key["X-Api-Key"] = key | Configure a new API key. | Below is the the instruction that describes the task:
### Input:
Configure a new API key.
### Response:
def set_api_key(config, key):
"""Configure a new API key."""
if not key and "X-Api-Key" in config.api_key:
del config.api_key["X-Api-Key"]
else:
config.api_key["X-Api-Key"] = key |
def _merge_single_runs(self, other_trajectory, used_runs):
""" Updates the `run_information` of the current trajectory."""
count = len(self) # Variable to count the increasing new run indices and create
# new run names
run_indices = range(len(other_trajectory))
run_name_dict = OrderedDict()
to_store_groups_with_annotations = []
for idx in run_indices:
# Iterate through all used runs and store annotated groups and mark results and
# derived parameters for merging
if idx in used_runs:
# Update the run information dict of the current trajectory
other_info_dict = other_trajectory.f_get_run_information(idx)
time_ = other_info_dict['time']
timestamp = other_info_dict['timestamp']
completed = other_info_dict['completed']
short_environment_hexsha = other_info_dict['short_environment_hexsha']
finish_timestamp = other_info_dict['finish_timestamp']
runtime = other_info_dict['runtime']
new_idx = used_runs[idx]
new_runname = self.f_wildcard('$', new_idx)
run_name_dict[idx] = new_runname
info_dict = dict(
idx=new_idx,
time=time_,
timestamp=timestamp,
completed=completed,
short_environment_hexsha=short_environment_hexsha,
finish_timestamp=finish_timestamp,
runtime=runtime)
self._add_run_info(**info_dict) | Updates the `run_information` of the current trajectory. | Below is the instruction that describes the task:
### Input:
Updates the `run_information` of the current trajectory.
### Response:
def _merge_single_runs(self, other_trajectory, used_runs):
""" Updates the `run_information` of the current trajectory."""
count = len(self) # Variable to count the increasing new run indices and create
# new run names
run_indices = range(len(other_trajectory))
run_name_dict = OrderedDict()
to_store_groups_with_annotations = []
for idx in run_indices:
# Iterate through all used runs and store annotated groups and mark results and
# derived parameters for merging
if idx in used_runs:
# Update the run information dict of the current trajectory
other_info_dict = other_trajectory.f_get_run_information(idx)
time_ = other_info_dict['time']
timestamp = other_info_dict['timestamp']
completed = other_info_dict['completed']
short_environment_hexsha = other_info_dict['short_environment_hexsha']
finish_timestamp = other_info_dict['finish_timestamp']
runtime = other_info_dict['runtime']
new_idx = used_runs[idx]
new_runname = self.f_wildcard('$', new_idx)
run_name_dict[idx] = new_runname
info_dict = dict(
idx=new_idx,
time=time_,
timestamp=timestamp,
completed=completed,
short_environment_hexsha=short_environment_hexsha,
finish_timestamp=finish_timestamp,
runtime=runtime)
self._add_run_info(**info_dict) |
def analyze(self, chunkSize, *sinks):
""" Figure out the best diffs to use to reach all our required volumes. """
measureSize = False
if self.measureSize:
for sink in sinks:
if sink.isRemote:
measureSize = True
# Use destination (already uploaded) edges first
sinks = list(sinks)
sinks.reverse()
self.dest = sinks[0]
def currentSize():
return sum([
n.diffSize
for n in self.nodes.values()
if n.diff is not None and n.diff.sink != self.dest
])
while True:
self._analyzeDontMeasure(chunkSize, measureSize, *sinks)
if not measureSize:
return
estimatedSize = currentSize()
# logger.info("Measuring any estimated diffs")
for node in self.nodes.values():
edge = node.diff
if edge is not None and edge.sink != self.dest and edge.sizeIsEstimated:
edge.sink.measureSize(edge, chunkSize)
actualSize = currentSize()
logger.info(
"measured size (%s), estimated size (%s)",
humanize(actualSize), humanize(estimatedSize),
)
if actualSize <= 1.2 * estimatedSize:
return | Figure out the best diffs to use to reach all our required volumes. | Below is the instruction that describes the task:
### Input:
Figure out the best diffs to use to reach all our required volumes.
### Response:
def analyze(self, chunkSize, *sinks):
""" Figure out the best diffs to use to reach all our required volumes. """
measureSize = False
if self.measureSize:
for sink in sinks:
if sink.isRemote:
measureSize = True
# Use destination (already uploaded) edges first
sinks = list(sinks)
sinks.reverse()
self.dest = sinks[0]
def currentSize():
return sum([
n.diffSize
for n in self.nodes.values()
if n.diff is not None and n.diff.sink != self.dest
])
while True:
self._analyzeDontMeasure(chunkSize, measureSize, *sinks)
if not measureSize:
return
estimatedSize = currentSize()
# logger.info("Measuring any estimated diffs")
for node in self.nodes.values():
edge = node.diff
if edge is not None and edge.sink != self.dest and edge.sizeIsEstimated:
edge.sink.measureSize(edge, chunkSize)
actualSize = currentSize()
logger.info(
"measured size (%s), estimated size (%s)",
humanize(actualSize), humanize(estimatedSize),
)
if actualSize <= 1.2 * estimatedSize:
return |
def importdb(indir):
"""Import a previously exported anchore DB"""
ecode = 0
try:
imgdir = os.path.join(indir, "images")
feeddir = os.path.join(indir, "feeds")
storedir = os.path.join(indir, "storedfiles")
for d in [indir, imgdir, feeddir, storedir]:
if not os.path.exists(d):
raise Exception ("specified directory "+str(indir)+" does not appear to be complete (missing "+str(d)+")")
anchore_print("importing images...")
#imagelist = []
for ifile in os.listdir(imgdir):
patt = re.match("(.*)\.json", ifile)
if patt:
imageId = patt.group(1)
if contexts['anchore_db'].is_image_present(imageId):
anchore_print("\timage ("+str(imageId)+") already exists in DB, skipping import.")
else:
#imagelist.append(patt.group(1))
thefile = os.path.join(imgdir, ifile)
with open(thefile, 'r') as FH:
imagedata = json.loads(FH.read())
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
thedir = os.path.join(storedir, imageId)
if os.path.exists(thedir):
for namespace in os.listdir(thedir):
thefile = os.path.join(thedir, namespace, "stored_files.tar.gz")
if os.path.exists(thefile):
contexts['anchore_db'].save_files_tarfile(imageId, namespace, thefile)
anchore_print("\timage ("+str(imageId)+") imported.")
anchore_print("importing feeds...")
thefile = os.path.join(feeddir, "feedmeta.json")
with open(thefile, 'r') as FH:
feedmeta = json.loads(FH.read())
if feedmeta:
contexts['anchore_db'].save_feedmeta(feedmeta)
for feed in feedmeta:
feedobj = feedmeta[feed]
for group in feedobj['groups']:
groupobj = feedobj['groups'][group]
datafiles = groupobj.pop('datafiles', [])
for datafile in datafiles:
thedir = os.path.join(feeddir, feed, group)
thefile = os.path.join(thedir, datafile)
if not os.path.exists(thefile):
pass
else:
with open(thefile, 'r') as FH:
contexts['anchore_db'].save_feed_group_data(feed, group, datafile, json.loads(FH.read()))
anchore_print("\tfeed ("+feed+" " + group + " " + datafile + ") imported")
#TODO import stored files
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) | Import a previously exported anchore DB | Below is the instruction that describes the task:
### Input:
Import a previously exported anchore DB
### Response:
def importdb(indir):
"""Import a previously exported anchore DB"""
ecode = 0
try:
imgdir = os.path.join(indir, "images")
feeddir = os.path.join(indir, "feeds")
storedir = os.path.join(indir, "storedfiles")
for d in [indir, imgdir, feeddir, storedir]:
if not os.path.exists(d):
raise Exception ("specified directory "+str(indir)+" does not appear to be complete (missing "+str(d)+")")
anchore_print("importing images...")
#imagelist = []
for ifile in os.listdir(imgdir):
patt = re.match("(.*)\.json", ifile)
if patt:
imageId = patt.group(1)
if contexts['anchore_db'].is_image_present(imageId):
anchore_print("\timage ("+str(imageId)+") already exists in DB, skipping import.")
else:
#imagelist.append(patt.group(1))
thefile = os.path.join(imgdir, ifile)
with open(thefile, 'r') as FH:
imagedata = json.loads(FH.read())
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
thedir = os.path.join(storedir, imageId)
if os.path.exists(thedir):
for namespace in os.listdir(thedir):
thefile = os.path.join(thedir, namespace, "stored_files.tar.gz")
if os.path.exists(thefile):
contexts['anchore_db'].save_files_tarfile(imageId, namespace, thefile)
anchore_print("\timage ("+str(imageId)+") imported.")
anchore_print("importing feeds...")
thefile = os.path.join(feeddir, "feedmeta.json")
with open(thefile, 'r') as FH:
feedmeta = json.loads(FH.read())
if feedmeta:
contexts['anchore_db'].save_feedmeta(feedmeta)
for feed in feedmeta:
feedobj = feedmeta[feed]
for group in feedobj['groups']:
groupobj = feedobj['groups'][group]
datafiles = groupobj.pop('datafiles', [])
for datafile in datafiles:
thedir = os.path.join(feeddir, feed, group)
thefile = os.path.join(thedir, datafile)
if not os.path.exists(thefile):
pass
else:
with open(thefile, 'r') as FH:
contexts['anchore_db'].save_feed_group_data(feed, group, datafile, json.loads(FH.read()))
anchore_print("\tfeed ("+feed+" " + group + " " + datafile + ") imported")
#TODO import stored files
except Exception as err:
anchore_print_err("operation failed: " + str(err))
ecode = 1
sys.exit(ecode) |
def consume_service(service_agreement_id, service_endpoint, account, files,
destination_folder, index=None):
"""
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if it was downloaded, bool
"""
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error(
'index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account,
None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name)
else:
for i, _file in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id,
account, _file,
signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) | Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if it was downloaded, bool | Below is the instruction that describes the task:
### Input:
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if it was downloaded, bool
### Response:
def consume_service(service_agreement_id, service_endpoint, account, files,
destination_folder, index=None):
"""
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if it was downloaded, bool
"""
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error(
'index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account,
None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name)
else:
for i, _file in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id,
account, _file,
signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) |
def preserve_shape(func):
"""Preserve shape of the image."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
result = result.reshape(shape)
return result
return wrapped_function | Preserve shape of the image. | Below is the instruction that describes the task:
### Input:
Preserve shape of the image.
### Response:
def preserve_shape(func):
"""Preserve shape of the image."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
result = result.reshape(shape)
return result
return wrapped_function |
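A quick usage sketch (NumPy assumed; the wrapped function is invented, and preserve_shape from above is taken to be in scope) shows the decorator restoring a shape the inner function discarded:

# Usage sketch for preserve_shape (assumes the decorator above is importable).
import numpy as np

@preserve_shape
def flatten_and_scale(img, factor):
    return img.reshape(-1) * factor   # returns a flat array on purpose

img = np.ones((2, 3, 1), dtype=np.float32)
out = flatten_and_scale(img, 0.5)
print(out.shape)  # (2, 3, 1) -- restored by the decorator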
def evaluate(self, agentml, user=None):
"""
Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:return: Condition contents if the condition evaluates successfully, otherwise False
:rtype : tuple or bool
"""
self._log.debug('Evaluating conditional statement: {statement}'
.format(statement=' '.join(filter(None, [self.type, self.name, self.operator, self.value]))))
# Get the value of our key type
if self.type not in agentml.conditions:
self._log.error('Unknown condition type, "{type}", unable to evaluate condition statement'
.format(type=self.type))
return
key_value = agentml.conditions[self.type].get(agentml, user, self.name)
# Atomic comparisons
if self.operator is None and key_value:
return self.contents
if (self.operator == self.IS) and (key_value == self.value):
return self.contents
if (self.operator == self.IS_NOT) and (key_value != self.value):
return self.contents
# All remaining self.operators are numeric based, so key_value must contain a valid integer or float
try:
key_value = float(key_value)
value = float(self.value)
except (ValueError, TypeError):
return False
# Numeric comparisons
if (self.operator == self.GREATER_THAN) and (key_value > value):
return self.contents
if (self.operator == self.GREATER_THAN_OR_EQUAL) and (key_value >= value):
return self.contents
if (self.operator == self.LESS_THAN) and (key_value < value):
return self.contents
if (self.operator == self.LESS_THAN_OR_EQUAL) and (key_value <= value):
return self.contents
return False | Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:return: Condition contents if the condition evaluates successfully, otherwise False
:rtype: tuple or bool | Below is the instruction that describes the task:
### Input:
Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:return: Condition contents if the condition evaluates successfully, otherwise False
:rtype: tuple or bool
### Response:
def evaluate(self, agentml, user=None):
"""
Evaluate the conditional statement and return its contents if a successful evaluation takes place
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:return: Condition contents if the condition evaluates successfully, otherwise False
:rtype: tuple or bool
"""
self._log.debug('Evaluating conditional statement: {statement}'
.format(statement=' '.join(filter(None, [self.type, self.name, self.operator, self.value]))))
# Get the value of our key type
if self.type not in agentml.conditions:
self._log.error('Unknown condition type, "{type}", unable to evaluate condition statement'
.format(type=self.type))
return
key_value = agentml.conditions[self.type].get(agentml, user, self.name)
# Atomic comparisons
if self.operator is None and key_value:
return self.contents
if (self.operator == self.IS) and (key_value == self.value):
return self.contents
if (self.operator == self.IS_NOT) and (key_value != self.value):
return self.contents
# All remaining self.operators are numeric based, so key_value must contain a valid integer or float
try:
key_value = float(key_value)
value = float(self.value)
except (ValueError, TypeError):
return False
# Numeric comparisons
if (self.operator == self.GREATER_THAN) and (key_value > value):
return self.contents
if (self.operator == self.GREATER_THAN_OR_EQUAL) and (key_value >= value):
return self.contents
if (self.operator == self.LESS_THAN) and (key_value < value):
return self.contents
if (self.operator == self.LESS_THAN_OR_EQUAL) and (key_value <= value):
return self.contents
return False |
def add_flags(self, *flags):
"""Adds one or more flags to the query.
For example:
current-patch-set -> --current-patch-set
"""
if not isinstance(flags, (list, tuple)):
flags = [str(flags)]
self.extend(["--%s" % f for f in flags])
return self | Adds one or more flags to the query.
For example:
current-patch-set -> --current-patch-set | Below is the instruction that describes the task:
### Input:
Adds one or more flags to the query.
For example:
current-patch-set -> --current-patch-set
### Response:
def add_flags(self, *flags):
"""Adds one or more flags to the query.
For example:
current-patch-set -> --current-patch-set
"""
if not isinstance(flags, (list, tuple)):
flags = [str(flags)]
self.extend(["--%s" % f for f in flags])
return self |
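A usage sketch (assuming, as the code implies, that self is a list subclass accumulating a gerrit query command line; the wrapper class below is invented):

# Illustrative list subclass in the style the method above implies.
class Query(list):
    def add_flags(self, *flags):
        if not isinstance(flags, (list, tuple)):
            flags = [str(flags)]
        self.extend(["--%s" % f for f in flags])
        return self

q = Query(['gerrit', 'query']).add_flags('current-patch-set', 'all-approvals')
print(' '.join(q))  # gerrit query --current-patch-set --all-approvals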
def visit_ListComp(self, node: ast.ListComp) -> None:
"""Represent the list comprehension by dumping its source code."""
if node in self._recomputed_values:
value = self._recomputed_values[node]
text = self._atok.get_text(node)
self.reprs[text] = value
self.generic_visit(node=node) | Represent the list comprehension by dumping its source code. | Below is the instruction that describes the task:
### Input:
Represent the list comprehension by dumping its source code.
### Response:
def visit_ListComp(self, node: ast.ListComp) -> None:
"""Represent the list comprehension by dumping its source code."""
if node in self._recomputed_values:
value = self._recomputed_values[node]
text = self._atok.get_text(node)
self.reprs[text] = value
self.generic_visit(node=node) |
def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently) | Add a message with the ``DEBUG`` level. | Below is the instruction that describes the task:
### Input:
Add a message with the ``DEBUG`` level.
### Response:
def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently) |
def add_size_info (self):
"""Set size of URL content (if any)..
Should be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE) | Set size of URL content (if any).
Should be overridden in subclasses. | Below is the instruction that describes the task:
### Input:
Set size of URL content (if any).
Should be overridden in subclasses.
### Response:
def add_size_info (self):
"""Set size of URL content (if any)..
Should be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE) |
def strings(self, minSize = 4, maxSize = 1024):
"""
Extract ASCII strings from the process memory.
@type minSize: int
@param minSize: (Optional) Minimum size of the strings to search for.
@type maxSize: int
@param maxSize: (Optional) Maximum size of the strings to search for.
@rtype: iterator of tuple(int, int, str)
@return: Iterator of strings extracted from the process memory.
Each tuple contains the following:
- The memory address where the string was found.
- The size of the string.
- The string.
"""
return Search.extract_ascii_strings(self, minSize = minSize,
maxSize = maxSize) | Extract ASCII strings from the process memory.
@type minSize: int
@param minSize: (Optional) Minimum size of the strings to search for.
@type maxSize: int
@param maxSize: (Optional) Maximum size of the strings to search for.
@rtype: iterator of tuple(int, int, str)
@return: Iterator of strings extracted from the process memory.
Each tuple contains the following:
- The memory address where the string was found.
- The size of the string.
- The string. | Below is the instruction that describes the task:
### Input:
Extract ASCII strings from the process memory.
@type minSize: int
@param minSize: (Optional) Minimum size of the strings to search for.
@type maxSize: int
@param maxSize: (Optional) Maximum size of the strings to search for.
@rtype: iterator of tuple(int, int, str)
@return: Iterator of strings extracted from the process memory.
Each tuple contains the following:
- The memory address where the string was found.
- The size of the string.
- The string.
### Response:
def strings(self, minSize = 4, maxSize = 1024):
"""
Extract ASCII strings from the process memory.
@type minSize: int
@param minSize: (Optional) Minimum size of the strings to search for.
@type maxSize: int
@param maxSize: (Optional) Maximum size of the strings to search for.
@rtype: iterator of tuple(int, int, str)
@return: Iterator of strings extracted from the process memory.
Each tuple contains the following:
- The memory address where the string was found.
- The size of the string.
- The string.
"""
return Search.extract_ascii_strings(self, minSize = minSize,
maxSize = maxSize) |
def compile_settings(model_path, file_path, ignore_errors=False):
''' a method to compile configuration values from different sources
NOTE: method searches the environment variables, a local
configuration path and the default values for a jsonmodel
object for valid configuration values. if an environmental
variable or key inside a local config file matches the key
for a configuration setting declared in the jsonmodel schema,
its value will be added to the configuration file as long
as the value is model valid. SEE jsonmodel module.
NOTE: the order of assignment:
first: environment variable
second: configuration file
third: default value
fourth: empty value
NOTE: method is guaranteed to produce a full set of top-level keys
:param model_path: string with path to jsonmodel valid model data
:param file_path: string with path to local configuration file
:param ignore_errors: [optional] boolean to ignore any invalid values
:return: dictionary with settings
'''
# construct configuration model and default details
from jsonmodel.validators import jsonModel
config_model = jsonModel(load_settings(model_path))
default_details = config_model.ingest(**{})
# retrieve environmental variables and file details
environ_details = ingest_environ()
try:
file_details = load_settings(file_path)
except:
file_details = {}
# construct config details from (first) envvar, (second) file, (third) default
config_details = {}
for key in default_details.keys():
test_file = True
test_default = True
if key.upper() in environ_details.keys():
test_file = False
test_default = False
try:
config_details[key] = config_model.validate(environ_details[key.upper()], '.%s' % key)
except:
if ignore_errors:
test_file = True
test_default = True
else:
raise
if key in file_details.keys() and test_file:
test_default = False
try:
config_details[key] = config_model.validate(file_details[key], '.%s' % key)
except:
if ignore_errors:
test_default = True
else:
raise
if test_default:
config_details[key] = default_details[key]
return config_details | a method to compile configuration values from different sources
NOTE: method searches the environment variables, a local
configuration path and the default values for a jsonmodel
object for valid configuration values. if an environmental
variable or key inside a local config file matches the key
for a configuration setting declared in the jsonmodel schema,
its value will be added to the configuration file as long
as the value is model valid. SEE jsonmodel module.
NOTE: the order of assignment:
first: environment variable
second: configuration file
third: default value
fourth: empty value
NOTE: method is guaranteed to produce a full set of top-level keys
:param model_path: string with path to jsonmodel valid model data
:param file_path: string with path to local configuration file
:param ignore_errors: [optional] boolean to ignore any invalid values
:return: dictionary with settings | Below is the instruction that describes the task:
### Input:
a method to compile configuration values from different sources
NOTE: method searches the environment variables, a local
configuration path and the default values for a jsonmodel
object for valid configuration values. if an environmental
variable or key inside a local config file matches the key
for a configuration setting declared in the jsonmodel schema,
its value will be added to the configuration file as long
as the value is model valid. SEE jsonmodel module.
NOTE: the order of assignment:
first: environment variable
second: configuration file
third: default value
fourth: empty value
NOTE: method is guaranteed to produce a full set of top-level keys
:param model_path: string with path to jsonmodel valid model data
:param file_path: string with path to local configuration file
:param ignore_errors: [optional] boolean to ignore any invalid values
:return: dictionary with settings
### Response:
def compile_settings(model_path, file_path, ignore_errors=False):
''' a method to compile configuration values from different sources
NOTE: method searches the environment variables, a local
configuration path and the default values for a jsonmodel
object for valid configuration values. if an environmental
variable or key inside a local config file matches the key
for a configuration setting declared in the jsonmodel schema,
its value will be added to the configuration file as long
as the value is model valid. SEE jsonmodel module.
NOTE: the order of assignment:
first: environment variable
second: configuration file
third: default value
fourth: empty value
NOTE: method is guaranteed to produce a full set of top-level keys
:param model_path: string with path to jsonmodel valid model data
:param file_path: string with path to local configuration file
:param ignore_errors: [optional] boolean to ignore any invalid values
:return: dictionary with settings
'''
# construct configuration model and default details
from jsonmodel.validators import jsonModel
config_model = jsonModel(load_settings(model_path))
default_details = config_model.ingest(**{})
# retrieve environmental variables and file details
environ_details = ingest_environ()
try:
file_details = load_settings(file_path)
except:
file_details = {}
# construct config details from (first) envvar, (second) file, (third) default
config_details = {}
for key in default_details.keys():
test_file = True
test_default = True
if key.upper() in environ_details.keys():
test_file = False
test_default = False
try:
config_details[key] = config_model.validate(environ_details[key.upper()], '.%s' % key)
except:
if ignore_errors:
test_file = True
test_default = True
else:
raise
if key in file_details.keys() and test_file:
test_default = False
try:
config_details[key] = config_model.validate(file_details[key], '.%s' % key)
except:
if ignore_errors:
test_default = True
else:
raise
if test_default:
config_details[key] = default_details[key]
return config_details |
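The precedence order is the heart of this function; a stripped-down sketch with plain dicts (jsonmodel validation omitted, names invented) demonstrates it:

# Minimal precedence sketch: envvar, then file, then default (illustrative).
import os

defaults = {'port': 8080, 'debug': False}
file_cfg = {'port': 9090}
os.environ['PORT'] = '7070'   # example environment override

settings = {}
for key, default in defaults.items():
    if key.upper() in os.environ:
        settings[key] = os.environ[key.upper()]   # first: environment variable
    elif key in file_cfg:
        settings[key] = file_cfg[key]             # second: configuration file
    else:
        settings[key] = default                   # third: default value
print(settings)  # {'port': '7070', 'debug': False}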
def compact_references(basis_dict, ref_data):
"""
Creates a mapping of elements to reference keys
A list is returned, with each element of the list being a dictionary
with entries 'reference_info' containing data for (possibly) multiple references,
and 'elements' which is a list of element Z numbers
that those references apply to
Parameters
----------
basis_dict : dict
Dictionary containing basis set information
ref_data : dict
Dictionary containing all reference information
"""
element_refs = []
# Create a mapping of elements -> reference information
# (sort by Z first, keeping in mind Z is a string)
sorted_el = sorted(basis_dict['elements'].items(), key=lambda x: int(x[0]))
for el, eldata in sorted_el:
# elref is a list of dict
# dict is { 'reference_description': str, 'reference_keys': [keys] }
elref = eldata['references']
for x in element_refs:
if x['reference_info'] == elref:
x['elements'].append(el)
break
else:
element_refs.append({'reference_info': elref, 'elements': [el]})
for item in element_refs:
# Loop over a list of dictionaries for this group of elements and add the
# actual reference data
# Since we store the keys with the data, we don't need it anymore
for elref in item['reference_info']:
elref['reference_data'] = [(k, ref_data[k]) for k in elref['reference_keys']]
elref.pop('reference_keys')
return element_refs | Creates a mapping of elements to reference keys
A list is returned, with each element of the list being a dictionary
with entries 'reference_info' containing data for (possibly) multiple references,
and 'elements' which is a list of element Z numbers
that those references apply to
Parameters
----------
basis_dict : dict
Dictionary containing basis set information
ref_data : dict
Dictionary containing all reference information | Below is the instruction that describes the task:
### Input:
Creates a mapping of elements to reference keys
A list is returned, with each element of the list being a dictionary
with entries 'reference_info' containing data for (possibly) multiple references,
and 'elements' which is a list of element Z numbers
that those references apply to
Parameters
----------
basis_dict : dict
Dictionary containing basis set information
ref_data : dict
Dictionary containing all reference information
### Response:
def compact_references(basis_dict, ref_data):
"""
Creates a mapping of elements to reference keys
A list is returned, with each element of the list being a dictionary
with entries 'reference_info' containing data for (possibly) multiple references,
and 'elements' which is a list of element Z numbers
that those references apply to
Parameters
----------
basis_dict : dict
Dictionary containing basis set information
ref_data : dict
Dictionary containing all reference information
"""
element_refs = []
# Create a mapping of elements -> reference information
# (sort by Z first, keeping in mind Z is a string)
sorted_el = sorted(basis_dict['elements'].items(), key=lambda x: int(x[0]))
for el, eldata in sorted_el:
# elref is a list of dict
# dict is { 'reference_description': str, 'reference_keys': [keys] }
elref = eldata['references']
for x in element_refs:
if x['reference_info'] == elref:
x['elements'].append(el)
break
else:
element_refs.append({'reference_info': elref, 'elements': [el]})
for item in element_refs:
# Loop over a list of dictionaries for this group of elements and add the
# actual reference data
# Since we store the keys with the data, we don't need it anymore
for elref in item['reference_info']:
elref['reference_data'] = [(k, ref_data[k]) for k in elref['reference_keys']]
elref.pop('reference_keys')
return element_refs |
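A small worked example of the grouping done by compact_references; the basis and reference data below are made up:
basis_dict = {'elements': {
    '1': {'references': [{'reference_description': 'orig', 'reference_keys': ['r1']}]},
    '2': {'references': [{'reference_description': 'orig', 'reference_keys': ['r1']}]},
    '3': {'references': [{'reference_description': 'other', 'reference_keys': ['r2']}]},
}}
ref_data = {'r1': {'title': 'Paper A'}, 'r2': {'title': 'Paper B'}}
groups = compact_references(basis_dict, ref_data)
# groups[0]['elements'] == ['1', '2']   -- identical reference info is merged
# groups[0]['reference_info'][0]['reference_data'] == [('r1', {'title': 'Paper A'})]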
def add_toolkit(topology, location):
"""Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo) | Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory. | Below is the instruction that describes the task:
### Input:
Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
### Response:
def add_toolkit(topology, location):
"""Add an SPL toolkit to a topology.
Args:
topology(Topology): Topology to include toolkit in.
location(str): Location of the toolkit directory.
"""
import streamsx.topology.topology
assert isinstance(topology, streamsx.topology.topology.Topology)
tkinfo = dict()
tkinfo['root'] = os.path.abspath(location)
topology.graph._spl_toolkits.append(tkinfo) |
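Typical use of add_toolkit, with a hypothetical toolkit path:
from streamsx.topology.topology import Topology

topo = Topology('my_app')
add_toolkit(topo, '/opt/toolkits/com.example.mytk')   # path is resolved via os.path.abspath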
def remove_old(self, max_log_time):
"""Remove all logs which are older than the specified time."""
files = glob.glob('{}/queue-*'.format(self.log_dir))
files = list(map(lambda x: os.path.basename(x), files))
for log_file in files:
# Get time stamp from filename
name = os.path.splitext(log_file)[0]
timestamp = name.split('-', maxsplit=1)[1]
# Get datetime from time stamp
time = datetime.strptime(timestamp, '%Y%m%d-%H%M')
now = datetime.now()
# Get total delta in seconds
delta = now - time
seconds = delta.total_seconds()
# Delete log file, if the delta is bigger than the specified log time
if seconds > int(max_log_time):
log_filePath = os.path.join(self.log_dir, log_file)
                os.remove(log_filePath) | Remove all logs which are older than the specified time. | Below is the instruction that describes the task:
### Input:
Remove all logs which are older than the specified time.
### Response:
def remove_old(self, max_log_time):
"""Remove all logs which are older than the specified time."""
files = glob.glob('{}/queue-*'.format(self.log_dir))
files = list(map(lambda x: os.path.basename(x), files))
for log_file in files:
# Get time stamp from filename
name = os.path.splitext(log_file)[0]
timestamp = name.split('-', maxsplit=1)[1]
# Get datetime from time stamp
time = datetime.strptime(timestamp, '%Y%m%d-%H%M')
now = datetime.now()
# Get total delta in seconds
delta = now - time
seconds = delta.total_seconds()
# Delete log file, if the delta is bigger than the specified log time
if seconds > int(max_log_time):
log_filePath = os.path.join(self.log_dir, log_file)
os.remove(log_filePath) |
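The age check in remove_old hinges on the queue-YYYYMMDD-HHMM filename convention; the parsing step in isolation:
from datetime import datetime

name = 'queue-20240131-0930'                       # basename without extension
stamp = name.split('-', maxsplit=1)[1]             # '20240131-0930'
written = datetime.strptime(stamp, '%Y%m%d-%H%M')
age_seconds = (datetime.now() - written).total_seconds()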
def render(self, form_id=None):
'''
        Dynamically generate the HTML output
'''
page_bar = self.page_bars.get(int(self.page / 10))
if page_bar is None:
return ''
_htmls = []
if form_id:
_htmls.append(u'''<script>
function goto_page(form_id,page){
var form=document.getElementById(form_id);
var page_input = document.createElement("input");
page_input.type="hidden";
page_input.name="page";
page_input.value=page;
form.appendChild(page_input);
form.submit();
}</script>''')
_htmls.append('<ul class="pagination pull-right">')
_htmls.append(u'\t<li class="disabled"><a href="#">查询记录数 %s</a></li>' % self.total)
current_start = self.page
if current_start == 1:
_htmls.append(u'\t<li class="disabled"><a href="#">首页</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">← 上一页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">首页</a></li>' % self.url_func(1,form_id))
_htmls.append(u'\t<li><a href="%s">← 上一页</a></li>' % self.url_func(current_start - 1,form_id))
for page in page_bar:
_page_url = self.url_func(page,form_id)
if page == self.page:
_htmls.append(u'\t<li class="active"><span>%s <span class="sr-only">{current}</span></span></li>' % page)
else:
_htmls.append(u'\t<li><a href="%s">%s</a></li>' % (_page_url, page))
current_end = self.page
if current_end == self.page_num:
_htmls.append(u'\t<li class="disabled"><a href="#">下一页 →</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">尾页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">下一页 →</a></li>' % self.url_func(current_end + 1,form_id))
_htmls.append(u'\t<li><a href="%s">尾页</a></li>' % self.url_func(self.page_num,form_id))
_htmls.append('</ul>')
        return '\r\n'.join(_htmls) | Dynamically generate the HTML output | Below is the instruction that describes the task:
### Input:
Dynamically generate the HTML output
### Response:
def render(self, form_id=None):
'''
        Dynamically generate the HTML output
'''
page_bar = self.page_bars.get(int(self.page / 10))
if page_bar is None:
return ''
_htmls = []
if form_id:
_htmls.append(u'''<script>
function goto_page(form_id,page){
var form=document.getElementById(form_id);
var page_input = document.createElement("input");
page_input.type="hidden";
page_input.name="page";
page_input.value=page;
form.appendChild(page_input);
form.submit();
}</script>''')
_htmls.append('<ul class="pagination pull-right">')
_htmls.append(u'\t<li class="disabled"><a href="#">查询记录数 %s</a></li>' % self.total)
current_start = self.page
if current_start == 1:
_htmls.append(u'\t<li class="disabled"><a href="#">首页</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">← 上一页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">首页</a></li>' % self.url_func(1,form_id))
_htmls.append(u'\t<li><a href="%s">← 上一页</a></li>' % self.url_func(current_start - 1,form_id))
for page in page_bar:
_page_url = self.url_func(page,form_id)
if page == self.page:
_htmls.append(u'\t<li class="active"><span>%s <span class="sr-only">{current}</span></span></li>' % page)
else:
_htmls.append(u'\t<li><a href="%s">%s</a></li>' % (_page_url, page))
current_end = self.page
if current_end == self.page_num:
_htmls.append(u'\t<li class="disabled"><a href="#">下一页 →</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">尾页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">下一页 →</a></li>' % self.url_func(current_end + 1,form_id))
_htmls.append(u'\t<li><a href="%s">尾页</a></li>' % self.url_func(self.page_num,form_id))
_htmls.append('</ul>')
return '\r\n'.join(_htmls) |
def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
"""
if self.is_placeholder:
return MSO_SHAPE_TYPE.PLACEHOLDER
if self._sp.has_custom_geometry:
return MSO_SHAPE_TYPE.FREEFORM
if self._sp.is_autoshape:
return MSO_SHAPE_TYPE.AUTO_SHAPE
if self._sp.is_textbox:
return MSO_SHAPE_TYPE.TEXT_BOX
msg = 'Shape instance of unrecognized shape type'
raise NotImplementedError(msg) | Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``. | Below is the instruction that describes the task:
### Input:
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
### Response:
def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.TEXT_BOX``.
"""
if self.is_placeholder:
return MSO_SHAPE_TYPE.PLACEHOLDER
if self._sp.has_custom_geometry:
return MSO_SHAPE_TYPE.FREEFORM
if self._sp.is_autoshape:
return MSO_SHAPE_TYPE.AUTO_SHAPE
if self._sp.is_textbox:
return MSO_SHAPE_TYPE.TEXT_BOX
msg = 'Shape instance of unrecognized shape type'
raise NotImplementedError(msg) |
def add_flag(*args, **kwargs):
"""
    define a single flag:
    add_flag(flagname, default_value, help='', **kwargs)
    e.g. add_flag('gpu', 1, help='CUDA_VISIBLE_DEVICES')
    define several flags at once:
    add_flag([(flagname, default_value, help), ...])
    or define flags as keyword arguments, without a help message:
    add_flag(flagname=default_value, ...)
:param args:
:param kwargs:
:return:
"""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
for a in args[0]:
flag.add_flag(*a)
elif args:
flag.add_flag(*args, **kwargs)
else:
for f, v in kwargs.items():
            flag.add_flag(f, v) | define a single flag:
add_flag(flagname, default_value, help='', **kwargs)
e.g. add_flag('gpu', 1, help='CUDA_VISIBLE_DEVICES')
define several flags at once:
add_flag([(flagname, default_value, help), ...])
or define flags as keyword arguments, without a help message:
add_flag(flagname=default_value, ...)
:param args:
:param kwargs:
:return: | Below is the instruction that describes the task:
### Input:
define a single flag:
add_flag(flagname, default_value, help='', **kwargs)
e.g. add_flag('gpu', 1, help='CUDA_VISIBLE_DEVICES')
define several flags at once:
add_flag([(flagname, default_value, help), ...])
or define flags as keyword arguments, without a help message:
add_flag(flagname=default_value, ...)
:param args:
:param kwargs:
:return:
### Response:
def add_flag(*args, **kwargs):
"""
    define a single flag:
    add_flag(flagname, default_value, help='', **kwargs)
    e.g. add_flag('gpu', 1, help='CUDA_VISIBLE_DEVICES')
    define several flags at once:
    add_flag([(flagname, default_value, help), ...])
    or define flags as keyword arguments, without a help message:
    add_flag(flagname=default_value, ...)
:param args:
:param kwargs:
:return:
"""
if len(args) == 1 and isinstance(args[0], (list, tuple)):
for a in args[0]:
flag.add_flag(*a)
elif args:
flag.add_flag(*args, **kwargs)
else:
for f, v in kwargs.items():
flag.add_flag(f, v) |
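The three call styles accepted by add_flag, side by side:
add_flag('gpu', 1, help='CUDA_VISIBLE_DEVICES')                      # single flag
add_flag([('lr', 0.01, 'learning rate'), ('epochs', 10, 'epochs')])  # list of tuples
add_flag(batch_size=32, shuffle=True)                                # keywords, no help text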
def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to child up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
"""
return super(AdjointLayout, self).relabel(label=label, group=group, depth=depth) | Clone object and apply new group and/or label.
Applies relabeling to child up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object | Below is the instruction that describes the task:
### Input:
Clone object and apply new group and/or label.
Applies relabeling to child up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
### Response:
def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to child up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
"""
return super(AdjointLayout, self).relabel(label=label, group=group, depth=depth) |
def getMaximinScores(profile):
"""
Returns a dictionary that associates integer representations of each candidate with their
    maximin score.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates. Ties are
# allowed however.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported election type")
exit()
wmgMap = profile.getWmg()
    # Initialize each maximin score as infinity.
maximinscores = {}
for cand in wmgMap.keys():
maximinscores[cand] = float("inf")
    # For each pair of candidates, update each one's maximin score with the number of votes
    # it receives against the other.
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
maximinscores[cand1] = min(maximinscores[cand1], wmgMap[cand1][cand2])
maximinscores[cand2] = min(maximinscores[cand2], wmgMap[cand2][cand1])
return maximinscores | Returns a dictionary that associates integer representations of each candidate with their
maximin score.
:ivar Profile profile: A Profile object that represents an election profile. | Below is the instruction that describes the task:
### Input:
Returns a dictionary that associates integer representations of each candidate with their
maximin score.
:ivar Profile profile: A Profile object that represents an election profile.
### Response:
def getMaximinScores(profile):
"""
Returns a dictionary that associates integer representations of each candidate with their
    maximin score.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates. Ties are
# allowed however.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported election type")
exit()
wmgMap = profile.getWmg()
    # Initialize each maximin score as infinity.
maximinscores = {}
for cand in wmgMap.keys():
maximinscores[cand] = float("inf")
    # For each pair of candidates, update each one's maximin score with the number of votes
    # it receives against the other.
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
maximinscores[cand1] = min(maximinscores[cand1], wmgMap[cand1][cand2])
maximinscores[cand2] = min(maximinscores[cand2], wmgMap[cand2][cand1])
return maximinscores |
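A worked example of the maximin rule implemented above, on a hand-made weighted majority graph (the pairwise counts are invented):
import itertools

# wmg[a][b] = number of voters preferring candidate a over candidate b
wmg = {1: {2: 4, 3: 2}, 2: {1: 3, 3: 5}, 3: {1: 5, 2: 2}}
scores = {c: float('inf') for c in wmg}
for c1, c2 in itertools.combinations(wmg, 2):
    if c2 in wmg[c1]:
        scores[c1] = min(scores[c1], wmg[c1][c2])
        scores[c2] = min(scores[c2], wmg[c2][c1])
print(scores)   # {1: 2, 2: 3, 3: 2} -> candidate 2 has the best worst-case pairwise support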
def login(self):
"""Login"""
# perform the request
data = {'apikey': self.apikey, 'username': self.username, 'password': self.password}
r = self.session.post(self.base_url + '/login', json=data)
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
        self.token_date = datetime.utcnow() | Login | Below is the instruction that describes the task:
### Input:
Login
### Response:
def login(self):
"""Login"""
# perform the request
data = {'apikey': self.apikey, 'username': self.username, 'password': self.password}
r = self.session.post(self.base_url + '/login', json=data)
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
self.token_date = datetime.utcnow() |
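After login succeeds, every later request on the same session carries the bearer token; a sketch, where the client instance and the '/profile' endpoint are hypothetical:
client.login()
r = client.session.get(client.base_url + '/profile')   # Authorization: Bearer <token> sent automatically
r.raise_for_status()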
def get_data(self, field, function=None, default=None):
"""
Get data from the striplog.
"""
f = function or utils.null
data = []
for iv in self:
d = iv.data.get(field)
if d is None:
if default is not None:
d = default
else:
d = np.nan
data.append(f(d))
        return np.array(data) | Get data from the striplog. | Below is the instruction that describes the task:
### Input:
Get data from the striplog.
### Response:
def get_data(self, field, function=None, default=None):
"""
Get data from the striplog.
"""
f = function or utils.null
data = []
for iv in self:
d = iv.data.get(field)
if d is None:
if default is not None:
d = default
else:
d = np.nan
data.append(f(d))
return np.array(data) |
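A hedged usage sketch of get_data, assuming a striplog instance whose intervals carry a 'porosity' field:
por = striplog.get_data('porosity', default=0.0)                 # missing values become 0.0 instead of NaN
pct = striplog.get_data('porosity', function=lambda v: v * 100)  # transform applied per interval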
def _populate(self):
"""
**Purpose**: Populate the ResourceManager class with the validated
resource description
"""
if self._validated:
self._prof.prof('populating rmgr', uid=self._uid)
self._logger.debug('Populating resource manager object')
self._resource = self._resource_desc['resource']
self._walltime = self._resource_desc['walltime']
self._cpus = self._resource_desc['cpus']
self._gpus = self._resource_desc.get('gpus', 0)
self._project = self._resource_desc.get('project', None)
self._access_schema = self._resource_desc.get('access_schema', None)
self._queue = self._resource_desc.get('queue', None)
self._logger.debug('Resource manager population successful')
self._prof.prof('rmgr populated', uid=self._uid)
else:
raise EnTKError('Resource description not validated') | **Purpose**: Populate the ResourceManager class with the validated
resource description | Below is the instruction that describes the task:
### Input:
**Purpose**: Populate the ResourceManager class with the validated
resource description
### Response:
def _populate(self):
"""
**Purpose**: Populate the ResourceManager class with the validated
resource description
"""
if self._validated:
self._prof.prof('populating rmgr', uid=self._uid)
self._logger.debug('Populating resource manager object')
self._resource = self._resource_desc['resource']
self._walltime = self._resource_desc['walltime']
self._cpus = self._resource_desc['cpus']
self._gpus = self._resource_desc.get('gpus', 0)
self._project = self._resource_desc.get('project', None)
self._access_schema = self._resource_desc.get('access_schema', None)
self._queue = self._resource_desc.get('queue', None)
self._logger.debug('Resource manager population successful')
self._prof.prof('rmgr populated', uid=self._uid)
else:
raise EnTKError('Resource description not validated') |
def roc(self):
"""ROC plot
"""
        return plot.roc(self.y_true, self.y_score, ax=_gen_ax()) | ROC plot | Below is the instruction that describes the task:
### Input:
ROC plot
### Response:
def roc(self):
"""ROC plot
"""
return plot.roc(self.y_true, self.y_score, ax=_gen_ax()) |
def add(self, value):
"""
Add a value to the buffer.
"""
ind = int(self._ind % self.shape)
self._pos = self._ind % self.shape
self._values[ind] = value
if self._ind < self.shape:
self._ind += 1 # fast fill
else:
self._ind += self._splitValue
self._splitPos += self._splitValue
            self._cached = False | Add a value to the buffer. | Below is the instruction that describes the task:
### Input:
Add a value to the buffer.
### Response:
def add(self, value):
"""
Add a value to the buffer.
"""
ind = int(self._ind % self.shape)
self._pos = self._ind % self.shape
self._values[ind] = value
if self._ind < self.shape:
self._ind += 1 # fast fill
else:
self._ind += self._splitValue
self._splitPos += self._splitValue
self._cached = False |
def get_logs(self, container_id):
""" Return the full stdout/stderr of a container"""
stdout = self._docker.containers.get(container_id).logs(stdout=True, stderr=False).decode('utf8')
stderr = self._docker.containers.get(container_id).logs(stdout=False, stderr=True).decode('utf8')
        return stdout, stderr | Return the full stdout/stderr of a container | Below is the instruction that describes the task:
### Input:
Return the full stdout/stderr of a container
### Response:
def get_logs(self, container_id):
""" Return the full stdout/stderr of a container"""
stdout = self._docker.containers.get(container_id).logs(stdout=True, stderr=False).decode('utf8')
stderr = self._docker.containers.get(container_id).logs(stdout=False, stderr=True).decode('utf8')
return stdout, stderr |
def browse(self):
"""
Save response in temporary file and open it in GUI browser.
"""
_, path = tempfile.mkstemp()
self.save(path)
        webbrowser.open('file://' + path) | Save response in temporary file and open it in GUI browser. | Below is the instruction that describes the task:
### Input:
Save response in temporary file and open it in GUI browser.
### Response:
def browse(self):
"""
Save response in temporary file and open it in GUI browser.
"""
_, path = tempfile.mkstemp()
self.save(path)
webbrowser.open('file://' + path) |
def _remove_exploration(self):
""" Called if trajectory is expanded, deletes all explored parameters from disk """
for param in self._explored_parameters.values():
if param._stored:
try:
self.f_delete_item(param)
except Exception:
self._logger.exception('Could not delete expanded parameter `%s` '
                                       'from disk.' % param.v_full_name) | Called if trajectory is expanded, deletes all explored parameters from disk | Below is the instruction that describes the task:
### Input:
Called if trajectory is expanded, deletes all explored parameters from disk
### Response:
def _remove_exploration(self):
""" Called if trajectory is expanded, deletes all explored parameters from disk """
for param in self._explored_parameters.values():
if param._stored:
try:
self.f_delete_item(param)
except Exception:
self._logger.exception('Could not delete expanded parameter `%s` '
'from disk.' % param.v_full_name) |
def clip_datetime(dt, tz=DEFAULT_TZ, is_dst=None):
"""Limit a datetime to a valid range for datetime, datetime64, and Timestamp objects
>>> from datetime import timedelta
>>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
True
>>> MAX_TIMESTAMP
Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC')
"""
if isinstance(dt, datetime.datetime):
# TODO: this gives up a day of datetime range due to assumptions about timezone
# make MIN/MAX naive and replace dt.replace(tz=None) before comparison
# set it back when done
dt = make_tz_aware(dt, tz=tz, is_dst=is_dst)
try:
return pd.Timestamp(dt)
except (ValueError, AttributeError):
pass
if dt > MAX_DATETIME:
return MAX_TIMESTAMP
elif dt < MIN_DATETIME:
return MIN_TIMESTAMP
    # pd.Timestamp() failed for an in-range value; fall back to NaT
    return NAT
>>> from datetime import timedelta
>>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
True
>>> MAX_TIMESTAMP
Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC') | Below is the instruction that describes the task:
### Input:
Limit a datetime to a valid range for datetime, datetime64, and Timestamp objects
>>> from datetime import timedelta
>>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
True
>>> MAX_TIMESTAMP
Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC')
### Response:
def clip_datetime(dt, tz=DEFAULT_TZ, is_dst=None):
"""Limit a datetime to a valid range for datetime, datetime64, and Timestamp objects
>>> from datetime import timedelta
>>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
True
>>> MAX_TIMESTAMP
Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC')
"""
if isinstance(dt, datetime.datetime):
# TODO: this gives up a day of datetime range due to assumptions about timezone
# make MIN/MAX naive and replace dt.replace(tz=None) before comparison
# set it back when done
dt = make_tz_aware(dt, tz=tz, is_dst=is_dst)
try:
return pd.Timestamp(dt)
except (ValueError, AttributeError):
pass
if dt > MAX_DATETIME:
return MAX_TIMESTAMP
elif dt < MIN_DATETIME:
return MIN_TIMESTAMP
    # pd.Timestamp() failed for an in-range value; fall back to NaT
    return NAT
def remove_prefix(self, prefix):
"""Removes prefix from this set. This is a no-op if the prefix
doesn't exist in it.
"""
if prefix not in self.__prefix_map:
return
ni = self.__lookup_prefix(prefix)
ni.prefixes.discard(prefix)
del self.__prefix_map[prefix]
# If we removed the preferred prefix, find a new one.
if ni.preferred_prefix == prefix:
ni.preferred_prefix = next(iter(ni.prefixes), None) | Removes prefix from this set. This is a no-op if the prefix
doesn't exist in it. | Below is the instruction that describes the task:
### Input:
Removes prefix from this set. This is a no-op if the prefix
doesn't exist in it.
### Response:
def remove_prefix(self, prefix):
"""Removes prefix from this set. This is a no-op if the prefix
doesn't exist in it.
"""
if prefix not in self.__prefix_map:
return
ni = self.__lookup_prefix(prefix)
ni.prefixes.discard(prefix)
del self.__prefix_map[prefix]
# If we removed the preferred prefix, find a new one.
if ni.preferred_prefix == prefix:
ni.preferred_prefix = next(iter(ni.prefixes), None) |
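The preferred-prefix fallback on the last line relies on next() with a default; in isolation:
prefixes = {'ns1', 'ns2'}
next(iter(prefixes), None)   # some remaining prefix
next(iter(set()), None)      # None once nothing is left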
def cell_start(self, cell, cell_index=None, **kwargs):
"""
Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path.
"""
if self.log_output:
            cell_num = cell_index + 1 if cell_index is not None else ''
            logger.info('Executing Cell {:-<40}'.format(cell_num))
cell.metadata.papermill['start_time'] = self.now().isoformat()
cell.metadata.papermill["status"] = self.RUNNING
cell.metadata.papermill['exception'] = False
self.save() | Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path. | Below is the instruction that describes the task:
### Input:
Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path.
### Response:
def cell_start(self, cell, cell_index=None, **kwargs):
"""
Set and save a cell's start state.
Optionally called by engines during execution to initialize the
metadata for a cell and save the notebook to the output path.
"""
if self.log_output:
            cell_num = cell_index + 1 if cell_index is not None else ''
            logger.info('Executing Cell {:-<40}'.format(cell_num))
cell.metadata.papermill['start_time'] = self.now().isoformat()
cell.metadata.papermill["status"] = self.RUNNING
cell.metadata.papermill['exception'] = False
self.save() |
def _run_container(self, run_command_instance, callback):
""" this is internal method """
tmpfile = os.path.join(get_backend_tmpdir(), random_tmp_filename())
# the cid file must not exist
run_command_instance.options += ["--cidfile=%s" % tmpfile]
logger.debug("podman command: %s" % run_command_instance)
response = callback()
# and we need to wait now; inotify would be better but is way more complicated and
# adds dependency
Probe(timeout=10, count=10, pause=0.1, fnc=lambda: self._file_not_empty(tmpfile)).run()
with open(tmpfile, 'r') as fd:
container_id = fd.read()
        return container_id, response | this is internal method | Below is the instruction that describes the task:
### Input:
this is internal method
### Response:
def _run_container(self, run_command_instance, callback):
""" this is internal method """
tmpfile = os.path.join(get_backend_tmpdir(), random_tmp_filename())
# the cid file must not exist
run_command_instance.options += ["--cidfile=%s" % tmpfile]
logger.debug("podman command: %s" % run_command_instance)
response = callback()
# and we need to wait now; inotify would be better but is way more complicated and
# adds dependency
Probe(timeout=10, count=10, pause=0.1, fnc=lambda: self._file_not_empty(tmpfile)).run()
with open(tmpfile, 'r') as fd:
container_id = fd.read()
return container_id, response |
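For context, the --cidfile handshake this helper polls on can be reproduced from a shell (image and path are examples):
# podman writes the new container's id into the file named by --cidfile;
# the Probe above simply waits until that file is non-empty:
#
#   podman run --cidfile /tmp/cid.txt registry.fedoraproject.org/fedora sleep 5
#   cat /tmp/cid.txt    # -> the container id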
def _stopOnFailure(self, f):
"utility method to stop the service when a failure occurs"
if self.running:
d = defer.maybeDeferred(self.stopService)
d.addErrback(log.err, 'while stopping broken HgPoller service')
        return f | utility method to stop the service when a failure occurs | Below is the instruction that describes the task:
### Input:
utility method to stop the service when a failure occurs
### Response:
def _stopOnFailure(self, f):
"utility method to stop the service when a failure occurs"
if self.running:
d = defer.maybeDeferred(self.stopService)
d.addErrback(log.err, 'while stopping broken HgPoller service')
return f |
def boolean(self, field, value=None, **validations):
"""*Asserts the field as JSON boolean.*
The field consists of parts separated by spaces, the parts being
object property names or array indices starting from 0, and the root
being the instance created by the last request (see `Output` for it).
For asserting deeply nested properties or multiple objects at once,
[http://goessner.net/articles/JsonPath|JSONPath] can be used with
[https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body.
*Value*
If given, the property value is validated in addition to the type.
*Validations*
The JSON Schema validation keywords
[https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types]
can be used. Validations are optional but update the schema of
the property (more accurate) if given.
`Output Schema` can be used for the current schema of the field.
The keyword will fail if any of the given validations fail.
Given validations can be skipped altogether by adding ``skip=true``.
        When skipped, the schema is updated but the validations are not run.
        Skip is intended mainly for debugging the updated schema before aborting.
*Examples*
| `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 |
| `Boolean` | response body verified_email | | | | # value is optional |
| `Boolean` | response body verified_email | true |
| `Boolean` | response body verified_email | ${True} | | | # same as above |
| `Boolean` | $.verified_email | true | | | # JSONPath alternative |
| `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass |
"""
values = []
for found in self._find_by_field(field):
reality = found["reality"]
schema = {"type": "boolean"}
if value is not None:
schema["enum"] = [self._input_boolean(value)]
elif self._should_add_examples():
schema["examples"] = [reality]
skip = self._input_boolean(validations.pop("skip", False))
if not skip:
self._assert_schema(schema, reality)
values.append(reality)
return values | *Asserts the field as JSON boolean.*
The field consists of parts separated by spaces, the parts being
object property names or array indices starting from 0, and the root
being the instance created by the last request (see `Output` for it).
For asserting deeply nested properties or multiple objects at once,
[http://goessner.net/articles/JsonPath|JSONPath] can be used with
[https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body.
*Value*
If given, the property value is validated in addition to the type.
*Validations*
The JSON Schema validation keywords
[https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types]
can be used. Validations are optional but update the schema of
the property (more accurate) if given.
`Output Schema` can be used for the current schema of the field.
The keyword will fail if any of the given validations fail.
Given validations can be skipped altogether by adding ``skip=true``.
When skipped, the schema is updated but the validations are not run.
Skip is intended mainly for debugging the updated schema before aborting.
*Examples*
| `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 |
| `Boolean` | response body verified_email | | | | # value is optional |
| `Boolean` | response body verified_email | true |
| `Boolean` | response body verified_email | ${True} | | | # same as above |
| `Boolean` | $.verified_email | true | | | # JSONPath alternative |
| `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass | | Below is the instruction that describes the task:
### Input:
*Asserts the field as JSON boolean.*
The field consists of parts separated by spaces, the parts being
object property names or array indices starting from 0, and the root
being the instance created by the last request (see `Output` for it).
For asserting deeply nested properties or multiple objects at once,
[http://goessner.net/articles/JsonPath|JSONPath] can be used with
[https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body.
*Value*
If given, the property value is validated in addition to the type.
*Validations*
The JSON Schema validation keywords
[https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types]
can be used. Validations are optional but update the schema of
the property (more accurate) if given.
`Output Schema` can be used for the current schema of the field.
The keyword will fail if any of the given validations fail.
Given validations can be skipped altogether by adding ``skip=true``.
When skipped, the schema is updated but the validations are not run.
Skip is intended mainly for debugging the updated schema before aborting.
*Examples*
| `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 |
| `Boolean` | response body verified_email | | | | # value is optional |
| `Boolean` | response body verified_email | true |
| `Boolean` | response body verified_email | ${True} | | | # same as above |
| `Boolean` | $.verified_email | true | | | # JSONPath alternative |
| `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass |
### Response:
def boolean(self, field, value=None, **validations):
"""*Asserts the field as JSON boolean.*
The field consists of parts separated by spaces, the parts being
object property names or array indices starting from 0, and the root
being the instance created by the last request (see `Output` for it).
For asserting deeply nested properties or multiple objects at once,
[http://goessner.net/articles/JsonPath|JSONPath] can be used with
[https://github.com/h2non/jsonpath-ng#jsonpath-syntax|supported JSONPath expressions], the root being the response body.
*Value*
If given, the property value is validated in addition to the type.
*Validations*
The JSON Schema validation keywords
[https://json-schema.org/understanding-json-schema/reference/generic.html|common for all types]
can be used. Validations are optional but update the schema of
the property (more accurate) if given.
`Output Schema` can be used for the current schema of the field.
The keyword will fail if any of the given validations fail.
Given validations can be skipped altogether by adding ``skip=true``.
        When skipped, the schema is updated but the validations are not run.
        Skip is intended mainly for debugging the updated schema before aborting.
*Examples*
| `PUT` | /users/1 | { "verified_email": true } | | | # https://jsonplaceholder.typicode.com/users/1 |
| `Boolean` | response body verified_email | | | | # value is optional |
| `Boolean` | response body verified_email | true |
| `Boolean` | response body verified_email | ${True} | | | # same as above |
| `Boolean` | $.verified_email | true | | | # JSONPath alternative |
| `Boolean` | $.verified_email | true | enum=[1, "1"] | skip=true | # would pass |
"""
values = []
for found in self._find_by_field(field):
reality = found["reality"]
schema = {"type": "boolean"}
if value is not None:
schema["enum"] = [self._input_boolean(value)]
elif self._should_add_examples():
schema["examples"] = [reality]
skip = self._input_boolean(validations.pop("skip", False))
if not skip:
self._assert_schema(schema, reality)
values.append(reality)
return values |
def get_package_metadata(self):
"""
        Gets metadata relative to the country package the tax and benefit system is built from.
:returns: Country package metadata
:rtype: dict
Example:
>>> tax_benefit_system.get_package_metadata()
>>> {
>>> 'location': '/path/to/dir/containing/package',
>>> 'name': 'openfisca-france',
>>> 'repository_url': 'https://github.com/openfisca/openfisca-france',
>>> 'version': '17.2.0'
>>> }
"""
# Handle reforms
if self.baseline:
return self.baseline.get_package_metadata()
fallback_metadata = {
'name': self.__class__.__name__,
'version': '',
'repository_url': '',
'location': '',
}
module = inspect.getmodule(self)
if not module.__package__:
return fallback_metadata
package_name = module.__package__.split('.')[0]
try:
distribution = pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
return fallback_metadata
location = inspect.getsourcefile(module).split(package_name)[0].rstrip('/')
home_page_metadatas = [
metadata.split(':', 1)[1].strip(' ')
for metadata in distribution._get_metadata(distribution.PKG_INFO) if 'Home-page' in metadata
]
repository_url = home_page_metadatas[0] if home_page_metadatas else ''
return {
'name': distribution.key,
'version': distribution.version,
'repository_url': repository_url,
'location': location,
    } | Gets metadata relative to the country package the tax and benefit system is built from.
:returns: Country package metadata
:rtype: dict
Example:
>>> tax_benefit_system.get_package_metadata()
>>> {
>>> 'location': '/path/to/dir/containing/package',
>>> 'name': 'openfisca-france',
>>> 'repository_url': 'https://github.com/openfisca/openfisca-france',
>>> 'version': '17.2.0'
>>> } | Below is the instruction that describes the task:
### Input:
Gets metadata relative to the country package the tax and benefit system is built from.
:returns: Country package metadata
:rtype: dict
Example:
>>> tax_benefit_system.get_package_metadata()
>>> {
>>> 'location': '/path/to/dir/containing/package',
>>> 'name': 'openfisca-france',
>>> 'repository_url': 'https://github.com/openfisca/openfisca-france',
>>> 'version': '17.2.0'
>>> }
### Response:
def get_package_metadata(self):
"""
        Gets metadata relative to the country package the tax and benefit system is built from.
:returns: Country package metadata
:rtype: dict
Example:
>>> tax_benefit_system.get_package_metadata()
>>> {
>>> 'location': '/path/to/dir/containing/package',
>>> 'name': 'openfisca-france',
>>> 'repository_url': 'https://github.com/openfisca/openfisca-france',
>>> 'version': '17.2.0'
>>> }
"""
# Handle reforms
if self.baseline:
return self.baseline.get_package_metadata()
fallback_metadata = {
'name': self.__class__.__name__,
'version': '',
'repository_url': '',
'location': '',
}
module = inspect.getmodule(self)
if not module.__package__:
return fallback_metadata
package_name = module.__package__.split('.')[0]
try:
distribution = pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
return fallback_metadata
location = inspect.getsourcefile(module).split(package_name)[0].rstrip('/')
home_page_metadatas = [
metadata.split(':', 1)[1].strip(' ')
for metadata in distribution._get_metadata(distribution.PKG_INFO) if 'Home-page' in metadata
]
repository_url = home_page_metadatas[0] if home_page_metadatas else ''
return {
'name': distribution.key,
'version': distribution.version,
'repository_url': repository_url,
'location': location,
} |
def stop_server(self):
"""
Stop serving
"""
if self.api_server is not None:
try:
self.api_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down API server socket")
            self.api_server.shutdown() | Stop serving | Below is the instruction that describes the task:
### Input:
Stop serving
### Response:
def stop_server(self):
"""
Stop serving
"""
if self.api_server is not None:
try:
self.api_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down API server socket")
self.api_server.shutdown() |
def ase(dbuser, dbpassword, args, gui):
"""Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
    Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help>"""
if dbuser == 'upload':
dbpassword = 'cHyuuQH0'
db = CathubPostgreSQL(user=dbuser, password=dbpassword)
db._connect()
server_name = db.server_name
subprocess.call(
("ase db {} {}".format(server_name, args)).split())
if gui:
args = args.split('-')[0]
subprocess.call(
('ase gui {}@{}'.format(server_name, args)).split()) | Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help> | Below is the instruction that describes the task:
### Input:
Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help>
### Response:
def ase(dbuser, dbpassword, args, gui):
"""Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
    Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help>"""
if dbuser == 'upload':
dbpassword = 'cHyuuQH0'
db = CathubPostgreSQL(user=dbuser, password=dbpassword)
db._connect()
server_name = db.server_name
subprocess.call(
("ase db {} {}".format(server_name, args)).split())
if gui:
args = args.split('-')[0]
subprocess.call(
('ase gui {}@{}'.format(server_name, args)).split()) |
def post(self, path, data=None, **kwargs):
"""
HTTP post on the node
"""
if data:
return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data, **kwargs))
else:
            return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) | HTTP post on the node | Below is the instruction that describes the task:
### Input:
HTTP post on the node
### Response:
def post(self, path, data=None, **kwargs):
"""
HTTP post on the node
"""
if data:
return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data, **kwargs))
else:
return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) |
def get_sid(principal):
'''
Converts a username to a sid, or verifies a sid. Required for working with
the DACL.
Args:
principal(str):
The principal to lookup the sid. Can be a sid or a username.
Returns:
PySID Object: A sid
Usage:
.. code-block:: python
# Get a user's sid
salt.utils.win_dacl.get_sid('jsnuffy')
# Verify that the sid is valid
salt.utils.win_dacl.get_sid('S-1-5-32-544')
'''
# If None is passed, use the Universal Well-known SID "Null SID"
if principal is None:
principal = 'NULL SID'
# Test if the user passed a sid or a name
try:
sid = salt.utils.win_functions.get_sid_from_name(principal)
except CommandExecutionError:
sid = principal
# Test if the SID is valid
try:
sid = win32security.ConvertStringSidToSid(sid)
except pywintypes.error:
log.exception('Invalid user/group or sid: %s', principal)
raise CommandExecutionError(
'Invalid user/group or sid: {0}'.format(principal))
except TypeError:
raise CommandExecutionError
return sid | Converts a username to a sid, or verifies a sid. Required for working with
the DACL.
Args:
principal(str):
The principal to lookup the sid. Can be a sid or a username.
Returns:
PySID Object: A sid
Usage:
.. code-block:: python
# Get a user's sid
salt.utils.win_dacl.get_sid('jsnuffy')
# Verify that the sid is valid
        salt.utils.win_dacl.get_sid('S-1-5-32-544') | Below is the instruction that describes the task:
### Input:
Converts a username to a sid, or verifies a sid. Required for working with
the DACL.
Args:
principal(str):
The principal to lookup the sid. Can be a sid or a username.
Returns:
PySID Object: A sid
Usage:
.. code-block:: python
# Get a user's sid
salt.utils.win_dacl.get_sid('jsnuffy')
# Verify that the sid is valid
salt.utils.win_dacl.get_sid('S-1-5-32-544')
### Response:
def get_sid(principal):
'''
Converts a username to a sid, or verifies a sid. Required for working with
the DACL.
Args:
principal(str):
The principal to lookup the sid. Can be a sid or a username.
Returns:
PySID Object: A sid
Usage:
.. code-block:: python
# Get a user's sid
salt.utils.win_dacl.get_sid('jsnuffy')
# Verify that the sid is valid
salt.utils.win_dacl.get_sid('S-1-5-32-544')
'''
# If None is passed, use the Universal Well-known SID "Null SID"
if principal is None:
principal = 'NULL SID'
# Test if the user passed a sid or a name
try:
sid = salt.utils.win_functions.get_sid_from_name(principal)
except CommandExecutionError:
sid = principal
# Test if the SID is valid
try:
sid = win32security.ConvertStringSidToSid(sid)
except pywintypes.error:
log.exception('Invalid user/group or sid: %s', principal)
raise CommandExecutionError(
'Invalid user/group or sid: {0}'.format(principal))
except TypeError:
raise CommandExecutionError
return sid |
def write_usnps(data, sidx, pnames):
""" write the bisnp string """
## grab bis data from tmparr
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
end = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(end):
end = end.min()
else:
end = bisarr.shape[1]
## write to usnps file
with open(data.outfiles.usnpsphy, 'w') as out:
out.write("{} {}\n".format(bisarr.shape[0], end))
for idx, name in enumerate(pnames):
            out.write("{}{}\n".format(name, "".join(bisarr[idx, :end]))) | write the bisnp string | Below is the instruction that describes the task:
### Input:
write the bisnp string
### Response:
def write_usnps(data, sidx, pnames):
""" write the bisnp string """
## grab bis data from tmparr
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
end = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(end):
end = end.min()
else:
end = bisarr.shape[1]
## write to usnps file
with open(data.outfiles.usnpsphy, 'w') as out:
out.write("{} {}\n".format(bisarr.shape[0], end))
for idx, name in enumerate(pnames):
out.write("{}{}\n".format(name, "".join(bisarr[idx, :end]))) |
def import_from_ding0(file, network):
"""
Import an eDisGo grid topology from
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of the type :class:`ding0.core.NetworkDing0` data will be
used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district.
"""
# when `file` is a string, it will be read by the help of pickle
if isinstance(file, str):
ding0_nd = load_nd_from_pickle(filename=file)
# otherwise it is assumed the object is passed directly
else:
ding0_nd = file
ding0_mv_grid = ding0_nd._mv_grid_districts[0].mv_grid
# Make sure circuit breakers (respectively the rings) are closed
ding0_mv_grid.close_circuit_breakers()
# Import medium-voltage grid data
network.mv_grid = _build_mv_grid(ding0_mv_grid, network)
# Import low-voltage grid data
lv_grids, lv_station_mapping, lv_grid_mapping = _build_lv_grid(
ding0_mv_grid, network)
# Assign lv_grids to network
network.mv_grid.lv_grids = lv_grids
# Integrate disconnecting points
position_switch_disconnectors(network.mv_grid,
mode=network.config['disconnecting_point'][
'position'])
# Check data integrity
_validate_ding0_grid_import(network.mv_grid, ding0_mv_grid,
lv_grid_mapping)
# Set data source
network.set_data_source('grid', 'dingo')
# Set more params
network._id = network.mv_grid.id
# Update the weather_cell_ids in mv_grid to include the ones in lv_grids
# ToDo: maybe get a better solution to push the weather_cell_ids in lv_grids but not in mv_grid but into the
# mv_grid.weather_cell_ids from within the Grid() object or the MVGrid() or LVGrid()
mv_weather_cell_id = network.mv_grid.weather_cells
for lvg in lv_grids:
if lvg.weather_cells:
for lv_w_id in lvg._weather_cells:
if not (lv_w_id in mv_weather_cell_id):
network.mv_grid._weather_cells.append(lv_w_id) | Import an eDisGo grid topology from
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of the type :class:`ding0.core.NetworkDing0` data will be
used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district. | Below is the instruction that describes the task:
### Input:
Import an eDisGo grid topology from
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of the type :class:`ding0.core.NetworkDing0` data will be
used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district.
### Response:
def import_from_ding0(file, network):
"""
Import an eDisGo grid topology from
`Ding0 data <https://github.com/openego/ding0>`_.
This import method is specifically designed to load grid topology data in
the format as `Ding0 <https://github.com/openego/ding0>`_ provides it via
pickles.
The import of the grid topology includes
* the topology itself
* equipment parameter
* generators incl. location, type, subtype and capacity
* loads incl. location and sectoral consumption
Parameters
----------
file: :obj:`str` or :class:`ding0.core.NetworkDing0`
If a str is provided it is assumed it points to a pickle with Ding0
grid data. This file will be read.
If an object of the type :class:`ding0.core.NetworkDing0` data will be
used directly from this object.
network: :class:`~.grid.network.Network`
The eDisGo data container object
Notes
-----
Assumes :class:`ding0.core.NetworkDing0` provided by `file` contains
only data of one mv_grid_district.
"""
# when `file` is a string, it will be read by the help of pickle
if isinstance(file, str):
ding0_nd = load_nd_from_pickle(filename=file)
# otherwise it is assumed the object is passed directly
else:
ding0_nd = file
ding0_mv_grid = ding0_nd._mv_grid_districts[0].mv_grid
# Make sure circuit breakers (respectively the rings) are closed
ding0_mv_grid.close_circuit_breakers()
# Import medium-voltage grid data
network.mv_grid = _build_mv_grid(ding0_mv_grid, network)
# Import low-voltage grid data
lv_grids, lv_station_mapping, lv_grid_mapping = _build_lv_grid(
ding0_mv_grid, network)
# Assign lv_grids to network
network.mv_grid.lv_grids = lv_grids
# Integrate disconnecting points
position_switch_disconnectors(network.mv_grid,
mode=network.config['disconnecting_point'][
'position'])
# Check data integrity
_validate_ding0_grid_import(network.mv_grid, ding0_mv_grid,
lv_grid_mapping)
# Set data source
network.set_data_source('grid', 'dingo')
# Set more params
network._id = network.mv_grid.id
# Update the weather_cell_ids in mv_grid to include the ones in lv_grids
# ToDo: maybe get a better solution to push the weather_cell_ids in lv_grids but not in mv_grid but into the
# mv_grid.weather_cell_ids from within the Grid() object or the MVGrid() or LVGrid()
mv_weather_cell_id = network.mv_grid.weather_cells
for lvg in lv_grids:
if lvg.weather_cells:
for lv_w_id in lvg._weather_cells:
if not (lv_w_id in mv_weather_cell_id):
network.mv_grid._weather_cells.append(lv_w_id) |
def plot_pnlratio(self):
"""
        Draw a scatter plot of pnl ratios
"""
plt.scatter(x=self.pnl.sell_date.apply(str), y=self.pnl.pnl_ratio)
plt.gcf().autofmt_xdate()
        return plt | Draw a scatter plot of pnl ratios | Below is the instruction that describes the task:
### Input:
Draw a scatter plot of pnl ratios
### Response:
def plot_pnlratio(self):
"""
        Draw a scatter plot of pnl ratios
"""
plt.scatter(x=self.pnl.sell_date.apply(str), y=self.pnl.pnl_ratio)
plt.gcf().autofmt_xdate()
return plt |
def _do_login_locked(self):
"""Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc."""
self._telnet = telnetlib.Telnet(self._host)
self._telnet.read_until(LutronConnection.USER_PROMPT)
self._telnet.write(self._user + b'\r\n')
self._telnet.read_until(LutronConnection.PW_PROMPT)
self._telnet.write(self._password + b'\r\n')
self._telnet.read_until(LutronConnection.PROMPT)
self._send_locked("#MONITORING,12,2")
self._send_locked("#MONITORING,255,2")
self._send_locked("#MONITORING,3,1")
self._send_locked("#MONITORING,4,1")
self._send_locked("#MONITORING,5,1")
self._send_locked("#MONITORING,6,1")
self._send_locked("#MONITORING,8,1") | Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc. | Below is the instruction that describes the task:
### Input:
Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc.
### Response:
def _do_login_locked(self):
"""Executes the login procedure (telnet) as well as setting up some
connection defaults like turning off the prompt, etc."""
self._telnet = telnetlib.Telnet(self._host)
self._telnet.read_until(LutronConnection.USER_PROMPT)
self._telnet.write(self._user + b'\r\n')
self._telnet.read_until(LutronConnection.PW_PROMPT)
self._telnet.write(self._password + b'\r\n')
self._telnet.read_until(LutronConnection.PROMPT)
self._send_locked("#MONITORING,12,2")
self._send_locked("#MONITORING,255,2")
self._send_locked("#MONITORING,3,1")
self._send_locked("#MONITORING,4,1")
self._send_locked("#MONITORING,5,1")
self._send_locked("#MONITORING,6,1")
self._send_locked("#MONITORING,8,1") |
def print_help(self, script_name: str):
'''print a help message from the script'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
if len(script_name) > 20:
print(f'usage: sos run {script_name}')
print(
' [workflow_name | -t targets] [options] [workflow_options]'
)
else:
print(
f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]'
)
print(
' workflow_name: Single or combined workflows defined in this script'
)
print(' targets: One or more targets to generate')
print(
' options: Single-hyphen sos parameters (see "sos run -h" for details)'
)
print(
' workflow_options: Double-hyphen workflow-specific parameters'
)
description = [x.lstrip('# ').strip() for x in self.description]
description = textwrap.dedent('\n'.join(description)).strip()
if description:
print('\n' + description)
#
print('\nWorkflows:')
print(' ' + '\n '.join(self.workflows))
#
global_parameters = {}
for section in self.sections:
global_parameters.update(section.global_parameters)
if global_parameters:
print('\nGlobal Workflow Options:')
for name, (value, comment) in global_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
#
print('\nSections')
for section in self.sections:
section.show() | print a help message from the script | Below is the instruction that describes the task:
### Input:
print a help message from the script
### Response:
def print_help(self, script_name: str):
'''print a help message from the script'''
textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
if len(script_name) > 20:
print(f'usage: sos run {script_name}')
print(
' [workflow_name | -t targets] [options] [workflow_options]'
)
else:
print(
f'usage: sos run {script_name} [workflow_name | -t targets] [options] [workflow_options]'
)
print(
' workflow_name: Single or combined workflows defined in this script'
)
print(' targets: One or more targets to generate')
print(
' options: Single-hyphen sos parameters (see "sos run -h" for details)'
)
print(
' workflow_options: Double-hyphen workflow-specific parameters'
)
description = [x.lstrip('# ').strip() for x in self.description]
description = textwrap.dedent('\n'.join(description)).strip()
if description:
print('\n' + description)
#
print('\nWorkflows:')
print(' ' + '\n '.join(self.workflows))
#
global_parameters = {}
for section in self.sections:
global_parameters.update(section.global_parameters)
if global_parameters:
print('\nGlobal Workflow Options:')
for name, (value, comment) in global_parameters.items():
par_str = f' {format_par(name, value)}'
print(par_str)
if comment:
print('\n'.join(
textwrap.wrap(
comment,
width=textWidth,
initial_indent=' ' * 24,
subsequent_indent=' ' * 24)))
#
print('\nSections')
for section in self.sections:
section.show() |
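A hedged usage sketch; `script` stands for a parsed SoS script object that exposes print_help, and the file name is a placeholder.

script.print_help('analysis.sos')
# prints the usage line, workflow list, global workflow options, and each section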
def importConnFromExcel (fileName, sheetName):
''' Import connectivity rules from Excel sheet'''
import openpyxl as xl
# set columns
colPreTags = 0 # 'A'
colPostTags = 1 # 'B'
colConnFunc = 2 # 'C'
colSyn = 3 # 'D'
colProb = 5 # 'F'
colWeight = 6 # 'G'
colAnnot = 8 # 'I'
outFileName = fileName[:-5]+'_'+sheetName+'.py' # set output file name
connText = """## Generated using importConnFromExcel() function in params/utils.py \n\nnetParams['connParams'] = [] \n\n"""
# open excel file and sheet
wb = xl.load_workbook(fileName)
sheet = wb.get_sheet_by_name(sheetName)
numRows = sheet.get_highest_row()
with open(outFileName, 'w') as f:
f.write(connText) # write starting text
for row in range(1,numRows+1):
if sheet.cell(row=row, column=colProb).value: # if not empty row
print('Creating conn rule for row ' + str(row))
# read row values
pre = sheet.cell(row=row, column=colPreTags).value
post = sheet.cell(row=row, column=colPostTags).value
func = sheet.cell(row=row, column=colConnFunc).value
syn = sheet.cell(row=row, column=colSyn).value
prob = sheet.cell(row=row, column=colProb).value
weight = sheet.cell(row=row, column=colWeight).value
# write preTags
line = "netParams['connParams'].append({'preConds': {"
for i,cond in enumerate(pre.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of preTags
# write postTags
line = line + ",\n'postConds': {"
for i,cond in enumerate(post.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of postTags
line = line + ",\n'connFunc': '" + func + "'" # write connFunc
line = line + ",\n'synMech': '" + syn + "'" # write synReceptor
line = line + ",\n'probability': " + str(prob) # write prob
line = line + ",\n'weight': " + str(weight) # write prob
line = line + "})" # add closing brackets
line = line + '\n\n' # new line after each conn rule
f.write(line) | Import connectivity rules from Excel sheet | Below is the instruction that describes the task:
### Input:
Import connectivity rules from Excel sheet
### Response:
def importConnFromExcel (fileName, sheetName):
''' Import connectivity rules from Excel sheet'''
import openpyxl as xl
# set columns
colPreTags = 0 # 'A'
colPostTags = 1 # 'B'
colConnFunc = 2 # 'C'
colSyn = 3 # 'D'
colProb = 5 # 'F'
colWeight = 6 # 'G'
colAnnot = 8 # 'I'
outFileName = fileName[:-5]+'_'+sheetName+'.py' # set output file name
connText = """## Generated using importConnFromExcel() function in params/utils.py \n\nnetParams['connParams'] = [] \n\n"""
# open excel file and sheet
wb = xl.load_workbook(fileName)
sheet = wb.get_sheet_by_name(sheetName)
numRows = sheet.get_highest_row()
with open(outFileName, 'w') as f:
f.write(connText) # write starting text
for row in range(1,numRows+1):
if sheet.cell(row=row, column=colProb).value: # if not empty row
print('Creating conn rule for row ' + str(row))
# read row values
pre = sheet.cell(row=row, column=colPreTags).value
post = sheet.cell(row=row, column=colPostTags).value
func = sheet.cell(row=row, column=colConnFunc).value
syn = sheet.cell(row=row, column=colSyn).value
prob = sheet.cell(row=row, column=colProb).value
weight = sheet.cell(row=row, column=colWeight).value
# write preTags
line = "netParams['connParams'].append({'preConds': {"
for i,cond in enumerate(pre.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of preTags
# write postTags
line = line + ",\n'postConds': {"
for i,cond in enumerate(post.split(';')): # split into different conditions
if i>0: line = line + ", "
cond2 = cond.split('=') # split into key and value
line = line + "'" + cond2[0].replace(' ','') + "': " + cond2[1].replace(' ','') # generate line
line = line + "}" # end of postTags
line = line + ",\n'connFunc': '" + func + "'" # write connFunc
line = line + ",\n'synMech': '" + syn + "'" # write synReceptor
line = line + ",\n'probability': " + str(prob) # write prob
line = line + ",\n'weight': " + str(weight) # write prob
line = line + "})" # add closing brackets
line = line + '\n\n' # new line after each conn rule
f.write(line) |
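A hedged usage example, assuming a workbook whose columns follow the hard-coded positions above; the file and sheet names are placeholders.

importConnFromExcel('connectivity.xlsx', 'Layer5')
# writes connectivity_Layer5.py containing netParams['connParams'].append(...) rules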
def plot_incremental_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
t_cal,
datetime_format=None,
freq="D",
set_index_date=False,
title="Tracking Daily Transactions",
xlabel="day",
ylabel="Transactions",
ax=None,
**kwargs
):
"""
Plot a figure of the predicted and actual incremental transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
df_cum_transactions = expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=datetime_format,
freq=freq,
set_index_date=set_index_date,
)
# get incremental from cumulative transactions
df_cum_transactions = df_cum_transactions.apply(lambda x: x - x.shift(1))
ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
if set_index_date:
x_vline = df_cum_transactions.index[int(t_cal)]
xlabel = "date"
else:
x_vline = t_cal
ax.axvline(x=x_vline, color="r", linestyle="--")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax | Plot a figure of the predicted and actual incremental transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot | Below is the instruction that describes the task:
### Input:
Plot a figure of the predicted and actual incremental transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
### Response:
def plot_incremental_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
t_cal,
datetime_format=None,
freq="D",
set_index_date=False,
title="Tracking Daily Transactions",
xlabel="day",
ylabel="Transactions",
ax=None,
**kwargs
):
"""
Plot a figure of the predicted and actual incremental transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
df_cum_transactions = expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=datetime_format,
freq=freq,
set_index_date=set_index_date,
)
# get incremental from cumulative transactions
df_cum_transactions = df_cum_transactions.apply(lambda x: x - x.shift(1))
ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
if set_index_date:
x_vline = df_cum_transactions.index[int(t_cal)]
xlabel = "date"
else:
x_vline = t_cal
ax.axvline(x=x_vline, color="r", linestyle="--")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax |
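A hedged usage sketch, assuming a fitted lifetimes model and a transactions DataFrame with the named columns; every variable name below is a placeholder.

ax = plot_incremental_transactions(
    fitted_model,            # e.g. a fitted BG/NBD-style model
    transactions_df,         # purchase history
    'date',                  # datetime_col
    'customer_id',           # customer_id_col
    t=365,                   # horizon in time units
    t_cal=300,               # calibration marker for the vertical line
)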
def find_prepositions(chunked):
""" The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk.
"""
# Tokens that are not part of a preposition just get the O-tag.
for ch in chunked:
ch.append("O")
for i, chunk in enumerate(chunked):
if chunk[2].endswith("PP") and chunk[-1] == "O":
# Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
if i < len(chunked)-1 and \
(chunked[i+1][2].endswith(("NP", "PP")) or \
chunked[i+1][1] in ("VBG", "VBN")):
chunk[-1] = "B-PNP"
pp = True
for ch in chunked[i+1:]:
if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
break
if ch[2].endswith("PP") and pp:
ch[-1] = "I-PNP"
if not ch[2].endswith("PP"):
ch[-1] = "I-PNP"
pp = False
return chunked | The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk. | Below is the instruction that describes the task:
### Input:
The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk.
### Response:
def find_prepositions(chunked):
""" The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk.
"""
# Tokens that are not part of a preposition just get the O-tag.
for ch in chunked:
ch.append("O")
for i, chunk in enumerate(chunked):
if chunk[2].endswith("PP") and chunk[-1] == "O":
# Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
if i < len(chunked)-1 and \
(chunked[i+1][2].endswith(("NP", "PP")) or \
chunked[i+1][1] in ("VBG", "VBN")):
chunk[-1] = "B-PNP"
pp = True
for ch in chunked[i+1:]:
if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
break
if ch[2].endswith("PP") and pp:
ch[-1] = "I-PNP"
if not ch[2].endswith("PP"):
ch[-1] = "I-PNP"
pp = False
return chunked |
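A small worked example; the [token, tag, chunk] triples below are illustrative output from an upstream chunker.

chunked = [['I', 'PRP', 'B-NP'],
           ['sat', 'VBD', 'B-VP'],
           ['on', 'IN', 'B-PP'],
           ['the', 'DT', 'B-NP'],
           ['mat', 'NN', 'I-NP']]
find_prepositions(chunked)
# 'on' is tagged B-PNP, 'the' and 'mat' become I-PNP, all other tokens keep 'O'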
def getSegmentOnCell(self, c, i, segIdx):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
"""
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
# Accumulate segment information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result | Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`. | Below is the instruction that describes the task:
### Input:
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
### Response:
def getSegmentOnCell(self, c, i, segIdx):
"""
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.
"""
segList = self.cells4.getNonEmptySegList(c,i)
seg = self.cells4.getSegment(c, i, segList[segIdx])
numSyn = seg.size()
assert numSyn != 0
# Accumulate segment information
result = []
result.append([int(segIdx), bool(seg.isSequenceSegment()),
seg.getPositiveActivations(),
seg.getTotalActivations(), seg.getLastActiveIteration(),
seg.getLastPosDutyCycle(),
seg.getLastPosDutyCycleIteration()])
for s in xrange(numSyn):
sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))
result.append([int(sc), int(si), seg.getPermanence(s)])
return result |
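A hedged usage sketch; `tm` stands for an instance of the C++-backed temporal memory class this method belongs to, after learning has created at least one segment.

info = tm.getSegmentOnCell(c=0, i=0, segIdx=0)
segment_meta = info[0]   # [segIdx, isSequenceSegment, posActivations, ...]
synapses = info[1:]      # one [srcCol, srcCellIdx, permanence] row per synapse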
def fetch_live(self, formatter=TableFormat):
"""
Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format
"""
fmt = formatter(self)
for results in self.execute():
if 'records' in results and results['records'].get('added'):
yield fmt.formatted(results['records']['added']) | Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format | Below is the instruction that describes the task:
### Input:
Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format
### Response:
def fetch_live(self, formatter=TableFormat):
"""
Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format
"""
fmt = formatter(self)
for results in self.execute():
if 'records' in results and results['records'].get('added'):
yield fmt.formatted(results['records']['added']) |
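A hedged usage sketch; `query` stands for whatever monitoring query object defines fetch_live (TableFormat is the default formatter).

for rows in query.fetch_live():
    print(rows)   # blocks on the live stream; each item is a formatted batch of added records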
def set_parameter_label(self, parameter, label):
"""
Sets the Label used in the User Interface for the given parameter.
:type parameter: str or Parameter
:type label: str
"""
labels = self.metadata\
.setdefault("AWS::CloudFormation::Interface", {})\
.setdefault("ParameterLabels", {})
if isinstance(parameter, BaseAWSObject):
parameter = parameter.title
labels[parameter] = {"default": label} | Sets the Label used in the User Interface for the given parameter.
:type parameter: str or Parameter
:type label: str | Below is the instruction that describes the task:
### Input:
Sets the Label used in the User Interface for the given parameter.
:type parameter: str or Parameter
:type label: str
### Response:
def set_parameter_label(self, parameter, label):
"""
Sets the Label used in the User Interface for the given parameter.
:type parameter: str or Parameter
:type label: str
"""
labels = self.metadata\
.setdefault("AWS::CloudFormation::Interface", {})\
.setdefault("ParameterLabels", {})
if isinstance(parameter, BaseAWSObject):
parameter = parameter.title
labels[parameter] = {"default": label} |
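A hedged usage sketch, assuming the method lives on a troposphere-style Template; the parameter name and label are placeholders.

from troposphere import Parameter, Template

template = Template()
env = template.add_parameter(Parameter('Environment', Type='String'))
template.set_parameter_label(env, 'Deployment environment')
# template metadata now carries AWS::CloudFormation::Interface -> ParameterLabels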
def set_brightness(host, did, value, token=None):
"""Set brightness of a bulb or fixture."""
urllib3.disable_warnings()
if token:
scheme = "https"
if not token:
scheme = "http"
token = "1234567890"
url = (
scheme + '://' + host + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>' + token + '</token><did>' + did + '</did><value>' + str(
value) + '</value><type>level</type></gip>&fmt=xml')
response = requests.get(url, verify=False)
if response.status_code == 200:  # status_code is an int, not a string
return True
else:
return False | Set brightness of a bulb or fixture. | Below is the instruction that describes the task:
### Input:
Set brightness of a bulb or fixture.
### Response:
def set_brightness(host, did, value, token=None):
"""Set brightness of a bulb or fixture."""
urllib3.disable_warnings()
if token:
scheme = "https"
if not token:
scheme = "http"
token = "1234567890"
url = (
scheme + '://' + host + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>' + token + '</token><did>' + did + '</did><value>' + str(
value) + '</value><type>level</type></gip>&fmt=xml')
response = requests.get(url, verify=False)
if response.status_code == 200:  # status_code is an int, not a string
return True
else:
return False |
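A hedged usage example; the host, device id, brightness value, and token are all placeholders.

ok = set_brightness('192.168.1.50', '42', 75, token='abc123')
if not ok:
    print('brightness command failed')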
def matrix_iter_detail(matrix, version, scale=1, border=None):
"""\
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
This iterator / generator returns different values for dark / light modules
and therefore the different parts (like the finder patterns, alignment patterns etc.)
are distinguishable. If this information isn't necessary, use the
:py:func:`matrix_iter()` function because it is much cheaper and faster.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
"""
from segno import encoder
check_valid_border(border)
scale = int(scale)
check_valid_scale(scale)
border = get_border(version, border)
width, height = get_symbol_size(version, scale=1, border=0)
is_micro = version < 1
# Create an empty matrix with invalid 0x2 values
alignment_matrix = encoder.make_matrix(version, reserve_regions=False, add_timing=False)
encoder.add_alignment_patterns(alignment_matrix, version)
def get_bit(i, j):
# Check if we operate upon the matrix or the "virtual" border
if 0 <= i < height and 0 <= j < width:
val = matrix[i][j]
if not is_micro:
# Alignment pattern
alignment_val = alignment_matrix[i][j]
if alignment_val != 0x2:
return (TYPE_ALIGNMENT_PATTERN_LIGHT, TYPE_ALIGNMENT_PATTERN_DARK)[alignment_val]
if version > 6: # Version information
if i < 6 and width - 12 < j < width - 8 \
or height - 12 < i < height - 8 and j < 6:
return (TYPE_VERSION_LIGHT, TYPE_VERSION_DARK)[val]
# Dark module
if i == height - 8 and j == 8:
return TYPE_DARKMODULE
# Timing - IMPORTANT: Check alignment (see above) in advance!
if not is_micro and ((i == 6 and j > 7 and j < width - 8) or (j == 6 and i > 7 and i < height - 8)) \
or is_micro and (i == 0 and j > 7 or j == 0 and i > 7):
return (TYPE_TIMING_LIGHT, TYPE_TIMING_DARK)[val]
# Format - IMPORTANT: Check timing (see above) in advance!
if i == 8 and (j < 9 or (not is_micro and j > width - 10)) \
or j == 8 and (i < 8 or not is_micro and i > height - 9):
return (TYPE_FORMAT_LIGHT, TYPE_FORMAT_DARK)[val]
# Finder pattern
# top left top right
if i < 7 and (j < 7 or (not is_micro and j > width - 8)) \
or not is_micro and i > height - 8 and j < 7: # bottom left
return (TYPE_FINDER_PATTERN_LIGHT, TYPE_FINDER_PATTERN_DARK)[val]
# Separator
# top left top right
if i < 8 and (j < 8 or (not is_micro and j > width - 9)) \
or not is_micro and (i > height - 9 and j < 8): # bottom left
return TYPE_SEPARATOR
return (TYPE_DATA_LIGHT, TYPE_DATA_DARK)[val]
else:
return TYPE_QUIET_ZONE
for i in range(-border, height + border):
for s in range(scale):
yield chain.from_iterable(([get_bit(i, j)] * scale for j in range(-border, width + border))) | \
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
This iterator / generator returns different values for dark / light modules
and therefore the different parts (like the finder patterns, alignment patterns etc.)
are distinguishable. If this information isn't necessary, use the
:py:func:`matrix_iter()` function because it is much cheaper and faster.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided | Below is the instruction that describes the task:
### Input:
\
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
This iterator / generator returns different values for dark / light modules
and therefore the different parts (like the finder patterns, alignment patterns etc.)
are distinguishable. If this information isn't necessary, use the
:py:func:`matrix_iter()` function because it is much cheaper and faster.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
### Response:
def matrix_iter_detail(matrix, version, scale=1, border=None):
"""\
Returns an iterator / generator over the provided matrix which includes
the border and the scaling factor.
This iterator / generator returns different values for dark / light modules
and therefore the different parts (like the finder patterns, alignment patterns etc.)
are distinguishable. If this information isn't necessary, use the
:py:func:`matrix_iter()` function because it is much cheaper and faster.
If either the `scale` or `border` value is invalid, a :py:exc:`ValueError`
is raised.
:param matrix: An iterable of bytearrays.
:param int version: A version constant.
:param int scale: The scaling factor (default: ``1``).
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:raises: :py:exc:`ValueError` if an illegal scale or border value is provided
"""
from segno import encoder
check_valid_border(border)
scale = int(scale)
check_valid_scale(scale)
border = get_border(version, border)
width, height = get_symbol_size(version, scale=1, border=0)
is_micro = version < 1
# Create an empty matrix with invalid 0x2 values
alignment_matrix = encoder.make_matrix(version, reserve_regions=False, add_timing=False)
encoder.add_alignment_patterns(alignment_matrix, version)
def get_bit(i, j):
# Check if we operate upon the matrix or the "virtual" border
if 0 <= i < height and 0 <= j < width:
val = matrix[i][j]
if not is_micro:
# Alignment pattern
alignment_val = alignment_matrix[i][j]
if alignment_val != 0x2:
return (TYPE_ALIGNMENT_PATTERN_LIGHT, TYPE_ALIGNMENT_PATTERN_DARK)[alignment_val]
if version > 6: # Version information
if i < 6 and width - 12 < j < width - 8 \
or height - 12 < i < height - 8 and j < 6:
return (TYPE_VERSION_LIGHT, TYPE_VERSION_DARK)[val]
# Dark module
if i == height - 8 and j == 8:
return TYPE_DARKMODULE
# Timing - IMPORTANT: Check alignment (see above) in advance!
if not is_micro and ((i == 6 and j > 7 and j < width - 8) or (j == 6 and i > 7 and i < height - 8)) \
or is_micro and (i == 0 and j > 7 or j == 0 and i > 7):
return (TYPE_TIMING_LIGHT, TYPE_TIMING_DARK)[val]
# Format - IMPORTANT: Check timing (see above) in advance!
if i == 8 and (j < 9 or (not is_micro and j > width - 10)) \
or j == 8 and (i < 8 or not is_micro and i > height - 9):
return (TYPE_FORMAT_LIGHT, TYPE_FORMAT_DARK)[val]
# Finder pattern
# top left top right
if i < 7 and (j < 7 or (not is_micro and j > width - 8)) \
or not is_micro and i > height - 8 and j < 7: # bottom left
return (TYPE_FINDER_PATTERN_LIGHT, TYPE_FINDER_PATTERN_DARK)[val]
# Separator
# top left top right
if i < 8 and (j < 8 or (not is_micro and j > width - 9)) \
or not is_micro and (i > height - 9 and j < 8): # bottom left
return TYPE_SEPARATOR
return (TYPE_DATA_LIGHT, TYPE_DATA_DARK)[val]
else:
return TYPE_QUIET_ZONE
for i in range(-border, height + border):
for s in range(scale):
yield chain.from_iterable(([get_bit(i, j)] * scale for j in range(-border, width + border))) |
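A hedged usage sketch; `qr.matrix` and `qr.version` stand for a generated QR code's module matrix and version constant from the same library.

for row in matrix_iter_detail(qr.matrix, qr.version, scale=1, border=4):
    # each row is an iterable of module-type constants such as
    # TYPE_FINDER_PATTERN_DARK, TYPE_QUIET_ZONE, TYPE_DATA_DARK, ...
    print(list(row))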
def incr_obj(obj, **attrs):
"""Increments context variables
"""
for name, value in attrs.iteritems():
v = getattr(obj, name, None)
if not hasattr(obj, name) or v is None:
v = 0
setattr(obj, name, v + value) | Increments context variables | Below is the instruction that describes the task:
### Input:
Increments context variables
### Response:
def incr_obj(obj, **attrs):
"""Increments context variables
"""
for name, value in attrs.iteritems():
v = getattr(obj, name, None)
if not hasattr(obj, name) or v is None:
v = 0
setattr(obj, name, v + value) |
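A small usage example (Python 2 style, matching the iteritems() call above); Ctx is a throwaway class used only for illustration.

class Ctx(object):
    pass

ctx = Ctx()
incr_obj(ctx, hits=1)
incr_obj(ctx, hits=2, misses=1)
print ctx.hits, ctx.misses   # -> 3 1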
def generate_admin_metadata(name, creator_username=None):
"""Return admin metadata as a dictionary."""
if not dtoolcore.utils.name_is_valid(name):
raise(DtoolCoreInvalidNameError())
if creator_username is None:
creator_username = dtoolcore.utils.getuser()
datetime_obj = datetime.datetime.utcnow()
admin_metadata = {
"uuid": str(uuid.uuid4()),
"dtoolcore_version": __version__,
"name": name,
"type": "protodataset",
"creator_username": creator_username,
"created_at": dtoolcore.utils.timestamp(datetime_obj)
}
return admin_metadata | Return admin metadata as a dictionary. | Below is the instruction that describes the task:
### Input:
Return admin metadata as a dictionary.
### Response:
def generate_admin_metadata(name, creator_username=None):
"""Return admin metadata as a dictionary."""
if not dtoolcore.utils.name_is_valid(name):
raise(DtoolCoreInvalidNameError())
if creator_username is None:
creator_username = dtoolcore.utils.getuser()
datetime_obj = datetime.datetime.utcnow()
admin_metadata = {
"uuid": str(uuid.uuid4()),
"dtoolcore_version": __version__,
"name": name,
"type": "protodataset",
"creator_username": creator_username,
"created_at": dtoolcore.utils.timestamp(datetime_obj)
}
return admin_metadata |
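A short usage example; the uuid and created_at values in the result differ on every call.

meta = generate_admin_metadata('my-dataset')
meta['type']   # 'protodataset'
meta['name']   # 'my-dataset'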
def get_setting(self, key, converter=None, choices=None):
'''Returns the settings value for the provided key.
If converter is str, unicode, bool or int the settings value will be
returned converted to the provided type.
If choices is an instance of list or tuple its item at position of the
settings value be returned.
.. note:: It is suggested to always use unicode for text-settings
because else xbmc returns utf-8 encoded strings.
:param key: The id of the setting defined in settings.xml.
:param converter: (Optional) Choices are str, unicode, bool and int.
:param choices: (Optional) Choices are instances of list or tuple.
Examples:
* ``plugin.get_setting('per_page', int)``
* ``plugin.get_setting('password', unicode)``
* ``plugin.get_setting('force_viewmode', bool)``
* ``plugin.get_setting('content', choices=('videos', 'movies'))``
'''
#TODO: allow pickling of settings items?
# TODO: STUB THIS OUT ON CLI
value = self.addon.getSetting(id=key)
if converter is str:
return value
elif converter is unicode:
return value.decode('utf-8')
elif converter is bool:
return value == 'true'
elif converter is int:
return int(value)
elif isinstance(choices, (list, tuple)):
return choices[int(value)]
elif converter is None:
log.warning('No converter provided, unicode should be used, '
'but returning str value')
return value
else:
raise TypeError('Acceptable converters are str, unicode, bool and '
'int. Acceptable choices are instances of list '
' or tuple.') | Returns the settings value for the provided key.
If converter is str, unicode, bool or int the settings value will be
returned converted to the provided type.
If choices is an instance of list or tuple its item at position of the
settings value be returned.
.. note:: It is suggested to always use unicode for text-settings
because else xbmc returns utf-8 encoded strings.
:param key: The id of the setting defined in settings.xml.
:param converter: (Optional) Choices are str, unicode, bool and int.
:param choices: (Optional) Choices are instances of list or tuple.
Examples:
* ``plugin.get_setting('per_page', int)``
* ``plugin.get_setting('password', unicode)``
* ``plugin.get_setting('force_viewmode', bool)``
* ``plugin.get_setting('content', choices=('videos', 'movies'))`` | Below is the instruction that describes the task:
### Input:
Returns the settings value for the provided key.
If converter is str, unicode, bool or int the settings value will be
returned converted to the provided type.
If choices is an instance of list or tuple its item at position of the
settings value be returned.
.. note:: It is suggested to always use unicode for text-settings
because else xbmc returns utf-8 encoded strings.
:param key: The id of the setting defined in settings.xml.
:param converter: (Optional) Choices are str, unicode, bool and int.
:param choices: (Optional) Choices are instances of list or tuple.
Examples:
* ``plugin.get_setting('per_page', int)``
* ``plugin.get_setting('password', unicode)``
* ``plugin.get_setting('force_viewmode', bool)``
* ``plugin.get_setting('content', choices=('videos', 'movies'))``
### Response:
def get_setting(self, key, converter=None, choices=None):
'''Returns the settings value for the provided key.
If converter is str, unicode, bool or int the settings value will be
returned converted to the provided type.
If choices is an instance of list or tuple its item at position of the
settings value be returned.
.. note:: It is suggested to always use unicode for text-settings
because else xbmc returns utf-8 encoded strings.
:param key: The id of the setting defined in settings.xml.
:param converter: (Optional) Choices are str, unicode, bool and int.
:param choices: (Optional) Choices are instances of list or tuple.
Examples:
* ``plugin.get_setting('per_page', int)``
* ``plugin.get_setting('password', unicode)``
* ``plugin.get_setting('force_viewmode', bool)``
* ``plugin.get_setting('content', choices=('videos', 'movies'))``
'''
#TODO: allow pickling of settings items?
# TODO: STUB THIS OUT ON CLI
value = self.addon.getSetting(id=key)
if converter is str:
return value
elif converter is unicode:
return value.decode('utf-8')
elif converter is bool:
return value == 'true'
elif converter is int:
return int(value)
elif isinstance(choices, (list, tuple)):
return choices[int(value)]
elif converter is None:
log.warning('No converter provided, unicode should be used, '
'but returning str value')
return value
else:
raise TypeError('Acceptable converters are str, unicode, bool and '
'int. Acceptable choices are instances of list '
' or tuple.') |
def bend(mapping, source, context=None):
"""
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
"""
context = {} if context is None else context
transport = Transport(source, context)
return _bend(mapping, transport) | The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map. | Below is the instruction that describes the task:
### Input:
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
### Response:
def bend(mapping, source, context=None):
"""
The main bending function.
mapping: the map of benders
source: a dict to be bent
returns a new dict according to the provided map.
"""
context = {} if context is None else context
transport = Transport(source, context)
return _bend(mapping, transport) |
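A hedged usage sketch, assuming bender combinators from the same JSONBender-style library; the K (constant) and S (selector) import is an assumption, not confirmed by the snippet above.

from jsonbender import K, S   # assumed import path

MAPPING = {'full_name': S('name'), 'source': K('legacy')}
bend(MAPPING, {'name': 'Ada'})
# -> {'full_name': 'Ada', 'source': 'legacy'}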
def keeprunning(wait_secs=0, exit_on_success=False,
on_success=None, on_error=None, on_done=None):
'''
Example 1: dosomething needs to run until a completion condition is met
without needing to have a loop in its code. Also, when an error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING!
'''
def decfn(fn):
def _call_callback(cb, fargs):
if not cb: return
# get the getargspec fn in inspect module (python 2/3 support)
G = getattr(inspect, 'getfullargspec', getattr(inspect, 'getargspec'))
cb_args = G(cb).args
cb_args = dict([(a, fargs.get(a, None)) for a in cb_args])
cb(**cb_args)
def _fn(*args, **kwargs):
fargs = inspect.getcallargs(fn, *args, **kwargs)
fargs.update(dict(__fn__=fn, __exc__=None))
while 1:
try:
fn(*args, **kwargs)
if exit_on_success: break
except (SystemExit, KeyboardInterrupt):
raise
except KeepRunningTerminate:
break
except Exception as exc:
fargs.update(dict(__exc__=exc))
_call_callback(on_error, fargs)
fargs.update(dict(__exc__=None))
if wait_secs: time.sleep(wait_secs)
continue
_call_callback(on_success, fargs)
_call_callback(on_done, fargs)
return _fn
return decfn | Example 1: dosomething needs to run until a completion condition is met
without needing to have a loop in its code. Also, when an error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING! | Below is the instruction that describes the task:
### Input:
Example 1: dosomething needs to run until a completion condition is met
without needing to have a loop in its code. Also, when an error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING!
### Response:
def keeprunning(wait_secs=0, exit_on_success=False,
on_success=None, on_error=None, on_done=None):
'''
Example 1: dosomething needs to run until a completion condition is met
without needing to have a loop in its code. Also, when an error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING!
'''
def decfn(fn):
def _call_callback(cb, fargs):
if not cb: return
# get the getargspec fn in inspect module (python 2/3 support)
G = getattr(inspect, 'getfullargspec', getattr(inspect, 'getargspec'))
cb_args = G(cb).args
cb_args = dict([(a, fargs.get(a, None)) for a in cb_args])
cb(**cb_args)
def _fn(*args, **kwargs):
fargs = inspect.getcallargs(fn, *args, **kwargs)
fargs.update(dict(__fn__=fn, __exc__=None))
while 1:
try:
fn(*args, **kwargs)
if exit_on_success: break
except (SystemExit, KeyboardInterrupt):
raise
except KeepRunningTerminate:
break
except Exception as exc:
fargs.update(dict(__exc__=exc))
_call_callback(on_error, fargs)
fargs.update(dict(__exc__=None))
if wait_secs: time.sleep(wait_secs)
continue
_call_callback(on_success, fargs)
_call_callback(on_done, fargs)
return _fn
return decfn |
def get_page(session,
url,
json=False,
post=False,
data=None,
headers=None,
quiet=False,
**kwargs):
"""
Download an HTML page using the requests session.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@return: Response body.
@rtype: str
"""
url = url.format(**kwargs)
reply = get_reply(session, url, post=post, data=data, headers=headers,
quiet=quiet)
return reply.json() if json else reply.text | Download an HTML page using the requests session.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@return: Response body.
@rtype: str | Below is the instruction that describes the task:
### Input:
Download an HTML page using the requests session.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@return: Response body.
@rtype: str
### Response:
def get_page(session,
url,
json=False,
post=False,
data=None,
headers=None,
quiet=False,
**kwargs):
"""
Download an HTML page using the requests session.
@param session: Requests session.
@type session: requests.Session
@param url: URL pattern with optional keywords to format.
@type url: str
@param post: Flag that indicates whether POST request should be sent.
@type post: bool
@param data: Payload data that is sent with request (in request body).
@type data: object
@param headers: Additional headers to send with request.
@type headers: dict
@return: Response body.
@rtype: str
"""
url = url.format(**kwargs)
reply = get_reply(session, url, post=post, data=data, headers=headers,
quiet=quiet)
return reply.json() if json else reply.text |
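A hedged usage example; the URL is a placeholder and the trailing keyword argument fills the {item} slot in the URL pattern.

import requests

session = requests.Session()
data = get_page(session, 'https://example.com/api/items/{item}',
                json=True, item='42')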
def get_db_prep_value(self, value, connection, prepared=False):
"""Prepare a value for DB interaction.
Returns:
- list(bytes) if not prepared
- list(str) if prepared
"""
if prepared:
return value
if value is None:
return []
values = value if self.multi_valued_field else [value]
prepared_values = [self.get_prep_value(v) for v in values]
# Remove duplicates.
# https://tools.ietf.org/html/rfc4511#section-4.1.7 :
# "The set of attribute values is unordered."
# We keep those values sorted in natural order to avoid useless
# updates to the LDAP server.
return list(sorted(set(v for v in prepared_values if v))) | Prepare a value for DB interaction.
Returns:
- list(bytes) if not prepared
- list(str) if prepared | Below is the instruction that describes the task:
### Input:
Prepare a value for DB interaction.
Returns:
- list(bytes) if not prepared
- list(str) if prepared
### Response:
def get_db_prep_value(self, value, connection, prepared=False):
"""Prepare a value for DB interaction.
Returns:
- list(bytes) if not prepared
- list(str) if prepared
"""
if prepared:
return value
if value is None:
return []
values = value if self.multi_valued_field else [value]
prepared_values = [self.get_prep_value(v) for v in values]
# Remove duplicates.
# https://tools.ietf.org/html/rfc4511#section-4.1.7 :
# "The set of attribute values is unordered."
# We keep those values sorted in natural order to avoid useless
# updates to the LDAP server.
return list(sorted(set(v for v in prepared_values if v))) |
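A hedged sketch of the normalization this method performs on the non-prepared path; `field` and `connection` are stand-ins for a configured field instance and a database connection.

# field.get_db_prep_value(['b', 'a', 'b', ''], connection)
# -> each value run through get_prep_value, empty values dropped,
#    duplicates removed, result sorted: ['a', 'b']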
def choose_labels(alternatives):
"""
Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
"""
if not alternatives:
raise ValueError
if not isinstance(alternatives, list):
raise TypeError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
)
# prepend a termination option
input_terminator = '0'
choice_map.update({input_terminator: '<done>'})
choice_map.move_to_end('0', last=False)
choice_indexes = choice_map.keys()
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select labels:',
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choice_indexes)),
)
)
user_choices = set()
user_choice = None
while not user_choice == input_terminator:
if user_choices:
note('Selected labels: [{}]'.format(', '.join(user_choices)))
user_choice = click.prompt(
prompt, type=click.Choice(choice_indexes), default=input_terminator
)
done = user_choice == input_terminator
new_selection = user_choice not in user_choices
nothing_selected = not user_choices
if not done and new_selection:
user_choices.add(choice_map[user_choice])
if done and nothing_selected:
error('Please select at least one label')
user_choice = None
return user_choices | Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels | Below is the instruction that describes the task:
### Input:
Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
### Response:
def choose_labels(alternatives):
"""
Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
"""
if not alternatives:
raise ValueError
if not isinstance(alternatives, list):
raise TypeError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
)
# prepend a termination option
input_terminator = '0'
choice_map.update({input_terminator: '<done>'})
choice_map.move_to_end('0', last=False)
choice_indexes = choice_map.keys()
choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
prompt = '\n'.join(
(
'Select labels:',
'\n'.join(choice_lines),
'Choose from {}'.format(', '.join(choice_indexes)),
)
)
user_choices = set()
user_choice = None
while not user_choice == input_terminator:
if user_choices:
note('Selected labels: [{}]'.format(', '.join(user_choices)))
user_choice = click.prompt(
prompt, type=click.Choice(choice_indexes), default=input_terminator
)
done = user_choice == input_terminator
new_selection = user_choice not in user_choices
nothing_selected = not user_choices
if not done and new_selection:
user_choices.add(choice_map[user_choice])
if done and nothing_selected:
error('Please select at least one label')
user_choice = None
return user_choices |
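A hedged usage example (interactive; the prompt repeats until '0' is entered after at least one pick).

labels = choose_labels(['bug', 'feature', 'docs'])
# e.g. returns {'bug', 'docs'}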