code | docstring | text
---|---|---|
def remove_empty_dirs(path):
""" removes empty dirs under a given path """
for root, dirs, files in os.walk(path):
for d in dirs:
dir_path = os.path.join(root, d)
if not os.listdir(dir_path):
os.rmdir(dir_path) | removes empty dirs under a given path | Below is the instruction that describes the task:
### Input:
removes empty dirs under a given path
### Response:
def remove_empty_dirs(path):
""" removes empty dirs under a given path """
for root, dirs, files in os.walk(path):
for d in dirs:
dir_path = os.path.join(root, d)
if not os.listdir(dir_path):
os.rmdir(dir_path) |
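A minimal usage sketch for `remove_empty_dirs` above (illustrative only; the paths are made up). Note that with a default top-down `os.walk`, a directory that only becomes empty after its child is removed is not pruned until a later pass.

```python
import os
import tempfile

# Illustrative only: build a tiny tree with an empty nested directory,
# then prune it with remove_empty_dirs (defined above).
base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, "kept"))
os.makedirs(os.path.join(base, "empty", "inner"))
open(os.path.join(base, "kept", "data.txt"), "w").close()

remove_empty_dirs(base)
# One top-down pass removes 'inner'; 'empty' itself would need another pass.
print(sorted(os.listdir(base)))  # ['empty', 'kept']
```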
def _at_block_start(tc, line):
"""
Improve QTextCursor.atBlockStart to ignore spaces
"""
if tc.atBlockStart():
return True
column = tc.columnNumber()
indentation = len(line) - len(line.lstrip())
return column <= indentation | Improve QTextCursor.atBlockStart to ignore spaces | Below is the instruction that describes the task:
### Input:
Improve QTextCursor.atBlockStart to ignore spaces
### Response:
def _at_block_start(tc, line):
"""
Improve QTextCursor.atBlockStart to ignore spaces
"""
if tc.atBlockStart():
return True
column = tc.columnNumber()
indentation = len(line) - len(line.lstrip())
return column <= indentation |
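The Qt-independent core of this check can be shown without a real `QTextCursor`; the `_at_indent_start` helper below is a hypothetical stand-in that mirrors the column-versus-indentation comparison.

```python
# Qt-free sketch of the same idea: a column counts as "block start" when it
# falls inside the line's leading whitespace. _at_indent_start is hypothetical.
def _at_indent_start(column, line):
    indentation = len(line) - len(line.lstrip())
    return column <= indentation

print(_at_indent_start(0, "    return x"))  # True
print(_at_indent_start(4, "    return x"))  # True  (just after the indent)
print(_at_indent_start(5, "    return x"))  # False (inside the code)
```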
def interpretAsOpenMath(x):
"""tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used for conveniently building OM objects in a DSL embedded in Python
in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder"""
if hasattr(x, "_ishelper") and x._ishelper:
# wrapped things in this class -> unwrap
return x._toOM()
elif isinstance(x, om.OMAny):
# already OM
return x
elif isinstance(x, six.integer_types):
# integers -> OMI
return om.OMInteger(x)
elif isinstance(x, float):
# floats -> OMF
return om.OMFloat(x)
elif isinstance(x, six.string_types):
# strings -> OMSTR
return om.OMString(x)
elif isinstance(x, WrappedHelper):
# wrapper -> wrapped object
return x.toOM()
elif inspect.isfunction(x):
# function -> OMBIND(lambda,...)
# get all the parameters of the function
paramMap = inspect.signature(x).parameters
params = [v for k, v in six.iteritems(paramMap)]
# make sure that all of them are positional
posArgKinds = [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]
if not all([p.kind in posArgKinds for p in params]):
raise CannotInterpretAsOpenMath("no sequence arguments allowed")
# call the function with appropriate OMVariables
paramsOM = [om.OMVariable(name=p.name) for p in params]
bodyOM = interpretAsOpenMath(x(*paramsOM))
return OMBinding(om.OMSymbol(name="lambda", cd="python", cdbase="http://python.org"), paramsOM, bodyOM)
else:
# fail
raise CannotInterpretAsOpenMath("unknown kind of object: " + str(x)) | tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used for conveniently building OM objects in a DSL embedded in Python
in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder | Below is the instruction that describes the task:
### Input:
tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used for conveniently building OM objects in a DSL embedded in Python
in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder
### Response:
def interpretAsOpenMath(x):
"""tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used for conveniently building OM objects in a DSL embedded in Python
in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder"""
if hasattr(x, "_ishelper") and x._ishelper:
# wrapped things in this class -> unwrap
return x._toOM()
elif isinstance(x, om.OMAny):
# already OM
return x
elif isinstance(x, six.integer_types):
# integers -> OMI
return om.OMInteger(x)
elif isinstance(x, float):
# floats -> OMF
return om.OMFloat(x)
elif isinstance(x, six.string_types):
# strings -> OMSTR
return om.OMString(x)
elif isinstance(x, WrappedHelper):
# wrapper -> wrapped object
return x.toOM()
elif inspect.isfunction(x):
# function -> OMBIND(lambda,...)
# get all the parameters of the function
paramMap = inspect.signature(x).parameters
params = [v for k, v in six.iteritems(paramMap)]
# make sure that all of them are positional
posArgKinds = [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]
if not all([p.kind in posArgKinds for p in params]):
raise CannotInterpretAsOpenMath("no sequence arguments allowed")
# call the function with appropriate OMVariables
paramsOM = [om.OMVariable(name=p.name) for p in params]
bodyOM = interpretAsOpenMath(x(*paramsOM))
return OMBinding(om.OMSymbol(name="lambda", cd="python", cdbase="http://python.org"), paramsOM, bodyOM)
else:
# fail
raise CannotInterpretAsOpenMath("unknown kind of object: " + str(x)) |
def adjust_all_to_360(dictionary):
"""
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
"""
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary | Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model | Below is the instruction that describes the task:
### Input:
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
### Response:
def adjust_all_to_360(dictionary):
"""
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
"""
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary |
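The `adjust_to_360` helper is not shown in this record; the sketch below is a hypothetical stand-in that wraps values into [0, 360) only for declination/longitude/azimuth/direction-like keys, so the dictionary-level helper above can be exercised.

```python
# Hypothetical stand-in for the adjust_to_360 helper used above: wrap numeric
# values into [0, 360) only when the key looks like an angle column.
def adjust_to_360(value, key):
    angle_words = ("dec", "lon", "azimuth", "dir")
    if value in (None, "") or not any(word in key.lower() for word in angle_words):
        return value
    return float(value) % 360

row = {"azimuth": -15, "treatment_dc_field": 0.05}
print(adjust_all_to_360(row))  # {'azimuth': 345.0, 'treatment_dc_field': 0.05}
```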
def show_one(request, post_process_fun, object_class, id, template='common_json.html'):
"""
Return object of the given type with the specified identifier.
GET parameters:
user:
identifier of the current user
stats:
turn on the enrichment of the objects by some statistics
html
turn on the HTML version of the API
"""
obj = get_object_or_404(object_class, pk=id)
json = post_process_fun(request, obj)
return render_json(request, json, template=template, help_text=show_one.__doc__) | Return object of the given type with the specified identifier.
GET parameters:
user:
identifier of the current user
stats:
turn on the enrichment of the objects by some statistics
html
turn on the HTML version of the API | Below is the instruction that describes the task:
### Input:
Return object of the given type with the specified identifier.
GET parameters:
user:
identifier of the current user
stats:
turn on the enrichment of the objects by some statistics
html
turn on the HTML version of the API
### Response:
def show_one(request, post_process_fun, object_class, id, template='common_json.html'):
"""
Return object of the given type with the specified identifier.
GET parameters:
user:
identifier of the current user
stats:
turn on the enrichment of the objects by some statistics
html
turn on the HTML version of the API
"""
obj = get_object_or_404(object_class, pk=id)
json = post_process_fun(request, obj)
return render_json(request, json, template=template, help_text=show_one.__doc__) |
def features(self):
"""All available features"""
mycols = []
for col in dfn.feature_names:
if col in self:
mycols.append(col)
mycols.sort()
return mycols | All available features | Below is the instruction that describes the task:
### Input:
All available features
### Response:
def features(self):
"""All available features"""
mycols = []
for col in dfn.feature_names:
if col in self:
mycols.append(col)
mycols.sort()
return mycols |
def text2labels(text, sents):
'''
Marks all characters in the given `text` that don't exist within any
element of `sents` with the `1` character; other characters (within sentences)
will be marked with `0`
Used in training process
>>> text = 'привет. меня зовут аня.'
>>> sents = ['привет.', 'меня зовут аня.']
>>> labels = text2labels(text, sents)
>>> ' '.join(text)
>>> 'п р и в е т . м е н я з о в у т а н я .'
>>> ' '.join(labels)
>>> '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
'''
labels = [c for c in text]
for sent in sents:
start = text.index(sent)
finish = start + len(sent)
labels[start:finish] = '0' * len(sent)
for i, c in enumerate(labels):
if c != '0':
labels[i] = '1'
return labels | Marks all characters in the given `text` that don't exist within any
element of `sents` with the `1` character; other characters (within sentences)
will be marked with `0`
Used in training process
>>> text = 'привет. меня зовут аня.'
>>> sents = ['привет.', 'меня зовут аня.']
>>> labels = text2labels(text, sents)
>>> ' '.join(text)
>>> 'п р и в е т . м е н я з о в у т а н я .'
>>> ' '.join(labels)
>>> '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' | Below is the instruction that describes the task:
### Input:
Marks all characters in the given `text` that don't exist within any
element of `sents` with the `1` character; other characters (within sentences)
will be marked with `0`
Used in training process
>>> text = 'привет. меня зовут аня.'
>>> sents = ['привет.', 'меня зовут аня.']
>>> labels = text2labels(text, sents)
>>> ' '.join(text)
>>> 'п р и в е т . м е н я з о в у т а н я .'
>>> ' '.join(labels)
>>> '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
### Response:
def text2labels(text, sents):
'''
Marks all characters in the given `text` that don't exist within any
element of `sents` with the `1` character; other characters (within sentences)
will be marked with `0`
Used in training process
>>> text = 'привет. меня зовут аня.'
>>> sents = ['привет.', 'меня зовут аня.']
>>> labels = text2labels(text, sents)
>>> ' '.join(text)
>>> 'п р и в е т . м е н я з о в у т а н я .'
>>> ' '.join(labels)
>>> '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
'''
labels = [c for c in text]
for sent in sents:
start = text.index(sent)
finish = start + len(sent)
labels[start:finish] = '0' * len(sent)
for i, c in enumerate(labels):
if c != '0':
labels[i] = '1'
return labels |
def _recurse(self, inputs, output):
'''internal recursion routine called by the run method that generates
all input combinations'''
if inputs:
my_input = inputs[0]
name = my_input.name
if my_input.state:
my_options = my_input.options(self.state)
else:
my_options = my_input.options
for option in my_options:
my_output = list(output)
my_output.append({name: option})
self._recurse(inputs[1:], my_output)
else:
try:
valid, result = self._function(output)
except ValueError:
raise RuntimeError("function must return 2 values")
print(output, valid, result) | internal recursion routine called by the run method that generates
all input combinations | Below is the instruction that describes the task:
### Input:
internal recursion routine called by the run method that generates
all input combinations
### Response:
def _recurse(self, inputs, output):
'''internal recursion routine called by the run method that generates
all input combinations'''
if inputs:
my_input = inputs[0]
name = my_input.name
if my_input.state:
my_options = my_input.options(self.state)
else:
my_options = my_input.options
for option in my_options:
my_output = list(output)
my_output.append({name: option})
self._recurse(inputs[1:], my_output)
else:
try:
valid, result = self._function(output)
except ValueError:
raise RuntimeError("function must return 2 values")
print(output, valid, result) |
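Stripped of the class machinery, the recursion is a depth-first enumeration of every combination of named options; the standalone sketch below uses hypothetical names and is equivalent to `itertools.product` over the option lists.

```python
# Standalone sketch of the same recursion: depth-first enumeration of every
# combination of named option lists (equivalent to itertools.product).
def enumerate_combinations(inputs, output=()):
    if inputs:
        name, options = inputs[0]
        for option in options:
            enumerate_combinations(inputs[1:], output + ({name: option},))
    else:
        print(list(output))

enumerate_combinations([("color", ["red", "blue"]), ("size", [1, 2])])
# [{'color': 'red'}, {'size': 1}], [{'color': 'red'}, {'size': 2}], ...
```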
def get_source(self, objtxt):
"""Get object source"""
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj) | Get object source | Below is the instruction that describes the task:
### Input:
Get object source
### Response:
def get_source(self, objtxt):
"""Get object source"""
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj) |
def description(self):
""" Get the textual description of the category """
if self._meta and self._meta.get_payload():
return utils.TrueCallableProxy(self._description)
return utils.CallableProxy(None) | Get the textual description of the category | Below is the instruction that describes the task:
### Input:
Get the textual description of the category
### Response:
def description(self):
""" Get the textual description of the category """
if self._meta and self._meta.get_payload():
return utils.TrueCallableProxy(self._description)
return utils.CallableProxy(None) |
def gradient(self, mu, dist):
"""
derivative of the link function wrt mu
Parameters
----------
mu : array-like of length n
dist : Distribution instance
Returns
-------
grad : np.array of length n
"""
return dist.levels/(mu*(dist.levels - mu)) | derivative of the link function wrt mu
Parameters
----------
mu : array-like of length n
dist : Distribution instance
Returns
-------
grad : np.array of length n | Below is the instruction that describes the task:
### Input:
derivative of the link function wrt mu
Parameters
----------
mu : array-like of length n
dist : Distribution instance
Returns
-------
grad : np.array of length n
### Response:
def gradient(self, mu, dist):
"""
derivative of the link function wrt mu
Parameters
----------
mu : array-like of length n
dist : Distribution instance
Returns
-------
grad : np.array of length n
"""
return dist.levels/(mu*(dist.levels - mu)) |
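The returned expression is the derivative of a logit-style link `log(mu / (levels - mu))`; a quick numerical check against a finite difference, using a hypothetical stand-in distribution that only provides `.levels`, confirms the closed form.

```python
import numpy as np

# Numerical sanity check of the closed form. FakeDist is a hypothetical
# stand-in; only the .levels attribute is needed here.
class FakeDist:
    levels = 1

mu = np.array([0.2, 0.5, 0.9])
dist = FakeDist()

link = lambda m: np.log(m / (dist.levels - m))         # logit-style link
analytic = dist.levels / (mu * (dist.levels - mu))     # formula from gradient()
eps = 1e-6
numeric = (link(mu + eps) - link(mu - eps)) / (2 * eps)
print(np.allclose(analytic, numeric))  # True
```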
def read(self, queue):
"""
Read complete DSMR telegrams from the serial interface and parse them
into CosemObject and MbusObject instances.
Instead of being a generator, values are pushed to provided queue for
asynchronous processing.
:rtype: None
"""
# create Serial StreamReader
conn = serial_asyncio.open_serial_connection(**self.serial_settings)
reader, _ = yield from conn
while True:
# Read line if available or give control back to loop until new
# data has arrived.
data = yield from reader.readline()
self.telegram_buffer.append(data.decode('ascii'))
for telegram in self.telegram_buffer.get_all():
try:
# Push new parsed telegram onto queue.
queue.put_nowait(
self.telegram_parser.parse(telegram)
)
except ParseError as e:
logger.warning('Failed to parse telegram: %s', e) | Read complete DSMR telegrams from the serial interface and parse them
into CosemObject and MbusObject instances.
Instead of being a generator, values are pushed to provided queue for
asynchronous processing.
:rtype: None | Below is the instruction that describes the task:
### Input:
Read complete DSMR telegrams from the serial interface and parse them
into CosemObject and MbusObject instances.
Instead of being a generator, values are pushed to provided queue for
asynchronous processing.
:rtype: None
### Response:
def read(self, queue):
"""
Read complete DSMR telegrams from the serial interface and parse them
into CosemObject and MbusObject instances.
Instead of being a generator, values are pushed to provided queue for
asynchronous processing.
:rtype: None
"""
# create Serial StreamReader
conn = serial_asyncio.open_serial_connection(**self.serial_settings)
reader, _ = yield from conn
while True:
# Read line if available or give control back to loop until new
# data has arrived.
data = yield from reader.readline()
self.telegram_buffer.append(data.decode('ascii'))
for telegram in self.telegram_buffer.get_all():
try:
# Push new parsed telegram onto queue.
queue.put_nowait(
self.telegram_parser.parse(telegram)
)
except ParseError as e:
logger.warning('Failed to parse telegram: %s', e) |
def handle(self, *args, **options):
"""
dump fields permissions for a user
"""
def get_user(username):
try:
return User.objects.get(username=username)
except ObjectDoesNotExist as e:
raise CommandError("This user doesn't exist in the database")
def add_permissions(user_field_permissions, content_type, name):
p = None
try:
p = FieldPermission.objects.get(content_type=content_type, name=name)
except ObjectDoesNotExist:
p = FieldPermission(content_type=content_type, name=name)
p.save()
finally:
user_field_permissions.permissions.add(p)
if len(args) !=1:
raise CommandError("Specifies a json file created by the fine_permissions_dump command")
else:
try:
with open(args[0], 'r') as json_file:
myjson = json.load(json_file)
user = get_user(options.get('user')) if options['user'] else get_user(myjson['username'])
fields_permissions = myjson['fields_permissions']
user_field_permissions = UserFieldPermissions(user=user)
user_field_permissions.save()
for f in fields_permissions:
content_type = ContentType.objects.get(app_label=f["app_label"], model=f["model"])
add_permissions(user_field_permissions, content_type, f['name'])
except Exception as e:
raise CommandError(e) | dump fields permissions for a user | Below is the instruction that describes the task:
### Input:
dump fields permissions for a user
### Response:
def handle(self, *args, **options):
"""
dump fields permissions for a user
"""
def get_user(username):
try:
return User.objects.get(username=username)
except ObjectDoesNotExist as e:
raise CommandError("This user doesn't exist in the database")
def add_permissions(user_field_permissions, content_type, name):
p = None
try:
p = FieldPermission.objects.get(content_type=content_type, name=name)
except ObjectDoesNotExist:
p = FieldPermission(content_type=content_type, name=name)
p.save()
finally:
user_field_permissions.permissions.add(p)
if len(args) !=1:
raise CommandError("Specifies a json file created by the fine_permissions_dump command")
else:
try:
with open(args[0], 'r') as json_file:
myjson = json.load(json_file)
user = get_user(options.get('user')) if options['user'] else get_user(myjson['username'])
fields_permissions = myjson['fields_permissions']
user_field_permissions = UserFieldPermissions(user=user)
user_field_permissions.save()
for f in fields_permissions:
content_type = ContentType.objects.get(app_label=f["app_label"], model=f["model"])
add_permissions(user_field_permissions, content_type, f['name'])
except Exception as e:
raise CommandError(e) |
def PrintAllTables(self):
""" Prints contents of every table. """
goodlogging.Log.Info("DB", "Database contents:\n")
for table in self._tableDict.keys():
self._PrintDatabaseTable(table) | Prints contents of every table. | Below is the instruction that describes the task:
### Input:
Prints contents of every table.
### Response:
def PrintAllTables(self):
""" Prints contents of every table. """
goodlogging.Log.Info("DB", "Database contents:\n")
for table in self._tableDict.keys():
self._PrintDatabaseTable(table) |
def _get_baseline_from_tag(config, tag):
'''
Returns the last created baseline snapshot marked with `tag`
'''
last_snapshot = None
for snapshot in __salt__['snapper.list_snapshots'](config):
if tag == snapshot['userdata'].get("baseline_tag"):
if not last_snapshot or last_snapshot['timestamp'] < snapshot['timestamp']:
last_snapshot = snapshot
return last_snapshot | Returns the last created baseline snapshot marked with `tag` | Below is the instruction that describes the task:
### Input:
Returns the last created baseline snapshot marked with `tag`
### Response:
def _get_baseline_from_tag(config, tag):
'''
Returns the last created baseline snapshot marked with `tag`
'''
last_snapshot = None
for snapshot in __salt__['snapper.list_snapshots'](config):
if tag == snapshot['userdata'].get("baseline_tag"):
if not last_snapshot or last_snapshot['timestamp'] < snapshot['timestamp']:
last_snapshot = snapshot
return last_snapshot |
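The selection logic itself is independent of salt; on plain dictionaries it reduces to keeping the newest snapshot whose `userdata` carries the requested tag, as in the sketch below with made-up data.

```python
# Pure-data sketch of the selection logic with made-up snapshots: keep the
# newest entry whose userdata carries the requested baseline tag.
snapshots = [
    {"timestamp": 100, "userdata": {"baseline_tag": "pre-update"}},
    {"timestamp": 250, "userdata": {}},
    {"timestamp": 300, "userdata": {"baseline_tag": "pre-update"}},
]

last_snapshot = None
for snapshot in snapshots:
    if snapshot["userdata"].get("baseline_tag") == "pre-update":
        if not last_snapshot or last_snapshot["timestamp"] < snapshot["timestamp"]:
            last_snapshot = snapshot

print(last_snapshot["timestamp"])  # 300
```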
def assign_power_curve(self, wake_losses_model='power_efficiency_curve',
smoothing=False, block_width=0.5,
standard_deviation_method='turbulence_intensity',
smoothing_order='wind_farm_power_curves',
turbulence_intensity=None, **kwargs):
r"""
Calculates the power curve of a wind turbine cluster.
The turbine cluster power curve is calculated by aggregating the wind
farm power curves of wind farms within the turbine cluster. Depending
on the parameters the power curves are smoothed (before or after the
aggregation) and/or a wind farm efficiency is applied before the
aggregation.
After the calculations the power curve is assigned to the attribute
`power_curve`.
Parameters
----------
wake_losses_model : string
Defines the method for taking wake losses within the farm into
consideration. Options: 'power_efficiency_curve',
'constant_efficiency' or None. Default: 'power_efficiency_curve'.
smoothing : boolean
If True the power curves will be smoothed before or after the
aggregation of power curves depending on `smoothing_order`.
Default: False.
block_width : float
Width between the wind speeds in the sum of the equation in
:py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity',
'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
smoothing_order : string
Defines when the smoothing takes place if `smoothing` is True.
Options: 'turbine_power_curves' (to the single turbine power
curves), 'wind_farm_power_curves'.
Default: 'wind_farm_power_curves'.
turbulence_intensity : float
Turbulence intensity at hub height of the wind farm or
wind turbine cluster for power curve smoothing with
'turbulence_intensity' method. Can be calculated from
`roughness_length` instead. Default: None.
Other Parameters
----------------
roughness_length : float, optional.
Roughness length. If `standard_deviation_method` is
'turbulence_intensity' and `turbulence_intensity` is not given
the turbulence intensity is calculated via the roughness length.
Returns
-------
self
"""
# Assign wind farm power curves to wind farms of wind turbine cluster
for farm in self.wind_farms:
# Assign hub heights (needed for power curve and later for
# hub height of turbine cluster)
farm.mean_hub_height()
# Assign wind farm power curve
farm.assign_power_curve(
wake_losses_model=wake_losses_model,
smoothing=smoothing, block_width=block_width,
standard_deviation_method=standard_deviation_method,
smoothing_order=smoothing_order,
turbulence_intensity=turbulence_intensity, **kwargs)
# Create data frame from power curves of all wind farms
df = pd.concat([farm.power_curve.set_index(['wind_speed']).rename(
columns={'value': farm.name}) for
farm in self.wind_farms], axis=1)
# Sum up power curves
cluster_power_curve = pd.DataFrame(
df.interpolate(method='index').sum(axis=1))
cluster_power_curve.columns = ['value']
# Return wind speed (index) to a column of the data frame
cluster_power_curve.reset_index('wind_speed', inplace=True)
self.power_curve = cluster_power_curve
return self | r"""
Calculates the power curve of a wind turbine cluster.
The turbine cluster power curve is calculated by aggregating the wind
farm power curves of wind farms within the turbine cluster. Depending
on the parameters the power curves are smoothed (before or after the
aggregation) and/or a wind farm efficiency is applied before the
aggregation.
After the calculations the power curve is assigned to the attribute
`power_curve`.
Parameters
----------
wake_losses_model : string
Defines the method for taking wake losses within the farm into
consideration. Options: 'power_efficiency_curve',
'constant_efficiency' or None. Default: 'power_efficiency_curve'.
smoothing : boolean
If True the power curves will be smoothed before or after the
aggregation of power curves depending on `smoothing_order`.
Default: False.
block_width : float
Width between the wind speeds in the sum of the equation in
:py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity',
'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
smoothing_order : string
Defines when the smoothing takes place if `smoothing` is True.
Options: 'turbine_power_curves' (to the single turbine power
curves), 'wind_farm_power_curves'.
Default: 'wind_farm_power_curves'.
turbulence_intensity : float
Turbulence intensity at hub height of the wind farm or
wind turbine cluster for power curve smoothing with
'turbulence_intensity' method. Can be calculated from
`roughness_length` instead. Default: None.
Other Parameters
----------------
roughness_length : float, optional.
Roughness length. If `standard_deviation_method` is
'turbulence_intensity' and `turbulence_intensity` is not given
the turbulence intensity is calculated via the roughness length.
Returns
-------
self | Below is the instruction that describes the task:
### Input:
r"""
Calculates the power curve of a wind turbine cluster.
The turbine cluster power curve is calculated by aggregating the wind
farm power curves of wind farms within the turbine cluster. Depending
on the parameters the power curves are smoothed (before or after the
aggregation) and/or a wind farm efficiency is applied before the
aggregation.
After the calculations the power curve is assigned to the attribute
`power_curve`.
Parameters
----------
wake_losses_model : string
Defines the method for taking wake losses within the farm into
consideration. Options: 'power_efficiency_curve',
'constant_efficiency' or None. Default: 'power_efficiency_curve'.
smoothing : boolean
If True the power curves will be smoothed before or after the
aggregation of power curves depending on `smoothing_order`.
Default: False.
block_width : float
Width between the wind speeds in the sum of the equation in
:py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity',
'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
smoothing_order : string
Defines when the smoothing takes place if `smoothing` is True.
Options: 'turbine_power_curves' (to the single turbine power
curves), 'wind_farm_power_curves'.
Default: 'wind_farm_power_curves'.
turbulence_intensity : float
Turbulence intensity at hub height of the wind farm or
wind turbine cluster for power curve smoothing with
'turbulence_intensity' method. Can be calculated from
`roughness_length` instead. Default: None.
Other Parameters
----------------
roughness_length : float, optional.
Roughness length. If `standard_deviation_method` is
'turbulence_intensity' and `turbulence_intensity` is not given
the turbulence intensity is calculated via the roughness length.
Returns
-------
self
### Response:
def assign_power_curve(self, wake_losses_model='power_efficiency_curve',
smoothing=False, block_width=0.5,
standard_deviation_method='turbulence_intensity',
smoothing_order='wind_farm_power_curves',
turbulence_intensity=None, **kwargs):
r"""
Calculates the power curve of a wind turbine cluster.
The turbine cluster power curve is calculated by aggregating the wind
farm power curves of wind farms within the turbine cluster. Depending
on the parameters the power curves are smoothed (before or after the
aggregation) and/or a wind farm efficiency is applied before the
aggregation.
After the calculations the power curve is assigned to the attribute
`power_curve`.
Parameters
----------
wake_losses_model : string
Defines the method for taking wake losses within the farm into
consideration. Options: 'power_efficiency_curve',
'constant_efficiency' or None. Default: 'power_efficiency_curve'.
smoothing : boolean
If True the power curves will be smoothed before or after the
aggregation of power curves depending on `smoothing_order`.
Default: False.
block_width : float
Width between the wind speeds in the sum of the equation in
:py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
standard_deviation_method : string
Method for calculating the standard deviation for the Gauss
distribution. Options: 'turbulence_intensity',
'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
smoothing_order : string
Defines when the smoothing takes place if `smoothing` is True.
Options: 'turbine_power_curves' (to the single turbine power
curves), 'wind_farm_power_curves'.
Default: 'wind_farm_power_curves'.
turbulence_intensity : float
Turbulence intensity at hub height of the wind farm or
wind turbine cluster for power curve smoothing with
'turbulence_intensity' method. Can be calculated from
`roughness_length` instead. Default: None.
Other Parameters
----------------
roughness_length : float, optional.
Roughness length. If `standard_deviation_method` is
'turbulence_intensity' and `turbulence_intensity` is not given
the turbulence intensity is calculated via the roughness length.
Returns
-------
self
"""
# Assign wind farm power curves to wind farms of wind turbine cluster
for farm in self.wind_farms:
# Assign hub heights (needed for power curve and later for
# hub height of turbine cluster)
farm.mean_hub_height()
# Assign wind farm power curve
farm.assign_power_curve(
wake_losses_model=wake_losses_model,
smoothing=smoothing, block_width=block_width,
standard_deviation_method=standard_deviation_method,
smoothing_order=smoothing_order,
turbulence_intensity=turbulence_intensity, **kwargs)
# Create data frame from power curves of all wind farms
df = pd.concat([farm.power_curve.set_index(['wind_speed']).rename(
columns={'value': farm.name}) for
farm in self.wind_farms], axis=1)
# Sum up power curves
cluster_power_curve = pd.DataFrame(
df.interpolate(method='index').sum(axis=1))
cluster_power_curve.columns = ['value']
# Return wind speed (index) to a column of the data frame
cluster_power_curve.reset_index('wind_speed', inplace=True)
self.power_curve = cluster_power_curve
return self |
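The aggregation step at the end is the interesting part: the individual farm curves are aligned on the union of their wind-speed grids, gaps are interpolated along the index, and the columns are summed. The toy pandas sketch below uses made-up curves, not windpowerlib data.

```python
import pandas as pd

# Toy illustration of the aggregation step only, with made-up power curves.
farm_a = pd.DataFrame({"wind_speed": [3.0, 5.0, 7.0], "value": [0, 400, 900]})
farm_b = pd.DataFrame({"wind_speed": [3.0, 6.0, 7.0], "value": [0, 700, 1000]})

# Align both curves on the union of wind speeds, one column per farm.
df = pd.concat(
    [curve.set_index("wind_speed").rename(columns={"value": name})
     for name, curve in [("farm_a", farm_a), ("farm_b", farm_b)]],
    axis=1)
# Fill the gaps along the wind-speed index and sum the farms into one curve.
cluster = pd.DataFrame(df.interpolate(method="index").sum(axis=1))
cluster.columns = ["value"]
cluster.reset_index(inplace=True)
print(cluster)
```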
def get_states(self):
"""
Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
"""
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
if cpd.state_names is None or cpd.state_names.get(var) is None:
states = range(cpd.get_cardinality([var])[var])
else:
states = cpd.state_names[var]
for state in states:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = self._make_valid_state_name(state)
outcome_tag[var].append(state_tag)
return outcome_tag | Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]} | Below is the instruction that describes the task:
### Input:
Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
### Response:
def get_states(self):
"""
Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
"""
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
if cpd.state_names is None or cpd.state_names.get(var) is None:
states = range(cpd.get_cardinality([var])[var])
else:
states = cpd.state_names[var]
for state in states:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = self._make_valid_state_name(state)
outcome_tag[var].append(state_tag)
return outcome_tag |
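The element construction in the inner loop can be seen in isolation with the standard-library `ElementTree` (lxml's `etree` offers the same `SubElement` call); the variable and state names below are hypothetical.

```python
from xml.etree import ElementTree as etree  # lxml.etree has the same SubElement API

# One variable with two hypothetical states produces one OUTCOME child per state.
variable = etree.Element("VARIABLE")
for state in ["true", "false"]:
    state_tag = etree.SubElement(variable, "OUTCOME")
    state_tag.text = state

print(etree.tostring(variable).decode())
# <VARIABLE><OUTCOME>true</OUTCOME><OUTCOME>false</OUTCOME></VARIABLE>
```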
def user(username, password, all):
"""Create admin users (BasicAuth only)."""
if current_app.config['AUTH_PROVIDER'] != 'basic':
raise click.UsageError('Not required for {} admin users'.format(current_app.config['AUTH_PROVIDER']))
if username and username not in current_app.config['ADMIN_USERS']:
raise click.UsageError('User {} not an admin'.format(username))
if not username and not all:
raise click.UsageError('Missing option "--username".')
def create_user(admin):
email = admin if '@' in admin else None
user = User(
name='Admin user',
login=admin,
password=generate_password_hash(password),
roles=['admin'],
text='Created by alertad script',
email=email,
email_verified=bool(email)
)
try:
db.get_db() # init db on global app context
user = user.create()
except Exception as e:
click.echo('ERROR: {}'.format(e))
else:
click.echo('{} {}'.format(user.id, user.name))
if all:
for admin in current_app.config['ADMIN_USERS']:
create_user(admin)
else:
create_user(username) | Create admin users (BasicAuth only). | Below is the instruction that describes the task:
### Input:
Create admin users (BasicAuth only).
### Response:
def user(username, password, all):
"""Create admin users (BasicAuth only)."""
if current_app.config['AUTH_PROVIDER'] != 'basic':
raise click.UsageError('Not required for {} admin users'.format(current_app.config['AUTH_PROVIDER']))
if username and username not in current_app.config['ADMIN_USERS']:
raise click.UsageError('User {} not an admin'.format(username))
if not username and not all:
raise click.UsageError('Missing option "--username".')
def create_user(admin):
email = admin if '@' in admin else None
user = User(
name='Admin user',
login=admin,
password=generate_password_hash(password),
roles=['admin'],
text='Created by alertad script',
email=email,
email_verified=bool(email)
)
try:
db.get_db() # init db on global app context
user = user.create()
except Exception as e:
click.echo('ERROR: {}'.format(e))
else:
click.echo('{} {}'.format(user.id, user.name))
if all:
for admin in current_app.config['ADMIN_USERS']:
create_user(admin)
else:
create_user(username) |
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._number_of_seconds is not None:
self._normalized_timestamp = (
decimal.Decimal(self._microseconds) /
definitions.MICROSECONDS_PER_SECOND)
self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)
return self._normalized_timestamp | Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined. | Below is the instruction that describes the task:
### Input:
Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
### Response:
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._number_of_seconds is not None:
self._normalized_timestamp = (
decimal.Decimal(self._microseconds) /
definitions.MICROSECONDS_PER_SECOND)
self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)
return self._normalized_timestamp |
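The arithmetic in isolation: seconds plus microseconds become one exact `decimal.Decimal` number of seconds since the POSIX epoch. The constant below mirrors what `definitions.MICROSECONDS_PER_SECOND` is assumed to hold.

```python
import decimal

MICROSECONDS_PER_SECOND = 10**6  # assumed value of definitions.MICROSECONDS_PER_SECOND

number_of_seconds = 1500000000
microseconds = 250000

# Same two steps as above: exact fraction of a second, then add whole seconds.
normalized = decimal.Decimal(microseconds) / MICROSECONDS_PER_SECOND
normalized += decimal.Decimal(number_of_seconds)
print(normalized)  # 1500000000.25
```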
def set_cache_buster(redis, path, hash):
"""Sets the cache buster value for a given file path"""
redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash) | Sets the cache buster value for a given file path | Below is the the instruction that describes the task:
### Input:
Sets the cache buster value for a given file path
### Response:
def set_cache_buster(redis, path, hash):
"""Sets the cache buster value for a given file path"""
redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash) |
def get_narrow_url(self, instance):
"""
Return a link suitable for narrowing on the current item.
"""
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
# Never keep the page query parameter in narrowing urls.
# It will raise a NotFound exception when trying to paginate a narrowed queryset.
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url") | Return a link suitable for narrowing on the current item. | Below is the instruction that describes the task:
### Input:
Return a link suitable for narrowing on the current item.
### Response:
def get_narrow_url(self, instance):
"""
Return a link suitable for narrowing on the current item.
"""
text = instance[0]
request = self.context["request"]
query_params = request.GET.copy()
# Never keep the page query parameter in narrowing urls.
# It will raise a NotFound exception when trying to paginate a narrowed queryset.
page_query_param = self.get_paginate_by_param()
if page_query_param and page_query_param in query_params:
del query_params[page_query_param]
selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
url = request.build_absolute_uri(path)
return serializers.Hyperlink(url, "narrow-url") |
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
"""Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
"""
matplotlib, plt = _import_matplotlib()
image_path_iterator = block_vars['image_path_iterator']
image_paths = list()
for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
if 'format' in kwargs:
image_path = '%s.%s' % (os.path.splitext(image_path)[0],
kwargs['format'])
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_num)
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr) and \
attr not in kwargs:
kwargs[attr] = fig_attr
fig.savefig(image_path, **kwargs)
image_paths.append(image_path)
plt.close('all')
return figure_rst(image_paths, gallery_conf['src_dir']) | Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`. | Below is the instruction that describes the task:
### Input:
Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
### Response:
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
"""Scrape Matplotlib images.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
**kwargs : dict
Additional keyword arguments to pass to
:meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
The ``format`` kwarg in particular is used to set the file extension
of the output file (currently only 'png' and 'svg' are supported).
Returns
-------
rst : str
The ReSTructuredText that will be rendered to HTML containing
the images. This is often produced by :func:`figure_rst`.
"""
matplotlib, plt = _import_matplotlib()
image_path_iterator = block_vars['image_path_iterator']
image_paths = list()
for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
if 'format' in kwargs:
image_path = '%s.%s' % (os.path.splitext(image_path)[0],
kwargs['format'])
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_num)
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr) and \
attr not in kwargs:
kwargs[attr] = fig_attr
fig.savefig(image_path, **kwargs)
image_paths.append(image_path)
plt.close('all')
return figure_rst(image_paths, gallery_conf['src_dir']) |
def dl_full_file(url, save_file_name):
"""
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
"""
response = requests.get(url)
with open(save_file_name, 'wb') as writefile:
writefile.write(response.content)
return | Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as | Below is the instruction that describes the task:
### Input:
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
### Response:
def dl_full_file(url, save_file_name):
"""
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
"""
response = requests.get(url)
with open(save_file_name, 'wb') as writefile:
writefile.write(response.content)
return |
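Example usage (needs network access); the URL and file name are arbitrary. Because the whole response body is held in memory, very large files would be better served by `requests.get(..., stream=True)` with `iter_content`.

```python
# Illustrative call only; any reachable URL works since no checks are performed.
dl_full_file("https://www.example.com/", "example.html")

with open("example.html", "rb") as readfile:
    print(len(readfile.read()), "bytes downloaded")
```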
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn | Establish a socket connection and set nodelay settings on it.
:return: New socket connection. | Below is the instruction that describes the task:
### Input:
Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
### Response:
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self._dns_host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn |
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
"""
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
"""
results = []
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [(c, None)
for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
incomplete=incomplete)
results = [c if isinstance(c, tuple) else (c, None)
for c in dynamic_completions]
return results | :param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param | Below is the instruction that describes the task:
### Input:
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
### Response:
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
"""
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
"""
results = []
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [(c, None)
for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
incomplete=incomplete)
results = [c if isinstance(c, tuple) else (c, None)
for c in dynamic_completions]
return results |
def assignEditor(self):
"""
Assigns the editor for this entry based on the plugin.
"""
plugin = self.currentPlugin()
column = self.currentColumn()
value = self.currentValue()
if not plugin:
self.setEditor(None)
return
self.setUpdatesEnabled(False)
self.blockSignals(True)
op = self.uiOperatorDDL.currentText()
self.setEditor(plugin.createEditor(self, column, op, value))
self.setUpdatesEnabled(True)
self.blockSignals(False) | Assigns the editor for this entry based on the plugin. | Below is the instruction that describes the task:
### Input:
Assigns the editor for this entry based on the plugin.
### Response:
def assignEditor(self):
"""
Assigns the editor for this entry based on the plugin.
"""
plugin = self.currentPlugin()
column = self.currentColumn()
value = self.currentValue()
if not plugin:
self.setEditor(None)
return
self.setUpdatesEnabled(False)
self.blockSignals(True)
op = self.uiOperatorDDL.currentText()
self.setEditor(plugin.createEditor(self, column, op, value))
self.setUpdatesEnabled(True)
self.blockSignals(False) |
def extract_names(sender):
"""Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[]
"""
sender = to_unicode(sender, precise=True)
# Remove non-alphabetical characters
sender = "".join([char if char.isalpha() else ' ' for char in sender])
# Remove too short words and words from "black" list i.e.
# words like `ru`, `gmail`, `com`, `org`, etc.
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
# Remove duplicates
names = list(set(sender))
return names | Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[] | Below is the instruction that describes the task:
### Input:
Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[]
### Response:
def extract_names(sender):
"""Tries to extract sender's names from `From:` header.
It could extract not only the actual names but e.g.
the name of the company, parts of email, etc.
>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[]
"""
sender = to_unicode(sender, precise=True)
# Remove non-alphabetical characters
sender = "".join([char if char.isalpha() else ' ' for char in sender])
# Remove too short words and words from "black" list i.e.
# words like `ru`, `gmail`, `com`, `org`, etc.
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
# Remove duplicates
names = list(set(sender))
return names |
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return [] | Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database. | Below is the instruction that describes the task:
### Input:
Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
### Response:
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return [] |
def reset_full(self, force=False, _meta=None):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Attributes to keep must be defined in the init: __basic__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self | Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal | Below is the instruction that describes the task:
### Input:
Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
### Response:
def reset_full(self, force=False, _meta=None):
""" Remove all accounts which can be recalculated based on Z, Y, F, FY
Parameters
----------
force: boolean, optional
If True, reset to flows although the system can not be
recalculated. Default: False
_meta: MRIOMetaData, optional
Metadata handler for logging, optional. Internal
"""
# Attriubtes to keep must be defined in the init: __basic__
strwarn = None
for df in self.__basic__:
if (getattr(self, df)) is None:
if force:
strwarn = ("Reset system warning - Recalculation after "
"reset not possible "
"because {} missing".format(df))
warnings.warn(strwarn, ResetWarning)
else:
raise ResetError("To few tables to recalculate the "
"system after reset ({} missing) "
"- reset can be forced by passing "
"'force=True')".format(df))
if _meta:
_meta._add_modify("Reset system to Z and Y")
if strwarn:
_meta._add_modify(strwarn)
[setattr(self, key, None)
for key in self.get_DataFrame(
data=False,
with_unit=False,
with_population=False)
if key not in self.__basic__]
return self |
def is_unprocessed_local_replica(pid):
"""Is local replica with status "queued"."""
return d1_gmn.app.models.LocalReplica.objects.filter(
pid__did=pid, info__status__status='queued'
).exists() | Is local replica with status "queued". | Below is the instruction that describes the task:
### Input:
Is local replica with status "queued".
### Response:
def is_unprocessed_local_replica(pid):
"""Is local replica with status "queued"."""
return d1_gmn.app.models.LocalReplica.objects.filter(
pid__did=pid, info__status__status='queued'
).exists() |
def assumed_state(self):
# pylint: disable=protected-access
"""Return whether the data is from an online vehicle."""
return (not self._controller.car_online[self.id()] and
(self._controller._last_update_time[self.id()] -
self._controller._last_wake_up_time[self.id()] >
self._controller.update_interval)) | Return whether the data is from an online vehicle. | Below is the instruction that describes the task:
### Input:
Return whether the data is from an online vehicle.
### Response:
def assumed_state(self):
# pylint: disable=protected-access
"""Return whether the data is from an online vehicle."""
return (not self._controller.car_online[self.id()] and
(self._controller._last_update_time[self.id()] -
self._controller._last_wake_up_time[self.id()] >
self._controller.update_interval)) |
def _std_err(self):
"""Standard error of the estimate (SEE). A scalar.
For standard errors of parameters, see _se_all, se_alpha, and se_beta.
"""
return np.sqrt(np.sum(np.square(self._resids), axis=1) / self._df_err) | Standard error of the estimate (SEE). A scalar.
For standard errors of parameters, see _se_all, se_alpha, and se_beta. | Below is the instruction that describes the task:
### Input:
Standard error of the estimate (SEE). A scalar.
For standard errors of parameters, see _se_all, se_alpha, and se_beta.
### Response:
def _std_err(self):
"""Standard error of the estimate (SEE). A scalar.
For standard errors of parameters, see _se_all, se_alpha, and se_beta.
"""
return np.sqrt(np.sum(np.square(self._resids), axis=1) / self._df_err) |
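As a quick check of the formula, a minimal numpy sketch with made-up residuals; resids and df_err below stand in for the instance's _resids and _df_err attributes:
import numpy as np
resids = np.array([[0.1, -0.2, 0.05, 0.0, 0.15],    # hypothetical residuals,
                   [0.2, -0.1, 0.10, -0.05, 0.0]])  # one row per rolling window
df_err = 3                                           # assumed error degrees of freedom
# Same computation as _std_err: sqrt(SSR / df_err), one SEE value per window
see = np.sqrt(np.sum(np.square(resids), axis=1) / df_err)
print(see)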
def get_charge(chebi_id):
'''Returns charge'''
if len(__CHARGES) == 0:
__parse_chemical_data()
return __CHARGES[chebi_id] if chebi_id in __CHARGES else float('NaN') | Returns charge | Below is the instruction that describes the task:
### Input:
Returns charge
### Response:
def get_charge(chebi_id):
'''Returns charge'''
if len(__CHARGES) == 0:
__parse_chemical_data()
return __CHARGES[chebi_id] if chebi_id in __CHARGES else float('NaN') |
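The lazy-load-then-lookup pattern is easy to exercise standalone; the cache and parser below are hypothetical stand-ins for __CHARGES and __parse_chemical_data, and the ChEBI ids and charges are made up for illustration:
_CHARGES = {}
def _parse_chemical_data():
    # stand-in for parsing the ChEBI flat files into the module-level cache
    _CHARGES.update({'CHEBI:29101': 1.0, 'CHEBI:16526': 0.0})
def get_charge(chebi_id):
    if len(_CHARGES) == 0:
        _parse_chemical_data()
    return _CHARGES[chebi_id] if chebi_id in _CHARGES else float('NaN')
print(get_charge('CHEBI:29101'))   # 1.0 (cache populated on first call)
print(get_charge('CHEBI:00000'))   # nan for ids that are not in the cache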
def run_simulation(wdir, arp=True, **kwargs):
"""
Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-)
"""
wdir = pathlib.Path(wdir)
cmd = "{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} " \
+ "{n_grid_x:d} {xspan_min:f} {xspan_max:f} " \
+ "{n_grid_y:d} {yspan_min:f} {yspan_max:f} " \
+ "{n_grid_z:d} {zspan_min:f} {zspan_max:f} " \
+ "{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} " \
+ "{n_coat:f} {k_coat:f}"
old_dir = pathlib.Path.cwd()
os.chdir(wdir)
kwargs["pathbhfield"] = get_binary(arp=arp)
if arp:
kwargs["mpdigit"] = 16
else:
kwargs["mpdigit"] = ""
# run simulation with kwargs
sp.check_output(cmd.format(**kwargs), shell=True)
# Go back to orgignal directory before checking (checking might fail)
os.chdir(old_dir)
# Check bhdebug.txt to make sure that you specify enough digits to
# overcome roundoff errors.
check_simulation(wdir) | Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-) | Below is the instruction that describes the task:
### Input:
Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-)
### Response:
def run_simulation(wdir, arp=True, **kwargs):
"""
Example
-------
100-nm silica sphere with 10-nm thick Ag coating,
embedded in water; arprec 20 digits; illuminated with YAG (1064nm);
scan xz plane (21x21, +-200nm)
bhfield-arp-db.exe mpdigit wl r_core r_coat
n_grid_x xspan_min xspan_max
n_grid_y yspan_min yspan_max
n_grid_z zspan_min zspan_max
case Kreibig
[n_med n_core k_core n_coat k_coat (case=other)]
bhfield-arp-db.exe 20 1.064 0.050 0.060
21 -0.2 0.2
1 0 0
21 -0.2 0.2
other 0
1.3205 1.53413 0 0.565838 7.23262
Explanation of parameters
-------------------------
mpdigit:
arprec's number of precision digits;
increase it to overcome round-off errors
wl[um]:
light wavelength in vacuum
r_core[um], r_coat[um]:
core & coat radii
n_grid_x xspan_min[um] xspan_max[um]:
number & span of grid points for field computation; x span
n_grid_y yspan_min[um] yspan_max[um]:
y span
n_grid_z zspan_min[um] zspan_max[um]:
z span
Kreibig:
Kreibig mean free path correction for Ag (0.0 - 1.0)
case:
nanoshell/liposome/HPC/barber/other
n_med n_core k_core n_coat k_coat (case=other only):
refractive indices of medium (real), core & coat (n, k)
If `case=other`, complex refractive indices
(n, k at the particular wavelength) must be specified.
Otherwise (case = nanoshell etc) the medium/core/coat
materials are predefined and the n,k values
are taken from the data file (Ag_palik.nk etc).
The latter reflects our own interest and is intended
for use in our lab, so general users may not find it useful :-)
"""
wdir = pathlib.Path(wdir)
cmd = "{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} " \
+ "{n_grid_x:d} {xspan_min:f} {xspan_max:f} " \
+ "{n_grid_y:d} {yspan_min:f} {yspan_max:f} " \
+ "{n_grid_z:d} {zspan_min:f} {zspan_max:f} " \
+ "{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} " \
+ "{n_coat:f} {k_coat:f}"
old_dir = pathlib.Path.cwd()
os.chdir(wdir)
kwargs["pathbhfield"] = get_binary(arp=arp)
if arp:
kwargs["mpdigit"] = 16
else:
kwargs["mpdigit"] = ""
# run simulation with kwargs
sp.check_output(cmd.format(**kwargs), shell=True)
# Go back to orgignal directory before checking (checking might fail)
os.chdir(old_dir)
# Check bhdebug.txt to make sure that you specify enough digits to
# overcome roundoff errors.
check_simulation(wdir) |
def load(self: T, **kwargs) -> T:
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
# load everything else sequentially
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self | Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute | Below is the instruction that describes the task:
### Input:
Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
### Response:
def load(self: T, **kwargs) -> T:
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {k: v._data for k, v in self.variables.items()
if isinstance(v._data, dask_array_type)}
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
# load everything else sequentially
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self |
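The core trick, gathering every lazy array, computing them in a single pass, then writing the results back, can be shown with plain dask arrays; this assumes dask is installed, and the dict plays the role of self.variables:
import numpy as np
import dask.array as da
variables = {
    'a': da.ones((4, 4), chunks=2),   # lazy
    'b': da.arange(10, chunks=5),     # lazy
    'c': np.zeros(3),                 # already in memory, handled separately
}
lazy = {k: v for k, v in variables.items() if isinstance(v, da.Array)}
evaluated = da.compute(*lazy.values())        # one graph execution for all of them
for k, data in zip(lazy, evaluated):
    variables[k] = data
print(type(variables['a']))   # numpy.ndarray once loaded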
def use_kwargs(args, locations=None, inherit=None, apply=None, **kwargs):
"""Inject keyword arguments from the specified webargs arguments into the
decorated view function.
Usage:
.. code-block:: python
from marshmallow import fields
@use_kwargs({'name': fields.Str(), 'category': fields.Str()})
def get_pets(**kwargs):
return Pet.query.filter_by(**kwargs).all()
:param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a
request and returns a :class:`Schema <marshmallow.Schema>`
:param locations: Default request locations to parse
:param inherit: Inherit args from parent classes
:param apply: Parse request with specified args
"""
kwargs.update({'locations': locations})
def wrapper(func):
options = {
'args': args,
'kwargs': kwargs,
}
annotate(func, 'args', [options], inherit=inherit, apply=apply)
return activate(func)
return wrapper | Inject keyword arguments from the specified webargs arguments into the
decorated view function.
Usage:
.. code-block:: python
from marshmallow import fields
@use_kwargs({'name': fields.Str(), 'category': fields.Str()})
def get_pets(**kwargs):
return Pet.query.filter_by(**kwargs).all()
:param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a
request and returns a :class:`Schema <marshmallow.Schema>`
:param locations: Default request locations to parse
:param inherit: Inherit args from parent classes
:param apply: Parse request with specified args | Below is the instruction that describes the task:
### Input:
Inject keyword arguments from the specified webargs arguments into the
decorated view function.
Usage:
.. code-block:: python
from marshmallow import fields
@use_kwargs({'name': fields.Str(), 'category': fields.Str()})
def get_pets(**kwargs):
return Pet.query.filter_by(**kwargs).all()
:param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a
request and returns a :class:`Schema <marshmallow.Schema>`
:param locations: Default request locations to parse
:param inherit: Inherit args from parent classes
:param apply: Parse request with specified args
### Response:
def use_kwargs(args, locations=None, inherit=None, apply=None, **kwargs):
"""Inject keyword arguments from the specified webargs arguments into the
decorated view function.
Usage:
.. code-block:: python
from marshmallow import fields
@use_kwargs({'name': fields.Str(), 'category': fields.Str()})
def get_pets(**kwargs):
return Pet.query.filter_by(**kwargs).all()
:param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>`
objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a
request and returns a :class:`Schema <marshmallow.Schema>`
:param locations: Default request locations to parse
:param inherit: Inherit args from parent classes
:param apply: Parse request with specified args
"""
kwargs.update({'locations': locations})
def wrapper(func):
options = {
'args': args,
'kwargs': kwargs,
}
annotate(func, 'args', [options], inherit=inherit, apply=apply)
return activate(func)
return wrapper |
def _register(self, session, url):
"""
Register a package to a repository.
"""
dist = self._poetry.file.parent / "dist"
file = dist / "{}-{}.tar.gz".format(
self._package.name, normalize_version(self._package.version.text)
)
if not file.exists():
raise RuntimeError('"{0}" does not exist.'.format(file.name))
data = self.post_data(file)
data.update({":action": "submit", "protocol_version": "1"})
data_to_send = self._prepare_data(data)
encoder = MultipartEncoder(data_to_send)
resp = session.post(
url,
data=encoder,
allow_redirects=False,
headers={"Content-Type": encoder.content_type},
)
resp.raise_for_status()
return resp | Register a package to a repository. | Below is the instruction that describes the task:
### Input:
Register a package to a repository.
### Response:
def _register(self, session, url):
"""
Register a package to a repository.
"""
dist = self._poetry.file.parent / "dist"
file = dist / "{}-{}.tar.gz".format(
self._package.name, normalize_version(self._package.version.text)
)
if not file.exists():
raise RuntimeError('"{0}" does not exist.'.format(file.name))
data = self.post_data(file)
data.update({":action": "submit", "protocol_version": "1"})
data_to_send = self._prepare_data(data)
encoder = MultipartEncoder(data_to_send)
resp = session.post(
url,
data=encoder,
allow_redirects=False,
headers={"Content-Type": encoder.content_type},
)
resp.raise_for_status()
return resp |
def set_description(self):
"""
Set the node description
"""
if self.device_info['type'] == 'Router':
self.node['description'] = '%s %s' % (self.device_info['type'],
self.device_info['model'])
else:
self.node['description'] = self.device_info['desc'] | Set the node description | Below is the instruction that describes the task:
### Input:
Set the node description
### Response:
def set_description(self):
"""
Set the node description
"""
if self.device_info['type'] == 'Router':
self.node['description'] = '%s %s' % (self.device_info['type'],
self.device_info['model'])
else:
self.node['description'] = self.device_info['desc'] |
def json_request(endpoint, verb='GET', session_options=None, **options):
"""Like :func:`molotov.request` but extracts json from the response.
"""
req = functools.partial(_request, endpoint, verb, session_options,
json=True, **options)
return _run_in_fresh_loop(req) | Like :func:`molotov.request` but extracts json from the response. | Below is the instruction that describes the task:
### Input:
Like :func:`molotov.request` but extracts json from the response.
### Response:
def json_request(endpoint, verb='GET', session_options=None, **options):
"""Like :func:`molotov.request` but extracts json from the response.
"""
req = functools.partial(_request, endpoint, verb, session_options,
json=True, **options)
return _run_in_fresh_loop(req) |
def trace_to_next_plane(self):
"""Trace the positions to the next plane."""
return list(map(lambda positions, deflections: np.subtract(positions, deflections),
self.positions, self.deflections)) | Trace the positions to the next plane. | Below is the instruction that describes the task:
### Input:
Trace the positions to the next plane.
### Response:
def trace_to_next_plane(self):
"""Trace the positions to the next plane."""
return list(map(lambda positions, deflections: np.subtract(positions, deflections),
self.positions, self.deflections)) |
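A minimal numpy illustration of the per-plane subtraction; the position and deflection grids below are made up:
import numpy as np
positions = [np.array([[0.0, 1.0], [2.0, 3.0]]),
             np.array([[1.0, 1.0]])]
deflections = [np.array([[0.1, 0.2], [0.0, 0.5]]),
               np.array([[0.3, 0.0]])]
traced = list(map(lambda pos, defl: np.subtract(pos, defl),
                  positions, deflections))
print(traced[0])   # positions of the first grid after tracing to the next plane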
def check_if_username_exists(self, username):
'''
Check if the username handles exists.
:param username: The username, in the form index:prefix/suffix
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
:return: True. If it does not exist, an exception is raised.
*Note:* Only the existence of the handle is verified. The existence or
validity of the index is not checked, because entries containing
a key are hidden anyway.
'''
LOGGER.debug('check_if_username_exists...')
_, handle = b2handle.utilhandle.remove_index_from_handle(username)
resp = self.send_handle_get_request(handle)
resp_content = decoded_response(resp)
if b2handle.hsresponses.does_handle_exist(resp):
handlerecord_json = json.loads(resp_content)
if not handlerecord_json['handle'] == handle:
raise GenericHandleError(
operation='Checking if username exists',
handle=handle,
reponse=resp,
msg='The check returned a different handle than was asked for.'
)
return True
elif b2handle.hsresponses.handle_not_found(resp):
msg = 'The username handle does not exist'
raise HandleNotFoundException(handle=handle, msg=msg, response=resp)
else:
op = 'checking if handle exists'
msg = 'Checking if username exists went wrong'
raise GenericHandleError(operation=op, handle=handle, response=resp, msg=msg) | Check if the username handles exists.
:param username: The username, in the form index:prefix/suffix
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
:return: True. If it does not exist, an exception is raised.
*Note:* Only the existence of the handle is verified. The existence or
validity of the index is not checked, because entries containing
a key are hidden anyway. | Below is the instruction that describes the task:
### Input:
Check if the username handles exists.
:param username: The username, in the form index:prefix/suffix
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
:return: True. If it does not exist, an exception is raised.
*Note:* Only the existence of the handle is verified. The existence or
validity of the index is not checked, because entries containing
a key are hidden anyway.
### Response:
def check_if_username_exists(self, username):
'''
Check if the username handles exists.
:param username: The username, in the form index:prefix/suffix
:raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
:raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
:return: True. If it does not exist, an exception is raised.
*Note:* Only the existence of the handle is verified. The existence or
validity of the index is not checked, because entries containing
a key are hidden anyway.
'''
LOGGER.debug('check_if_username_exists...')
_, handle = b2handle.utilhandle.remove_index_from_handle(username)
resp = self.send_handle_get_request(handle)
resp_content = decoded_response(resp)
if b2handle.hsresponses.does_handle_exist(resp):
handlerecord_json = json.loads(resp_content)
if not handlerecord_json['handle'] == handle:
raise GenericHandleError(
operation='Checking if username exists',
handle=handle,
reponse=resp,
msg='The check returned a different handle than was asked for.'
)
return True
elif b2handle.hsresponses.handle_not_found(resp):
msg = 'The username handle does not exist'
raise HandleNotFoundException(handle=handle, msg=msg, response=resp)
else:
op = 'checking if handle exists'
msg = 'Checking if username exists went wrong'
raise GenericHandleError(operation=op, handle=handle, response=resp, msg=msg) |
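The username is expected in the index:prefix/suffix form; stripping the index off (roughly what remove_index_from_handle does before the GET request) is a one-off split, sketched here with a made-up handle:
username = '300:11043.4/user-abc'          # hypothetical index:prefix/suffix
index, handle = username.split(':', 1)
print(index)    # '300'  (ignored by the existence check)
print(handle)   # '11043.4/user-abc' (what is actually resolved)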
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
cls = self.__class__
if hasattr(self, 'host_name'):
for char in cls.illegal_object_name_chars:
if char in self.host_name:
self.add_error("[%s::%s] host_name contains an illegal character: %s"
% (self.my_type, self.get_name(), char))
state = False
# Fred: do not alert about missing check_command for an host... this because 1/ it is
# very verbose if hosts are not checked and 2/ because it is the Nagios default behavior
# if not self.check_command:
# self.add_warning("[%s::%s] has no defined check command"
# % (self.my_type, self.get_name()))
if self.notifications_enabled and not self.contacts:
self.add_warning("[%s::%s] notifications are enabled but no contacts nor "
"contact_groups property is defined for this host"
% (self.my_type, self.get_name()))
return super(Host, self).is_correct() and state | Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool | Below is the instruction that describes the task:
### Input:
Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
### Response:
def is_correct(self):
"""Check if this object configuration is correct ::
* Check our own specific properties
* Call our parent class is_correct checker
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
state = True
# Internal checks before executing inherited function...
cls = self.__class__
if hasattr(self, 'host_name'):
for char in cls.illegal_object_name_chars:
if char in self.host_name:
self.add_error("[%s::%s] host_name contains an illegal character: %s"
% (self.my_type, self.get_name(), char))
state = False
# Fred: do not alert about missing check_command for an host... this because 1/ it is
# very verbose if hosts are not checked and 2/ because it is the Nagios default behavior
# if not self.check_command:
# self.add_warning("[%s::%s] has no defined check command"
# % (self.my_type, self.get_name()))
if self.notifications_enabled and not self.contacts:
self.add_warning("[%s::%s] notifications are enabled but no contacts nor "
"contact_groups property is defined for this host"
% (self.my_type, self.get_name()))
return super(Host, self).is_correct() and state |
def AddMethod(self, function, name=None):
"""
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
"""
method = MethodWrapper(self, function, name)
self.added_methods.append(method) | Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself. | Below is the instruction that describes the task:
### Input:
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
### Response:
def AddMethod(self, function, name=None):
"""
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
"""
method = MethodWrapper(self, function, name)
self.added_methods.append(method) |
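MethodWrapper is SCons-specific, but the effect of AddMethod can be sketched with the standard library's types.MethodType standing in for it:
import types
class Environment:
    pass
def double(self, x):
    return 2 * x
env = Environment()
# Roughly what env.AddMethod(double, 'Double') achieves: bind the function
# to this one instance under the chosen name.
setattr(env, 'Double', types.MethodType(double, env))
print(env.Double(21))   # 42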
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd() | :return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor | Below is the instruction that describes the task:
### Input:
:return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor
### Response:
def fd(self):
""":return: file descriptor used to create the underlying mapping.
**Note:** it is not required to be valid anymore
:raise ValueError: if the mapping was not created by a file descriptor"""
if isinstance(self._rlist.path_or_fd(), string_types()):
raise ValueError("File descriptor queried although mapping was generated from path")
# END handle type
return self._rlist.path_or_fd() |
def get_version():
"""
Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev);
"""
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
_glfw.glfwGetVersion(major, minor, rev)
return major_value.value, minor_value.value, rev_value.value | Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev); | Below is the instruction that describes the task:
### Input:
Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev);
### Response:
def get_version():
"""
Retrieves the version of the GLFW library.
Wrapper for:
void glfwGetVersion(int* major, int* minor, int* rev);
"""
major_value = ctypes.c_int(0)
major = ctypes.pointer(major_value)
minor_value = ctypes.c_int(0)
minor = ctypes.pointer(minor_value)
rev_value = ctypes.c_int(0)
rev = ctypes.pointer(rev_value)
_glfw.glfwGetVersion(major, minor, rev)
return major_value.value, minor_value.value, rev_value.value |
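The ctypes out-parameter pattern can be tried without GLFW installed; the callee below is a made-up stand-in that writes through the pointers the way glfwGetVersion would:
import ctypes
def fake_get_version(major_ptr, minor_ptr, rev_ptr):
    # stand-in for the C call: write results through the pointers
    major_ptr.contents.value = 3
    minor_ptr.contents.value = 3
    rev_ptr.contents.value = 8
major, minor, rev = ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0)
fake_get_version(ctypes.pointer(major), ctypes.pointer(minor), ctypes.pointer(rev))
print(major.value, minor.value, rev.value)   # 3 3 8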
def _explore_storage(self):
"""Generator of all files contained in media storage."""
path = ''
dirs = [path]
while dirs:
path = dirs.pop()
subdirs, files = self.media_storage.listdir(path)
for media_filename in files:
yield os.path.join(path, media_filename)
dirs.extend([os.path.join(path, subdir) for subdir in subdirs]) | Generator of all files contained in media storage. | Below is the instruction that describes the task:
### Input:
Generator of all files contained in media storage.
### Response:
def _explore_storage(self):
"""Generator of all files contained in media storage."""
path = ''
dirs = [path]
while dirs:
path = dirs.pop()
subdirs, files = self.media_storage.listdir(path)
for media_filename in files:
yield os.path.join(path, media_filename)
dirs.extend([os.path.join(path, subdir) for subdir in subdirs]) |
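The same stack-based (non-recursive) walk, written against the local filesystem instead of a Django storage backend, since the listdir-and-stack structure is the interesting part:
import os
def explore(root):
    """Yield every file path under root, relative to root, without recursion."""
    dirs = ['']
    while dirs:
        rel = dirs.pop()
        full = os.path.join(root, rel)
        entries = os.listdir(full)
        subdirs = [e for e in entries if os.path.isdir(os.path.join(full, e))]
        files = [e for e in entries if os.path.isfile(os.path.join(full, e))]
        for name in files:
            yield os.path.join(rel, name)
        dirs.extend(os.path.join(rel, d) for d in subdirs)
# for path in explore('.'): print(path)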
def status(self):
"""
The current status of the event (started, finished or pending).
"""
myNow = timezone.localtime(timezone=self.tz)
daysDelta = dt.timedelta(days=self.num_days - 1)
# NB: postponements can be created after the until date
# so ignore that
todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
if eventStart is None:
return "finished"
eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
event.time_to, self.tz)
if event.time_from is None:
eventStart += _1day
if eventStart < myNow < eventFinish:
# if there are two occurences on the same day then we may miss
# that one of them has started
return "started"
if (self.repeat.until and eventFinish < myNow and
self.__afterOrPostponedTo(myNow)[0] is None):
# only just wound up, the last occurence was earlier today
return "finished" | The current status of the event (started, finished or pending). | Below is the the instruction that describes the task:
### Input:
The current status of the event (started, finished or pending).
### Response:
def status(self):
"""
The current status of the event (started, finished or pending).
"""
myNow = timezone.localtime(timezone=self.tz)
daysDelta = dt.timedelta(days=self.num_days - 1)
# NB: postponements can be created after the until date
# so ignore that
todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
if eventStart is None:
return "finished"
eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
event.time_to, self.tz)
if event.time_from is None:
eventStart += _1day
if eventStart < myNow < eventFinish:
# if there are two occurences on the same day then we may miss
# that one of them has started
return "started"
if (self.repeat.until and eventFinish < myNow and
self.__afterOrPostponedTo(myNow)[0] is None):
# only just wound up, the last occurence was earlier today
return "finished" |
def setbit(self, key, offset, value):
"""Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
if value not in (0, 1):
raise ValueError("value argument must be either 1 or 0")
return self.execute(b'SETBIT', key, offset, value) | Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1 | Below is the instruction that describes the task:
### Input:
Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1
### Response:
def setbit(self, key, offset, value):
"""Sets or clears the bit at offset in the string value stored at key.
:raises TypeError: if offset is not int
:raises ValueError: if offset is less than 0 or value is not 0 or 1
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
if value not in (0, 1):
raise ValueError("value argument must be either 1 or 0")
return self.execute(b'SETBIT', key, offset, value) |
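How the validation behaves from the caller's side; the class below is a stub whose execute() just echoes the command, since no Redis server is assumed:
class FakeRedis:
    def execute(self, *args):
        return args   # a real client would send this command to the server
    def setbit(self, key, offset, value):
        if not isinstance(offset, int):
            raise TypeError("offset argument must be int")
        if offset < 0:
            raise ValueError("offset must be greater equal 0")
        if value not in (0, 1):
            raise ValueError("value argument must be either 1 or 0")
        return self.execute(b'SETBIT', key, offset, value)
r = FakeRedis()
print(r.setbit('mykey', 7, 1))    # (b'SETBIT', 'mykey', 7, 1)
# r.setbit('mykey', -1, 1)        # would raise ValueError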
def parse_map_Ka(self):
"""Ambient map"""
Kd = os.path.join(self.dir, " ".join(self.values[1:]))
self.this_material.set_texture_ambient(Kd) | Ambient map | Below is the instruction that describes the task:
### Input:
Ambient map
### Response:
def parse_map_Ka(self):
"""Ambient map"""
Kd = os.path.join(self.dir, " ".join(self.values[1:]))
self.this_material.set_texture_ambient(Kd) |
def get_parent_vault_ids(self, vault_id):
"""Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id) | Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_parent_vault_ids(self, vault_id):
"""Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id) |
def load(self, filename="temp.pkl"):
"""
Save TM in the filename specified above
"""
inputFile = open(filename, 'rb')
self.tm = cPickle.load(inputFile) | Save TM in the filename specified above | Below is the instruction that describes the task:
### Input:
Save TM in the filename specified above
### Response:
def load(self, filename="temp.pkl"):
"""
Save TM in the filename specified above
"""
inputFile = open(filename, 'rb')
self.tm = cPickle.load(inputFile) |
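A self-contained round trip for the pickle load above (the method loads despite its docstring; Python 3's pickle stands in for cPickle). Unlike the method, this sketch closes the file with a context manager:
import pickle
tm = {'weights': [0.1, 0.2, 0.3]}          # stand-in for a serialized TM object
with open('temp.pkl', 'wb') as f:
    pickle.dump(tm, f)
with open('temp.pkl', 'rb') as f:          # the load() step shown above
    restored = pickle.load(f)
print(restored)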
def list_connection_channels(self, name):
"""
List of all channels for a given connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}/channels'.format(
urllib.parse.quote_plus(name)
)) | List of all channels for a given connection.
:param name: The connection name
:type name: str | Below is the instruction that describes the task:
### Input:
List of all channels for a given connection.
:param name: The connection name
:type name: str
### Response:
def list_connection_channels(self, name):
"""
List of all channels for a given connection.
:param name: The connection name
:type name: str
"""
return self._api_get('/api/connections/{0}/channels'.format(
urllib.parse.quote_plus(name)
)) |
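The quote_plus step matters because RabbitMQ connection names normally contain spaces, colons and arrows; a quick look at the path it produces, with a made-up connection name:
import urllib.parse
name = '127.0.0.1:53380 -> 127.0.0.1:5672'
path = '/api/connections/{0}/channels'.format(urllib.parse.quote_plus(name))
print(path)
# /api/connections/127.0.0.1%3A53380+-%3E+127.0.0.1%3A5672/channels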
def _get_object(data, position, obj_end, opts, dummy):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
if _raw_document_class(opts.document_class):
return (opts.document_class(data[position:end + 1], opts),
position + obj_size)
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position | Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef. | Below is the instruction that describes the task:
### Input:
Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.
### Response:
def _get_object(data, position, obj_end, opts, dummy):
"""Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef."""
obj_size = _UNPACK_INT(data[position:position + 4])[0]
end = position + obj_size - 1
if data[end:position + obj_size] != b"\x00":
raise InvalidBSON("bad eoo")
if end >= obj_end:
raise InvalidBSON("invalid object length")
if _raw_document_class(opts.document_class):
return (opts.document_class(data[position:end + 1], opts),
position + obj_size)
obj = _elements_to_dict(data, position + 4, end, opts)
position += obj_size
if "$ref" in obj:
return (DBRef(obj.pop("$ref"), obj.pop("$id", None),
obj.pop("$db", None), obj), position)
return obj, position |
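The length-prefix handling in isolation: a BSON document begins with its total size as a little-endian int32 and ends with a NUL byte, and _UNPACK_INT is just a prepacked struct unpacker along these lines:
import struct
_UNPACK_INT = struct.Struct('<i').unpack
doc = b'\x05\x00\x00\x00\x00'          # the empty BSON document: 5 bytes total
size = _UNPACK_INT(doc[0:4])[0]
print(size)                             # 5
print(doc[size - 1:size] == b'\x00')    # True: well-formed terminator ("eoo")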
def build(env, ciprcfg, console):
"""
Build the current project for distribution
"""
os.putenv('CIPR_PACKAGES', env.package_dir)
os.putenv('CIPR_PROJECT', env.project_directory)
build_settings = path.join(env.project_directory, 'build.settings')
with open(build_settings, 'r') as f:
data = f.read()
m = _build_re.search(data)
if m:
ver = int(m.group(2))
data = data.replace(m.group(0), 'CFBundleVersion = "%d"' % (ver + 1))
with open(build_settings, 'w') as f:
f.write(data)
if path.exists(env.build_dir):
shutil.rmtree(env.build_dir)
os.makedirs(env.build_dir)
if path.exists(env.dist_dir):
shutil.rmtree(env.dist_dir)
os.makedirs(env.dist_dir)
console.normal('Building in %s' % env.build_dir)
console.normal('Copy project files...')
for src, dst in util.sync_dir_to(env.project_directory, env.build_dir, exclude=['.cipr', '.git', 'build', 'dist', '.*']):
console.quiet(' %s -> %s' % (src, dst))
if src.endswith('.lua'):
_fix_lua_module_name(src, dst)
console.normal('Copy cipr packages...')
for package in ciprcfg.packages.keys():
for src, dst in util.sync_lua_dir_to(path.join(env.package_dir, package), env.build_dir, exclude=['.git'], include=['*.lua']):
console.quiet(' %s -> %s' % (src, dst))
if src.endswith('.lua'):
_fix_lua_module_name(src, dst)
src = path.join(env.code_dir, 'cipr.lua')
dst = path.join(env.build_dir, 'cipr.lua')
shutil.copy(src, dst)
cmd = AND(clom.cd(env.build_dir), clom[CORONA_SIMULATOR_PATH](env.build_dir))
console.normal('Be sure to output your app to %s' % env.dist_dir)
try:
cmd.shell.execute()
except KeyboardInterrupt:
pass | Build the current project for distribution | Below is the instruction that describes the task:
### Input:
Build the current project for distribution
### Response:
def build(env, ciprcfg, console):
"""
Build the current project for distribution
"""
os.putenv('CIPR_PACKAGES', env.package_dir)
os.putenv('CIPR_PROJECT', env.project_directory)
build_settings = path.join(env.project_directory, 'build.settings')
with open(build_settings, 'r') as f:
data = f.read()
m = _build_re.search(data)
if m:
ver = int(m.group(2))
data = data.replace(m.group(0), 'CFBundleVersion = "%d"' % (ver + 1))
with open(build_settings, 'w') as f:
f.write(data)
if path.exists(env.build_dir):
shutil.rmtree(env.build_dir)
os.makedirs(env.build_dir)
if path.exists(env.dist_dir):
shutil.rmtree(env.dist_dir)
os.makedirs(env.dist_dir)
console.normal('Building in %s' % env.build_dir)
console.normal('Copy project files...')
for src, dst in util.sync_dir_to(env.project_directory, env.build_dir, exclude=['.cipr', '.git', 'build', 'dist', '.*']):
console.quiet(' %s -> %s' % (src, dst))
if src.endswith('.lua'):
_fix_lua_module_name(src, dst)
console.normal('Copy cipr packages...')
for package in ciprcfg.packages.keys():
for src, dst in util.sync_lua_dir_to(path.join(env.package_dir, package), env.build_dir, exclude=['.git'], include=['*.lua']):
console.quiet(' %s -> %s' % (src, dst))
if src.endswith('.lua'):
_fix_lua_module_name(src, dst)
src = path.join(env.code_dir, 'cipr.lua')
dst = path.join(env.build_dir, 'cipr.lua')
shutil.copy(src, dst)
cmd = AND(clom.cd(env.build_dir), clom[CORONA_SIMULATOR_PATH](env.build_dir))
console.normal('Be sure to output your app to %s' % env.dist_dir)
try:
cmd.shell.execute()
except KeyboardInterrupt:
pass |
def amchar_to_int(amchar, hij=False):
'''Convert an angular momentum integer to a character
The return value is a list of integers (to handle sp, spd, ... orbitals)
For example, converts 'p' to [1] and 'sp' to [0,1]
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
'''
if hij:
amchar_map = _amchar_map_hij
else:
amchar_map = _amchar_map_hik
amchar_lower = amchar.lower()
amint = []
for c in amchar_lower:
if c not in amchar_map:
raise KeyError('Angular momentum character {} is not valid'.format(c))
amint.append(amchar_map.index(c))
return amint | Convert an angular momentum integer to a character
The return value is a list of integers (to handle sp, spd, ... orbitals)
For example, converts 'p' to [1] and 'sp' to [0,1]
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j) | Below is the instruction that describes the task:
### Input:
Convert an angular momentum integer to a character
The return value is a list of integers (to handle sp, spd, ... orbitals)
For example, converts 'p' to [1] and 'sp' to [0,1]
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
### Response:
def amchar_to_int(amchar, hij=False):
'''Convert an angular momentum integer to a character
The return value is a list of integers (to handle sp, spd, ... orbitals)
For example, converts 'p' to [1] and 'sp' to [0,1]
If hij is True, the ordering spdfghijkl is used. Otherwise, the
ordering will be spdfghikl (skipping j)
'''
if hij:
amchar_map = _amchar_map_hij
else:
amchar_map = _amchar_map_hik
amchar_lower = amchar.lower()
amint = []
for c in amchar_lower:
if c not in amchar_map:
raise KeyError('Angular momentum character {} is not valid'.format(c))
amint.append(amchar_map.index(c))
return amint |
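Despite the first docstring line, the function maps characters to integers, not the other way around. A runnable sketch with the two orderings written out explicitly; these list contents are assumptions about what _amchar_map_hij and _amchar_map_hik hold:
_amchar_map_hij = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'j', 'k', 'l']
_amchar_map_hik = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'k', 'l']   # skips 'j'
def amchar_to_int(amchar, hij=False):
    amchar_map = _amchar_map_hij if hij else _amchar_map_hik
    return [amchar_map.index(c) for c in amchar.lower()]
print(amchar_to_int('p'))              # [1]
print(amchar_to_int('sp'))             # [0, 1]
print(amchar_to_int('k'))              # [7] in the default (j-skipping) ordering
print(amchar_to_int('k', hij=True))    # [8]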
def draw_tree_grid(self,
nrows=None,
ncols=None,
start=0,
fixed_order=False,
shared_axis=False,
**kwargs):
"""
Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary.
"""
# return nothing if tree is empty
if not self.treelist:
print("Treelist is empty")
return None, None
# make a copy of the treelist so we don't modify the original
if not fixed_order:
treelist = self.copy().treelist
else:
if fixed_order is True:
fixed_order = self.treelist[0].get_tip_labels()
treelist = [
ToyTree(i, fixed_order=fixed_order)
for i in self.copy().treelist
]
# apply kwargs styles to the individual tree styles
for tree in treelist:
tree.style.update(kwargs)
# get reasonable values for x,y given treelist length
if not (ncols or nrows):
ncols = 5
nrows = 1
elif not (ncols and nrows):
if ncols:
if ncols == 1:
if self.ntrees <= 5:
nrows = self.ntrees
else:
nrows = 2
else:
if self.ntrees <= 10:
nrows = 2
else:
nrows = 3
if nrows:
if nrows == 1:
if self.ntrees <= 5:
ncols = self.ntrees
else:
ncols = 5
else:
if self.ntrees <= 10:
ncols = 5
else:
ncols = 3
else:
pass
# Return TereGrid object for debugging
draw = TreeGrid(treelist)
if kwargs.get("debug"):
return draw
# Call update to draw plot. Kwargs still here for width, height, axes
canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
return canvas, axes | Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary. | Below is the instruction that describes the task:
### Input:
Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary.
### Response:
def draw_tree_grid(self,
nrows=None,
ncols=None,
start=0,
fixed_order=False,
shared_axis=False,
**kwargs):
"""
Draw a slice of x*y trees into a x,y grid non-overlapping.
Parameters:
-----------
x (int):
Number of grid cells in x dimension. Default=automatically set.
y (int):
Number of grid cells in y dimension. Default=automatically set.
start (int):
Starting index of tree slice from .treelist.
kwargs (dict):
Toytree .draw() arguments as a dictionary.
"""
# return nothing if tree is empty
if not self.treelist:
print("Treelist is empty")
return None, None
# make a copy of the treelist so we don't modify the original
if not fixed_order:
treelist = self.copy().treelist
else:
if fixed_order is True:
fixed_order = self.treelist[0].get_tip_labels()
treelist = [
ToyTree(i, fixed_order=fixed_order)
for i in self.copy().treelist
]
# apply kwargs styles to the individual tree styles
for tree in treelist:
tree.style.update(kwargs)
# get reasonable values for x,y given treelist length
if not (ncols or nrows):
ncols = 5
nrows = 1
elif not (ncols and nrows):
if ncols:
if ncols == 1:
if self.ntrees <= 5:
nrows = self.ntrees
else:
nrows = 2
else:
if self.ntrees <= 10:
nrows = 2
else:
nrows = 3
if nrows:
if nrows == 1:
if self.ntrees <= 5:
ncols = self.ntrees
else:
ncols = 5
else:
if self.ntrees <= 10:
ncols = 5
else:
ncols = 3
else:
pass
# Return TereGrid object for debugging
draw = TreeGrid(treelist)
if kwargs.get("debug"):
return draw
# Call update to draw plot. Kwargs still here for width, height, axes
canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
return canvas, axes |
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir):
"""
Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything setup. Then, depending on whether deamon mode was specified or not,
the process may be forked (or not) and the server will be started.
"""
_main(**locals()) | Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything setup. Then, depending on whether deamon mode was specified or not,
the process may be forked (or not) and the server will be started. | Below is the instruction that describes the task:
### Input:
Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything setup. Then, depending on whether deamon mode was specified or not,
the process may be forked (or not) and the server will be started.
### Response:
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir):
"""
Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything setup. Then, depending on whether deamon mode was specified or not,
the process may be forked (or not) and the server will be started.
"""
_main(**locals()) |
def stopped(name, connection=None, username=None, password=None):
'''
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
'''
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password) | Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped | Below is the instruction that describes the task:
### Input:
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
### Response:
def stopped(name, connection=None, username=None, password=None):
'''
Stops a VM by shutting it down nicely.
.. versionadded:: 2016.3.0
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: yaml
domain_name:
virt.stopped
'''
return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down",
connection=connection, username=username, password=password) |
def _create_table_and_update_context(node, context):
"""Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table.
"""
schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
table = context.compiler_metadata.get_table(schema_type_name).alias()
context.query_path_to_selectable[node.query_path] = table
return table | Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table. | Below is the instruction that describes the task:
### Input:
Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table.
### Response:
def _create_table_and_update_context(node, context):
"""Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table.
"""
schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
table = context.compiler_metadata.get_table(schema_type_name).alias()
context.query_path_to_selectable[node.query_path] = table
return table |
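The aliasing step on its own, with a throwaway SQLAlchemy table (assuming SQLAlchemy is available; the schema is invented):
from sqlalchemy import Column, Integer, MetaData, String, Table
metadata = MetaData()
animal = Table('animal', metadata,
               Column('id', Integer, primary_key=True),
               Column('name', String))
a1 = animal.alias()
a2 = animal.alias()
print(a1 is a2)               # False: each traversal node gets an independent alias
print(a1.element is animal)   # True: both aliases wrap the same underlying table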
def attributes(self, **kwargs): # pragma: no cover
"""Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example.
"""
path = "/directory-sync-service/v1/attributes"
r = self._httpclient.request(
method="GET",
path=path,
url=self.url,
**kwargs
)
return r | Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example. | Below is the instruction that describes the task:
### Input:
Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example.
### Response:
def attributes(self, **kwargs): # pragma: no cover
"""Retrieve the attribute configuration object.
Retrieves a mapping that identifies the custom directory
attributes configured for the Directory SyncService instance,
and the mapping of the custom attributes to standard directory
attributes.
Args:
**kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.
Returns:
requests.Response: Requests Response() object.
Examples:
Refer to ``directory_attributes.py`` example.
"""
path = "/directory-sync-service/v1/attributes"
r = self._httpclient.request(
method="GET",
path=path,
url=self.url,
**kwargs
)
return r |
def code_deparse_around_offset(name, offset, co, out=StringIO(),
version=None, is_pypy=None,
debug_opts=DEFAULT_DEBUG_OPTS):
"""
Like deparse_code(), but given a function/module name and
offset, finds the node closest to offset. If offset is not an instruction boundary,
we raise an IndexError.
"""
assert iscode(co)
if version is None:
version = sysinfo2float()
if is_pypy is None:
is_pypy = IS_PYPY
deparsed = code_deparse(co, out, version, is_pypy, debug_opts)
if (name, offset) in deparsed.offsets.keys():
# This is the easy case
return deparsed
valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)]
offset_list = sorted([t[1] for t in valid_offsets if t[0] == name])
# FIXME: should check for branching?
found_offset = find_gt(offset_list, offset)
deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset]
return deparsed | Like deparse_code(), but given a function/module name and
offset, finds the node closest to offset. If offset is not an instruction boundary,
we raise an IndexError. | Below is the the instruction that describes the task:
### Input:
Like deparse_code(), but given a function/module name and
offset, finds the node closest to offset. If offset is not an instruction boundary,
we raise an IndexError.
### Response:
def code_deparse_around_offset(name, offset, co, out=StringIO(),
version=None, is_pypy=None,
debug_opts=DEFAULT_DEBUG_OPTS):
"""
Like deparse_code(), but given a function/module name and
offset, finds the node closest to offset. If offset is not an instruction boundary,
we raise an IndexError.
"""
assert iscode(co)
if version is None:
version = sysinfo2float()
if is_pypy is None:
is_pypy = IS_PYPY
deparsed = code_deparse(co, out, version, is_pypy, debug_opts)
if (name, offset) in deparsed.offsets.keys():
# This is the easy case
return deparsed
valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)]
offset_list = sorted([t[1] for t in valid_offsets if t[0] == name])
# FIXME: should check for branching?
found_offset = find_gt(offset_list, offset)
deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset]
return deparsed |
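A minimal usage sketch for the helper above, assuming code_deparse_around_offset and its deparsing machinery are importable; the sample function and the offset value 2 are arbitrary choices, not taken from the source.

from io import StringIO

def sample(a, b):
    return a + b if a > b else a - b

co = sample.__code__
out = StringIO()
# Map bytecode offset 2 of `sample` to the nearest deparsed fragment at or
# after that offset; the result is also cached under ('sample', 2) in .offsets.
deparsed = code_deparse_around_offset('sample', 2, co, out=out)
print(deparsed.offsets['sample', 2])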
def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
"""
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
Parameters
----------
N : int
sequence length
NW : float, unitless
standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
but with dt taken as 1
Kmax : int
number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
v is an array of DPSS windows shaped (Kmax, N)
e are the eigenvalues
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430
"""
Kmax = int(Kmax)
W = float(NW) / N
nidx = np.arange(N, dtype='d')
# In this case, we create the dpss windows of the smaller size
# (interp_from) and then interpolate to the larger size (N)
if interp_from is not None:
if interp_from > N:
e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
e_s += 'and N is: %s. ' % N
e_s += 'Please enter interp_from smaller than N.'
raise ValueError(e_s)
dpss = []
d, e = dpss_windows(interp_from, NW, Kmax)
for this_d in d:
x = np.arange(this_d.shape[-1])
I = interpolate.interp1d(x, this_d, kind=interp_kind)
d_temp = I(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False))
# Rescale:
d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
dpss.append(d_temp)
dpss = np.array(dpss)
else:
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
off_diag = np.zeros_like(nidx)
off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
# put the diagonals in LAPACK "packed" storage
ab = np.zeros((2, N), 'd')
ab[1] = diagonal
ab[0, 1:] = off_diag[:-1]
# only calculate the highest Kmax eigenvalues
w = linalg.eigvals_banded(ab, select='i',
select_range=(N - Kmax, N - 1))
w = w[::-1]
# find the corresponding eigenvectors via inverse iteration
t = np.linspace(0, np.pi, N)
dpss = np.zeros((Kmax, N), 'd')
for k in range(Kmax):
dpss[k] = tridi_inverse_iteration(
diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
)
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2 * i] *= -1
# rather than test the sign of one point, test the sign of the
# linear slope up to the first (largest) peak
pk = np.argmax(np.abs(dpss[1::2, :N//2]), axis=1)
for i, p in enumerate(pk):
if np.sum(dpss[2 * i + 1, :p]) < 0:
dpss[2 * i + 1] *= -1
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
dpss_rxx = autocorr(dpss) * N
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
eigvals = np.dot(dpss_rxx, r)
return dpss, eigvals | Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
Parameters
----------
N : int
sequence length
NW : float, unitless
standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
but with dt taken as 1
Kmax : int
number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
v is an array of DPSS windows shaped (Kmax, N)
e are the eigenvalues
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430 | Below is the the instruction that describes the task:
### Input:
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
Parameters
----------
N : int
sequence length
NW : float, unitless
standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
but with dt taken as 1
Kmax : int
number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
v is an array of DPSS windows shaped (Kmax, N)
e are the eigenvalues
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430
### Response:
def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'):
"""
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]
for a given frequency-spacing multiple NW and sequence length N.
Parameters
----------
N : int
sequence length
NW : float, unitless
standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt
but with dt taken as 1
Kmax : int
number of DPSS windows to return is Kmax (orders 0 through Kmax-1)
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
v is an array of DPSS windows shaped (Kmax, N)
e are the eigenvalues
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430
"""
Kmax = int(Kmax)
W = float(NW) / N
nidx = np.arange(N, dtype='d')
# In this case, we create the dpss windows of the smaller size
# (interp_from) and then interpolate to the larger size (N)
if interp_from is not None:
if interp_from > N:
e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
e_s += 'and N is: %s. ' % N
e_s += 'Please enter interp_from smaller than N.'
raise ValueError(e_s)
dpss = []
d, e = dpss_windows(interp_from, NW, Kmax)
for this_d in d:
x = np.arange(this_d.shape[-1])
I = interpolate.interp1d(x, this_d, kind=interp_kind)
d_temp = I(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False))
# Rescale:
d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2))
dpss.append(d_temp)
dpss = np.array(dpss)
else:
# here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here I set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
off_diag = np.zeros_like(nidx)
off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2.
# put the diagonals in LAPACK "packed" storage
ab = np.zeros((2, N), 'd')
ab[1] = diagonal
ab[0, 1:] = off_diag[:-1]
# only calculate the highest Kmax eigenvalues
w = linalg.eigvals_banded(ab, select='i',
select_range=(N - Kmax, N - 1))
w = w[::-1]
# find the corresponding eigenvectors via inverse iteration
t = np.linspace(0, np.pi, N)
dpss = np.zeros((Kmax, N), 'd')
for k in range(Kmax):
dpss[k] = tridi_inverse_iteration(
diagonal, off_diag, w[k], x0=np.sin((k + 1) * t)
)
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
# * antisymmetric tapers should begin with a positive lobe
fix_symmetric = (dpss[0::2].sum(axis=1) < 0)
for i, f in enumerate(fix_symmetric):
if f:
dpss[2 * i] *= -1
# rather than test the sign of one point, test the sign of the
# linear slope up to the first (largest) peak
pk = np.argmax(np.abs(dpss[1::2, :N//2]), axis=1)
for i, p in enumerate(pk):
if np.sum(dpss[2 * i + 1, :p]) < 0:
dpss[2 * i + 1] *= -1
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
dpss_rxx = autocorr(dpss) * N
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
eigvals = np.dot(dpss_rxx, r)
return dpss, eigvals |
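A short usage sketch, assuming dpss_windows and its helpers (numpy, scipy.linalg, tridi_inverse_iteration, autocorr) are importable from the surrounding module; the taper parameters are arbitrary.

import numpy as np

N, NW, Kmax = 256, 4, 7
tapers, eigvals = dpss_windows(N, NW, Kmax)

print(tapers.shape)          # (7, 256): one taper per row, ordered by concentration
print(np.round(eigvals, 4))  # concentration values, close to 1 for the first few tapers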
def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
"""
Converts all of the non-numeric fields from spatialOutput and temporalOutput
into their scalar equivalents and records them in the output dictionary.
:param spatialOutput: The results of topDownCompute() for the spatial input.
:param temporalOutput: The results of topDownCompute() for the temporal
input.
:param output: The main dictionary of outputs passed to compute(). It is
expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
are mapped to numpy arrays.
"""
encoders = self.encoder.getEncoderList()
types = self.encoder.getDecoderOutputFieldTypes()
for i, (encoder, type) in enumerate(zip(encoders, types)):
spatialData = spatialOutput[i]
temporalData = temporalOutput[i]
if type != FieldMetaType.integer and type != FieldMetaType.float:
# TODO: Make sure that this doesn't modify any state
spatialData = encoder.getScalars(spatialData)[0]
temporalData = encoder.getScalars(temporalData)[0]
assert isinstance(spatialData, (float, int))
assert isinstance(temporalData, (float, int))
output['spatialTopDownOut'][i] = spatialData
output['temporalTopDownOut'][i] = temporalData | Converts all of the non-numeric fields from spatialOutput and temporalOutput
into their scalar equivalents and records them in the output dictionary.
:param spatialOutput: The results of topDownCompute() for the spatial input.
:param temporalOutput: The results of topDownCompute() for the temporal
input.
:param output: The main dictionary of outputs passed to compute(). It is
expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
are mapped to numpy arrays. | Below is the the instruction that describes the task:
### Input:
Converts all of the non-numeric fields from spatialOutput and temporalOutput
into their scalar equivalents and records them in the output dictionary.
:param spatialOutput: The results of topDownCompute() for the spatial input.
:param temporalOutput: The results of topDownCompute() for the temporal
input.
:param output: The main dictionary of outputs passed to compute(). It is
expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
are mapped to numpy arrays.
### Response:
def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
"""
Converts all of the non-numeric fields from spatialOutput and temporalOutput
into their scalar equivalents and records them in the output dictionary.
:param spatialOutput: The results of topDownCompute() for the spatial input.
:param temporalOutput: The results of topDownCompute() for the temporal
input.
:param output: The main dictionary of outputs passed to compute(). It is
expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that
are mapped to numpy arrays.
"""
encoders = self.encoder.getEncoderList()
types = self.encoder.getDecoderOutputFieldTypes()
for i, (encoder, type) in enumerate(zip(encoders, types)):
spatialData = spatialOutput[i]
temporalData = temporalOutput[i]
if type != FieldMetaType.integer and type != FieldMetaType.float:
# TODO: Make sure that this doesn't modify any state
spatialData = encoder.getScalars(spatialData)[0]
temporalData = encoder.getScalars(temporalData)[0]
assert isinstance(spatialData, (float, int))
assert isinstance(temporalData, (float, int))
output['spatialTopDownOut'][i] = spatialData
output['temporalTopDownOut'][i] = temporalData |
def getOntology(self, id_):
"""
Returns the ontology with the specified ID.
"""
if id_ not in self._ontologyIdMap:
raise exceptions.OntologyNotFoundException(id_)
return self._ontologyIdMap[id_] | Returns the ontology with the specified ID. | Below is the the instruction that describes the task:
### Input:
Returns the ontology with the specified ID.
### Response:
def getOntology(self, id_):
"""
Returns the ontology with the specified ID.
"""
if id_ not in self._ontologyIdMap:
raise exceptions.OntologyNotFoundException(id_)
return self._ontologyIdMap[id_] |
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
"""
Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos | Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. | Below is the the instruction that describes the task:
### Input:
Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
### Response:
def parse_subprotocol_item(
header: str, pos: int, header_name: str
) -> Tuple[Subprotocol, int]:
"""
Parse a subprotocol from ``header`` at the given position.
Return the subprotocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
"""
item, pos = parse_token(header, pos, header_name)
return cast(Subprotocol, item), pos |
def organization_requests(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests#list-requests"
api_path = "/api/v2/organizations/{id}/requests.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/requests#list-requests | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/requests#list-requests
### Response:
def organization_requests(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests#list-requests"
api_path = "/api/v2/organizations/{id}/requests.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def add_libravatar (generator, metadata):
"""Article generator connector for the Libravatar plugin"""
missing = generator.settings.get ('LIBRAVATAR_MISSING')
size = generator.settings.get ('LIBRAVATAR_SIZE')
## Check the presence of the Email header
if 'email' not in metadata.keys ():
try:
metadata ['email'] = generator.settings.get ('AUTHOR_EMAIL')
except:
pass
## Add the Libravatar URL
if metadata ['email']:
## Compose URL using the MD5 hash
## (the ascii encoding is necessary for Python3)
email = metadata ['email'].lower ().encode ('ascii')
md5 = hashlib.md5 (email).hexdigest ()
url = 'http://cdn.libravatar.org/avatar/' + md5
        ## Add optional "missing picture" option
if missing or size:
url = url + '?'
if missing:
url = url + 'd=' + missing
if size:
url = url + '&'
if size:
url = url + 's=' + str (size)
## Add URL to the article's metadata
metadata ['author_libravatar'] = url | Article generator connector for the Libravatar plugin | Below is the the instruction that describes the task:
### Input:
Article generator connector for the Libravatar plugin
### Response:
def add_libravatar (generator, metadata):
"""Article generator connector for the Libravatar plugin"""
missing = generator.settings.get ('LIBRAVATAR_MISSING')
size = generator.settings.get ('LIBRAVATAR_SIZE')
## Check the presence of the Email header
if 'email' not in metadata.keys ():
try:
metadata ['email'] = generator.settings.get ('AUTHOR_EMAIL')
except:
pass
## Add the Libravatar URL
if metadata ['email']:
## Compose URL using the MD5 hash
## (the ascii encoding is necessary for Python3)
email = metadata ['email'].lower ().encode ('ascii')
md5 = hashlib.md5 (email).hexdigest ()
url = 'http://cdn.libravatar.org/avatar/' + md5
        ## Add optional "missing picture" option
if missing or size:
url = url + '?'
if missing:
url = url + 'd=' + missing
if size:
url = url + '&'
if size:
url = url + 's=' + str (size)
## Add URL to the article's metadata
metadata ['author_libravatar'] = url |
def add_material(self, material):
"""Add a material to the mesh, IF it's not already present."""
if self.has_material(material):
return
self.materials.append(material) | Add a material to the mesh, IF it's not already present. | Below is the the instruction that describes the task:
### Input:
Add a material to the mesh, IF it's not already present.
### Response:
def add_material(self, material):
"""Add a material to the mesh, IF it's not already present."""
if self.has_material(material):
return
self.materials.append(material) |
def add_image_history(self, data):
"""Add arbitrary string to ImageHistory tag."""
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data) | Add arbitrary string to ImageHistory tag. | Below is the the instruction that describes the task:
### Input:
Add arbitrary string to ImageHistory tag.
### Response:
def add_image_history(self, data):
"""Add arbitrary string to ImageHistory tag."""
self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data) |
def clazz(clazz, parent_clazz, description, link, params_string, init_super_args=None):
"""
Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="%parent$", description="$description$", link="$lnk$", params_string="$first_param$")
"""
variables_needed = []
variables_optional = []
imports = set()
for param in params_string.split("\n"):
variable = parse_param_types(param)
        # any variable.types has always_is_value => length must be 1.
assert(not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
imports = list(imports)
imports.sort()
if isinstance(parent_clazz, str):
parent_clazz = to_type(parent_clazz, "parent class")
assert isinstance(parent_clazz, Type)
clazz_object = Clazz(imports=imports,
clazz=clazz, parent_clazz=parent_clazz, link=link, description=description,
parameters=variables_needed, keywords=variables_optional
)
return clazz_object | Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="%parent$", description="$description$", link="$lnk$", params_string="$first_param$") | Below is the the instruction that describes the task:
### Input:
Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="%parent$", description="$description$", link="$lnk$", params_string="$first_param$")
### Response:
def clazz(clazz, parent_clazz, description, link, params_string, init_super_args=None):
"""
Live template for pycharm:
y = clazz(clazz="$clazz$", parent_clazz="%parent$", description="$description$", link="$lnk$", params_string="$first_param$")
"""
variables_needed = []
variables_optional = []
imports = set()
for param in params_string.split("\n"):
variable = parse_param_types(param)
        # any variable.types has always_is_value => length must be 1.
assert(not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
imports = list(imports)
imports.sort()
if isinstance(parent_clazz, str):
parent_clazz = to_type(parent_clazz, "parent class")
assert isinstance(parent_clazz, Type)
clazz_object = Clazz(imports=imports,
clazz=clazz, parent_clazz=parent_clazz, link=link, description=description,
parameters=variables_needed, keywords=variables_optional
)
return clazz_object |
def transplant_func(func, module):
"""
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
"""
from nose.tools import make_decorator
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc | Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2] | Below is the the instruction that describes the task:
### Input:
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
### Response:
def transplant_func(func, module):
"""
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
"""
from nose.tools import make_decorator
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc |
def random_outdir(): # type: () -> Text
""" Return the random directory name chosen to use for tool / workflow output """
# compute this once and store it as a function attribute - each subsequent call will return the same value
if not hasattr(random_outdir, 'outdir'):
random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)]) # type: ignore # nosec
return random_outdir.outdir | Return the random directory name chosen to use for tool / workflow output | Below is the the instruction that describes the task:
### Input:
Return the random directory name chosen to use for tool / workflow output
### Response:
def random_outdir(): # type: () -> Text
""" Return the random directory name chosen to use for tool / workflow output """
# compute this once and store it as a function attribute - each subsequent call will return the same value
if not hasattr(random_outdir, 'outdir'):
random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)]) # type: ignore # nosec
return random_outdir.outdir |
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
"""
structure = Structure.from_dict(d['structure'])
voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
return cls(structure=structure, voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d['normalized_angle_tolerance'],
normalized_distance_tolerance=d['normalized_distance_tolerance'],
additional_conditions=d['additional_conditions'],
valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor) | Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object | Below is the the instruction that describes the task:
### Input:
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
### Response:
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
:param d: dict representation of the VoronoiContainer object
:return: VoronoiContainer object
"""
structure = Structure.from_dict(d['structure'])
voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure)
maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None
minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None
return cls(structure=structure, voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d['normalized_angle_tolerance'],
normalized_distance_tolerance=d['normalized_distance_tolerance'],
additional_conditions=d['additional_conditions'],
valences=d['valences'],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor) |
def storage_update(self, name, config, timeout=10):
"""
Create or update a storage plugin configuration.
:param name: The name of the storage plugin configuration to create or update.
:param config: Overwrites the existing configuration if there is any, and therefore, must include all
required attributes and definitions.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'POST',
'url': '/storage/{0}.json'.format(name),
'body': config,
'params': {
'request_timeout': timeout
}
}))
return result | Create or update a storage plugin configuration.
:param name: The name of the storage plugin configuration to create or update.
:param config: Overwrites the existing configuration if there is any, and therefore, must include all
required attributes and definitions.
:param timeout: int
:return: pydrill.client.Result | Below is the the instruction that describes the task:
### Input:
Create or update a storage plugin configuration.
:param name: The name of the storage plugin configuration to create or update.
:param config: Overwrites the existing configuration if there is any, and therefore, must include all
required attributes and definitions.
:param timeout: int
:return: pydrill.client.Result
### Response:
def storage_update(self, name, config, timeout=10):
"""
Create or update a storage plugin configuration.
:param name: The name of the storage plugin configuration to create or update.
:param config: Overwrites the existing configuration if there is any, and therefore, must include all
required attributes and definitions.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'POST',
'url': '/storage/{0}.json'.format(name),
'body': config,
'params': {
'request_timeout': timeout
}
}))
return result |
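A hypothetical call sketch, assuming a Drill instance reachable on localhost:8047; the plugin name, connection string, workspace and format entries below are placeholders rather than values taken from the source.

from pydrill.client import PyDrill

drill = PyDrill(host='localhost', port=8047)
config = {
    "name": "dfs_local",
    "config": {
        "type": "file",
        "enabled": True,
        "connection": "file:///",
        "workspaces": {"root": {"location": "/tmp", "writable": True}},
        "formats": {"csv": {"type": "text", "extensions": ["csv"]}},
    },
}
# Creates the plugin if it does not exist yet, otherwise overwrites it in place.
result = drill.storage_update('dfs_local', config)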
def parse_http_accept_header(header):
"""
Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header.
"""
components = [item.strip() for item in header.split(',')]
l = []
for component in components:
if ';' in component:
subcomponents = [item.strip() for item in component.split(';')]
l.append(
(
subcomponents[0], # eg. 'text/html'
subcomponents[1][2:] # eg. 'q=0.9'
)
)
else:
l.append((component, '1'))
l.sort(
key = lambda i: i[1],
reverse = True
)
content_types = []
for i in l:
content_types.append(i[0])
return content_types | Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header. | Below is the the instruction that describes the task:
### Input:
Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header.
### Response:
def parse_http_accept_header(header):
"""
Return a list of content types listed in the HTTP Accept header
ordered by quality.
:param header: A string describing the contents of the HTTP Accept header.
"""
components = [item.strip() for item in header.split(',')]
l = []
for component in components:
if ';' in component:
subcomponents = [item.strip() for item in component.split(';')]
l.append(
(
subcomponents[0], # eg. 'text/html'
subcomponents[1][2:] # eg. 'q=0.9'
)
)
else:
l.append((component, '1'))
l.sort(
key = lambda i: i[1],
reverse = True
)
content_types = []
for i in l:
content_types.append(i[0])
return content_types |
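A worked example for the parser above; note that quality values are compared as strings by this implementation, which behaves correctly for the usual one-digit q-values.

header = "text/html; q=0.9, application/json, text/plain; q=0.1"
print(parse_http_accept_header(header))
# ['application/json', 'text/html', 'text/plain']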
def import_fx_rates(self, rates: List[PriceModel]):
""" Imports the given prices into database. Write operation! """
have_new_rates = False
base_currency = self.get_default_currency()
for rate in rates:
assert isinstance(rate, PriceModel)
currency = self.get_by_symbol(rate.symbol)
amount = rate.value
# Do not import duplicate prices.
# todo: if the price differs, update it!
# exists_query = exists(rates_query)
has_rate = currency.prices.filter(Price.date == rate.datetime.date()).first()
# has_rate = (
# self.book.session.query(Price)
# .filter(Price.date == rate.date.date())
# .filter(Price.currency == currency)
# )
if not has_rate:
log(INFO, "Creating entry for %s, %s, %s, %s",
base_currency.mnemonic, currency.mnemonic, rate.datetime.date(), amount)
# Save the price in the exchange currency, not the default.
# Invert the rate in that case.
inverted_rate = 1 / amount
inverted_rate = inverted_rate.quantize(Decimal('.00000000'))
price = Price(commodity=currency,
currency=base_currency,
date=rate.datetime.date(),
value=str(inverted_rate))
have_new_rates = True
# Save the book after the prices have been created.
if have_new_rates:
log(INFO, "Saving new prices...")
self.book.flush()
self.book.save()
else:
log(INFO, "No prices imported.") | Imports the given prices into database. Write operation! | Below is the the instruction that describes the task:
### Input:
Imports the given prices into database. Write operation!
### Response:
def import_fx_rates(self, rates: List[PriceModel]):
""" Imports the given prices into database. Write operation! """
have_new_rates = False
base_currency = self.get_default_currency()
for rate in rates:
assert isinstance(rate, PriceModel)
currency = self.get_by_symbol(rate.symbol)
amount = rate.value
# Do not import duplicate prices.
# todo: if the price differs, update it!
# exists_query = exists(rates_query)
has_rate = currency.prices.filter(Price.date == rate.datetime.date()).first()
# has_rate = (
# self.book.session.query(Price)
# .filter(Price.date == rate.date.date())
# .filter(Price.currency == currency)
# )
if not has_rate:
log(INFO, "Creating entry for %s, %s, %s, %s",
base_currency.mnemonic, currency.mnemonic, rate.datetime.date(), amount)
# Save the price in the exchange currency, not the default.
# Invert the rate in that case.
inverted_rate = 1 / amount
inverted_rate = inverted_rate.quantize(Decimal('.00000000'))
price = Price(commodity=currency,
currency=base_currency,
date=rate.datetime.date(),
value=str(inverted_rate))
have_new_rates = True
# Save the book after the prices have been created.
if have_new_rates:
log(INFO, "Saving new prices...")
self.book.flush()
self.book.save()
else:
log(INFO, "No prices imported.") |
def get_version(version=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = ".".join(str(x) for x in version[:parts])
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "c"}
sub = mapping[version[3]] + str(version[4])
return main + sub | Derives a PEP386-compliant version number from VERSION. | Below is the the instruction that describes the task:
### Input:
Derives a PEP386-compliant version number from VERSION.
### Response:
def get_version(version=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = ".".join(str(x) for x in version[:parts])
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "c"}
sub = mapping[version[3]] + str(version[4])
return main + sub |
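A few worked examples for the helper above, passing hypothetical version tuples explicitly so the module-level VERSION constant is not needed.

print(get_version((1, 4, 0, "final", 0)))  # '1.4'     (a zero micro version is dropped)
print(get_version((1, 4, 2, "final", 0)))  # '1.4.2'
print(get_version((2, 0, 0, "alpha", 1)))  # '2.0a1'
print(get_version((2, 0, 1, "rc", 3)))     # '2.0.1c3'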
def check_bottleneck(text):
"""Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61
"""
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1) | Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61 | Below is the the instruction that describes the task:
### Input:
Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61
### Response:
def check_bottleneck(text):
"""Avoid mixing metaphors about bottles and their necks.
source: Sir Ernest Gowers
source_url: http://bit.ly/1CQPH61
"""
err = "mixed_metaphors.misc.bottleneck"
msg = u"Mixed metaphor — bottles with big necks are easy to pass through."
list = [
"biggest bottleneck",
"big bottleneck",
"large bottleneck",
"largest bottleneck",
"world-wide bottleneck",
"huge bottleneck",
"massive bottleneck",
]
return existence_check(text, list, err, msg, max_errors=1) |
def to_xml(self, name="address"):
'''
Returns a DOM Element containing the XML representation of the
address.
@return:Element
'''
for n, v in {"street_address": self.street_address, "city": self.city,
"country": self.country}.items():
if is_empty_or_none(v):
raise ValueError("'%s' attribute cannot be empty or None." % n)
doc = Document()
root = doc.createElement(name)
self._create_text_node(root, "streetAddress", self.street_address, True)
self._create_text_node(root, "city", self.city, True)
self._create_text_node(root, "zipcode", self.zipcode)
self._create_text_node(root, "state", self.state, True)
self._create_text_node(root, "country", self.country)
return root | Returns a DOM Element containing the XML representation of the
address.
@return:Element | Below is the the instruction that describes the task:
### Input:
Returns a DOM Element containing the XML representation of the
address.
@return:Element
### Response:
def to_xml(self, name="address"):
'''
Returns a DOM Element containing the XML representation of the
address.
@return:Element
'''
for n, v in {"street_address": self.street_address, "city": self.city,
"country": self.country}.items():
if is_empty_or_none(v):
raise ValueError("'%s' attribute cannot be empty or None." % n)
doc = Document()
root = doc.createElement(name)
self._create_text_node(root, "streetAddress", self.street_address, True)
self._create_text_node(root, "city", self.city, True)
self._create_text_node(root, "zipcode", self.zipcode)
self._create_text_node(root, "state", self.state, True)
self._create_text_node(root, "country", self.country)
return root |
def StartGdb(self):
"""Hands control over to a new gdb process."""
if self.inferior.is_running:
self.inferior.ShutDownGdb()
program_arg = 'program %d ' % self.inferior.pid
else:
program_arg = ''
os.system('gdb ' + program_arg + ' '.join(self.gdb_args))
reset_position = raw_input('Reset debugger position? [y]/n ')
if not reset_position or reset_position == 'y' or reset_position == 'yes':
self.position = None | Hands control over to a new gdb process. | Below is the the instruction that describes the task:
### Input:
Hands control over to a new gdb process.
### Response:
def StartGdb(self):
"""Hands control over to a new gdb process."""
if self.inferior.is_running:
self.inferior.ShutDownGdb()
program_arg = 'program %d ' % self.inferior.pid
else:
program_arg = ''
os.system('gdb ' + program_arg + ' '.join(self.gdb_args))
reset_position = raw_input('Reset debugger position? [y]/n ')
if not reset_position or reset_position == 'y' or reset_position == 'yes':
self.position = None |
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex
"""
return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex)) | Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex | Below is the the instruction that describes the task:
### Input:
Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex
### Response:
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Takes the inverse sin of a vertex, Arcsin(vertex)
:param input_vertex: the vertex
"""
return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex)) |
def kullback_leibler(p, q) :
"""Discrete Kullback-Leibler divergence D(P||Q)"""
p = np.asarray(p, dtype=np.float)
q = np.asarray(q, dtype=np.float)
if p.shape != q.shape :
raise ValueError("p and q must be of the same dimensions")
return np.sum(np.where(p > 0, np.log(p / q) * p, 0)) | Discrete Kullback-Leibler divergence D(P||Q) | Below is the the instruction that describes the task:
### Input:
Discrete Kullback-Leibler divergence D(P||Q)
### Response:
def kullback_leibler(p, q) :
"""Discrete Kullback-Leibler divergence D(P||Q)"""
p = np.asarray(p, dtype=np.float)
q = np.asarray(q, dtype=np.float)
if p.shape != q.shape :
raise ValueError("p and q must be of the same dimensions")
return np.sum(np.where(p > 0, np.log(p / q) * p, 0)) |
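A worked example for the function above; np.log is the natural logarithm, so the result is in nats. The two distributions are arbitrary.

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kullback_leibler(p, q))  # ~0.5108, i.e. 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1)
print(kullback_leibler(p, p))  # 0.0: a distribution has zero divergence from itself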
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
'''
Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
'''
return _delete_resource(name, name_param='CacheSecurityGroupName',
desc='cache security group', res_type='cache_security_group',
region=region, key=key, keyid=keyid, profile=profile, **args) | Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg | Below is the the instruction that describes the task:
### Input:
Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
### Response:
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
'''
Delete a cache security group.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
'''
return _delete_resource(name, name_param='CacheSecurityGroupName',
desc='cache security group', res_type='cache_security_group',
region=region, key=key, keyid=keyid, profile=profile, **args) |
def JCXZ(cpu, target):
"""
Jumps short if CX register is 0.
:param cpu: current CPU.
:param target: destination operand.
"""
cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CX == 0, target.read(), cpu.PC) | Jumps short if CX register is 0.
:param cpu: current CPU.
:param target: destination operand. | Below is the the instruction that describes the task:
### Input:
Jumps short if CX register is 0.
:param cpu: current CPU.
:param target: destination operand.
### Response:
def JCXZ(cpu, target):
"""
Jumps short if CX register is 0.
:param cpu: current CPU.
:param target: destination operand.
"""
cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CX == 0, target.read(), cpu.PC) |
def get_as_dataframe(worksheet,
evaluate_formulas=False,
**options):
"""
Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame
"""
all_values = _get_all_values(worksheet, evaluate_formulas)
return TextParser(all_values, **options).read() | Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame
### Response:
def get_as_dataframe(worksheet,
evaluate_formulas=False,
**options):
"""
Returns the worksheet contents as a DataFrame.
:param worksheet: the worksheet.
:param evaluate_formulas: if True, get the value of a cell after
formula evaluation; otherwise get the formula itself if present.
Defaults to False.
:param \*\*options: all the options for pandas.io.parsers.TextParser,
according to the version of pandas that is installed.
(Note: TextParser supports only the default 'python' parser engine,
not the C engine.)
:returns: pandas.DataFrame
"""
all_values = _get_all_values(worksheet, evaluate_formulas)
return TextParser(all_values, **options).read() |
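A hypothetical usage sketch: it assumes an authenticated gspread client and an existing spreadsheet, and the sheet name is a placeholder. Keyword arguments such as header= and skiprows= are forwarded unchanged to pandas' TextParser.

import gspread

gc = gspread.service_account()          # or any other gspread auth flow
worksheet = gc.open("My sheet").sheet1
df = get_as_dataframe(worksheet, evaluate_formulas=True, header=0, skiprows=[1])
print(df.head())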
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
    Converts a floating point value to an X:1 ratio.
    Number of decimal places is set by the `decimal_places` kwarg. Default is zero.
"""
try:
f = float(value)
except ValueError:
return failure_string
    return _saferound(f, decimal_places) + ':1' | Converts a floating point value to an X:1 ratio.
    Number of decimal places is set by the `decimal_places` kwarg. Default is zero. | Below is the the instruction that describes the task:
### Input:
    Converts a floating point value to an X:1 ratio.
    Number of decimal places is set by the `decimal_places` kwarg. Default is zero.
### Response:
def ratio(value, decimal_places=0, failure_string='N/A'):
"""
    Converts a floating point value to an X:1 ratio.
    Number of decimal places is set by the `decimal_places` kwarg. Default is zero.
"""
try:
f = float(value)
except ValueError:
return failure_string
return _saferound(f, decimal_places) + ':1' |
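A small usage sketch; it assumes the module's _saferound helper rounds to the requested number of decimal places and returns a string.

print(ratio("3.14159"))               # '3:1' with the default of zero decimal places
print(ratio(2.54, decimal_places=1))  # '2.5:1'
print(ratio("not a number"))          # 'N/A', the failure_string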
def limit_update(db, key, limits):
"""
Safely updates the list of limits in the database.
:param db: The database handle.
:param key: The key the limits are stored under.
:param limits: A list or sequence of limit objects, each
understanding the dehydrate() method.
The limits list currently in the database will be atomically
changed to match the new list. This is done using the pipeline()
method.
"""
# Start by dehydrating all the limits
desired = [msgpack.dumps(l.dehydrate()) for l in limits]
desired_set = set(desired)
# Now, let's update the limits
with db.pipeline() as pipe:
while True:
try:
# Watch for changes to the key
pipe.watch(key)
# Look up the existing limits
existing = set(pipe.zrange(key, 0, -1))
# Start the transaction...
pipe.multi()
# Remove limits we no longer have
for lim in existing - desired_set:
pipe.zrem(key, lim)
# Update or add all our desired limits
for idx, lim in enumerate(desired):
pipe.zadd(key, (idx + 1) * 10, lim)
# Execute the transaction
pipe.execute()
except redis.WatchError:
# Try again...
continue
else:
# We're all done!
break | Safely updates the list of limits in the database.
:param db: The database handle.
:param key: The key the limits are stored under.
:param limits: A list or sequence of limit objects, each
understanding the dehydrate() method.
The limits list currently in the database will be atomically
changed to match the new list. This is done using the pipeline()
method. | Below is the the instruction that describes the task:
### Input:
Safely updates the list of limits in the database.
:param db: The database handle.
:param key: The key the limits are stored under.
:param limits: A list or sequence of limit objects, each
understanding the dehydrate() method.
The limits list currently in the database will be atomically
changed to match the new list. This is done using the pipeline()
method.
### Response:
def limit_update(db, key, limits):
"""
Safely updates the list of limits in the database.
:param db: The database handle.
:param key: The key the limits are stored under.
:param limits: A list or sequence of limit objects, each
understanding the dehydrate() method.
The limits list currently in the database will be atomically
changed to match the new list. This is done using the pipeline()
method.
"""
# Start by dehydrating all the limits
desired = [msgpack.dumps(l.dehydrate()) for l in limits]
desired_set = set(desired)
# Now, let's update the limits
with db.pipeline() as pipe:
while True:
try:
# Watch for changes to the key
pipe.watch(key)
# Look up the existing limits
existing = set(pipe.zrange(key, 0, -1))
# Start the transaction...
pipe.multi()
# Remove limits we no longer have
for lim in existing - desired_set:
pipe.zrem(key, lim)
# Update or add all our desired limits
for idx, lim in enumerate(desired):
pipe.zadd(key, (idx + 1) * 10, lim)
# Execute the transaction
pipe.execute()
except redis.WatchError:
# Try again...
continue
else:
# We're all done!
break |
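An illustrative sketch of the call pattern. SimpleLimit is a stand-in written for this example rather than a class from the source; anything exposing a msgpack-serializable dehydrate() works. A reachable Redis server is assumed, and note that the pipe.zadd(key, score, member) call above follows the older redis-py 2.x calling convention.

import redis

class SimpleLimit(object):
    def __init__(self, uri, value, unit):
        self.uri, self.value, self.unit = uri, value, unit

    def dehydrate(self):
        return {"uri": self.uri, "value": self.value, "unit": self.unit}

db = redis.StrictRedis(host="localhost", port=6379)
limits = [SimpleLimit("/widgets", 10, "second"),
          SimpleLimit("/reports", 100, "hour")]
limit_update(db, "limits", limits)  # atomically replaces whatever list was stored under the key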
def get_name(self, name):
"""Return name `PyName` defined in this scope"""
if name not in self.get_names():
raise exceptions.NameNotFoundError('name %s not found' % name)
return self.get_names()[name] | Return name `PyName` defined in this scope | Below is the the instruction that describes the task:
### Input:
Return name `PyName` defined in this scope
### Response:
def get_name(self, name):
"""Return name `PyName` defined in this scope"""
if name not in self.get_names():
raise exceptions.NameNotFoundError('name %s not found' % name)
return self.get_names()[name] |
def send(self, stat, value, backend=None):
"""Send stat to backend."""
client = yield from self.client(backend)
if not client:
return False
client.send(stat, value)
client.disconnect() | Send stat to backend. | Below is the the instruction that describes the task:
### Input:
Send stat to backend.
### Response:
def send(self, stat, value, backend=None):
"""Send stat to backend."""
client = yield from self.client(backend)
if not client:
return False
client.send(stat, value)
client.disconnect() |
def daffpa():
"""
Find the previous (backward) array in the current DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html
:return: True if an array was found.
:rtype: bool
"""
found = ctypes.c_int()
libspice.daffpa_c(ctypes.byref(found))
return bool(found.value) | Find the previous (backward) array in the current DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html
:return: True if an array was found.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Find the previous (backward) array in the current DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html
:return: True if an array was found.
:rtype: bool
### Response:
def daffpa():
"""
Find the previous (backward) array in the current DAF.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html
:return: True if an array was found.
:rtype: bool
"""
found = ctypes.c_int()
libspice.daffpa_c(ctypes.byref(found))
return bool(found.value) |
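A hypothetical backward walk over a DAF's arrays. It assumes the companion wrappers dafopr, dafbbs, dafgn and dafcls exist alongside daffpa (as they do in the SPICE toolkit), and that 'kernel.bsp' is a DAF file you actually have.

handle = dafopr("kernel.bsp")   # open the DAF for read access
dafbbs(handle)                  # begin a backward search on it
while daffpa():                 # step to the previous array, if any
    print(dafgn())              # name of the array the search is positioned at
dafcls(handle)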
def _write_hex_long(self, data, pos, value):
"""
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
"""
self._write_hex_byte(data, pos + 0, (value >> 56) & 0xff)
self._write_hex_byte(data, pos + 2, (value >> 48) & 0xff)
self._write_hex_byte(data, pos + 4, (value >> 40) & 0xff)
self._write_hex_byte(data, pos + 6, (value >> 32) & 0xff)
self._write_hex_byte(data, pos + 8, (value >> 24) & 0xff)
self._write_hex_byte(data, pos + 10, (value >> 16) & 0xff)
self._write_hex_byte(data, pos + 12, (value >> 8) & 0xff)
self._write_hex_byte(data, pos + 14, (value & 0xff)) | Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long | Below is the the instruction that describes the task:
### Input:
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
### Response:
def _write_hex_long(self, data, pos, value):
"""
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
"""
self._write_hex_byte(data, pos + 0, (value >> 56) & 0xff)
self._write_hex_byte(data, pos + 2, (value >> 48) & 0xff)
self._write_hex_byte(data, pos + 4, (value >> 40) & 0xff)
self._write_hex_byte(data, pos + 6, (value >> 32) & 0xff)
self._write_hex_byte(data, pos + 8, (value >> 24) & 0xff)
self._write_hex_byte(data, pos + 10, (value >> 16) & 0xff)
self._write_hex_byte(data, pos + 12, (value >> 8) & 0xff)
self._write_hex_byte(data, pos + 14, (value & 0xff)) |
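A standalone sketch of the same layout, written for illustration rather than taken from the source: each byte of the 64-bit value is emitted big-endian as two ASCII hex characters, which is why the positions above advance in steps of two.

def write_hex_long(data, pos, value):
    for i in range(8):                              # most significant byte first
        byte = (value >> (8 * (7 - i))) & 0xff
        data[pos + 2 * i:pos + 2 * i + 2] = format(byte, "02x").encode("ascii")

buf = bytearray(b"0" * 16)
write_hex_long(buf, 0, 0x0123456789ABCDEF)
print(buf)  # bytearray(b'0123456789abcdef')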
def recv(self, filename, dest_file, timeout=None):
"""Retrieve a file from the device into the file-like dest_file."""
transport = DataFilesyncTransport(self.stream)
transport.write_data('RECV', filename, timeout)
for data_msg in transport.read_until_done('DATA', timeout):
dest_file.write(data_msg.data) | Retrieve a file from the device into the file-like dest_file. | Below is the the instruction that describes the task:
### Input:
Retrieve a file from the device into the file-like dest_file.
### Response:
def recv(self, filename, dest_file, timeout=None):
"""Retrieve a file from the device into the file-like dest_file."""
transport = DataFilesyncTransport(self.stream)
transport.write_data('RECV', filename, timeout)
for data_msg in transport.read_until_done('DATA', timeout):
dest_file.write(data_msg.data) |
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built | Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted. | Below is the instruction that describes the task:
### Input:
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
### Response:
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"):
"""
Creates a validator that checks if the given numeric value is in the
specified range, inclusive.
Accepts values specified by ``numbers.Number`` only, excluding booleans.
The error messages raised can be customized with ``min_message`` and
``max_message``. The ``min`` and ``max`` arguments are formatted.
"""
@wraps(Range)
def built(value):
if not isinstance(value, numbers.Number) or isinstance(value, bool):
raise Error("Not a number")
if min is not None and min > value:
raise Error(min_message.format(min=min, max=max))
if max is not None and value > max:
raise Error(max_message.format(min=min, max=max))
return value
return built |
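Usage sketch, assuming the Error exception and the wraps import come from the same validation module.

    port = Range(1, 65535)
    print(port(8080))      # -> 8080

    try:
        port(0)            # below the minimum
    except Error as exc:
        print(exc)         # "Must be at least 1"

    try:
        port(True)         # booleans are explicitly rejected
    except Error as exc:
        print(exc)         # "Not a number"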
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
"""
Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
"""
# Retrieve the student item
try:
student_item = StudentItem.objects.get(
student_id=student_id, course_id=course_id, item_id=item_id
)
except StudentItem.DoesNotExist:
# If there is no student item, then there is no score to reset,
# so we can return immediately.
return
# Create a "reset" score
try:
score = Score.create_reset_score(student_item)
if emit_signal:
# Send a signal out to any listeners who are waiting for scoring events.
score_reset.send(
sender=None,
anonymous_user_id=student_id,
course_id=course_id,
item_id=item_id,
created_at=score.created_at,
)
if clear_state:
for sub in student_item.submission_set.all():
# soft-delete the Submission
sub.status = Submission.DELETED
sub.save(update_fields=["status"])
# Also clear out cached values
cache_key = Submission.get_cache_key(sub.uuid)
cache.delete(cache_key)
except DatabaseError:
msg = (
u"Error occurred while resetting scores for"
u" item {item_id} in course {course_id} for student {student_id}"
).format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.exception(msg)
raise SubmissionInternalError(msg)
else:
msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
item_id=item_id, course_id=course_id, student_id=student_id
)
logger.info(msg) | Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores. | Below is the instruction that describes the task:
### Input:
Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
### Response:
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
"""
Reset scores for a specific student on a specific problem.
Note: this does *not* delete `Score` models from the database,
since these are immutable. It simply creates a new score with
the "reset" flag set to True.
Args:
student_id (unicode): The ID of the student for whom to reset scores.
course_id (unicode): The ID of the course containing the item to reset.
item_id (unicode): The ID of the item for which to reset scores.
clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem
Returns:
None
Raises:
SubmissionInternalError: An unexpected error occurred while resetting scores.
"""
# Retrieve the student item
try:
student_item = StudentItem.objects.get(
student_id=student_id, course_id=course_id, item_id=item_id
)
except StudentItem.DoesNotExist:
# If there is no student item, then there is no score to reset,
# so we can return immediately.
return
# Create a "reset" score
try:
score = Score.create_reset_score(student_item)
if emit_signal:
# Send a signal out to any listeners who are waiting for scoring events.
score_reset.send(
sender=None,
anonymous_user_id=student_id,
course_id=course_id,
item_id=item_id,
created_at=score.created_at,
)
if clear_state:
for sub in student_item.submission_set.all():
# soft-delete the Submission
sub.status = Submission.DELETED
sub.save(update_fields=["status"])
# Also clear out cached values
cache_key = Submission.get_cache_key(sub.uuid)
cache.delete(cache_key)
except DatabaseError:
msg = (
u"Error occurred while resetting scores for"
u" item {item_id} in course {course_id} for student {student_id}"
).format(item_id=item_id, course_id=course_id, student_id=student_id)
logger.exception(msg)
raise SubmissionInternalError(msg)
else:
msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
item_id=item_id, course_id=course_id, student_id=student_id
)
logger.info(msg) |
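A hedged call sketch; the identifiers below are illustrative placeholders, not real course data.

    reset_score(
        student_id='anonymous-user-id-123',
        course_id='course-v1:OrgX+Demo101+2024',
        item_id='block-v1:OrgX+Demo101+2024+type@openassessment+block@peer',
        clear_state=True,   # also soft-delete the learner's submissions and clear their cache entries
    )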
def tasks_missing_predicate(
service_name,
old_task_ids,
task_predicate=None
):
""" Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool
"""
try:
task_ids = get_service_task_ids(service_name, task_predicate)
except DCOSHTTPException:
print('failed to get task ids for service {}'.format(service_name))
task_ids = []
print('checking whether old tasks in "{}" are missing:\n- old tasks: {}\n- current tasks: {}'.format(
service_name, old_task_ids, task_ids))
for id in old_task_ids:
if id not in task_ids:
return True # an old task was not present
return False | Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool | Below is the instruction that describes the task:
### Input:
Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool
### Response:
def tasks_missing_predicate(
service_name,
old_task_ids,
task_predicate=None
):
""" Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool
"""
try:
task_ids = get_service_task_ids(service_name, task_predicate)
except DCOSHTTPException:
print('failed to get task ids for service {}'.format(service_name))
task_ids = []
print('checking whether old tasks in "{}" are missing:\n- old tasks: {}\n- current tasks: {}'.format(
service_name, old_task_ids, task_ids))
for id in old_task_ids:
if id not in task_ids:
return True # an old task was not present
return False |
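A hedged polling sketch built on the get_service_task_ids helper referenced in the docstring; the service name is illustrative.

    import time

    old_ids = get_service_task_ids('marathon-lb')     # snapshot before triggering a restart
    # ... restart or replace the service's tasks here ...
    while not tasks_missing_predicate('marathon-lb', old_ids):
        time.sleep(5)                                 # poll until one of the old tasks disappears
    print('at least one of the original tasks has been replaced')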
def span(self, name='child_span'):
"""Create a child span for the current span and append it to the child
spans list.
:type name: str
:param name: (Optional) The name of the child span.
:rtype: :class: `~opencensus.trace.span.Span`
:returns: A child Span to be added to the current span.
"""
child_span = Span(name, parent_span=self)
self._child_spans.append(child_span)
return child_span | Create a child span for the current span and append it to the child
spans list.
:type name: str
:param name: (Optional) The name of the child span.
:rtype: :class: `~opencensus.trace.span.Span`
:returns: A child Span to be added to the current span. | Below is the instruction that describes the task:
### Input:
Create a child span for the current span and append it to the child
spans list.
:type name: str
:param name: (Optional) The name of the child span.
:rtype: :class: `~opencensus.trace.span.Span`
:returns: A child Span to be added to the current span.
### Response:
def span(self, name='child_span'):
"""Create a child span for the current span and append it to the child
spans list.
:type name: str
:param name: (Optional) The name of the child span.
:rtype: :class: `~opencensus.trace.span.Span`
:returns: A child Span to be added to the current span.
"""
child_span = Span(name, parent_span=self)
self._child_spans.append(child_span)
return child_span |
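Usage sketch, assuming the Span constructor shown above.

    root = Span('handle-request')       # a top-level span
    db = root.span('db-query')          # recorded as a child of root
    ser = db.span('serialize-rows')     # nested one level deeper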
def _get_data(self, read_size):
"""Get data from the character device."""
if NIX:
return super(Mouse, self)._get_data(read_size)
return self._pipe.recv_bytes() | Get data from the character device. | Below is the instruction that describes the task:
### Input:
Get data from the character device.
### Response:
def _get_data(self, read_size):
"""Get data from the character device."""
if NIX:
return super(Mouse, self)._get_data(read_size)
return self._pipe.recv_bytes() |
def SerializeFaultDetail(self, val, info):
""" Serialize an object """
self._SerializeDataObject(val, info, ' xsi:type="{0}"'.format(val._wsdlName), self.defaultNS) | Serialize an object | Below is the instruction that describes the task:
### Input:
Serialize an object
### Response:
def SerializeFaultDetail(self, val, info):
""" Serialize an object """
self._SerializeDataObject(val, info, ' xsi:type="{0}"'.format(val._wsdlName), self.defaultNS) |
def update(self, repo_dir, **kwargs):
"""This function updates an existing checkout of source code."""
del kwargs
rev = self._args.get("revision")
if rev:
return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command(
rev, repo_dir
)
return None | This function updates an existing checkout of source code. | Below is the instruction that describes the task:
### Input:
This function updates an existing checkout of source code.
### Response:
def update(self, repo_dir, **kwargs):
"""This function updates an existing checkout of source code."""
del kwargs
rev = self._args.get("revision")
if rev:
return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command(
rev, repo_dir
)
return None |
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas | Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures. | Below is the instruction that describes the task:
### Input:
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
### Response:
def enum_sigma_cubic(cutoff, r_axis):
"""
Find all possible sigma values and corresponding rotation angles
within a sigma value cutoff with known rotation axis in cubic system.
The algorithm for this code is from reference, Acta Cryst, A40,108(1984)
Args:
cutoff (integer): the cutoff of sigma values.
r_axis (list of three integers, e.g. u, v, w):
the rotation axis of the grain boundary, with the format of [u,v,w].
Returns:
sigmas (dict):
dictionary with keys as the possible integer sigma values
and values as list of the possible rotation angles to the
corresponding sigma values.
e.g. the format as
{sigma1: [angle11,angle12,...], sigma2: [angle21, angle22,...],...}
Note: the angles are the rotation angles of one grain with respect to
the other grain.
When generating the microstructures of the grain boundary using these angles,
you need to analyze the symmetry of the structure. Different angles may
result in equivalent microstructures.
"""
sigmas = {}
# make sure gcd(r_axis)==1
if reduce(gcd, r_axis) != 1:
r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
# count the number of odds in r_axis
odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
# Compute the max n we need to enumerate.
if odd_r == 3:
a_max = 4
elif odd_r == 0:
a_max = 1
else:
a_max = 2
n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
# enumerate all possible n, m to give possible sigmas within the cutoff.
for n_loop in range(1, n_max + 1):
n = n_loop
m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
for m in range(0, m_max + 1):
if gcd(m, n) == 1 or m == 0:
if m == 0:
n = 1
else:
n = n_loop
# construct the quadruple [m, U,V,W], count the number of odds in
# quadruple to determine the parameter a, refer to the reference
quadruple = [m] + [x * n for x in r_axis]
odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
if odd_qua == 4:
a = 4
elif odd_qua == 2:
a = 2
else:
a = 1
sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
if (sigma <= cutoff) and (sigma > 1):
if sigma not in list(sigmas.keys()):
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
sigmas[sigma] = [angle]
else:
if m == 0:
angle = 180.0
else:
angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
/ np.pi * 180
if angle not in sigmas[sigma]:
sigmas[sigma].append(angle)
return sigmas |
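Usage sketch; for rotations about [1, 1, 1] the output should include the familiar sigma-3 twin boundary at a 60 degree rotation.

    sigmas = enum_sigma_cubic(cutoff=50, r_axis=[1, 1, 1])
    for sigma in sorted(sigmas):
        angles = ', '.join('{:.2f}'.format(a) for a in sigmas[sigma])
        print('sigma {}: {} degrees'.format(sigma, angles))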