def update(self, docs=None, split=0, parallelism=None, progress_bar=True):
"""Update the features of the specified candidates.
:param docs: If provided, apply features to all the candidates in these
documents.
:param split: If docs is None, apply features to the candidates in this
particular split.
:type split: int
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Featurizer if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
"""
self.apply(
docs=docs,
split=split,
train=True,
clear=False,
parallelism=parallelism,
progress_bar=progress_bar,
)
def _get_stats_table(self, table_id, kind='R', summary=False):
"""Gets a stats table from the player page; helper function that does
the work for per-game, per-100-poss, etc. stats.
:table_id: the ID of the HTML table.
:kind: specifies regular season, playoffs, or both. One of 'R', 'P',
'B'. Defaults to 'R'.
:returns: A DataFrame of stats.
"""
doc = self.get_main_doc()
table_id = 'table#{}{}'.format(
'playoffs_' if kind == 'P' else '', table_id)
table = doc(table_id)
df = sportsref.utils.parse_table(table, flatten=(not summary),
footer=summary)
return df
def cast(func, value):
""" Cast the specified value to the specified type (returned by func). Currently this
only supports int, float, and bool. Should be extended if needed.
Parameters:
func (func): Callback function used to cast to the target type (int, bool, float).
value (any): value to be cast and returned.
"""
if value is not None:
if func == bool:
return bool(int(value))
elif func in (int, float):
try:
return func(value)
except ValueError:
return float('nan')
return func(value)
return value
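A quick usage sketch for cast() above; the inputs are made up for illustration and assume the function is available in the current scope.
# numeric strings are converted; unparseable numerics become NaN; None passes through
print(cast(int, "3"))       # -> 3
print(cast(float, "oops"))  # -> nan
print(cast(bool, "0"))      # -> False (parsed through int first)
print(cast(int, None))      # -> None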
def save(self):
""" Exports all user attributes to the user's configuration and writes configuration
Saves the values for each attribute stored in User.configVars
into the user's configuration. The password is automatically
encoded and salted to prevent saving it as plaintext. The
session is pickled, encoded, and compressed to take up
less space in the configuration file. All other attributes
are saved in plain text. Writes the changes to the configuration file.
"""
# Save each attribute listed in configVars
for prop in dir(self):
if getattr(self, prop) == None: continue
if not prop in self.configVars: continue
# Special handling for some attributes
if prop == "session":
pic = pickle.dumps(getattr(self, prop).cookies)
comp = zlib.compress(pic)
enc = base64.b64encode(comp)
self.config[prop] = enc.decode()
continue
if prop == "password" and not self.savePassword: continue
if prop == "password":
s = hashlib.md5(self.username.encode()).hexdigest()
p = base64.b64encode(getattr(self, prop).encode()) + s.encode()
self.config[prop] = p.decode()
continue
self.config[prop] = str(getattr(self, prop))
if 'password' in self.config and not self.savePassword: del self.config.password
self.config.write()
self.__loadConfig()
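The session handling above chains pickle, zlib, and base64; here is a standalone sketch of that round trip, with a plain dict standing in for the requests cookie jar.
import base64, pickle, zlib

cookies = {"sessionid": "abc123"}   # hypothetical stand-in for session.cookies
enc = base64.b64encode(zlib.compress(pickle.dumps(cookies))).decode()
restored = pickle.loads(zlib.decompress(base64.b64decode(enc)))
assert restored == cookies          # the chain is fully reversible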
def parse_config(file_path):
"""
Convert the CISM configuration file to a python dictionary
Args:
file_path: absolute path to the configuration file
Returns:
A dictionary representation of the given file
"""
if not os.path.isfile(file_path):
return {}
parser = ConfigParser()
parser.read(file_path)
# Strip out inline comments
for s in parser._sections:
for v in six.iterkeys(parser._sections[s]):
parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip()
return parser._sections
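The key transform parse_config() applies is stripping inline comments from each value; a tiny standalone illustration with an invented value:
raw_value = "30   # number of vertical levels"
print(raw_value.split("#")[0].strip())   # -> '30', i.e. what ends up in the returned dict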
def advance(self, blocksize, timeout=10):
"""Advanced buffer blocksize seconds.
Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
Returns True if this block is analyzable.
"""
ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
self.blocksize = blocksize
# We have given up so there is no time series
if ts is None:
logging.info("%s frame is late, giving up", self.detector)
self.null_advance_strain(blocksize)
if self.state:
self.state.null_advance(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
return False
# We collected some data so we are closer to being able to analyze data
self.wait_duration -= blocksize
# If the data we got was invalid, reset the counter on how much to collect
# This behavior corresponds to how we handle CAT1 vetoes
if self.state and self.state.advance(blocksize) is False:
self.add_hard_count()
self.null_advance_strain(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
logging.info("%s time has invalid data, resetting buffer",
self.detector)
return False
# Also advance the dq vector in lockstep
if self.dq:
self.dq.advance(blocksize)
self.segments = {}
# only condition with the needed raw data so we can continuously add
# to the existing result
# Precondition
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
start = len(self.raw_buffer) - csize * self.factor
strain = self.raw_buffer[start:]
strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
self.highpass_samples,
beta=self.beta)
strain = (strain * self.dyn_range_fac).astype(numpy.float32)
strain = pycbc.filter.resample_to_delta_t(strain,
1.0/self.sample_rate, method='ldas')
# remove corruption at beginning
strain = strain[self.corruption:]
# taper beginning if needed
if self.taper_immediate_strain:
logging.info("Tapering start of %s strain block", self.detector)
strain = gate_data(strain, [(strain.start_time, 0., self.autogating_pad)])
self.taper_immediate_strain = False
# Stitch into continuous stream
self.strain.roll(-sample_step)
self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
self.strain.start_time += blocksize
# apply gating if need be: NOT YET IMPLEMENTED
if self.psd is None and self.wait_duration <=0:
self.recalculate_psd()
if self.wait_duration > 0:
return False
else:
return True
def gameloop(self):
"""
A game loop that circles through the methods.
"""
try:
while True:
self.handle_events()
self.update()
self.render()
except KeyboardInterrupt:
pass
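A minimal, hypothetical host for gameloop() above; the class and its trivial method bodies are invented purely to show how the three methods plug into the loop.
class TinyGame:
    frames = 0
    def handle_events(self):
        pass                       # a real game would poll input here
    def update(self):
        self.frames += 1
    def render(self):
        print("frame", self.frames)

TinyGame.gameloop = gameloop       # borrow the loop above as a method
# TinyGame().gameloop()            # would print frames until Ctrl-C raises KeyboardInterrupt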
def _format_dict(dict_, **kwargs):
"""
Makes a pretty printable / human-readable string representation of a
dictionary. In most cases this string could be evaled.
Args:
dict_ (dict): a dictionary
**kwargs: si, stritems, strkeys, strvals, sk, sv, nl, newlines, nobr,
nobraces, cbr, compact_brace, trailing_sep,
explicit, itemsep, precision, kvsep, sort
Returns:
Tuple[str, Dict] : retstr, _leaf_info
Kwargs:
sort (None): if True, sorts ALL collections and subcollections,
note, collections with undefined orders (e.g. dicts, sets) are
sorted by default. (default = None)
nl (int): preferred alias for newline. can be a countdown variable
(default = None)
explicit (int): can be a countdown variable. if True, uses
dict(a=b) syntax instead of {'a': b}
nobr (bool): removes outer braces (default = False)
"""
kwargs['_root_info'] = _rectify_root_info(kwargs.get('_root_info', None))
kwargs['_root_info']['depth'] += 1
stritems = kwargs.pop('si', kwargs.pop('stritems', False))
if stritems:
kwargs['strkeys'] = True
kwargs['strvals'] = True
kwargs['strkeys'] = kwargs.pop('sk', kwargs.pop('strkeys', False))
kwargs['strvals'] = kwargs.pop('sv', kwargs.pop('strvals', False))
newlines = kwargs.pop('nl', kwargs.pop('newlines', True))
kwargs['nl'] = _rectify_countdown_or_bool(newlines)
nobraces = kwargs.pop('nobr', kwargs.pop('nobraces', False))
compact_brace = kwargs.get('cbr', kwargs.get('compact_brace', False))
# kwargs['cbr'] = _rectify_countdown_or_bool(compact_brace)
# Doesn't actually put in trailing comma if on same line
trailing_sep = kwargs.get('trailsep', kwargs.get('trailing_sep', newlines > 0))
explicit = kwargs.get('explicit', False)
itemsep = kwargs.get('itemsep', ' ')
if len(dict_) == 0:
retstr = 'dict()' if explicit else '{}'
_leaf_info = None
else:
itemstrs, _leaf_info = _dict_itemstrs(dict_, **kwargs)
if nobraces:
lbr, rbr = '', ''
elif explicit:
lbr, rbr = 'dict(', ')'
else:
lbr, rbr = '{', '}'
retstr = _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,
trailing_sep, compact_brace, lbr, rbr)
return retstr, _leaf_info
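_format_dict accepts both short and long keyword aliases (si/stritems, nl/newlines, ...); the pop-with-fallback pattern it uses can be seen in isolation with made-up values:
kwargs = {'si': True, 'nl': 2}
stritems = kwargs.pop('si', kwargs.pop('stritems', False))   # note: the long alias is popped even when the short one wins
newlines = kwargs.pop('nl', kwargs.pop('newlines', True))
print(stritems, newlines, kwargs)   # -> True 2 {}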
def apply_units_to_cache(self, data):
"""
Applies units to data when a proxy reader is used. For example if the
data is cached as JSON and retrieved using the
:class:`~simkit.core.data_readers.JSONReader`, then units can be
applied from the original parameter schema.
:param data: Data read by proxy reader.
:return: data with units applied
"""
# if units key exists then apply
for k, v in self.parameters.iteritems():
if v and v.get('units'):
data[k] = Q_(data[k], v.get('units'))
return data
def labels(self):
""" Returns symbol instances corresponding to labels
in the current scope.
"""
return [x for x in self[self.current_scope].values() if x.class_ == CLASS.label]
def merge_metrics(self):
"""
Merge the metrics in the internal _metrics dict into the metrics list
and delete the internal _metrics dict
"""
self.metrics.extend(self._metrics.values())
del self._metrics
def save_ical(self, ical_location): # type: (str) -> None
""" Save the calendar instance to a file """
data = self.cal.to_ical()
with open(ical_location, 'w') as ical_file:
ical_file.write(data.decode('utf-8'))
def place2thing(self, name, location):
"""Turn a Place into a Thing with the given location.
It will keep all its attached Portals.
"""
self.engine._set_thing_loc(
self.name, name, location
)
if (self.name, name) in self.engine._node_objs:
obj = self.engine._node_objs[self.name, name]
thing = Thing(self, name)
for port in obj.portals():
port.origin = thing
for port in obj.preportals():
port.destination = thing
self.engine._node_objs[self.name, name] = thing
def exists(self):
"""
Test if this task has been run.
"""
try:
self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id())
return True
except elasticsearch.NotFoundError:
logger.debug('Marker document not found.')
except elasticsearch.ElasticsearchException as err:
logger.warn(err)
return False
def atlasdb_get_zonefiles_by_name(name, max_index=None, con=None, path=None):
"""
Look up the sequence of zone file records by name, optionally up to a specific zonefile index
Returns [{'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order
"""
ret = []
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT * FROM zonefiles WHERE name = ?'
args = (name,)
if max_index:
sql += ' AND inv_index <= ?'
args += (max_index,)
# append the ordering after the optional filter so the resulting SQL stays valid
sql += ' ORDER BY inv_index;'
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
for zfinfo in res:
row = {}
row.update(zfinfo)
ret.append(row)
return ret
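A self-contained sketch of the same optional-filter query building, run against a throwaway in-memory table; the table and rows are invented for illustration.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE zonefiles (name TEXT, inv_index INTEGER)")
con.executemany("INSERT INTO zonefiles VALUES (?, ?)",
                [("foo.test", 1), ("foo.test", 2), ("foo.test", 3)])

name, max_index = "foo.test", 2
sql = "SELECT * FROM zonefiles WHERE name = ?"
args = (name,)
if max_index:
    sql += " AND inv_index <= ?"
    args += (max_index,)
sql += " ORDER BY inv_index;"
print(con.execute(sql, args).fetchall())   # -> [('foo.test', 1), ('foo.test', 2)]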
def MakeProto():
"""Make sure our protos have been compiled to python libraries."""
# Start running from one directory above the grr directory which is found by
# this scripts's location as __file__.
cwd = os.path.dirname(os.path.abspath(__file__))
# Find all the .proto files.
protos_to_compile = []
for (root, _, files) in os.walk(cwd):
for filename in files:
full_filename = os.path.join(root, filename)
if full_filename.endswith(".proto"):
proto_stat = os.stat(full_filename)
pb2_path = full_filename.rsplit(".", 1)[0] + "_pb2.py"
try:
pb2_stat = os.stat(pb2_path)
if pb2_stat.st_mtime >= proto_stat.st_mtime:
continue
except (OSError, IOError):
pass
protos_to_compile.append(full_filename)
if protos_to_compile:
# Find the protoc compiler.
protoc = os.environ.get("PROTOC", "protoc")
try:
output = subprocess.check_output([protoc, "--version"])
except (IOError, OSError):
raise RuntimeError("Unable to launch %s protoc compiler. Please "
"set the PROTOC environment variable.", protoc)
if b"3.6.1" not in output:
raise RuntimeError("Incompatible protoc compiler detected. "
"We need 3.6.1 not %s" % output)
for proto in protos_to_compile:
command = [
protoc,
# Write the python files next to the .proto files.
"--python_out",
ROOT,
"--proto_path=%s" % ROOT,
proto
]
print(
"Compiling %s with (cwd: %s): %s" % (proto, ROOT, " ".join(command)))
# The protoc compiler is too dumb to deal with full paths - it expects a
# relative path from the current working directory.
subprocess.check_call(command, cwd=ROOT)
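The skip-if-up-to-date check above compares mtimes of the .proto file and the generated _pb2.py; a throwaway demonstration of that comparison on temporary files:
import os, tempfile, time

with tempfile.TemporaryDirectory() as d:
    proto = os.path.join(d, "a.proto")
    pb2 = os.path.join(d, "a_pb2.py")
    open(proto, "w").close()
    time.sleep(0.01)
    open(pb2, "w").close()
    # the generated file is at least as new as the source, so compilation would be skipped
    print(os.stat(pb2).st_mtime >= os.stat(proto).st_mtime)   # -> True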
def recursive_copy(source, destination):
"""A wrapper around distutils.dir_util.copy_tree but won't throw any exception when the source
directory does not exist.
Args:
source (str): source path
destination (str): destination path
"""
if os.path.isdir(source):
copy_tree(source, destination)
def connect(db_url=None,
pooling=hgvs.global_config.uta.pooling,
application_name=None,
mode=None,
cache=None):
"""Connect to a UTA database instance and return a UTA interface instance.
:param db_url: URL for database connection
:type db_url: string
:param pooling: whether to use connection pooling (postgresql only)
:type pooling: bool
:param application_name: log application name in connection (useful for debugging; PostgreSQL only)
:type application_name: str
When called with an explicit db_url argument, that db_url is used for connecting.
When called without an explicit argument, the function default is
determined by the environment variable UTA_DB_URL if it exists, or
hgvs.datainterface.uta.public_db_url otherwise.
>>> hdp = connect()
>>> hdp.schema_version()
'1.1'
The format of the db_url is driver://user:pass@host/database/schema (the same
as that used by SQLAlchemy). Examples:
A remote public postgresql database:
postgresql://anonymous:[email protected]/uta/uta_20170707
A local postgresql database:
postgresql://localhost/uta_dev/uta_20170707
For postgresql db_urls, pooling=True causes connect to use a
psycopg2.pool.ThreadedConnectionPool.
"""
_logger.debug('connecting to ' + str(db_url) + '...')
if db_url is None:
db_url = _get_uta_db_url()
url = _parse_url(db_url)
if url.scheme == 'sqlite':
conn = UTA_sqlite(url, mode, cache)
elif url.scheme == 'postgresql':
conn = UTA_postgresql(
url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache)
else:
# fell through connection scheme cases
raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url))
_logger.info('connected to ' + str(db_url) + '...')
return conn
def fetch(self, card_id, data={}, **kwargs):
""""
Fetch Card for given Id
Args:
card_id : Id for which card object has to be retrieved
Returns:
Card dict for given card Id
"""
return super(Card, self).fetch(card_id, data, **kwargs)
def create_package(name, data, package_cls=None):
"""Create a package given package data.
Args:
name (str): Package name.
data (dict): Package data. Must conform to `package_maker.package_schema`.
Returns:
`Package` object.
"""
from rez.package_maker__ import PackageMaker
maker = PackageMaker(name, data, package_cls=package_cls)
return maker.get_package()
def get_version(path="src/devpy/__init__.py"):
""" Return the version of by with regex intead of importing it"""
init_content = open(path, "rt").read()
pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
return re.search(pattern, init_content, re.M).group(1)
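The version-extraction regex can be checked standalone against invented file contents:
import re

init_content = '__version__ = "1.2.3"\n'   # made-up __init__.py contents
pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
print(re.search(pattern, init_content, re.M).group(1))   # -> 1.2.3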
def in_scope(self, *args, **kw):
"""
Scope this object to a parent environment (like bind but reversed.)
"""
new_self = self.copy()
new_scopes = Object.translate_to_scopes(*args, **kw)
new_self._scopes = new_self._scopes + new_scopes
return new_self
def iter_fields(self, schema: Schema) -> Iterable[Tuple[str, Field]]:
"""
Iterate through marshmallow schema fields.
Generates: name, field pairs
"""
for name in sorted(schema.fields.keys()):
field = schema.fields[name]
yield field.dump_to or name, field
def get_resource(self, uri, resource_type=None, response_format=None):
'''
Retrieve resource:
- Issues an initial GET request
- If 200, continues, 404, returns False, otherwise raises Exception
- Parse resource type
- If custom resource type parser provided, this fires
- Else, or if custom parser misses, fire HEAD request and parse LDP resource type from Link header
- Return instantiated pyfc4 resource
Args:
uri (rdflib.term.URIRef,str): input URI
resource_type (): resource class e.g. BasicContainer, NonRDFSource, or extensions thereof
response_format (str): expects mimetype / Content-Type header such as 'application/rdf+xml', 'text/turtle', etc.
Returns:
Resource
'''
# handle uri
uri = self.parse_uri(uri)
# remove fcr:metadata if included, as handled below
if uri.toPython().endswith('/fcr:metadata'):
# note: str.rstrip() strips a set of characters rather than a suffix, so slice the suffix off instead
uri = rdflib.term.URIRef(uri.toPython()[:-len('/fcr:metadata')])
# fire GET request
get_response = self.api.http_request(
'GET',
"%s/fcr:metadata" % uri,
response_format=response_format)
# 404, item does not exist, return False
if get_response.status_code == 404:
logger.debug('resource uri %s not found, returning False' % uri)
return False
# assume exists, parse headers for resource type and return instance
elif get_response.status_code == 200:
# if resource_type not provided
if not resource_type:
# if custom resource type parser affixed to repo instance, fire
if self.custom_resource_type_parser:
logger.debug("custom resource type parser provided, attempting")
resource_type = self.custom_resource_type_parser(self, uri, get_response)
# parse LDP resource type from headers if custom resource parser misses,
# or not provided
if not resource_type:
# Issue HEAD request to get LDP resource type from URI proper, not /fcr:metadata
head_response = self.api.http_request('HEAD', uri)
resource_type = self.api.parse_resource_type(head_response)
logger.debug('using resource type: %s' % resource_type)
# return resource
return resource_type(self,
uri,
response=get_response)
else:
raise Exception('HTTP %s, error retrieving resource uri %s' % (get_response.status_code, uri))
def get_cherry_pick_for_ref_name(self, project, repository_id, ref_name):
"""GetCherryPickForRefName.
[Preview API] Retrieve information about a cherry pick for a specific branch.
:param str project: Project ID or project name
:param str repository_id: ID of the repository.
:param str ref_name: The GitAsyncRefOperationParameters generatedRefName used for the cherry pick operation.
:rtype: :class:`<GitCherryPick> <azure.devops.v5_1.git.models.GitCherryPick>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
query_parameters = {}
if ref_name is not None:
query_parameters['refName'] = self._serialize.query('ref_name', ref_name, 'str')
response = self._send(http_method='GET',
location_id='033bad68-9a14-43d1-90e0-59cb8856fef6',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('GitCherryPick', response)
def upload_to_mugshot(instance, filename):
"""
Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH``, saving it
under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.pk)
path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username,
'id': instance.user.id,
'date': instance.user.date_joined,
'date_now': get_datetime_now().date()}
return '%(path)s%(hash)s.%(extension)s' % {'path': path,
'hash': hash[:10],
'extension': extension}
def _expand_des_key(key):
"""
Expand the key from a 7-byte password key into an 8-byte DES key
"""
key = key[:7] + b'\0' * (7 - len(key))
byte = struct.unpack_from('BBBBBBB', key)
s = struct.pack('B', ((byte[0] >> 1) & 0x7f) << 1)
s += struct.pack("B", ((byte[0] & 0x01) << 6 | ((byte[1] >> 2) & 0x3f)) << 1)
s += struct.pack("B", ((byte[1] & 0x03) << 5 | ((byte[2] >> 3) & 0x1f)) << 1)
s += struct.pack("B", ((byte[2] & 0x07) << 4 | ((byte[3] >> 4) & 0x0f)) << 1)
s += struct.pack("B", ((byte[3] & 0x0f) << 3 | ((byte[4] >> 5) & 0x07)) << 1)
s += struct.pack("B", ((byte[4] & 0x1f) << 2 | ((byte[5] >> 6) & 0x03)) << 1)
s += struct.pack("B", ((byte[5] & 0x3f) << 1 | ((byte[6] >> 7) & 0x01)) << 1)
s += struct.pack("B", (byte[6] & 0x7f) << 1)
return s
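A small usage sketch for _expand_des_key(); the password bytes are made up, and note the helper relies on struct being imported at module level (not shown in this excerpt).
import struct

expanded = _expand_des_key(b"passwd")   # shorter inputs are zero-padded to 7 bytes first
print(len(expanded))                    # -> 8
print(expanded.hex())                   # each output byte carries 7 key bits shifted left by one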
def _run_job(self):
"""
Build a bsub argument that will run lsf_runner.py on the directory we've specified.
"""
args = []
if isinstance(self.output(), list):
log_output = os.path.split(self.output()[0].path)
else:
log_output = os.path.split(self.output().path)
args += ["bsub", "-q", self.queue_flag]
args += ["-n", str(self.n_cpu_flag)]
args += ["-M", str(self.memory_flag)]
args += ["-R", "rusage[%s]" % self.resource_flag]
args += ["-W", str(self.runtime_flag)]
if self.job_name_flag:
args += ["-J", str(self.job_name_flag)]
args += ["-o", os.path.join(log_output[0], "job.out")]
args += ["-e", os.path.join(log_output[0], "job.err")]
if self.extra_bsub_args:
args += self.extra_bsub_args.split()
# Find where the runner file is
runner_path = os.path.abspath(lsf_runner.__file__)
args += [runner_path]
args += [self.tmp_dir]
# That should do it. Let the world know what we're doing.
LOGGER.info("### LSF SUBMISSION ARGS: %s",
" ".join([str(a) for a in args]))
# Submit the job
run_job_proc = subprocess.Popen(
[str(a) for a in args],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=self.tmp_dir)
output = run_job_proc.communicate()[0]
# ASSUMPTION
# The result will be of the format
# Job <123> is submitted to queue <myqueue>
# So get the number in those first brackets.
# I cannot think of a better workaround that leaves logic on the Task side of things.
LOGGER.info("### JOB SUBMISSION OUTPUT: %s", str(output))
self.job_id = int(output.split("<")[1].split(">")[0])
LOGGER.info(
"Job %ssubmitted as job %s",
self.job_name_flag + ' ',
str(self.job_id)
)
self._track_job()
# If we want to save the job temporaries, then do so
# We'll move them to be next to the job output
if self.save_job_info:
LOGGER.info("Saving up temporary bits")
# dest_dir = self.output().path
shutil.move(self.tmp_dir, "/".join(log_output[0:-1]))
# Now delete the temporaries, if they're there.
self._finish() | Build a bsub argument that will run lsf_runner.py on the directory we've specified. | Below is the the instruction that describes the task:
### Input:
Build a bsub argument that will run lsf_runner.py on the directory we've specified.
### Response:
def _run_job(self):
"""
Build a bsub argument that will run lsf_runner.py on the directory we've specified.
"""
args = []
if isinstance(self.output(), list):
log_output = os.path.split(self.output()[0].path)
else:
log_output = os.path.split(self.output().path)
args += ["bsub", "-q", self.queue_flag]
args += ["-n", str(self.n_cpu_flag)]
args += ["-M", str(self.memory_flag)]
args += ["-R", "rusage[%s]" % self.resource_flag]
args += ["-W", str(self.runtime_flag)]
if self.job_name_flag:
args += ["-J", str(self.job_name_flag)]
args += ["-o", os.path.join(log_output[0], "job.out")]
args += ["-e", os.path.join(log_output[0], "job.err")]
if self.extra_bsub_args:
args += self.extra_bsub_args.split()
# Find where the runner file is
runner_path = os.path.abspath(lsf_runner.__file__)
args += [runner_path]
args += [self.tmp_dir]
# That should do it. Let the world know what we're doing.
LOGGER.info("### LSF SUBMISSION ARGS: %s",
" ".join([str(a) for a in args]))
# Submit the job
run_job_proc = subprocess.Popen(
[str(a) for a in args],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=self.tmp_dir)
output = run_job_proc.communicate()[0]
# ASSUMPTION
# The result will be of the format
# Job <123> is submitted to queue <myqueue>
# So get the number in those first brackets.
# I cannot think of a better workaround that leaves logic on the Task side of things.
LOGGER.info("### JOB SUBMISSION OUTPUT: %s", str(output))
self.job_id = int(output.split("<")[1].split(">")[0])
LOGGER.info(
"Job %ssubmitted as job %s",
self.job_name_flag + ' ',
str(self.job_id)
)
self._track_job()
# If we want to save the job temporaries, then do so
# We'll move them to be next to the job output
if self.save_job_info:
LOGGER.info("Saving up temporary bits")
# dest_dir = self.output().path
shutil.move(self.tmp_dir, "/".join(log_output[0:-1]))
# Now delete the temporaries, if they're there.
self._finish() |
def get_bag_info(bag_file):
'''Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays'''
# Get the info on the bag
bag_info = yaml.load(subprocess.Popen(
['rosbag', 'info', '--yaml', bag_file],
stdout=subprocess.PIPE).communicate()[0])
return bag_info | Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays | Below is the the instruction that describes the task:
### Input:
Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays
### Response:
def get_bag_info(bag_file):
'''Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays'''
# Get the info on the bag
bag_info = yaml.load(subprocess.Popen(
['rosbag', 'info', '--yaml', bag_file],
stdout=subprocess.PIPE).communicate()[0])
return bag_info |
def update_widget(self, idx=None):
"""Forces the widget at given index to be updated from the
property value. If index is not given, all controlled
widgets will be updated. This method should be called
directly by the user when the property is not observable, or
in very unusual conditions."""
if idx is None:
for w in self._widgets:
idx = self._get_idx_from_widget(w)
self._write_widget(self._read_property(idx), idx)
pass
else: self._write_widget(self._read_property(idx), idx)
return | Forces the widget at given index to be updated from the
property value. If index is not given, all controlled
widgets will be updated. This method should be called
directly by the user when the property is not observable, or
in very unusual conditions. | Below is the the instruction that describes the task:
### Input:
Forces the widget at given index to be updated from the
property value. If index is not given, all controlled
widgets will be updated. This method should be called
directly by the user when the property is not observable, or
in very unusual conditions.
### Response:
def update_widget(self, idx=None):
"""Forces the widget at given index to be updated from the
property value. If index is not given, all controlled
widgets will be updated. This method should be called
directly by the user when the property is not observable, or
in very unusual conditions."""
if idx is None:
for w in self._widgets:
idx = self._get_idx_from_widget(w)
self._write_widget(self._read_property(idx), idx)
pass
else: self._write_widget(self._read_property(idx), idx)
return |
def word2array(ft_names, word):
"""Converts `word` [[(value, feature),...],...] to a NumPy array
Given a word consisting of lists of lists/sets of (value, feature) tuples,
return a NumPy array where each row is a segment and each column is a
feature.
Args:
ft_names (list): list of feature names (as strings) in order; this
argument controls what features are included in the
array that is output and their order vis-a-vis the
columns of the array
word (list): list of lists of feature tuples (output by
FeatureTable.word_fts)
Returns:
ndarray: array in which each row is a segment and each column
is a feature
"""
vdict = {'+': 1, '-': -1, '0': 0}
def seg2col(seg):
seg = dict([(k, v) for (v, k) in seg])
return [vdict[seg[ft]] for ft in ft_names]
return numpy.array([seg2col(s) for s in word], order='F') | Converts `word` [[(value, feature),...],...] to a NumPy array
Given a word consisting of lists of lists/sets of (value, feature) tuples,
return a NumPy array where each row is a segment and each column is a
feature.
Args:
ft_names (list): list of feature names (as strings) in order; this
argument controls what features are included in the
array that is output and their order vis-a-vis the
columns of the array
word (list): list of lists of feature tuples (output by
FeatureTable.word_fts)
Returns:
ndarray: array in which each row is a segment and each column
is a feature | Below is the the instruction that describes the task:
### Input:
Converts `word` [[(value, feature),...],...] to a NumPy array
Given a word consisting of lists of lists/sets of (value, feature) tuples,
return a NumPy array where each row is a segment and each column is a
feature.
Args:
ft_names (list): list of feature names (as strings) in order; this
argument controls what features are included in the
array that is output and their order vis-a-vis the
columns of the array
word (list): list of lists of feature tuples (output by
FeatureTable.word_fts)
Returns:
ndarray: array in which each row is a segment and each column
is a feature
### Response:
def word2array(ft_names, word):
"""Converts `word` [[(value, feature),...],...] to a NumPy array
Given a word consisting of lists of lists/sets of (value, feature) tuples,
return a NumPy array where each row is a segment and each column is a
feature.
Args:
ft_names (list): list of feature names (as strings) in order; this
argument controls what features are included in the
array that is output and their order vis-a-vis the
columns of the array
word (list): list of lists of feature tuples (output by
FeatureTable.word_fts)
Returns:
ndarray: array in which each row is a segment and each column
is a feature
"""
vdict = {'+': 1, '-': -1, '0': 0}
def seg2col(seg):
seg = dict([(k, v) for (v, k) in seg])
return [vdict[seg[ft]] for ft in ft_names]
return numpy.array([seg2col(s) for s in word], order='F') |
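A self-contained toy example of the expected shapes; the feature names here are invented for illustration and are not a real phonological feature set.
import numpy
ft_names = ['syl', 'son', 'cont']  # hypothetical feature order
word = [  # two segments, each a list of (value, feature) pairs
    [('+', 'syl'), ('-', 'son'), ('0', 'cont')],
    [('-', 'syl'), ('+', 'son'), ('+', 'cont')],
]
print(word2array(ft_names, word))  # assumes the function above is importable
# [[ 1 -1  0]
#  [-1  1  1]]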
def getLogs(self, CorpNum, MgtKeyType, MgtKey):
""" 세금계산서 문서이력 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Logs", CorpNum) | Retrieve the list of document history for a tax invoice
args
CorpNum : member company's business registration number
MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
MgtKey : partner management key
return
list of document history info as List
raise
PopbillException | Below is the the instruction that describes the task:
### Input:
Retrieve the list of document history for a tax invoice
args
CorpNum : member company's business registration number
MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
MgtKey : partner management key
return
list of document history info as List
raise
PopbillException
### Response:
def getLogs(self, CorpNum, MgtKeyType, MgtKey):
""" 세금계산서 문서이력 목록 확인
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
return
문서이력 정보 목록 as List
raise
PopbillException
"""
if MgtKeyType not in self.__MgtKeyTypes:
raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Logs", CorpNum) |
def unweave(target, *advices):
"""Unweave advices from input target."""
advices = (
advice if isinstance(advice, Advice) else Advice(advice)
for advice in advices
)
unweave(target=target, *advices) | Unweave advices from input target. | Below is the the instruction that describes the task:
### Input:
Unweave advices from input target.
### Response:
def unweave(target, *advices):
"""Unweave advices from input target."""
advices = (
advice if isinstance(advice, Advice) else Advice(advice)
for advice in advices
)
unweave(target=target, *advices) |
def _check_cols(df, col_names):
""" Raise an AttributeError if `df` does not have a column named as an item of
the list of strings `col_names`.
"""
for col in col_names:
if not hasattr(df, col):
raise AttributeError("DataFrame does not have a '{}' column, got {}.".format(col,
df.columns)) | Raise an AttributeError if `df` does not have a column named as an item of
the list of strings `col_names`. | Below is the the instruction that describes the task:
### Input:
Raise an AttributeError if `df` does not have a column named as an item of
the list of strings `col_names`.
### Response:
def _check_cols(df, col_names):
""" Raise an AttributeError if `df` does not have a column named as an item of
the list of strings `col_names`.
"""
for col in col_names:
if not hasattr(df, col):
raise AttributeError("DataFrame does not have a '{}' column, got {}.".format(col,
df.columns)) |
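For instance, with a small pandas DataFrame the check passes silently when the named columns exist and raises otherwise (a sketch; the helper relies only on attribute access):
import pandas as pd
df = pd.DataFrame({'onset': [0.1, 0.5], 'duration': [1.0, 2.0]})
_check_cols(df, ['onset', 'duration'])  # silent: both columns exist
try:
    _check_cols(df, ['amplitude'])
except AttributeError as err:
    print(err)  # reports the missing 'amplitude' column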
def points_close(a,b):
'''
points_close(a,b) yields True if points a and b are close to each other and False otherwise.
'''
(a,b) = [np.asarray(u) for u in (a,b)]
if len(a.shape) == 2 or len(b.shape) == 2: (a,b) = [np.reshape(u,(len(u),-1)) for u in (a,b)]
return np.isclose(np.sqrt(np.sum((a - b)**2, axis=0)), 0) | points_close(a,b) yields True if points a and b are close to each other and False otherwise. | Below is the the instruction that describes the task:
### Input:
points_close(a,b) yields True if points a and b are close to each other and False otherwise.
### Response:
def points_close(a,b):
'''
points_close(a,b) yields True if points a and b are close to each other and False otherwise.
'''
(a,b) = [np.asarray(u) for u in (a,b)]
if len(a.shape) == 2 or len(b.shape) == 2: (a,b) = [np.reshape(u,(len(u),-1)) for u in (a,b)]
return np.isclose(np.sqrt(np.sum((a - b)**2, axis=0)), 0) |
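A quick numerical check of the behaviour for plain 1-D points:
import numpy as np
print(points_close((0.0, 0.0), (0.0, 1e-12)))  # True, the separation is negligible
print(points_close((0.0, 0.0), (3.0, 4.0)))  # False, the separation is 5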
def gen_output_fmt_1(self, fmt):
"""given a single format specifier, get_output_fmt_1() constructs and returns
a list of tuples for matching against that specifier.
Each element of this list is a tuple
(gen_fmt, cvt_fmt, sz)
where:
gen_fmt is the Python format specifier for assembling this value into
the string constructed for output;
cvt_fmt is the Python format specifier for converting this value into
a string that will be assembled into the output string; and
sz is the width of this field.
"""
# first, remove any surrounding space
fmt = fmt.strip()
# get any leading digits indicating repetition
match = re.match("(\d+)(.+)", fmt)
if match is None:
reps = 1
else:
reps = int(match.group(1))
fmt = match.group(2)
if fmt[0] == "(": # process parenthesized format list recursively
fmt = fmt[1:-1]
fmt_list = fmt.split(",")
rexp = self.gen_output_fmt(fmt_list)
else:
if fmt[0] in "iI": # integer
sz = fmt[1:]
gen_fmt = "{}"
cvt_fmt = "{:" + str(sz) + "d}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "xX":
gen_fmt = " "
rexp = [(gen_fmt, None, None)]
elif fmt[0] in "eEfFgG": # various floating point formats
idx0 = fmt.find(".")
sz = fmt[1:idx0]
suffix = fmt[idx0 + 1 :]
# The 'E' and G formats can optionally specify the width of
# the exponent, e.g.: 'E15.3E2'. For now we ignore any such
# exponent width -- but if it's there, we need to extract
# the sequence of digits before it.
m = re.match("(\d+).*", suffix)
assert m is not None, f"Improper format? '{fmt}'"
prec = m.group(1)
gen_fmt = "{}"
cvt_fmt = "{:" + sz + "." + prec + fmt[0] + "}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "pP": # scaling factor
# For now we ignore scaling: there are lots of other things we
# need to spend time on. To fix later if necessary.
rest_of_fmt = fmt[1:]
rexp = self.gen_output_fmt_1(rest_of_fmt)
elif fmt[0] in "'\"": # character string
sz = len(fmt) - 2 # -2 for the quote at either end
gen_fmt = fmt[1:-1]
rexp = [(gen_fmt, None, None)]
elif fmt[0] == "/": # newlines
gen_fmt = "\\n" * len(fmt)
rexp = [(gen_fmt, None, None)]
else:
raise For2PyError(
f"ERROR: Unrecognized format specifier {fmt}\n"
)
# replicate the regular expression by the repetition factor in the format
rexp *= reps
return rexp | given a single format specifier, gen_output_fmt_1() constructs and returns
a list of tuples for matching against that specifier.
Each element of this list is a tuple
(gen_fmt, cvt_fmt, sz)
where:
gen_fmt is the Python format specifier for assembling this value into
the string constructed for output;
cvt_fmt is the Python format specifier for converting this value into
a string that will be assembled into the output string; and
sz is the width of this field. | Below is the the instruction that describes the task:
### Input:
given a single format specifier, gen_output_fmt_1() constructs and returns
a list of tuples for matching against that specifier.
Each element of this list is a tuple
(gen_fmt, cvt_fmt, sz)
where:
gen_fmt is the Python format specifier for assembling this value into
the string constructed for output;
cvt_fmt is the Python format specifier for converting this value into
a string that will be assembled into the output string; and
sz is the width of this field.
### Response:
def gen_output_fmt_1(self, fmt):
"""given a single format specifier, get_output_fmt_1() constructs and returns
a list of tuples for matching against that specifier.
Each element of this list is a tuple
(gen_fmt, cvt_fmt, sz)
where:
gen_fmt is the Python format specifier for assembling this value into
the string constructed for output;
cvt_fmt is the Python format specifier for converting this value into
a string that will be assembled into the output string; and
sz is the width of this field.
"""
# first, remove any surrounding space
fmt = fmt.strip()
# get any leading digits indicating repetition
match = re.match("(\d+)(.+)", fmt)
if match is None:
reps = 1
else:
reps = int(match.group(1))
fmt = match.group(2)
if fmt[0] == "(": # process parenthesized format list recursively
fmt = fmt[1:-1]
fmt_list = fmt.split(",")
rexp = self.gen_output_fmt(fmt_list)
else:
if fmt[0] in "iI": # integer
sz = fmt[1:]
gen_fmt = "{}"
cvt_fmt = "{:" + str(sz) + "d}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "xX":
gen_fmt = " "
rexp = [(gen_fmt, None, None)]
elif fmt[0] in "eEfFgG": # various floating point formats
idx0 = fmt.find(".")
sz = fmt[1:idx0]
suffix = fmt[idx0 + 1 :]
# The 'E' and G formats can optionally specify the width of
# the exponent, e.g.: 'E15.3E2'. For now we ignore any such
# exponent width -- but if it's there, we need to extract
# the sequence of digits before it.
m = re.match("(\d+).*", suffix)
assert m is not None, f"Improper format? '{fmt}'"
prec = m.group(1)
gen_fmt = "{}"
cvt_fmt = "{:" + sz + "." + prec + fmt[0] + "}"
rexp = [(gen_fmt, cvt_fmt, int(sz))]
elif fmt[0] in "pP": # scaling factor
# For now we ignore scaling: there are lots of other things we
# need to spend time on. To fix later if necessary.
rest_of_fmt = fmt[1:]
rexp = self.gen_output_fmt_1(rest_of_fmt)
elif fmt[0] in "'\"": # character string
sz = len(fmt) - 2 # -2 for the quote at either end
gen_fmt = fmt[1:-1]
rexp = [(gen_fmt, None, None)]
elif fmt[0] == "/": # newlines
gen_fmt = "\\n" * len(fmt)
rexp = [(gen_fmt, None, None)]
else:
raise For2PyError(
f"ERROR: Unrecognized format specifier {fmt}\n"
)
# replicate the regular expression by the repetition factor in the format
rexp *= reps
return rexp |
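To make the (gen_fmt, cvt_fmt, sz) tuples concrete, here is a hand-worked sketch of what the method would return for a few simple specifiers (worked out from the branches above, not produced by running the surrounding class), plus how such tuples drive output assembly:
# 'I5'   -> [("{}", "{:5d}", 5)]    one 5-wide right-justified integer
# 'X'    -> [(" ", None, None)]     a literal blank, nothing to convert
# 'F8.3' -> [("{}", "{:8.3F}", 8)]  8-wide float with 3 decimal places
# '2I5'  -> the 'I5' tuple repeated twice by the repetition factor
fields = [("{}", "{:5d}", 5), (" ", None, None), ("{}", "{:8.3F}", 8)]
values = iter([42, 3.14159])
out = ""
for gen_fmt, cvt_fmt, _sz in fields:
    out += gen_fmt.format(cvt_fmt.format(next(values))) if cvt_fmt else gen_fmt
print(repr(out))  # '   42    3.142'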
def WSGIHandler(self):
"""Returns GRR's WSGI handler."""
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {
"/": config.CONFIG["AdminUI.document_root"],
})
# Use DispatcherMiddleware to make sure that SharedDataMiddleware is not
# used at all if the URL path doesn't start with "/static". This is a
# workaround for cases when unicode URLs are used on systems with
# non-unicode filesystems (as detected by Werkzeug). In this case
# SharedDataMiddleware may fail early while trying to convert the
# URL into the file path and not dispatch the call further to our own
# WSGI handler.
return werkzeug_wsgi.DispatcherMiddleware(self, {
"/static": sdm,
}) | Returns GRR's WSGI handler. | Below is the the instruction that describes the task:
### Input:
Returns GRR's WSGI handler.
### Response:
def WSGIHandler(self):
"""Returns GRR's WSGI handler."""
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {
"/": config.CONFIG["AdminUI.document_root"],
})
# Use DispatcherMiddleware to make sure that SharedDataMiddleware is not
# used at all if the URL path doesn't start with "/static". This is a
# workaround for cases when unicode URLs are used on systems with
# non-unicode filesystems (as detected by Werkzeug). In this case
# SharedDataMiddleware may fail early while trying to convert the
# URL into the file path and not dispatch the call further to our own
# WSGI handler.
return werkzeug_wsgi.DispatcherMiddleware(self, {
"/static": sdm,
}) |
def absolute_distance(cls, q0, q1):
"""Quaternion absolute distance.
Find the distance between two quaternions accounting for the sign ambiguity.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive scalar corresponding to the chord of the shortest path/arc that
connects q0 to q1.
Note:
This function does not measure the distance on the hypersphere, but
it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
"""
q0_minus_q1 = q0 - q1
q0_plus_q1 = q0 + q1
d_minus = q0_minus_q1.norm
d_plus = q0_plus_q1.norm
if (d_minus < d_plus):
return d_minus
else:
return d_plus | Quaternion absolute distance.
Find the distance between two quaternions accounting for the sign ambiguity.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive scalar corresponding to the chord of the shortest path/arc that
connects q0 to q1.
Note:
This function does not measure the distance on the hypersphere, but
it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities. | Below is the the instruction that describes the task:
### Input:
Quaternion absolute distance.
Find the distance between two quaternions accounting for the sign ambiguity.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive scalar corresponding to the chord of the shortest path/arc that
connects q0 to q1.
Note:
This function does not measure the distance on the hypersphere, but
it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
### Response:
def absolute_distance(cls, q0, q1):
"""Quaternion absolute distance.
Find the distance between two quaternions accounting for the sign ambiguity.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive scalar corresponding to the chord of the shortest path/arc that
connects q0 to q1.
Note:
This function does not measure the distance on the hypersphere, but
it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
"""
q0_minus_q1 = q0 - q1
q0_plus_q1 = q0 + q1
d_minus = q0_minus_q1.norm
d_plus = q0_plus_q1.norm
if (d_minus < d_plus):
return d_minus
else:
return d_plus |
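A brief numerical check of the sign-invariance property, assuming a quaternion class in the style of pyquaternion (where subtraction, negation and .norm behave as used above); the import and constructor are that library's and are used here only as a plausible host:
from pyquaternion import Quaternion
q = Quaternion(axis=[0, 0, 1], angle=0.3)
print(Quaternion.absolute_distance(q, q))  # 0.0, identical rotation
print(Quaternion.absolute_distance(q, -q))  # 0.0, since -q encodes the same rotation
print(Quaternion.absolute_distance(q, Quaternion()))  # ~0.15, a genuinely different rotation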
def reshape_fortran(tensor, shape):
"""The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result
"""
return tensor.T.reshape(tuple(reversed(shape))).T | The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result | Below is the the instruction that describes the task:
### Input:
The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result
### Response:
def reshape_fortran(tensor, shape):
"""The missing Fortran reshape for mx.NDArray
Parameters
----------
tensor : NDArray
source tensor
shape : NDArray
desired shape
Returns
-------
output : NDArray
reordered result
"""
return tensor.T.reshape(tuple(reversed(shape))).T |
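The transpose-reshape-transpose trick is easiest to sanity-check against NumPy, which has a native Fortran-order reshape; the sketch below re-implements the helper for a NumPy array purely for comparison:
import numpy as np
def reshape_fortran_np(tensor, shape):
    # same trick as above, written for a NumPy array
    return tensor.T.reshape(tuple(reversed(shape))).T
a = np.arange(1, 7).reshape(2, 3)  # [[1 2 3] [4 5 6]]
print(reshape_fortran_np(a, (3, 2)))
print(np.reshape(a, (3, 2), order='F'))  # identical output: the helper emulates order='F'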
def fit(self, X, y=None):
"""
Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.config import yaml_parse
from pylearn2.train import Train
# build trainer
params = self.get_params()
yaml_string = Template(self.yaml_string).substitute(params)
self.trainer = yaml_parse.load(yaml_string)
assert isinstance(self.trainer, Train)
if self.trainer.dataset is not None:
raise ValueError('Train YAML database must evaluate to None.')
self.trainer.dataset = self._get_dataset(X, y)
# update monitoring dataset(s)
if (hasattr(self.trainer.algorithm, 'monitoring_dataset') and
self.trainer.algorithm.monitoring_dataset is not None):
monitoring_dataset = self.trainer.algorithm.monitoring_dataset
if len(monitoring_dataset) == 1 and '' in monitoring_dataset:
monitoring_dataset[''] = self.trainer.dataset
else:
monitoring_dataset['train'] = self.trainer.dataset
self.trainer.algorithm._set_monitoring_dataset(monitoring_dataset)
else:
self.trainer.algorithm._set_monitoring_dataset(
self.trainer.dataset)
# run main loop
self.trainer.main_loop() | Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels. | Below is the the instruction that describes the task:
### Input:
Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
### Response:
def fit(self, X, y=None):
"""
Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.config import yaml_parse
from pylearn2.train import Train
# build trainer
params = self.get_params()
yaml_string = Template(self.yaml_string).substitute(params)
self.trainer = yaml_parse.load(yaml_string)
assert isinstance(self.trainer, Train)
if self.trainer.dataset is not None:
raise ValueError('Train YAML database must evaluate to None.')
self.trainer.dataset = self._get_dataset(X, y)
# update monitoring dataset(s)
if (hasattr(self.trainer.algorithm, 'monitoring_dataset') and
self.trainer.algorithm.monitoring_dataset is not None):
monitoring_dataset = self.trainer.algorithm.monitoring_dataset
if len(monitoring_dataset) == 1 and '' in monitoring_dataset:
monitoring_dataset[''] = self.trainer.dataset
else:
monitoring_dataset['train'] = self.trainer.dataset
self.trainer.algorithm._set_monitoring_dataset(monitoring_dataset)
else:
self.trainer.algorithm._set_monitoring_dataset(
self.trainer.dataset)
# run main loop
self.trainer.main_loop() |
def associate(base_dir: str, rr_id: str, tails_hash: str) -> None:
"""
Create symbolic link to tails file named tails_hash for rev reg id rr_id.
:param rr_id: rev reg id
:param tails_hash: hash of tails file, serving as file name
"""
cd_id = rev_reg_id2cred_def_id(rr_id)
directory = join(base_dir, cd_id)
cwd = getcwd()
makedirs(directory, exist_ok=True)
chdir(directory)
symlink(tails_hash, rr_id)
chdir(cwd) | Create symbolic link to tails file named tails_hash for rev reg id rr_id.
:param rr_id: rev reg id
:param tails_hash: hash of tails file, serving as file name | Below is the the instruction that describes the task:
### Input:
Create symbolic link to tails file named tails_hash for rev reg id rr_id.
:param rr_id: rev reg id
:param tails_hash: hash of tails file, serving as file name
### Response:
def associate(base_dir: str, rr_id: str, tails_hash: str) -> None:
"""
Create symbolic link to tails file named tails_hash for rev reg id rr_id.
:param rr_id: rev reg id
:param tails_hash: hash of tails file, serving as file name
"""
cd_id = rev_reg_id2cred_def_id(rr_id)
directory = join(base_dir, cd_id)
cwd = getcwd()
makedirs(directory, exist_ok=True)
chdir(directory)
symlink(tails_hash, rr_id)
chdir(cwd) |
def sample(self, N=1):
"""Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed.
"""
if not self.filt:
self.forward()
paths = np.empty((len(self.filt), N), np.int)
paths[-1, :] = rs.multinomial(self.filt[-1], M=N)
log_trans = np.log(self.hmm.trans_mat)
for t, f in reversed(list(enumerate(self.filt[:-1]))):
for n in range(N):
probs = rs.exp_and_normalise(log_trans[:, paths[t + 1, n]] + np.log(f))
paths[t, n] = rs.multinomial_once(probs)
return paths | Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed. | Below is the the instruction that describes the task:
### Input:
Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed.
### Response:
def sample(self, N=1):
"""Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed.
"""
if not self.filt:
self.forward()
paths = np.empty((len(self.filt), N), np.int)
paths[-1, :] = rs.multinomial(self.filt[-1], M=N)
log_trans = np.log(self.hmm.trans_mat)
for t, f in reversed(list(enumerate(self.filt[:-1]))):
for n in range(N):
probs = rs.exp_and_normalise(log_trans[:, paths[t + 1, n]] + np.log(f))
paths[t, n] = rs.multinomial_once(probs)
return paths |
def model_functions(vk, model):
"""Fill the model with functions"""
def get_vk_extension_functions():
names = set()
for extension in get_extensions_filtered(vk):
for req in extension['require']:
if 'command' not in req:
continue
for command in req['command']:
cn = command['@name']
names.add(cn)
# add alias command too
for alias, n in model['alias'].items():
if n == cn:
names.add(alias)
return names
def get_count_param(command):
for param in command['param']:
if param['type'] + param.get('#text', '') == 'uint32_t*':
return param
return None
def member_has_str(name):
c = next(iter([x for x in model['constructors']
if x['name'] == name]), None)
if c and any(['char' in x['type'] for x in c['members']]):
return True
return False
def format_member(member):
type_name = member['type']
if '#text' in member:
text = member['#text'].replace('const ', '').strip()
type_name += ' ' + text
return {'name': member['name'],
'type': member['type'],
'none': member['name'] in NULL_MEMBERS,
'force_array': True if '@len' in member else False,
'to_create': False,
'has_str': member_has_str(member['type'])}
def format_return_member(member):
t = member['type']
static_count = None
if '@len' in member and '::' in member['@len']:
lens = member['@len'].split('::')
static_count = {'key': lens[0], 'value': lens[1]}
is_handle = t in get_handle_names(vk)
is_enum = t in get_enum_names(vk)
is_struct = t in get_struct_names(vk)
return {'name': member['name'],
'type': t,
'handle': is_handle,
'enum': is_enum,
'struct': is_struct,
'static_count': static_count,
'has_str': member_has_str(member['type'])}
ALLOCATE_PREFIX = ('vkCreate', 'vkGet', 'vkEnumerate', 'vkAllocate',
'vkMap', 'vkAcquire')
ALLOCATE_EXCEPTION = ('vkGetFenceStatus', 'vkGetEventStatus',
'vkGetQueryPoolResults',
'vkGetPhysicalDeviceXlibPresentationSupportKHR')
COUNT_EXCEPTION = ('vkAcquireNextImageKHR', 'vkEnumerateInstanceVersion')
model['functions'] = []
model['extension_functions'] = []
functions = [f for f in vk['registry']['commands']['command']]
extension_function_names = get_vk_extension_functions()
for function in functions:
if '@alias' in function:
continue
fname = function['proto']['name']
ftype = function['proto']['type']
if fname in CUSTOM_FUNCTIONS:
continue
if type(function['param']) is not list:
function['param'] = [function['param']]
count_param = get_count_param(function)
if fname in COUNT_EXCEPTION:
count_param = None
is_allocate = any([fname.startswith(a) for a in ALLOCATE_PREFIX])
is_count = is_allocate and count_param is not None
if fname in ALLOCATE_EXCEPTION or ftype == 'VkBool32':
is_allocate = is_count = False
members = []
for member in function['param']:
members.append(format_member(member))
return_member = None
if is_allocate:
return_member = format_return_member(function['param'][-1])
members[-1]['to_create'] = True
if is_count:
members[-2]['to_create'] = True
f = {
'name': fname,
'members': members,
'allocate': is_allocate,
'count': is_count,
'return_boolean': True if ftype == 'VkBool32' else False,
'return_result': True if ftype == 'VkResult' else False,
'return_member': return_member,
'is_extension': fname in extension_function_names
}
model['functions'].append(f) | Fill the model with functions | Below is the the instruction that describes the task:
### Input:
Fill the model with functions
### Response:
def model_functions(vk, model):
"""Fill the model with functions"""
def get_vk_extension_functions():
names = set()
for extension in get_extensions_filtered(vk):
for req in extension['require']:
if 'command' not in req:
continue
for command in req['command']:
cn = command['@name']
names.add(cn)
# add alias command too
for alias, n in model['alias'].items():
if n == cn:
names.add(alias)
return names
def get_count_param(command):
for param in command['param']:
if param['type'] + param.get('#text', '') == 'uint32_t*':
return param
return None
def member_has_str(name):
c = next(iter([x for x in model['constructors']
if x['name'] == name]), None)
if c and any(['char' in x['type'] for x in c['members']]):
return True
return False
def format_member(member):
type_name = member['type']
if '#text' in member:
text = member['#text'].replace('const ', '').strip()
type_name += ' ' + text
return {'name': member['name'],
'type': member['type'],
'none': member['name'] in NULL_MEMBERS,
'force_array': True if '@len' in member else False,
'to_create': False,
'has_str': member_has_str(member['type'])}
def format_return_member(member):
t = member['type']
static_count = None
if '@len' in member and '::' in member['@len']:
lens = member['@len'].split('::')
static_count = {'key': lens[0], 'value': lens[1]}
is_handle = t in get_handle_names(vk)
is_enum = t in get_enum_names(vk)
is_struct = t in get_struct_names(vk)
return {'name': member['name'],
'type': t,
'handle': is_handle,
'enum': is_enum,
'struct': is_struct,
'static_count': static_count,
'has_str': member_has_str(member['type'])}
ALLOCATE_PREFIX = ('vkCreate', 'vkGet', 'vkEnumerate', 'vkAllocate',
'vkMap', 'vkAcquire')
ALLOCATE_EXCEPTION = ('vkGetFenceStatus', 'vkGetEventStatus',
'vkGetQueryPoolResults',
'vkGetPhysicalDeviceXlibPresentationSupportKHR')
COUNT_EXCEPTION = ('vkAcquireNextImageKHR', 'vkEnumerateInstanceVersion')
model['functions'] = []
model['extension_functions'] = []
functions = [f for f in vk['registry']['commands']['command']]
extension_function_names = get_vk_extension_functions()
for function in functions:
if '@alias' in function:
continue
fname = function['proto']['name']
ftype = function['proto']['type']
if fname in CUSTOM_FUNCTIONS:
continue
if type(function['param']) is not list:
function['param'] = [function['param']]
count_param = get_count_param(function)
if fname in COUNT_EXCEPTION:
count_param = None
is_allocate = any([fname.startswith(a) for a in ALLOCATE_PREFIX])
is_count = is_allocate and count_param is not None
if fname in ALLOCATE_EXCEPTION or ftype == 'VkBool32':
is_allocate = is_count = False
members = []
for member in function['param']:
members.append(format_member(member))
return_member = None
if is_allocate:
return_member = format_return_member(function['param'][-1])
members[-1]['to_create'] = True
if is_count:
members[-2]['to_create'] = True
f = {
'name': fname,
'members': members,
'allocate': is_allocate,
'count': is_count,
'return_boolean': True if ftype == 'VkBool32' else False,
'return_result': True if ftype == 'VkResult' else False,
'return_member': return_member,
'is_extension': fname in extension_function_names
}
model['functions'].append(f) |
def maybe_sendraw(self, host_port: Tuple[int, int], messagedata: bytes):
""" Send message to recipient if the transport is running. """
# Don't sleep if timeout is zero, otherwise a context-switch is done
# and the message is delayed, increasing its latency
sleep_timeout = self.throttle_policy.consume(1)
if sleep_timeout:
gevent.sleep(sleep_timeout)
# Check the udp socket is still available before trying to send the
# message. There must be *no context-switches after this test*.
if hasattr(self.server, 'socket'):
self.server.sendto(
messagedata,
host_port,
) | Send message to recipient if the transport is running. | Below is the the instruction that describes the task:
### Input:
Send message to recipient if the transport is running.
### Response:
def maybe_sendraw(self, host_port: Tuple[int, int], messagedata: bytes):
""" Send message to recipient if the transport is running. """
# Don't sleep if timeout is zero, otherwise a context-switch is done
# and the message is delayed, increasing its latency
sleep_timeout = self.throttle_policy.consume(1)
if sleep_timeout:
gevent.sleep(sleep_timeout)
# Check the udp socket is still available before trying to send the
# message. There must be *no context-switches after this test*.
if hasattr(self.server, 'socket'):
self.server.sendto(
messagedata,
host_port,
) |
def D_s(self, H_0, Om0, Ode0=None):
"""
angular diameter to source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
"""
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_s | angular diameter to source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc] | Below is the the instruction that describes the task:
### Input:
angular diameter to source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
### Response:
def D_s(self, H_0, Om0, Ode0=None):
"""
angular diameter to source
:param H_0: Hubble parameter [km/s/Mpc]
:param Om0: normalized matter density at present time
:return: float [Mpc]
"""
lensCosmo = self._get_cosom(H_0, Om0, Ode0)
return lensCosmo.D_s |
def _default_poll_callback(self, poll_resp):
"""
Checks the condition in poll response to determine if it is complete
and no subsequent poll requests should be done.
"""
if poll_resp.parsed is None:
return False
success_list = ['UpdatesComplete', True, 'COMPLETE']
status = None
if self.response_format == 'xml':
status = poll_resp.parsed.find('./Status').text
elif self.response_format == 'json':
status = poll_resp.parsed.get(
'Status', poll_resp.parsed.get('status'))
if status is None:
raise RuntimeError('Unable to get poll response status.')
return status in success_list | Checks the condition in poll response to determine if it is complete
and no subsequent poll requests should be done. | Below is the the instruction that describes the task:
### Input:
Checks the condition in poll response to determine if it is complete
and no subsequent poll requests should be done.
### Response:
def _default_poll_callback(self, poll_resp):
"""
Checks the condition in poll response to determine if it is complete
and no subsequent poll requests should be done.
"""
if poll_resp.parsed is None:
return False
success_list = ['UpdatesComplete', True, 'COMPLETE']
status = None
if self.response_format == 'xml':
status = poll_resp.parsed.find('./Status').text
elif self.response_format == 'json':
status = poll_resp.parsed.get(
'Status', poll_resp.parsed.get('status'))
if status is None:
raise RuntimeError('Unable to get poll response status.')
return status in success_list |
def sc(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
"""Compute the Spectral Correlation (SC).
.. image:: /pictures/SC.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral correlation metric measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Correlation value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sc(sim, obs)
0.27991341383646606
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
"""
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = np.dot(observed_array - np.mean(observed_array), simulated_array - np.mean(simulated_array))
b = np.linalg.norm(observed_array - np.mean(observed_array))
c = np.linalg.norm(simulated_array - np.mean(simulated_array))
e = b * c
return np.arccos(a / e) | Compute the Spectral Correlation (SC).
.. image:: /pictures/SC.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral correlation metric measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Correlation value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sc(sim, obs)
0.27991341383646606
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166. | Below is the the instruction that describes the task:
### Input:
Compute the Spectral Correlation (SC).
.. image:: /pictures/SC.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral correlation metric measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Correlation value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sc(sim, obs)
0.27991341383646606
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
### Response:
def sc(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
"""Compute the Spectral Correlation (SC).
.. image:: /pictures/SC.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral correlation metric measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Correlation value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sc(sim, obs)
0.27991341383646606
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
"""
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = np.dot(observed_array - np.mean(observed_array), simulated_array - np.mean(simulated_array))
b = np.linalg.norm(observed_array - np.mean(observed_array))
c = np.linalg.norm(simulated_array - np.mean(simulated_array))
e = b * c
return np.arccos(a / e) |
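The docstring's own example can be reproduced with plain NumPy, since (with no values filtered out) the metric is just the angle between the two mean-centred series:
import numpy as np
sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])
s, o = sim - sim.mean(), obs - obs.mean()
print(np.arccos(np.dot(o, s) / (np.linalg.norm(o) * np.linalg.norm(s))))  # ~0.2799134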
def tokenize(self, string):
"""Used to parce a string into tokens
This function is to take in a string and return a list of tokens
Args:
string(str): This is a string of words or a sentence to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing.
"""
s = string
s = re.sub('\t', " ", s)
s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
s = re.sub("([^0-9]),", "\g<1> , ", s)
s = re.sub(",([^0-9])", " , \g<1>", s)
s = re.sub("^(')", "\g<1> ", s)
s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
words = s.strip().split()
p1 = re.compile(".*" + regex_letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
token_list = []
for word in words:
m1 = p1.match(word)
m2 = p2.match(word)
if m1 and word not in abbreviations_list and not m2:
token_list.append(word[0: word.find('.')])
token_list.append(word[word.find('.')])
else:
token_list.append(word)
return token_list | Used to parse a string into tokens
This function takes in a string and returns a list of tokens
Args:
string(str): This is a string of words or a sentence to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing. | Below is the the instruction that describes the task:
### Input:
Used to parse a string into tokens
This function takes in a string and returns a list of tokens
Args:
string(str): This is a string of words or a sentence to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing.
### Response:
def tokenize(self, string):
"""Used to parce a string into tokens
This function is to take in a string and return a list of tokens
Args:
string(str): This is a string of words or a sentence to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing.
"""
s = string
s = re.sub('\t', " ", s)
s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
s = re.sub("([^0-9]),", "\g<1> , ", s)
s = re.sub(",([^0-9])", " , \g<1>", s)
s = re.sub("^(')", "\g<1> ", s)
s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
words = s.strip().split()
p1 = re.compile(".*" + regex_letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
token_list = []
for word in words:
m1 = p1.match(word)
m2 = p2.match(word)
if m1 and word not in abbreviations_list and not m2:
token_list.append(word[0: word.find('.')])
token_list.append(word[word.find('.')])
else:
token_list.append(word)
return token_list |
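The two period-handling patterns near the end can be tried in isolation; this sketch re-creates them with a simplified stand-in for regex_letter_number (defined elsewhere in the module) to show why 'U.S.' and 'St.' are kept whole while an ordinary word ending in a period is split, assuming the word is not in the abbreviation list:
import re
letter_number = "[A-Za-z0-9]"  # stand-in for the module-level regex_letter_number
p1 = re.compile(".*" + letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
for word in ["U.S.", "St.", "end."]:
    split = bool(p1.match(word)) and not p2.match(word)
    print(word, "split before final period" if split else "kept whole")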
def delete(self, config_object, purge=None, recurse=False):
"""
send a delete request
XXX [more here]
"""
rest_url = config_object.href
params = []
# purge deletes the SLD from disk when a style is deleted
if purge:
params.append("purge=" + str(purge))
# recurse deletes the resource when a layer is deleted.
if recurse:
params.append("recurse=true")
if params:
rest_url = rest_url + "?" + "&".join(params)
headers = {
"Content-type": "application/xml",
"Accept": "application/xml"
}
resp = self.http_request(rest_url, method='delete', headers=headers)
if resp.status_code != 200:
raise FailedRequestError('Failed to make DELETE request: {}, {}'.format(resp.status_code, resp.text))
self._cache.clear()
# do we really need to return anything other than None?
return (resp) | send a delete request
XXX [more here] | Below is the the instruction that describes the task:
### Input:
send a delete request
XXX [more here]
### Response:
def delete(self, config_object, purge=None, recurse=False):
"""
send a delete request
XXX [more here]
"""
rest_url = config_object.href
params = []
# purge deletes the SLD from disk when a style is deleted
if purge:
params.append("purge=" + str(purge))
# recurse deletes the resource when a layer is deleted.
if recurse:
params.append("recurse=true")
if params:
rest_url = rest_url + "?" + "&".join(params)
headers = {
"Content-type": "application/xml",
"Accept": "application/xml"
}
resp = self.http_request(rest_url, method='delete', headers=headers)
if resp.status_code != 200:
raise FailedRequestError('Failed to make DELETE request: {}, {}'.format(resp.status_code, resp.text))
self._cache.clear()
# do we really need to return anything other than None?
return (resp) |
def filter_search(self, search):
"""Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object
"""
builder = QueryBuilder(
self.filtering_fields,
self.filtering_map,
self
)
search, unmatched = builder.build(search, self.get_query_params())
# Ensure that no unsupported arguments were used.
for argument in self.get_always_allowed_arguments():
unmatched.pop(argument, None)
if unmatched:
msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
', '.join(unmatched),
', '.join(self.filtering_fields),
)
raise ParseError(msg)
return search | Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object | Below is the the instruction that describes the task:
### Input:
Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object
### Response:
def filter_search(self, search):
"""Filter given search by the filter parameter given in request.
:param search: ElasticSearch query object
"""
builder = QueryBuilder(
self.filtering_fields,
self.filtering_map,
self
)
search, unmatched = builder.build(search, self.get_query_params())
# Ensure that no unsupported arguments were used.
for argument in self.get_always_allowed_arguments():
unmatched.pop(argument, None)
if unmatched:
msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
', '.join(unmatched),
', '.join(self.filtering_fields),
)
raise ParseError(msg)
return search |
def has_public(self):
"""Whether this JWK has an asymmetric Public key."""
if self.is_symmetric:
return False
reg = JWKValuesRegistry[self._params['kty']]
for value in reg:
if reg[value].public and value in self._key:
return True | Whether this JWK has an asymmetric Public key. | Below is the the instruction that describes the task:
### Input:
Whether this JWK has an asymmetric Public key.
### Response:
def has_public(self):
"""Whether this JWK has an asymmetric Public key."""
if self.is_symmetric:
return False
reg = JWKValuesRegistry[self._params['kty']]
for value in reg:
if reg[value].public and value in self._key:
return True |
def FindByName(cls, name):
"""Find an installed VirtualTile by name.
This function searches for installed virtual tiles
using the pkg_resources entry_point `iotile.virtual_tile`.
If name is a path ending in .py, it is assumed to point to
a module on disk and loaded directly rather than using
pkg_resources.
Args:
name (str): The name of the tile to search
for.
Returns:
VirtualTile class: A virtual tile subclass that can be
instantiated to create a virtual tile.
"""
if name.endswith('.py'):
return cls.LoadFromFile(name)
reg = ComponentRegistry()
for _name, tile in reg.load_extensions('iotile.virtual_tile', name_filter=name, class_filter=VirtualTile):
return tile
raise ArgumentError("VirtualTile could not be found by name", name=name) | Find an installed VirtualTile by name.
This function searches for installed virtual tiles
using the pkg_resources entry_point `iotile.virtual_tile`.
If name is a path ending in .py, it is assumed to point to
a module on disk and loaded directly rather than using
pkg_resources.
Args:
name (str): The name of the tile to search
for.
Returns:
VirtualTile class: A virtual tile subclass that can be
instantiated to create a virtual tile. | Below is the the instruction that describes the task:
### Input:
Find an installed VirtualTile by name.
This function searches for installed virtual tiles
using the pkg_resources entry_point `iotile.virtual_tile`.
If name is a path ending in .py, it is assumed to point to
a module on disk and loaded directly rather than using
pkg_resources.
Args:
name (str): The name of the tile to search
for.
Returns:
VirtualTile class: A virtual tile subclass that can be
instantiated to create a virtual tile.
### Response:
def FindByName(cls, name):
"""Find an installed VirtualTile by name.
This function searches for installed virtual tiles
using the pkg_resources entry_point `iotile.virtual_tile`.
If name is a path ending in .py, it is assumed to point to
a module on disk and loaded directly rather than using
pkg_resources.
Args:
name (str): The name of the tile to search
for.
Returns:
VirtualTile class: A virtual tile subclass that can be
instantiated to create a virtual tile.
"""
if name.endswith('.py'):
return cls.LoadFromFile(name)
reg = ComponentRegistry()
for _name, tile in reg.load_extensions('iotile.virtual_tile', name_filter=name, class_filter=VirtualTile):
return tile
raise ArgumentError("VirtualTile could not be found by name", name=name) |
def proc_line_coordinate(self, line):
"""Extracts data from columns in ATOM/HETATM record."""
pdb_atom_col_dict = global_settings['ampal']['pdb_atom_col_dict']
at_type = line[0:6].strip() # 0
at_ser = int(line[6:11].strip()) # 1
at_name = line[12:16].strip() # 2
alt_loc = line[16].strip() # 3
res_name = line[17:20].strip() # 4
chain_id = line[21].strip() # 5
res_seq = int(line[22:26].strip()) # 6
i_code = line[26].strip() # 7
x = float(line[30:38].strip()) # 8
y = float(line[38:46].strip()) # 9
z = float(line[46:54].strip()) # 10
occupancy = float(line[54:60].strip()) # 11
temp_factor = float(line[60:66].strip()) # 12
element = line[76:78].strip() # 13
charge = line[78:80].strip() # 14
if at_name not in pdb_atom_col_dict:
pdb_atom_col_dict[at_name] = line[12:16]
pdb_col_e = PDBColFormat(atom_name=at_name, atom_col=line[12:16])
ampal_data_session.add(pdb_col_e)
self.new_labels = True
return (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
i_code, x, y, z, occupancy, temp_factor, element, charge) | Extracts data from columns in ATOM/HETATM record. | Below is the the instruction that describes the task:
### Input:
Extracts data from columns in ATOM/HETATM record.
### Response:
def proc_line_coordinate(self, line):
"""Extracts data from columns in ATOM/HETATM record."""
pdb_atom_col_dict = global_settings['ampal']['pdb_atom_col_dict']
at_type = line[0:6].strip() # 0
at_ser = int(line[6:11].strip()) # 1
at_name = line[12:16].strip() # 2
alt_loc = line[16].strip() # 3
res_name = line[17:20].strip() # 4
chain_id = line[21].strip() # 5
res_seq = int(line[22:26].strip()) # 6
i_code = line[26].strip() # 7
x = float(line[30:38].strip()) # 8
y = float(line[38:46].strip()) # 9
z = float(line[46:54].strip()) # 10
occupancy = float(line[54:60].strip()) # 11
temp_factor = float(line[60:66].strip()) # 12
element = line[76:78].strip() # 13
charge = line[78:80].strip() # 14
if at_name not in pdb_atom_col_dict:
pdb_atom_col_dict[at_name] = line[12:16]
pdb_col_e = PDBColFormat(atom_name=at_name, atom_col=line[12:16])
ampal_data_session.add(pdb_col_e)
self.new_labels = True
return (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
i_code, x, y, z, occupancy, temp_factor, element, charge) |
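A minimal standalone sketch of the fixed-column slicing used above; the ATOM record is assembled from made-up field values purely for illustration, so the columns line up exactly:

# Illustrative PDB ATOM record built field by field (all values are invented).
fields = ("ATOM  ", "    1", " ", " N  ", " ", "MET", " ", "A", "   1", " ",
          "   ", "  38.198", "  19.582", "  43.346", "  1.00", " 24.86",
          " " * 10, " N", "  ")
line = "".join(fields)
print(line[0:6].strip(), line[12:16].strip(), line[17:20].strip(),
      float(line[30:38]), float(line[38:46]), float(line[46:54]))
# -> ATOM N MET 38.198 19.582 43.346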
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue() | Write an arff structure to a string. | Below is the the instruction that describes the task:
### Input:
Write an arff structure to a string.
### Response:
def write(self,
fout=None,
fmt=SPARSE,
schema_only=False,
data_only=False):
"""
Write an arff structure to a string.
"""
assert not (schema_only and data_only), 'Make up your mind.'
assert fmt in FORMATS, 'Invalid format "%s". Should be one of: %s' % (fmt, ', '.join(FORMATS))
close = False
if fout is None:
close = True
fout = StringIO()
if not data_only:
print('% ' + re.sub("\n", "\n% ", '\n'.join(self.comment)), file=fout)
print("@relation " + self.relation, file=fout)
self.write_attributes(fout=fout)
if not schema_only:
print("@data", file=fout)
for d in self.data:
line_str = self.write_line(d, fmt=fmt)
if line_str:
print(line_str, file=fout)
if isinstance(fout, StringIO) and close:
return fout.getvalue() |
def create_hit(
self,
title,
description,
keywords,
reward,
duration_hours,
lifetime_days,
ad_url,
notification_url,
approve_requirement,
max_assignments,
us_only,
blacklist=None,
annotation=None,
):
"""Create the actual HIT and return a dict with its useful properties."""
frame_height = 600
mturk_question = self._external_question(ad_url, frame_height)
qualifications = self.build_hit_qualifications(
approve_requirement, us_only, blacklist
)
# We need a HIT_Type in order to register for REST notifications
hit_type_id = self.register_hit_type(
title, description, reward, duration_hours, keywords, qualifications
)
self.set_rest_notification(notification_url, hit_type_id)
params = {
"HITTypeId": hit_type_id,
"Question": mturk_question,
"LifetimeInSeconds": int(
datetime.timedelta(days=lifetime_days).total_seconds()
),
"MaxAssignments": max_assignments,
"UniqueRequestToken": self._request_token(),
}
if annotation:
params["RequesterAnnotation"] = annotation
response = self.mturk.create_hit_with_hit_type(**params)
if "HIT" not in response:
raise MTurkServiceException("HIT request was invalid for unknown reason.")
return self._translate_hit(response["HIT"]) | Create the actual HIT and return a dict with its useful properties. | Below is the the instruction that describes the task:
### Input:
Create the actual HIT and return a dict with its useful properties.
### Response:
def create_hit(
self,
title,
description,
keywords,
reward,
duration_hours,
lifetime_days,
ad_url,
notification_url,
approve_requirement,
max_assignments,
us_only,
blacklist=None,
annotation=None,
):
"""Create the actual HIT and return a dict with its useful properties."""
frame_height = 600
mturk_question = self._external_question(ad_url, frame_height)
qualifications = self.build_hit_qualifications(
approve_requirement, us_only, blacklist
)
# We need a HIT_Type in order to register for REST notifications
hit_type_id = self.register_hit_type(
title, description, reward, duration_hours, keywords, qualifications
)
self.set_rest_notification(notification_url, hit_type_id)
params = {
"HITTypeId": hit_type_id,
"Question": mturk_question,
"LifetimeInSeconds": int(
datetime.timedelta(days=lifetime_days).total_seconds()
),
"MaxAssignments": max_assignments,
"UniqueRequestToken": self._request_token(),
}
if annotation:
params["RequesterAnnotation"] = annotation
response = self.mturk.create_hit_with_hit_type(**params)
if "HIT" not in response:
raise MTurkServiceException("HIT request was invalid for unknown reason.")
return self._translate_hit(response["HIT"]) |
def send_request(self, method, url, headers=None,
json_data=None, retry=True):
"""Send requests to Skybell."""
if not self.cache(CONST.ACCESS_TOKEN) and url != CONST.LOGIN_URL:
self.login()
if not headers:
headers = {}
if self.cache(CONST.ACCESS_TOKEN):
headers['Authorization'] = 'Bearer ' + \
self.cache(CONST.ACCESS_TOKEN)
headers['user-agent'] = (
'SkyBell/3.4.1 (iPhone9,2; iOS 11.0; loc=en_US; lang=en-US) '
'com.skybell.doorbell/1')
headers['content-type'] = 'application/json'
headers['accepts'] = '*/*'
headers['x-skybell-app-id'] = self.cache(CONST.APP_ID)
headers['x-skybell-client-id'] = self.cache(CONST.CLIENT_ID)
_LOGGER.debug("HTTP %s %s Request with headers: %s",
method, url, headers)
try:
response = getattr(self._session, method)(
url, headers=headers, json=json_data)
_LOGGER.debug("%s %s", response, response.text)
if response and response.status_code < 400:
return response
except RequestException as exc:
_LOGGER.warning("Skybell request exception: %s", exc)
if retry:
self.login()
return self.send_request(method, url, headers, json_data, False)
raise SkybellException(ERROR.REQUEST, "Retry failed") | Send requests to Skybell. | Below is the the instruction that describes the task:
### Input:
Send requests to Skybell.
### Response:
def send_request(self, method, url, headers=None,
json_data=None, retry=True):
"""Send requests to Skybell."""
if not self.cache(CONST.ACCESS_TOKEN) and url != CONST.LOGIN_URL:
self.login()
if not headers:
headers = {}
if self.cache(CONST.ACCESS_TOKEN):
headers['Authorization'] = 'Bearer ' + \
self.cache(CONST.ACCESS_TOKEN)
headers['user-agent'] = (
'SkyBell/3.4.1 (iPhone9,2; iOS 11.0; loc=en_US; lang=en-US) '
'com.skybell.doorbell/1')
headers['content-type'] = 'application/json'
headers['accepts'] = '*/*'
headers['x-skybell-app-id'] = self.cache(CONST.APP_ID)
headers['x-skybell-client-id'] = self.cache(CONST.CLIENT_ID)
_LOGGER.debug("HTTP %s %s Request with headers: %s",
method, url, headers)
try:
response = getattr(self._session, method)(
url, headers=headers, json=json_data)
_LOGGER.debug("%s %s", response, response.text)
if response and response.status_code < 400:
return response
except RequestException as exc:
_LOGGER.warning("Skybell request exception: %s", exc)
if retry:
self.login()
return self.send_request(method, url, headers, json_data, False)
raise SkybellException(ERROR.REQUEST, "Retry failed") |
def add_warning(self, key, *args, **kwargs):
"""Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key
"""
self._deprecations[key] = (args, kwargs) | Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key | Below is the the instruction that describes the task:
### Input:
Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key
### Response:
def add_warning(self, key, *args, **kwargs):
"""Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key
"""
self._deprecations[key] = (args, kwargs) |
def get_columns(self, *, top=None, skip=None):
"""
Return the columns of this table
:param int top: specify n columns to retrieve
:param int skip: specify n columns to skip
"""
url = self.build_url(self._endpoints.get('get_columns'))
params = {}
if top is not None:
params['$top'] = top
if skip is not None:
params['$skip'] = skip
params = None if not params else params
response = self.session.get(url, params=params)
if not response:
return iter(())
data = response.json()
return (self.column_constructor(parent=self, **{self._cloud_data_key: column})
for column in data.get('value', [])) | Return the columns of this table
:param int top: specify n columns to retrieve
:param int skip: specify n columns to skip | Below is the the instruction that describes the task:
### Input:
Return the columns of this table
:param int top: specify n columns to retrieve
:param int skip: specify n columns to skip
### Response:
def get_columns(self, *, top=None, skip=None):
"""
Return the columns of this table
:param int top: specify n columns to retrieve
:param int skip: specify n columns to skip
"""
url = self.build_url(self._endpoints.get('get_columns'))
params = {}
if top is not None:
params['$top'] = top
if skip is not None:
params['$skip'] = skip
params = None if not params else params
response = self.session.get(url, params=params)
if not response:
return iter(())
data = response.json()
return (self.column_constructor(parent=self, **{self._cloud_data_key: column})
for column in data.get('value', [])) |
def sp_to_rshares(self, sp, voting_power=10000, vote_pct=10000):
""" Obtain the r-shares
:param number sp: Steem Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting participation (100% = 10000)
"""
vesting_shares = int(self.sp_to_vests(sp) * 1e6)
used_power = int((voting_power * vote_pct) / 10000);
max_vote_denom = self.vote_power_reserve_rate * (5 * 60 * 60 * 24) / (60 * 60 * 24);
used_power = int((used_power + max_vote_denom - 1) / max_vote_denom)
rshares = ((vesting_shares * used_power) / 10000)
return rshares | Obtain the r-shares
:param number sp: Steem Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting participation (100% = 10000) | Below is the the instruction that describes the task:
### Input:
Obtain the r-shares
:param number sp: Steem Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting participation (100% = 10000)
### Response:
def sp_to_rshares(self, sp, voting_power=10000, vote_pct=10000):
""" Obtain the r-shares
:param number sp: Steem Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting participation (100% = 10000)
"""
vesting_shares = int(self.sp_to_vests(sp) * 1e6)
used_power = int((voting_power * vote_pct) / 10000);
max_vote_denom = self.vote_power_reserve_rate * (5 * 60 * 60 * 24) / (60 * 60 * 24);
used_power = int((used_power + max_vote_denom - 1) / max_vote_denom)
rshares = ((vesting_shares * used_power) / 10000)
return rshares |
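A standalone numeric sketch of the same r-shares arithmetic; the vests-per-SP factor and reserve rate below are placeholders, since the real values come from chain state:

VESTS_PER_SP = 2000.0          # placeholder conversion factor (not the real chain value)
VOTE_POWER_RESERVE_RATE = 10   # placeholder reserve rate

def sp_to_rshares_sketch(sp, voting_power=10000, vote_pct=10000):
    vesting_shares = int(sp * VESTS_PER_SP * 1e6)
    used_power = int((voting_power * vote_pct) / 10000)
    max_vote_denom = VOTE_POWER_RESERVE_RATE * (5 * 60 * 60 * 24) / (60 * 60 * 24)
    used_power = int((used_power + max_vote_denom - 1) / max_vote_denom)
    return (vesting_shares * used_power) / 10000

print(sp_to_rshares_sketch(100))  # full-power, full-weight vote on 100 SP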
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation
"""
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
smean = np.sin(ang).mean(axis=axis)
cmean = np.cos(ang).mean(axis=axis)
rmean = np.sqrt(smean**2 + cmean**2)
# Calculate the circular standard deviation
circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
return circstd | NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation | Below is the the instruction that describes the task:
### Input:
NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation
### Response:
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation
"""
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
smean = np.sin(ang).mean(axis=axis)
cmean = np.cos(ang).mean(axis=axis)
rmean = np.sqrt(smean**2 + cmean**2)
# Calculate the circular standard deviation
circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
return circstd |
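A quick usage sketch on synthetic angle data containing NaNs, assuming the function above is in scope:

import numpy as np

angles = np.array([0.1, 6.2, np.nan, 0.05])
print(nan_circstd(angles))                      # NaN is ignored; values near 0 and
                                                # near 2*pi count as close together
print(nan_circstd(np.array([np.nan, np.nan])))  # all-NaN input -> nan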
def update_external_account(resource_root, account):
"""
Update an external account
@param resource_root: The root Resource object.
@param account: Account to update, account name must be specified.
@return: An ApiExternalAccount object, representing the updated external account
"""
return call(resource_root.put,
EXTERNAL_ACCOUNT_PATH % ("update",),
ApiExternalAccount, False, data=account) | Update an external account
@param resource_root: The root Resource object.
@param account: Account to update, account name must be specified.
@return: An ApiExternalAccount object, representing the updated external account | Below is the the instruction that describes the task:
### Input:
Update an external account
@param resource_root: The root Resource object.
@param account: Account to update, account name must be specified.
@return: An ApiExternalAccount object, representing the updated external account
### Response:
def update_external_account(resource_root, account):
"""
Update an external account
@param resource_root: The root Resource object.
@param account: Account to update, account name must be specified.
@return: An ApiExternalAccount object, representing the updated external account
"""
return call(resource_root.put,
EXTERNAL_ACCOUNT_PATH % ("update",),
ApiExternalAccount, False, data=account) |
def _service_by_name(name):
'''
Return the service info for a service by label, filename or path
'''
services = _available_services()
name = name.lower()
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['filename'])
if basename.lower() == name:
# Match on basename
return service
return False | Return the service info for a service by label, filename or path | Below is the the instruction that describes the task:
### Input:
Return the service info for a service by label, filename or path
### Response:
def _service_by_name(name):
'''
Return the service info for a service by label, filename or path
'''
services = _available_services()
name = name.lower()
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['filename'])
if basename.lower() == name:
# Match on basename
return service
return False |
def day_counts(index):
"""Days between DatetimeIndex values as a :any:`pandas.Series`.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
The index for which to get day counts.
Returns
-------
day_counts : :any:`pandas.Series`
A :any:`pandas.Series` with counts of days between periods. Counts are
given on start dates of periods.
"""
# dont affect the original data
index = index.copy()
if len(index) == 0:
return pd.Series([], index=index)
timedeltas = (index[1:] - index[:-1]).append(pd.TimedeltaIndex([pd.NaT]))
timedelta_days = timedeltas.total_seconds() / (60 * 60 * 24)
return pd.Series(timedelta_days, index=index) | Days between DatetimeIndex values as a :any:`pandas.Series`.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
The index for which to get day counts.
Returns
-------
day_counts : :any:`pandas.Series`
A :any:`pandas.Series` with counts of days between periods. Counts are
given on start dates of periods. | Below is the the instruction that describes the task:
### Input:
Days between DatetimeIndex values as a :any:`pandas.Series`.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
The index for which to get day counts.
Returns
-------
day_counts : :any:`pandas.Series`
A :any:`pandas.Series` with counts of days between periods. Counts are
given on start dates of periods.
### Response:
def day_counts(index):
"""Days between DatetimeIndex values as a :any:`pandas.Series`.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
The index for which to get day counts.
Returns
-------
day_counts : :any:`pandas.Series`
A :any:`pandas.Series` with counts of days between periods. Counts are
given on start dates of periods.
"""
# dont affect the original data
index = index.copy()
if len(index) == 0:
return pd.Series([], index=index)
timedeltas = (index[1:] - index[:-1]).append(pd.TimedeltaIndex([pd.NaT]))
timedelta_days = timedeltas.total_seconds() / (60 * 60 * 24)
return pd.Series(timedelta_days, index=index) |
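A small usage sketch on an irregular index, assuming the function above is in scope:

import pandas as pd

index = pd.DatetimeIndex(["2021-01-01", "2021-01-31", "2021-03-02"])
print(day_counts(index))
# 2021-01-01    30.0
# 2021-01-31    30.0
# 2021-03-02     NaN   (the last period has no end date, so its count is NaN)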
def RZToplanarPotential(RZPot):
"""
NAME:
RZToplanarPotential
PURPOSE:
convert an RZPotential to a planarPotential in the mid-plane (z=0)
INPUT:
RZPot - RZPotential instance or list of such instances (existing planarPotential instances are just copied to the output)
OUTPUT:
planarPotential instance(s)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
RZPot= flatten(RZPot)
if isinstance(RZPot,list):
out= []
for pot in RZPot:
if isinstance(pot,planarPotential) and not pot.isNonAxi:
out.append(pot)
elif isinstance(pot,Potential) and not pot.isNonAxi:
out.append(planarPotentialFromRZPotential(pot))
else:
raise PotentialError("Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances")
return out
elif isinstance(RZPot,Potential) and not RZPot.isNonAxi:
return planarPotentialFromRZPotential(RZPot)
elif isinstance(RZPot,planarPotential) and not RZPot.isNonAxi:
return RZPot
else:
raise PotentialError("Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances") | NAME:
RZToplanarPotential
PURPOSE:
convert an RZPotential to a planarPotential in the mid-plane (z=0)
INPUT:
RZPot - RZPotential instance or list of such instances (existing planarPotential instances are just copied to the output)
OUTPUT:
planarPotential instance(s)
HISTORY:
2010-07-13 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
RZToplanarPotential
PURPOSE:
convert an RZPotential to a planarPotential in the mid-plane (z=0)
INPUT:
RZPot - RZPotential instance or list of such instances (existing planarPotential instances are just copied to the output)
OUTPUT:
planarPotential instance(s)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
### Response:
def RZToplanarPotential(RZPot):
"""
NAME:
RZToplanarPotential
PURPOSE:
convert an RZPotential to a planarPotential in the mid-plane (z=0)
INPUT:
RZPot - RZPotential instance or list of such instances (existing planarPotential instances are just copied to the output)
OUTPUT:
planarPotential instance(s)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
RZPot= flatten(RZPot)
if isinstance(RZPot,list):
out= []
for pot in RZPot:
if isinstance(pot,planarPotential) and not pot.isNonAxi:
out.append(pot)
elif isinstance(pot,Potential) and not pot.isNonAxi:
out.append(planarPotentialFromRZPotential(pot))
else:
raise PotentialError("Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances")
return out
elif isinstance(RZPot,Potential) and not RZPot.isNonAxi:
return planarPotentialFromRZPotential(RZPot)
elif isinstance(RZPot,planarPotential) and not RZPot.isNonAxi:
return RZPot
else:
raise PotentialError("Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances") |
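A short usage sketch with galpy, assuming a standard installation where both names are importable from galpy.potential:

from galpy.potential import MWPotential2014, RZToplanarPotential

# MWPotential2014 is a list of axisymmetric potentials, so the list branch above runs.
planar_pots = RZToplanarPotential(MWPotential2014)
print(len(planar_pots))  # one planarPotential per component of the input list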
def _get_block_transactions(
self,
transaction_root: Hash32,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
"""
Memoizable version of `get_block_transactions`
"""
for encoded_transaction in self._get_block_transaction_data(self.db, transaction_root):
yield rlp.decode(encoded_transaction, sedes=transaction_class) | Memoizable version of `get_block_transactions` | Below is the the instruction that describes the task:
### Input:
Memoizable version of `get_block_transactions`
### Response:
def _get_block_transactions(
self,
transaction_root: Hash32,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
"""
Memoizable version of `get_block_transactions`
"""
for encoded_transaction in self._get_block_transaction_data(self.db, transaction_root):
yield rlp.decode(encoded_transaction, sedes=transaction_class) |
def decode(name, byts, **opts):
'''
Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer.
'''
for name in name.split(','):
if name.startswith('+'):
byts = encode(name[1:], byts, **opts)
continue
func = decoders.get(name)
if func is None:
raise s_exc.NoSuchDecoder(name=name)
byts = func(byts, **opts)
return byts | Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer. | Below is the the instruction that describes the task:
### Input:
Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer.
### Response:
def decode(name, byts, **opts):
'''
Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer.
'''
for name in name.split(','):
if name.startswith('+'):
byts = encode(name[1:], byts, **opts)
continue
func = decoders.get(name)
if func is None:
raise s_exc.NoSuchDecoder(name=name)
byts = func(byts, **opts)
return byts |
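A simplified, self-contained sketch of the same '+'-prefix chaining idea, using throwaway base64-only registries rather than the real decoder tables:

import base64

encoders = {'base64': base64.b64encode}
decoders = {'base64': base64.b64decode}

def decode_chain(names, byts):
    for name in names.split(','):
        if name.startswith('+'):
            byts = encoders[name[1:]](byts)  # '+' means encode at this step
            continue
        byts = decoders[name](byts)
    return byts

print(decode_chain('+base64,base64', b'hello'))  # encode then decode -> b'hello'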
def get_qrcode_url(self, ticket, data=None):
"""
Exchange the ticket for a QR code URL
For details, please refer to
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param ticket: QR code ticket
:param data: extra data
:return: QR code URL
"""
url = 'https://we.qq.com/d/{ticket}'.format(ticket=ticket)
if data:
if isinstance(data, (dict, tuple, list)):
data = urllib.urlencode(data)
data = to_text(base64.b64encode(to_binary(data)))
url = '{base}#{data}'.format(base=url, data=data)
return url | Exchange the ticket for a QR code URL
For details, please refer to
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param ticket: QR code ticket
:param data: extra data
:return: QR code URL | Below is the the instruction that describes the task:
### Input:
Exchange the ticket for a QR code URL
For details, please refer to
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param ticket: QR code ticket
:param data: extra data
:return: QR code URL
### Response:
def get_qrcode_url(self, ticket, data=None):
"""
Exchange the ticket for a QR code URL
For details, please refer to
https://iot.weixin.qq.com/wiki/new/index.html?page=3-4-4
:param ticket: QR code ticket
:param data: extra data
:return: QR code URL
"""
url = 'https://we.qq.com/d/{ticket}'.format(ticket=ticket)
if data:
if isinstance(data, (dict, tuple, list)):
data = urllib.urlencode(data)
data = to_text(base64.b64encode(to_binary(data)))
url = '{base}#{data}'.format(base=url, data=data)
return url |
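A standalone sketch of just the URL construction, with placeholder values; note the original targets Python 2's urllib, while this sketch uses Python 3's urllib.parse:

import base64
from urllib.parse import urlencode

ticket = 'TICKET123'              # placeholder ticket value
data = {'device_id': 'abc'}       # placeholder extra data
payload = base64.b64encode(urlencode(data).encode()).decode()
print('https://we.qq.com/d/{ticket}#{data}'.format(ticket=ticket, data=payload))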
def do_classdesc(self, parent=None, ident=0):
"""
Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_CLASSDESC className serialVersionUID newHandle classDescInfo
# classDescInfo:
# classDescFlags fields classAnnotation superClassDesc
# classDescFlags:
# (byte) // Defined in Terminal Symbols and Constants
# fields:
# (short)<count> fieldDesc[count]
# fieldDesc:
# primitiveDesc
# objectDesc
# primitiveDesc:
# prim_typecode fieldName
# objectDesc:
# obj_typecode fieldName className1
clazz = JavaClass()
log_debug("[classdesc]", ident)
class_name = self._readString()
clazz.name = class_name
log_debug("Class name: %s" % class_name, ident)
# serialVersionUID is a Java (signed) long => 8 bytes
serialVersionUID, classDescFlags = self._readStruct(">qB")
clazz.serialVersionUID = serialVersionUID
clazz.flags = classDescFlags
self._add_reference(clazz, ident)
log_debug(
"Serial: 0x{0:X} / {0:d} - classDescFlags: 0x{1:X} {2}".format(
serialVersionUID, classDescFlags, OpCodeDebug.flags(classDescFlags)
),
ident,
)
(length,) = self._readStruct(">H")
log_debug("Fields num: 0x{0:X}".format(length), ident)
clazz.fields_names = []
clazz.fields_types = []
for fieldId in range(length):
(typecode,) = self._readStruct(">B")
field_name = self._readString()
field_type = self._convert_char_to_type(typecode)
log_debug("> Reading field {0}".format(field_name), ident)
if field_type == self.TYPE_ARRAY:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
)
if type(field_type) is not JavaString:
raise AssertionError(
"Field type must be a JavaString, "
"not {0}".format(type(field_type))
)
elif field_type == self.TYPE_OBJECT:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
)
if type(field_type) is JavaClass:
# FIXME: ugly trick
field_type = JavaString(field_type.name)
if type(field_type) is not JavaString:
raise AssertionError(
"Field type must be a JavaString, "
"not {0}".format(type(field_type))
)
log_debug(
"< FieldName: 0x{0:X} Name:{1} Type:{2} ID:{3}".format(
typecode, field_name, field_type, fieldId
),
ident,
)
assert field_name is not None
assert field_type is not None
clazz.fields_names.append(field_name)
clazz.fields_types.append(field_type)
if parent:
parent.__fields = clazz.fields_names
parent.__types = clazz.fields_types
# classAnnotation
(opid,) = self._readStruct(">B")
log_debug(
"OpCode: 0x{0:X} -- {1} (classAnnotation)".format(
opid, OpCodeDebug.op_id(opid)
),
ident,
)
if opid != self.TC_ENDBLOCKDATA:
raise NotImplementedError("classAnnotation isn't implemented yet")
# superClassDesc
log_debug("Reading Super Class of {0}".format(clazz.name), ident)
_, superclassdesc = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE)
)
log_debug(
"Super Class for {0}: {1}".format(clazz.name, str(superclassdesc)), ident
)
clazz.superclass = superclassdesc
return clazz | Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object | Below is the the instruction that describes the task:
### Input:
Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
### Response:
def do_classdesc(self, parent=None, ident=0):
"""
Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
"""
# TC_CLASSDESC className serialVersionUID newHandle classDescInfo
# classDescInfo:
# classDescFlags fields classAnnotation superClassDesc
# classDescFlags:
# (byte) // Defined in Terminal Symbols and Constants
# fields:
# (short)<count> fieldDesc[count]
# fieldDesc:
# primitiveDesc
# objectDesc
# primitiveDesc:
# prim_typecode fieldName
# objectDesc:
# obj_typecode fieldName className1
clazz = JavaClass()
log_debug("[classdesc]", ident)
class_name = self._readString()
clazz.name = class_name
log_debug("Class name: %s" % class_name, ident)
# serialVersionUID is a Java (signed) long => 8 bytes
serialVersionUID, classDescFlags = self._readStruct(">qB")
clazz.serialVersionUID = serialVersionUID
clazz.flags = classDescFlags
self._add_reference(clazz, ident)
log_debug(
"Serial: 0x{0:X} / {0:d} - classDescFlags: 0x{1:X} {2}".format(
serialVersionUID, classDescFlags, OpCodeDebug.flags(classDescFlags)
),
ident,
)
(length,) = self._readStruct(">H")
log_debug("Fields num: 0x{0:X}".format(length), ident)
clazz.fields_names = []
clazz.fields_types = []
for fieldId in range(length):
(typecode,) = self._readStruct(">B")
field_name = self._readString()
field_type = self._convert_char_to_type(typecode)
log_debug("> Reading field {0}".format(field_name), ident)
if field_type == self.TYPE_ARRAY:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
)
if type(field_type) is not JavaString:
raise AssertionError(
"Field type must be a JavaString, "
"not {0}".format(type(field_type))
)
elif field_type == self.TYPE_OBJECT:
_, field_type = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
)
if type(field_type) is JavaClass:
# FIXME: ugly trick
field_type = JavaString(field_type.name)
if type(field_type) is not JavaString:
raise AssertionError(
"Field type must be a JavaString, "
"not {0}".format(type(field_type))
)
log_debug(
"< FieldName: 0x{0:X} Name:{1} Type:{2} ID:{3}".format(
typecode, field_name, field_type, fieldId
),
ident,
)
assert field_name is not None
assert field_type is not None
clazz.fields_names.append(field_name)
clazz.fields_types.append(field_type)
if parent:
parent.__fields = clazz.fields_names
parent.__types = clazz.fields_types
# classAnnotation
(opid,) = self._readStruct(">B")
log_debug(
"OpCode: 0x{0:X} -- {1} (classAnnotation)".format(
opid, OpCodeDebug.op_id(opid)
),
ident,
)
if opid != self.TC_ENDBLOCKDATA:
raise NotImplementedError("classAnnotation isn't implemented yet")
# superClassDesc
log_debug("Reading Super Class of {0}".format(clazz.name), ident)
_, superclassdesc = self._read_and_exec_opcode(
ident=ident + 1, expect=(self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE)
)
log_debug(
"Super Class for {0}: {1}".format(clazz.name, str(superclassdesc)), ident
)
clazz.superclass = superclassdesc
return clazz |
def f_get_default(self, name, default=None, fast_access=True, with_links=True,
shortcuts=True, max_depth=None, auto_load=False):
""" Similar to `f_get`, but returns the default value if `name` is not found in the
trajectory.
This function uses the `f_get` method and will return the default value
in case `f_get` raises an AttributeError or a DataNotInStorageError.
Other errors are not handled.
In contrast to `f_get`, fast access is True by default.
"""
try:
return self.f_get(name, fast_access=fast_access,
shortcuts=shortcuts,
max_depth=max_depth,
auto_load=auto_load,
with_links=with_links)
except (AttributeError, pex.DataNotInStorageError):
return default | Similar to `f_get`, but returns the default value if `name` is not found in the
trajectory.
This function uses the `f_get` method and will return the default value
in case `f_get` raises an AttributeError or a DataNotInStorageError.
Other errors are not handled.
In contrast to `f_get`, fast access is True by default. | Below is the the instruction that describes the task:
### Input:
Similar to `f_get`, but returns the default value if `name` is not found in the
trajectory.
This function uses the `f_get` method and will return the default value
in case `f_get` raises an AttributeError or a DataNotInStorageError.
Other errors are not handled.
In contrast to `f_get`, fast access is True by default.
### Response:
def f_get_default(self, name, default=None, fast_access=True, with_links=True,
shortcuts=True, max_depth=None, auto_load=False):
""" Similar to `f_get`, but returns the default value if `name` is not found in the
trajectory.
This function uses the `f_get` method and will return the default value
in case `f_get` raises an AttributeError or a DataNotInStorageError.
Other errors are not handled.
In contrast to `f_get`, fast access is True by default.
"""
try:
return self.f_get(name, fast_access=fast_access,
shortcuts=shortcuts,
max_depth=max_depth,
auto_load=auto_load,
with_links=with_links)
except (AttributeError, pex.DataNotInStorageError):
return default |
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str | Remove quotes from a string. | Below is the the instruction that describes the task:
### Input:
Remove quotes from a string.
### Response:
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str |
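Behaviour sketch for the three cases handled above, assuming the function is in scope:

print(unquote('"a \\"quoted\\" value"'))   # -> a "quoted" value
print(unquote('<user@example.com>'))       # -> user@example.com
print(unquote('plain'))                    # -> plain (returned unchanged)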
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListPermissionContext for this SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
"""
if self._context is None:
self._context = SyncListPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=self._solution['identity'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListPermissionContext for this SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext | Below is the the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListPermissionContext for this SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListPermissionContext for this SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
"""
if self._context is None:
self._context = SyncListPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
identity=self._solution['identity'],
)
return self._context |
def optimize_var(self):
""" Optimize the VAR model's hyperparameters (such as regularization).
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations.
"""
if self.activations_ is None:
raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
self.var_.optimize(self.activations_[self.trial_mask_, :, :])
return self | Optimize the VAR model's hyperparameters (such as regularization).
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations. | Below is the the instruction that describes the task:
### Input:
Optimize the VAR model's hyperparameters (such as regularization).
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations.
### Response:
def optimize_var(self):
""" Optimize the VAR model's hyperparameters (such as regularization).
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations.
"""
if self.activations_ is None:
raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
self.var_.optimize(self.activations_[self.trial_mask_, :, :])
return self |
def get_jids():
'''
Return all job data from all returners
'''
ret = {}
for returner_ in __opts__[CONFIG_KEY]:
ret.update(_mminion().returners['{0}.get_jids'.format(returner_)]())
return ret | Return all job data from all returners | Below is the the instruction that describes the task:
### Input:
Return all job data from all returners
### Response:
def get_jids():
'''
Return all job data from all returners
'''
ret = {}
for returner_ in __opts__[CONFIG_KEY]:
ret.update(_mminion().returners['{0}.get_jids'.format(returner_)]())
return ret |
def _trim_external_pores(self, shape):
r'''
'''
# Find all pores within the domain
Ps = topotools.isoutside(coords=self['pore.coords'], shape=shape)
self['pore.external'] = False
self['pore.external'][Ps] = True
# Find which internal pores are delaunay
Ps = (~self['pore.external'])*self['pore.delaunay']
# Find all pores connected to an internal delaunay pore
Ps = self.find_neighbor_pores(pores=Ps, include_input=True)
# Mark them all as keepers
self['pore.keep'] = False
self['pore.keep'][Ps] = True
# Trim all bad pores
topotools.trim(network=self, pores=~self['pore.keep'])
# Now label boundary pores
self['pore.boundary'] = False
self['pore.boundary'] = self['pore.delaunay']*self['pore.external']
# Label Voronoi pores on boundary
Ps = self.find_neighbor_pores(pores=self.pores('boundary'))
Ps = self['pore.voronoi']*self.tomask(pores=Ps)
self['pore.boundary'][Ps] = True
# Label Voronoi and interconnect throats on boundary
self['throat.boundary'] = False
Ps = self.pores('boundary')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.boundary'][Ts] = True
# Trim throats between Delaunay boundary pores
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
topotools.trim(network=self, throats=Ts)
# Move Delaunay boundary pores to centroid of Voronoi facet
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
for P in Ps:
Ns = self.find_neighbor_pores(pores=P)
Ns = Ps = self['pore.voronoi']*self.tomask(pores=Ns)
coords = sp.mean(self['pore.coords'][Ns], axis=0)
self['pore.coords'][P] = coords
self['pore.internal'] = ~self['pore.boundary']
Ps = self.pores('internal')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.internal'] = False
self['throat.internal'][Ts] = True
# Label surface pores and throats between boundary and internal
Ts = self.throats(['boundary', 'internal'], mode='not')
self['throat.surface'] = False
self['throat.surface'][Ts] = True
surf_pores = self['throat.conns'][Ts].flatten()
surf_pores = sp.unique(surf_pores[~self['pore.boundary'][surf_pores]])
self['pore.surface'] = False
self['pore.surface'][surf_pores] = True
# Clean-up
del self['pore.external']
del self['pore.keep'] | r''' | Below is the the instruction that describes the task:
### Input:
r'''
### Response:
def _trim_external_pores(self, shape):
r'''
'''
# Find all pores within the domain
Ps = topotools.isoutside(coords=self['pore.coords'], shape=shape)
self['pore.external'] = False
self['pore.external'][Ps] = True
# Find which internal pores are delaunay
Ps = (~self['pore.external'])*self['pore.delaunay']
# Find all pores connected to an internal delaunay pore
Ps = self.find_neighbor_pores(pores=Ps, include_input=True)
# Mark them all as keepers
self['pore.keep'] = False
self['pore.keep'][Ps] = True
# Trim all bad pores
topotools.trim(network=self, pores=~self['pore.keep'])
# Now label boundary pores
self['pore.boundary'] = False
self['pore.boundary'] = self['pore.delaunay']*self['pore.external']
# Label Voronoi pores on boundary
Ps = self.find_neighbor_pores(pores=self.pores('boundary'))
Ps = self['pore.voronoi']*self.tomask(pores=Ps)
self['pore.boundary'][Ps] = True
# Label Voronoi and interconnect throats on boundary
self['throat.boundary'] = False
Ps = self.pores('boundary')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.boundary'][Ts] = True
# Trim throats between Delaunay boundary pores
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
topotools.trim(network=self, throats=Ts)
# Move Delaunay boundary pores to centroid of Voronoi facet
Ps = self.pores(labels=['boundary', 'delaunay'], mode='xnor')
for P in Ps:
Ns = self.find_neighbor_pores(pores=P)
Ns = Ps = self['pore.voronoi']*self.tomask(pores=Ns)
coords = sp.mean(self['pore.coords'][Ns], axis=0)
self['pore.coords'][P] = coords
self['pore.internal'] = ~self['pore.boundary']
Ps = self.pores('internal')
Ts = self.find_neighbor_throats(pores=Ps, mode='xnor')
self['throat.internal'] = False
self['throat.internal'][Ts] = True
# Label surface pores and throats between boundary and internal
Ts = self.throats(['boundary', 'internal'], mode='not')
self['throat.surface'] = False
self['throat.surface'][Ts] = True
surf_pores = self['throat.conns'][Ts].flatten()
surf_pores = sp.unique(surf_pores[~self['pore.boundary'][surf_pores]])
self['pore.surface'] = False
self['pore.surface'][surf_pores] = True
# Clean-up
del self['pore.external']
del self['pore.keep'] |
def dft_optsize_same(im0, im1):
"""Resize 2 image same size for optimal DFT and computes it
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
Returns
-------
dft0: 2d array
The dft of the first image
dft1: 2d array
The dft of the second image
Notes
-----
dft0 and dft1 will have the same size
"""
im0 = np.asarray(im0)
im1 = np.asarray(im1)
# save shape
shape0 = im0.shape
shape1 = im1.shape
# get optimal size
ys = max(cv2.getOptimalDFTSize(shape0[0]),
cv2.getOptimalDFTSize(shape1[0]))
xs = max(cv2.getOptimalDFTSize(shape0[1]),
cv2.getOptimalDFTSize(shape1[1]))
shape = [ys, xs]
f0 = dft_optsize(im0, shape=shape)
f1 = dft_optsize(im1, shape=shape)
return f0, f1 | Resize 2 images to the same size for optimal DFT and compute it
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
Returns
-------
dft0: 2d array
The dft of the first image
dft1: 2d array
The dft of the second image
Notes
-----
dft0 and dft1 will have the same size | Below is the the instruction that describes the task:
### Input:
Resize 2 images to the same size for optimal DFT and compute it
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
Returns
-------
dft0: 2d array
The dft of the first image
dft1: 2d array
The dft of the second image
Notes
-----
dft0 and dft1 will have the same size
### Response:
def dft_optsize_same(im0, im1):
"""Resize 2 image same size for optimal DFT and computes it
Parameters
----------
im0: 2d array
First image
im1: 2d array
Second image
Returns
-------
dft0: 2d array
The dft of the first image
dft1: 2d array
The dft of the second image
Notes
-----
dft0 and dft1 will have the same size
"""
im0 = np.asarray(im0)
im1 = np.asarray(im1)
# save shape
shape0 = im0.shape
shape1 = im1.shape
# get optimal size
ys = max(cv2.getOptimalDFTSize(shape0[0]),
cv2.getOptimalDFTSize(shape1[0]))
xs = max(cv2.getOptimalDFTSize(shape0[1]),
cv2.getOptimalDFTSize(shape1[1]))
shape = [ys, xs]
f0 = dft_optsize(im0, shape=shape)
f1 = dft_optsize(im1, shape=shape)
return f0, f1 |
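A usage sketch on two random images of different sizes; it assumes OpenCV is installed and that the companion dft_optsize helper called above is importable from the same module:

import numpy as np

im0 = np.random.rand(97, 131).astype(np.float32)
im1 = np.random.rand(120, 100).astype(np.float32)
f0, f1 = dft_optsize_same(im0, im1)
print(f0.shape, f1.shape)  # both transforms share the same padded, DFT-friendly size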
def verify_reg(self, obj, object_type):
"""Verify a register."""
# How to verify:
# types must match
# indexes must be checked
if obj.name not in self.global_symtab:
raise QasmError('Cannot find definition for', object_type, "'"
+ obj.name + "'", 'at line', str(obj.line),
'file', obj.file)
g_sym = self.global_symtab[obj.name]
if g_sym.type != object_type:
raise QasmError("Type for '" + g_sym.name + "' should be '"
+ object_type + "' but was found to be '"
+ g_sym.type + "'", "line", str(obj.line),
"file", obj.file)
if obj.type == 'indexed_id':
bound = g_sym.index
ndx = obj.index
if ndx < 0 or ndx >= bound:
raise QasmError("Register index for '" + g_sym.name
+ "' out of bounds. Index is", str(ndx),
"bound is 0 <= index <", str(bound),
"at line", str(obj.line), "file", obj.file) | Verify a register. | Below is the the instruction that describes the task:
### Input:
Verify a register.
### Response:
def verify_reg(self, obj, object_type):
"""Verify a register."""
# How to verify:
# types must match
# indexes must be checked
if obj.name not in self.global_symtab:
raise QasmError('Cannot find definition for', object_type, "'"
+ obj.name + "'", 'at line', str(obj.line),
'file', obj.file)
g_sym = self.global_symtab[obj.name]
if g_sym.type != object_type:
raise QasmError("Type for '" + g_sym.name + "' should be '"
+ object_type + "' but was found to be '"
+ g_sym.type + "'", "line", str(obj.line),
"file", obj.file)
if obj.type == 'indexed_id':
bound = g_sym.index
ndx = obj.index
if ndx < 0 or ndx >= bound:
raise QasmError("Register index for '" + g_sym.name
+ "' out of bounds. Index is", str(ndx),
"bound is 0 <= index <", str(bound),
"at line", str(obj.line), "file", obj.file) |
def get_segment_leaderboard(self, segment_id, gender=None, age_group=None, weight_class=None,
following=None, club_id=None, timeframe=None, top_results_limit=None,
page=None, context_entries = None):
"""
Gets the leaderboard for a segment.
http://strava.github.io/api/v3/segments/#leaderboard
Note that by default Strava will return the top 10 results, and if the current user has ridden
that segment, the current user's result along with the two results above in rank and the two
results below will be included. The top X results can be configured by setting the top_results_limit
parameter; however, the other 5 results will be included if the current user has ridden that segment.
(i.e. if you specify top_results_limit=15, you will get a total of 20 entries back.)
:param segment_id: ID of the segment.
:type segment_id: int
:param gender: (optional) 'M' or 'F'
:type gender: str
:param age_group: (optional) '0_24', '25_34', '35_44', '45_54', '55_64', '65_plus'
:type age_group: str
:param weight_class: (optional) pounds '0_124', '125_149', '150_164', '165_179', '180_199', '200_plus'
or kilograms '0_54', '55_64', '65_74', '75_84', '85_94', '95_plus'
:type weight_class: str
:param following: (optional) Limit to athletes current user is following.
:type following: bool
:param club_id: (optional) limit to specific club
:type club_id: int
:param timeframe: (optional) 'this_year', 'this_month', 'this_week', 'today'
:type timeframe: str
:param top_results_limit: (optional, strava default is 10 + 5 from end) How many of leading leaderboard entries to display.
See description for why this is a little confusing.
:type top_results_limit: int
:param page: (optional, strava default is 1) Page number of leaderboard to return, sorted by highest ranking leaders
:type page: int
:param context_entries: (optional, strava default is 2, max is 15) number of entries surrounding requesting athlete to return
:type context_entries: int
:return: The SegmentLeaderboard for the specified page (default: 1)
:rtype: :class:`stravalib.model.SegmentLeaderboard`
"""
params = {}
if gender is not None:
if gender.upper() not in ('M', 'F'):
raise ValueError("Invalid gender: {0}. Possible values: 'M' or 'F'".format(gender))
params['gender'] = gender
valid_age_groups = ('0_24', '25_34', '35_44', '45_54', '55_64', '65_plus')
if age_group is not None:
if not age_group in valid_age_groups:
raise ValueError("Invalid age group: {0}. Possible values: {1!r}".format(age_group, valid_age_groups))
params['age_group'] = age_group
valid_weight_classes = ('0_124', '125_149', '150_164', '165_179', '180_199', '200_plus',
'0_54', '55_64', '65_74', '75_84', '85_94', '95_plus')
if weight_class is not None:
if not weight_class in valid_weight_classes:
raise ValueError("Invalid weight class: {0}. Possible values: {1!r}".format(weight_class, valid_weight_classes))
params['weight_class'] = weight_class
if following is not None:
params['following'] = int(following)
if club_id is not None:
params['club_id'] = club_id
if timeframe is not None:
valid_timeframes = 'this_year', 'this_month', 'this_week', 'today'
if not timeframe in valid_timeframes:
raise ValueError("Invalid timeframe: {0}. Possible values: {1!r}".format(timeframe, valid_timeframes))
params['date_range'] = timeframe
if top_results_limit is not None:
params['per_page'] = top_results_limit
if page is not None:
params['page'] = page
if context_entries is not None:
params['context_entries'] = context_entries
return model.SegmentLeaderboard.deserialize(self.protocol.get('/segments/{id}/leaderboard',
id=segment_id,
**params),
bind_client=self) | Gets the leaderboard for a segment.
http://strava.github.io/api/v3/segments/#leaderboard
Note that by default Strava will return the top 10 results, and if the current user has ridden
that segment, the current user's result along with the two results above in rank and the two
results below will be included. The top X results can be configured by setting the top_results_limit
parameter; however, the other 5 results will be included if the current user has ridden that segment.
(i.e. if you specify top_results_limit=15, you will get a total of 20 entries back.)
:param segment_id: ID of the segment.
:type segment_id: int
:param gender: (optional) 'M' or 'F'
:type gender: str
:param age_group: (optional) '0_24', '25_34', '35_44', '45_54', '55_64', '65_plus'
:type age_group: str
:param weight_class: (optional) pounds '0_124', '125_149', '150_164', '165_179', '180_199', '200_plus'
or kilograms '0_54', '55_64', '65_74', '75_84', '85_94', '95_plus'
:type weight_class: str
:param following: (optional) Limit to athletes current user is following.
:type following: bool
:param club_id: (optional) limit to specific club
:type club_id: int
:param timeframe: (optional) 'this_year', 'this_month', 'this_week', 'today'
:type timeframe: str
:param top_results_limit: (optional, strava default is 10 + 5 from end) How many of leading leaderboard entries to display.
See description for why this is a little confusing.
:type top_results_limit: int
:param page: (optional, strava default is 1) Page number of leaderboard to return, sorted by highest ranking leaders
:type page: int
:param context_entries: (optional, strava default is 2, max is 15) number of entries surrounding requesting athlete to return
:type context_entries: int
:return: The SegmentLeaderboard for the specified page (default: 1)
:rtype: :class:`stravalib.model.SegmentLeaderboard` | Below is the the instruction that describes the task:
### Input:
Gets the leaderboard for a segment.
http://strava.github.io/api/v3/segments/#leaderboard
Note that by default Strava will return the top 10 results, and if the current user has ridden
that segment, the current user's result along with the two results above in rank and the two
results below will be included. The top X results can be configured by setting the top_results_limit
parameter; however, the other 5 results will be included if the current user has ridden that segment.
(i.e. if you specify top_results_limit=15, you will get a total of 20 entries back.)
:param segment_id: ID of the segment.
:type segment_id: int
:param gender: (optional) 'M' or 'F'
:type gender: str
:param age_group: (optional) '0_24', '25_34', '35_44', '45_54', '55_64', '65_plus'
:type age_group: str
:param weight_class: (optional) pounds '0_124', '125_149', '150_164', '165_179', '180_199', '200_plus'
or kilograms '0_54', '55_64', '65_74', '75_84', '85_94', '95_plus'
:type weight_class: str
:param following: (optional) Limit to athletes current user is following.
:type following: bool
:param club_id: (optional) limit to specific club
:type club_id: int
:param timeframe: (optional) 'this_year', 'this_month', 'this_week', 'today'
:type timeframe: str
:param top_results_limit: (optional, strava default is 10 + 5 from end) How many of leading leaderboard entries to display.
See description for why this is a little confusing.
:type top_results_limit: int
:param page: (optional, strava default is 1) Page number of leaderboard to return, sorted by highest ranking leaders
:type page: int
:param context_entries: (optional, strava default is 2, max is 15) number of entries surrounding requesting athlete to return
:type context_entries: int
:return: The SegmentLeaderboard for the specified page (default: 1)
:rtype: :class:`stravalib.model.SegmentLeaderboard`
### Response:
def get_segment_leaderboard(self, segment_id, gender=None, age_group=None, weight_class=None,
following=None, club_id=None, timeframe=None, top_results_limit=None,
page=None, context_entries = None):
"""
Gets the leaderboard for a segment.
http://strava.github.io/api/v3/segments/#leaderboard
Note that by default Strava will return the top 10 results, and if the current user has ridden
that segment, the current user's result along with the two results above in rank and the two
results below will be included. The top X results can be configured by setting the top_results_limit
parameter; however, the other 5 results will be included if the current user has ridden that segment.
(i.e. if you specify top_results_limit=15, you will get a total of 20 entries back.)
:param segment_id: ID of the segment.
:type segment_id: int
:param gender: (optional) 'M' or 'F'
:type gender: str
:param age_group: (optional) '0_24', '25_34', '35_44', '45_54', '55_64', '65_plus'
:type age_group: str
:param weight_class: (optional) pounds '0_124', '125_149', '150_164', '165_179', '180_199', '200_plus'
or kilograms '0_54', '55_64', '65_74', '75_84', '85_94', '95_plus'
:type weight_class: str
:param following: (optional) Limit to athletes current user is following.
:type following: bool
:param club_id: (optional) limit to specific club
:type club_id: int
:param timeframe: (optional) 'this_year', 'this_month', 'this_week', 'today'
:type timeframe: str
:param top_results_limit: (optional, strava default is 10 + 5 from end) How many of leading leaderboard entries to display.
See description for why this is a little confusing.
:type top_results_limit: int
:param page: (optional, strava default is 1) Page number of leaderboard to return, sorted by highest ranking leaders
:type page: int
:param context_entries: (optional, strava default is 2, max is 15) number of entries surrounding requesting athlete to return
:type context_entries: int
:return: The SegmentLeaderboard for the specified page (default: 1)
:rtype: :class:`stravalib.model.SegmentLeaderboard`
"""
params = {}
if gender is not None:
if gender.upper() not in ('M', 'F'):
raise ValueError("Invalid gender: {0}. Possible values: 'M' or 'F'".format(gender))
params['gender'] = gender
valid_age_groups = ('0_24', '25_34', '35_44', '45_54', '55_64', '65_plus')
if age_group is not None:
if not age_group in valid_age_groups:
raise ValueError("Invalid age group: {0}. Possible values: {1!r}".format(age_group, valid_age_groups))
params['age_group'] = age_group
valid_weight_classes = ('0_124', '125_149', '150_164', '165_179', '180_199', '200_plus',
'0_54', '55_64', '65_74', '75_84', '85_94', '95_plus')
if weight_class is not None:
if not weight_class in valid_weight_classes:
raise ValueError("Invalid weight class: {0}. Possible values: {1!r}".format(weight_class, valid_weight_classes))
params['weight_class'] = weight_class
if following is not None:
params['following'] = int(following)
if club_id is not None:
params['club_id'] = club_id
if timeframe is not None:
valid_timeframes = 'this_year', 'this_month', 'this_week', 'today'
if not timeframe in valid_timeframes:
raise ValueError("Invalid timeframe: {0}. Possible values: {1!r}".format(timeframe, valid_timeframes))
params['date_range'] = timeframe
if top_results_limit is not None:
params['per_page'] = top_results_limit
if page is not None:
params['page'] = page
if context_entries is not None:
params['context_entries'] = context_entries
return model.SegmentLeaderboard.deserialize(self.protocol.get('/segments/{id}/leaderboard',
id=segment_id,
**params),
bind_client=self) |
def iterate_schema(fields, schema, path=None):
"""Recursively iterate over all schema sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:path schema: Field path
:path schema: string
"""
for field_schema in schema:
name = field_schema['name']
if 'group' in field_schema:
for rvals in iterate_schema(fields[name] if name in fields else {},
field_schema['group'],
None if path is None else '{}.{}'.format(path, name)):
yield rvals
else:
if path is None:
yield (field_schema, fields)
else:
yield (field_schema, fields, '{}.{}'.format(path, name)) | Recursively iterate over all schema sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:path schema: Field path
:path schema: string | Below is the the instruction that describes the task:
### Input:
Recursively iterate over all schema sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:path schema: Field path
:path schema: string
### Response:
def iterate_schema(fields, schema, path=None):
"""Recursively iterate over all schema sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:path schema: Field path
:path schema: string
"""
for field_schema in schema:
name = field_schema['name']
if 'group' in field_schema:
for rvals in iterate_schema(fields[name] if name in fields else {},
field_schema['group'],
None if path is None else '{}.{}'.format(path, name)):
yield rvals
else:
if path is None:
yield (field_schema, fields)
else:
yield (field_schema, fields, '{}.{}'.format(path, name)) |
def check_partial(func, *args, **kwargs):
"""Create a partial to be used by goodtables."""
new_func = partial(func, *args, **kwargs)
new_func.check = func.check
return new_func | Create a partial to be used by goodtables. | Below is the the instruction that describes the task:
### Input:
Create a partial to be used by goodtables.
### Response:
def check_partial(func, *args, **kwargs):
"""Create a partial to be used by goodtables."""
new_func = partial(func, *args, **kwargs)
new_func.check = func.check
return new_func |
def _replace(self, data, replacements):
"""
Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result.
"""
for find, repl in replacements:
data = data.replace(find, repl)
return data | Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result. | Below is the the instruction that describes the task:
### Input:
Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result.
### Response:
def _replace(self, data, replacements):
"""
Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result.
"""
for find, repl in replacements:
data = data.replace(find, repl)
return data |
def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):
"""Create an aggregated version of the Scene.
Args:
dataset_ids (iterable): DatasetIDs to include in the returned
`Scene`. Defaults to all datasets.
func (string): Function to apply on each aggregation window. One of
'mean', 'sum', 'min', 'max', 'median', 'argmin',
'argmax', 'prod', 'std', 'var'.
'mean' is the default.
boundary: Not implemented.
side: Not implemented.
dim_kwargs: the size of the windows to aggregate.
Returns:
A new aggregated scene
See also:
xarray.DataArray.coarsen
Example:
`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
applying the `min` function.
"""
new_scn = self.copy(datasets=dataset_ids)
for src_area, ds_ids in new_scn.iter_by_area():
if src_area is None:
for ds_id in ds_ids:
new_scn.datasets[ds_id] = self[ds_id]
continue
if boundary != 'exact':
raise NotImplementedError("boundary modes appart from 'exact' are not implemented yet.")
target_area = src_area.aggregate(**dim_kwargs)
resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)
for ds_id in ds_ids:
res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)
new_scn.datasets[ds_id] = getattr(res, func)()
new_scn.datasets[ds_id].attrs['area'] = target_area
new_scn.datasets[ds_id].attrs['resolution'] = resolution
return new_scn | Create an aggregated version of the Scene.
Args:
dataset_ids (iterable): DatasetIDs to include in the returned
`Scene`. Defaults to all datasets.
func (string): Function to apply on each aggregation window. One of
'mean', 'sum', 'min', 'max', 'median', 'argmin',
'argmax', 'prod', 'std', 'var'.
'mean' is the default.
boundary: Not implemented.
side: Not implemented.
dim_kwargs: the size of the windows to aggregate.
Returns:
A new aggregated scene
See also:
xarray.DataArray.coarsen
Example:
`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
applying the `min` function. | Below is the the instruction that describes the task:
### Input:
Create an aggregated version of the Scene.
Args:
dataset_ids (iterable): DatasetIDs to include in the returned
`Scene`. Defaults to all datasets.
func (string): Function to apply on each aggregation window. One of
'mean', 'sum', 'min', 'max', 'median', 'argmin',
'argmax', 'prod', 'std', 'var'.
'mean' is the default.
boundary: Not implemented.
side: Not implemented.
dim_kwargs: the size of the windows to aggregate.
Returns:
A new aggregated scene
See also:
xarray.DataArray.coarsen
Example:
`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
applying the `min` function.
### Response:
def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):
"""Create an aggregated version of the Scene.
Args:
dataset_ids (iterable): DatasetIDs to include in the returned
`Scene`. Defaults to all datasets.
func (string): Function to apply on each aggregation window. One of
'mean', 'sum', 'min', 'max', 'median', 'argmin',
'argmax', 'prod', 'std', 'var'.
'mean' is the default.
boundary: Not implemented.
side: Not implemented.
dim_kwargs: the size of the windows to aggregate.
Returns:
A new aggregated scene
See also:
xarray.DataArray.coarsen
Example:
`scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
applying the `min` function.
"""
new_scn = self.copy(datasets=dataset_ids)
for src_area, ds_ids in new_scn.iter_by_area():
if src_area is None:
for ds_id in ds_ids:
new_scn.datasets[ds_id] = self[ds_id]
continue
if boundary != 'exact':
raise NotImplementedError("boundary modes appart from 'exact' are not implemented yet.")
target_area = src_area.aggregate(**dim_kwargs)
resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)
for ds_id in ds_ids:
res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)
new_scn.datasets[ds_id] = getattr(res, func)()
new_scn.datasets[ds_id].attrs['area'] = target_area
new_scn.datasets[ds_id].attrs['resolution'] = resolution
return new_scn |
def drop_all(self):
"""Drops all tables in the database"""
log.info('dropping tables in %s', self.engine.url)
self.session.commit()
models.Base.metadata.drop_all(self.engine)
self.session.commit() | Drops all tables in the database | Below is the the instruction that describes the task:
### Input:
Drops all tables in the database
### Response:
def drop_all(self):
"""Drops all tables in the database"""
log.info('dropping tables in %s', self.engine.url)
self.session.commit()
models.Base.metadata.drop_all(self.engine)
self.session.commit() |
def logvol_prefactor(n, p=2.):
"""
Returns the ln(volume constant) for an `n`-dimensional sphere with an
:math:`L^p` norm. The constant is defined as::
lnf = n * ln(2.) + n * LogGamma(1./p + 1) - LogGamma(n/p + 1.)
By default the `p=2.` norm is used (i.e. the standard Euclidean norm).
"""
p *= 1. # convert to float in case user inputs an integer
lnf = (n * np.log(2.) + n * special.gammaln(1./p + 1.) -
special.gammaln(n/p + 1))
return lnf | Returns the ln(volume constant) for an `n`-dimensional sphere with an
:math:`L^p` norm. The constant is defined as::
lnf = n * ln(2.) + n * LogGamma(1./p + 1) - LogGamma(n/p + 1.)
By default the `p=2.` norm is used (i.e. the standard Euclidean norm). | Below is the the instruction that describes the task:
### Input:
Returns the ln(volume constant) for an `n`-dimensional sphere with an
:math:`L^p` norm. The constant is defined as::
lnf = n * ln(2.) + n * LogGamma(1./p + 1) - LogGamma(n/p + 1.)
By default the `p=2.` norm is used (i.e. the standard Euclidean norm).
### Response:
def logvol_prefactor(n, p=2.):
"""
Returns the ln(volume constant) for an `n`-dimensional sphere with an
:math:`L^p` norm. The constant is defined as::
lnf = n * ln(2.) + n * LogGamma(1./p + 1) - LogGamma(n/p + 1.)
By default the `p=2.` norm is used (i.e. the standard Euclidean norm).
"""
p *= 1. # convert to float in case user inputs an integer
lnf = (n * np.log(2.) + n * special.gammaln(1./p + 1.) -
special.gammaln(n/p + 1))
return lnf |
def list_boards(self):
"""Return a list with all the supported boards"""
# Print table
click.echo('\nSupported boards:\n')
BOARDLIST_TPL = ('{board:25} {fpga:20} {type:<5} {size:<5} {pack:<10}')
terminal_width, _ = click.get_terminal_size()
click.echo('-' * terminal_width)
click.echo(BOARDLIST_TPL.format(
board=click.style('Board', fg='cyan'), fpga='FPGA', type='Type',
size='Size', pack='Pack'))
click.echo('-' * terminal_width)
for board in self.boards:
fpga = self.boards.get(board).get('fpga')
click.echo(BOARDLIST_TPL.format(
board=click.style(board, fg='cyan'),
fpga=fpga,
type=self.fpgas.get(fpga).get('type'),
size=self.fpgas.get(fpga).get('size'),
pack=self.fpgas.get(fpga).get('pack')))
click.secho(BOARDS_MSG, fg='green') | Return a list with all the supported boards | Below is the the instruction that describes the task:
### Input:
Return a list with all the supported boards
### Response:
def list_boards(self):
"""Return a list with all the supported boards"""
# Print table
click.echo('\nSupported boards:\n')
BOARDLIST_TPL = ('{board:25} {fpga:20} {type:<5} {size:<5} {pack:<10}')
terminal_width, _ = click.get_terminal_size()
click.echo('-' * terminal_width)
click.echo(BOARDLIST_TPL.format(
board=click.style('Board', fg='cyan'), fpga='FPGA', type='Type',
size='Size', pack='Pack'))
click.echo('-' * terminal_width)
for board in self.boards:
fpga = self.boards.get(board).get('fpga')
click.echo(BOARDLIST_TPL.format(
board=click.style(board, fg='cyan'),
fpga=fpga,
type=self.fpgas.get(fpga).get('type'),
size=self.fpgas.get(fpga).get('size'),
pack=self.fpgas.get(fpga).get('pack')))
click.secho(BOARDS_MSG, fg='green') |
def transform_sequence(f):
"""
A decorator to take a function operating on a point and
turn it into a function returning a callable operating on a sequence.
The functions passed to this decorator must define a kwarg called "point",
or have point be the last positional argument
"""
@wraps(f)
def wrapper(*args, **kwargs):
#The arguments here are the arguments passed to the transform,
#ie, there will be no "point" argument
#Send a function to seq.map_points with all of its arguments applied except
#point
return lambda seq: seq.map_points(partial(f, *args, **kwargs))
return wrapper | A decorator to take a function operating on a point and
turn it into a function returning a callable operating on a sequence.
The functions passed to this decorator must define a kwarg called "point",
or have point be the last positional argument | Below is the the instruction that describes the task:
### Input:
A decorator to take a function operating on a point and
turn it into a function returning a callable operating on a sequence.
The functions passed to this decorator must define a kwarg called "point",
or have point be the last positional argument
### Response:
def transform_sequence(f):
"""
A decorator to take a function operating on a point and
turn it into a function returning a callable operating on a sequence.
The functions passed to this decorator must define a kwarg called "point",
or have point be the last positional argument
"""
@wraps(f)
def wrapper(*args, **kwargs):
#The arguments here are the arguments passed to the transform,
#ie, there will be no "point" argument
#Send a function to seq.map_points with all of its arguments applied except
#point
return lambda seq: seq.map_points(partial(f, *args, **kwargs))
return wrapper |
def save_form(self, form):
"""
Save a valid form. If there is a parent attribute,
this will make sure that the parent object is added
to the saved object. Either as a relationship before
saving or in the case of many to many relations after
saving. Any forced instance values are set as well.
Returns the saved object.
"""
# Add any force_instance_values
force = self.get_force_instance_values()
if force:
for k, v in force.items():
setattr(form.instance, k, v)
# Are we adding to an attr or manager
should_add = False
if self.parent_object:
m2ms = [f.name for f in form.instance._meta.many_to_many]
m2ms.extend(
[f.field.rel.related_name for f in
[
f for f in form.instance._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
]
)
if self.parent_field in m2ms:
should_add = True
else:
try:
form.instance._meta.get_field(self.parent_field)
setattr(form.instance, self.parent_field,
self.parent_object)
except FieldDoesNotExist:
pass
obj = form.save()
# Do we need to add this to a m2m
if should_add:
getattr(obj, self.parent_field).add(self.parent_object)
return obj | Save a valid form. If there is a parent attribute,
this will make sure that the parent object is added
to the saved object. Either as a relationship before
saving or in the case of many to many relations after
saving. Any forced instance values are set as well.
Returns the saved object. | Below is the the instruction that describes the task:
### Input:
Save a valid form. If there is a parent attribute,
this will make sure that the parent object is added
to the saved object. Either as a relationship before
saving or in the case of many to many relations after
saving. Any forced instance values are set as well.
Returns the saved object.
### Response:
def save_form(self, form):
"""
Save a valid form. If there is a parent attribute,
this will make sure that the parent object is added
to the saved object. Either as a relationship before
saving or in the case of many to many relations after
saving. Any forced instance values are set as well.
Returns the saved object.
"""
# Add any force_instance_values
force = self.get_force_instance_values()
if force:
for k, v in force.items():
setattr(form.instance, k, v)
# Are we adding to an attr or manager
should_add = False
if self.parent_object:
m2ms = [f.name for f in form.instance._meta.many_to_many]
m2ms.extend(
[f.field.rel.related_name for f in
[
f for f in form.instance._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
]
)
if self.parent_field in m2ms:
should_add = True
else:
try:
form.instance._meta.get_field(self.parent_field)
setattr(form.instance, self.parent_field,
self.parent_object)
except FieldDoesNotExist:
pass
obj = form.save()
# Do we need to add this to a m2m
if should_add:
getattr(obj, self.parent_field).add(self.parent_object)
return obj |
def real_value(value, digit):
"""
function to calculate the real value
we need to devide the value by the digit
e.g.
value = 100
digit = 2
return: "1.0"
"""
return str(float(value) / math.pow(10, float(digit))) | function to calculate the real value
we need to devide the value by the digit
e.g.
value = 100
digit = 2
return: "1.0" | Below is the the instruction that describes the task:
### Input:
function to calculate the real value
we need to devide the value by the digit
e.g.
value = 100
digit = 2
return: "1.0"
### Response:
def real_value(value, digit):
"""
function to calculate the real value
we need to devide the value by the digit
e.g.
value = 100
digit = 2
return: "1.0"
"""
return str(float(value) / math.pow(10, float(digit))) |
def yeardoy2datetime(yeardate: int,
utsec: Union[float, int] = None) -> datetime.datetime:
"""
Inputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits)
outputs:
t: datetime
http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
"""
if isinstance(yeardate, (tuple, list, np.ndarray)):
if utsec is None:
return np.asarray([yeardoy2datetime(y) for y in yeardate])
elif isinstance(utsec, (tuple, list, np.ndarray)):
return np.asarray([yeardoy2datetime(y, s) for y, s in zip(yeardate, utsec)])
yeardate = int(yeardate)
yd = str(yeardate)
if len(yd) != 7:
raise ValueError('yyyyddd expected')
year = int(yd[:4])
assert 0 < year < 3000, 'year not in expected format'
dt = datetime.datetime(year, 1, 1) + datetime.timedelta(days=int(yd[4:]) - 1)
assert isinstance(dt, datetime.datetime)
if utsec is not None:
dt += datetime.timedelta(seconds=utsec)
return dt | Inputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits)
outputs:
t: datetime
http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date | Below is the the instruction that describes the task:
### Input:
Inputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits)
outputs:
t: datetime
http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
### Response:
def yeardoy2datetime(yeardate: int,
utsec: Union[float, int] = None) -> datetime.datetime:
"""
Inputs:
yd: yyyyddd four digit year, 3 digit day of year (INTEGER 7 digits)
outputs:
t: datetime
http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
"""
if isinstance(yeardate, (tuple, list, np.ndarray)):
if utsec is None:
return np.asarray([yeardoy2datetime(y) for y in yeardate])
elif isinstance(utsec, (tuple, list, np.ndarray)):
return np.asarray([yeardoy2datetime(y, s) for y, s in zip(yeardate, utsec)])
yeardate = int(yeardate)
yd = str(yeardate)
if len(yd) != 7:
raise ValueError('yyyyddd expected')
year = int(yd[:4])
assert 0 < year < 3000, 'year not in expected format'
dt = datetime.datetime(year, 1, 1) + datetime.timedelta(days=int(yd[4:]) - 1)
assert isinstance(dt, datetime.datetime)
if utsec is not None:
dt += datetime.timedelta(seconds=utsec)
return dt |
def apply_config(self, config):
"""
Constructs HAProxyConfig and HAProxyControl instances based on the
contents of the config.
This is mostly a matter of constructing the configuration stanzas.
"""
self.haproxy_config_path = config["config_file"]
global_stanza = Stanza("global")
global_stanza.add_lines(config.get("global", []))
global_stanza.add_lines([
"stats socket %s mode 600 level admin" % config["socket_file"],
"stats timeout 2m"
])
defaults_stanza = Stanza("defaults")
defaults_stanza.add_lines(config.get("defaults", []))
proxy_stanzas = [
ProxyStanza(
name, proxy["port"], proxy["upstreams"],
proxy.get("options", []),
proxy.get("bind_address")
)
for name, proxy in six.iteritems(config.get("proxies", {}))
]
stats_stanza = None
if "stats" in config:
stats_stanza = StatsStanza(
config["stats"]["port"], config["stats"].get("uri", "/")
)
for timeout in ("client", "connect", "server"):
if timeout in config["stats"].get("timeouts", {}):
stats_stanza.add_line(
"timeout %s %d" % (
timeout,
config["stats"]["timeouts"][timeout]
)
)
self.config_file = HAProxyConfig(
global_stanza, defaults_stanza,
proxy_stanzas=proxy_stanzas, stats_stanza=stats_stanza,
meta_clusters=config.get("meta_clusters", {}),
bind_address=config.get("bind_address")
)
self.control = HAProxyControl(
config["config_file"], config["socket_file"], config["pid_file"],
) | Constructs HAProxyConfig and HAProxyControl instances based on the
contents of the config.
This is mostly a matter of constructing the configuration stanzas. | Below is the the instruction that describes the task:
### Input:
Constructs HAProxyConfig and HAProxyControl instances based on the
contents of the config.
This is mostly a matter of constructing the configuration stanzas.
### Response:
def apply_config(self, config):
"""
Constructs HAProxyConfig and HAProxyControl instances based on the
contents of the config.
This is mostly a matter of constructing the configuration stanzas.
"""
self.haproxy_config_path = config["config_file"]
global_stanza = Stanza("global")
global_stanza.add_lines(config.get("global", []))
global_stanza.add_lines([
"stats socket %s mode 600 level admin" % config["socket_file"],
"stats timeout 2m"
])
defaults_stanza = Stanza("defaults")
defaults_stanza.add_lines(config.get("defaults", []))
proxy_stanzas = [
ProxyStanza(
name, proxy["port"], proxy["upstreams"],
proxy.get("options", []),
proxy.get("bind_address")
)
for name, proxy in six.iteritems(config.get("proxies", {}))
]
stats_stanza = None
if "stats" in config:
stats_stanza = StatsStanza(
config["stats"]["port"], config["stats"].get("uri", "/")
)
for timeout in ("client", "connect", "server"):
if timeout in config["stats"].get("timeouts", {}):
stats_stanza.add_line(
"timeout %s %d" % (
timeout,
config["stats"]["timeouts"][timeout]
)
)
self.config_file = HAProxyConfig(
global_stanza, defaults_stanza,
proxy_stanzas=proxy_stanzas, stats_stanza=stats_stanza,
meta_clusters=config.get("meta_clusters", {}),
bind_address=config.get("bind_address")
)
self.control = HAProxyControl(
config["config_file"], config["socket_file"], config["pid_file"],
) |
def make_named_stemmer(stem=None, min_len=3):
"""Construct a callable object and a string sufficient to reconstruct it later (unpickling)
>>> make_named_stemmer('str_lower')
('str_lower', <function str_lower at ...>)
>>> make_named_stemmer('Lancaster')
('lancaster', <Stemmer object at ...>)
"""
name, stem = stringify(stem), make_stemmer(stem=stem, min_len=min_len)
if hasattr(stem, '__name__'):
return stem.__name__, stem
if name.strip().lower() in STEMMER_TYPES:
return name.strip().lower(), stem
if hasattr(stem, 'pattern'):
return stem.pattern, stem
return stringify(stem), stem | Construct a callable object and a string sufficient to reconstruct it later (unpickling)
>>> make_named_stemmer('str_lower')
('str_lower', <function str_lower at ...>)
>>> make_named_stemmer('Lancaster')
('lancaster', <Stemmer object at ...>) | Below is the the instruction that describes the task:
### Input:
Construct a callable object and a string sufficient to reconstruct it later (unpickling)
>>> make_named_stemmer('str_lower')
('str_lower', <function str_lower at ...>)
>>> make_named_stemmer('Lancaster')
('lancaster', <Stemmer object at ...>)
### Response:
def make_named_stemmer(stem=None, min_len=3):
"""Construct a callable object and a string sufficient to reconstruct it later (unpickling)
>>> make_named_stemmer('str_lower')
('str_lower', <function str_lower at ...>)
>>> make_named_stemmer('Lancaster')
('lancaster', <Stemmer object at ...>)
"""
name, stem = stringify(stem), make_stemmer(stem=stem, min_len=min_len)
if hasattr(stem, '__name__'):
return stem.__name__, stem
if name.strip().lower() in STEMMER_TYPES:
return name.strip().lower(), stem
if hasattr(stem, 'pattern'):
return stem.pattern, stem
return stringify(stem), stem |
def AddMemberDefinition(self, member_definition):
"""Adds a member definition.
Args:
member_definition (DataTypeDefinition): member data type definition.
"""
self._byte_size = None
self.members.append(member_definition)
if self.sections:
section_definition = self.sections[-1]
section_definition.members.append(member_definition) | Adds a member definition.
Args:
member_definition (DataTypeDefinition): member data type definition. | Below is the the instruction that describes the task:
### Input:
Adds a member definition.
Args:
member_definition (DataTypeDefinition): member data type definition.
### Response:
def AddMemberDefinition(self, member_definition):
"""Adds a member definition.
Args:
member_definition (DataTypeDefinition): member data type definition.
"""
self._byte_size = None
self.members.append(member_definition)
if self.sections:
section_definition = self.sections[-1]
section_definition.members.append(member_definition) |
def dir2fn(ofn, ifn, suffix) -> Union[None, Path]:
"""
ofn = filename or output directory, to create filename based on ifn
ifn = input filename (don't overwrite!)
suffix = desired file extension e.g. .h5
"""
if not ofn: # no output file desired
return None
ofn = Path(ofn).expanduser()
ifn = Path(ifn).expanduser()
assert ifn.is_file()
if ofn.suffix == suffix: # must already be a filename
pass
else: # must be a directory
assert ofn.is_dir(), f'create directory {ofn}'
ofn = ofn / ifn.with_suffix(suffix).name
try:
assert not ofn.samefile(ifn), f'do not overwrite input file! {ifn}'
except FileNotFoundError: # a good thing, the output file doesn't exist and hence it's not the input file
pass
return ofn | ofn = filename or output directory, to create filename based on ifn
ifn = input filename (don't overwrite!)
suffix = desired file extension e.g. .h5 | Below is the the instruction that describes the task:
### Input:
ofn = filename or output directory, to create filename based on ifn
ifn = input filename (don't overwrite!)
suffix = desired file extension e.g. .h5
### Response:
def dir2fn(ofn, ifn, suffix) -> Union[None, Path]:
"""
ofn = filename or output directory, to create filename based on ifn
ifn = input filename (don't overwrite!)
suffix = desired file extension e.g. .h5
"""
if not ofn: # no output file desired
return None
ofn = Path(ofn).expanduser()
ifn = Path(ifn).expanduser()
assert ifn.is_file()
if ofn.suffix == suffix: # must already be a filename
pass
else: # must be a directory
assert ofn.is_dir(), f'create directory {ofn}'
ofn = ofn / ifn.with_suffix(suffix).name
try:
assert not ofn.samefile(ifn), f'do not overwrite input file! {ifn}'
except FileNotFoundError: # a good thing, the output file doesn't exist and hence it's not the input file
pass
return ofn |
def transition(self, state, message=""):
"""Change to a new state if the transition is allowed
Args:
state (str): State to transition to
message (str): Message if the transition is to a fault state
"""
with self.changes_squashed:
initial_state = self.state.value
if self.state_set.transition_allowed(
initial_state=initial_state, target_state=state):
self.log.debug(
"%s: Transitioning from %s to %s",
self.mri, initial_state, state)
if state == ss.DISABLED:
alarm = Alarm.invalid("Disabled")
elif state == ss.FAULT:
alarm = Alarm.major(message)
else:
alarm = Alarm()
self.update_health(self, HealthInfo(alarm))
self.state.set_value(state)
self.state.set_alarm(alarm)
for child, writeable in self._children_writeable[state].items():
if isinstance(child, AttributeModel):
child.meta.set_writeable(writeable)
elif isinstance(child, MethodModel):
child.set_writeable(writeable)
else:
raise TypeError("Cannot transition from %s to %s" %
(initial_state, state)) | Change to a new state if the transition is allowed
Args:
state (str): State to transition to
message (str): Message if the transition is to a fault state | Below is the the instruction that describes the task:
### Input:
Change to a new state if the transition is allowed
Args:
state (str): State to transition to
message (str): Message if the transition is to a fault state
### Response:
def transition(self, state, message=""):
"""Change to a new state if the transition is allowed
Args:
state (str): State to transition to
message (str): Message if the transition is to a fault state
"""
with self.changes_squashed:
initial_state = self.state.value
if self.state_set.transition_allowed(
initial_state=initial_state, target_state=state):
self.log.debug(
"%s: Transitioning from %s to %s",
self.mri, initial_state, state)
if state == ss.DISABLED:
alarm = Alarm.invalid("Disabled")
elif state == ss.FAULT:
alarm = Alarm.major(message)
else:
alarm = Alarm()
self.update_health(self, HealthInfo(alarm))
self.state.set_value(state)
self.state.set_alarm(alarm)
for child, writeable in self._children_writeable[state].items():
if isinstance(child, AttributeModel):
child.meta.set_writeable(writeable)
elif isinstance(child, MethodModel):
child.set_writeable(writeable)
else:
raise TypeError("Cannot transition from %s to %s" %
(initial_state, state)) |
def index():
"""Show the index with all posts.
:param int all: Whether or not should show all posts
"""
context = {'postform': NewPostForm(),
'pageform': NewPageForm(),
'delform': DeleteForm()}
n = request.args.get('all')
if n is None:
wants_now = None
else:
wants_now = n == '1'
if wants_now is None and current_user.wants_all_posts:
wants = True
else:
wants = wants_now
if current_user.can_edit_all_posts and wants:
posts = site.all_posts
pages = site.pages
else:
wants = False
posts = []
pages = []
for p in site.timeline:
if (p.meta('author.uid') and
p.meta('author.uid') != str(current_user.uid)):
continue
if p.is_post:
posts.append(p)
else:
pages.append(p)
context['posts'] = posts
context['pages'] = pages
context['title'] = 'Posts & Pages'
context['wants'] = wants
return render('coil_index.tmpl', context) | Show the index with all posts.
:param int all: Whether or not should show all posts | Below is the the instruction that describes the task:
### Input:
Show the index with all posts.
:param int all: Whether or not should show all posts
### Response:
def index():
"""Show the index with all posts.
:param int all: Whether or not should show all posts
"""
context = {'postform': NewPostForm(),
'pageform': NewPageForm(),
'delform': DeleteForm()}
n = request.args.get('all')
if n is None:
wants_now = None
else:
wants_now = n == '1'
if wants_now is None and current_user.wants_all_posts:
wants = True
else:
wants = wants_now
if current_user.can_edit_all_posts and wants:
posts = site.all_posts
pages = site.pages
else:
wants = False
posts = []
pages = []
for p in site.timeline:
if (p.meta('author.uid') and
p.meta('author.uid') != str(current_user.uid)):
continue
if p.is_post:
posts.append(p)
else:
pages.append(p)
context['posts'] = posts
context['pages'] = pages
context['title'] = 'Posts & Pages'
context['wants'] = wants
return render('coil_index.tmpl', context) |
def get_method_by_idx(self, idx):
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None:
self.__cached_methods_idx = {}
for i in self.classes.class_def:
for j in i.get_methods():
self.__cached_methods_idx[j.get_method_idx()] = j
try:
return self.__cached_methods_idx[idx]
except KeyError:
return None | Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object | Below is the the instruction that describes the task:
### Input:
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
### Response:
def get_method_by_idx(self, idx):
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None:
self.__cached_methods_idx = {}
for i in self.classes.class_def:
for j in i.get_methods():
self.__cached_methods_idx[j.get_method_idx()] = j
try:
return self.__cached_methods_idx[idx]
except KeyError:
return None |
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concat(
m.alts_columns_used() for m in self.models.values()))) | Columns from the alternatives table that are used for filtering. | Below is the the instruction that describes the task:
### Input:
Columns from the alternatives table that are used for filtering.
### Response:
def alts_columns_used(self):
"""
Columns from the alternatives table that are used for filtering.
"""
return list(tz.unique(tz.concat(
m.alts_columns_used() for m in self.models.values()))) |
def _inhibitColumns(self, overlaps):
"""
Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
"""
# determine how many columns should be selected in the inhibition phase.
# This can be specified by either setting the 'numActiveColumnsPerInhArea'
# parameter or the 'localAreaDensity' parameter when initializing the class
if (self._localAreaDensity > 0):
density = self._localAreaDensity
else:
inhibitionArea = ((2*self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
density = float(self._numActiveColumnsPerInhArea) / inhibitionArea
density = min(density, 0.5)
if self._globalInhibition or \
self._inhibitionRadius > max(self._columnDimensions):
return self._inhibitColumnsGlobal(overlaps, density)
else:
return self._inhibitColumnsLocal(overlaps, density) | Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on. | Below is the the instruction that describes the task:
### Input:
Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
### Response:
def _inhibitColumns(self, overlaps):
"""
Performs inhibition. This method calculates the necessary values needed to
actually perform inhibition and then delegates the task of picking the
active columns to helper functions.
Parameters:
----------------------------
:param overlaps: an array containing the overlap score for each column.
The overlap score for a column is defined as the number
of synapses in a "connected state" (connected synapses)
that are connected to input bits which are turned on.
"""
# determine how many columns should be selected in the inhibition phase.
# This can be specified by either setting the 'numActiveColumnsPerInhArea'
# parameter or the 'localAreaDensity' parameter when initializing the class
if (self._localAreaDensity > 0):
density = self._localAreaDensity
else:
inhibitionArea = ((2*self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
density = float(self._numActiveColumnsPerInhArea) / inhibitionArea
density = min(density, 0.5)
if self._globalInhibition or \
self._inhibitionRadius > max(self._columnDimensions):
return self._inhibitColumnsGlobal(overlaps, density)
else:
return self._inhibitColumnsLocal(overlaps, density) |
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None | Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``. | Below is the the instruction that describes the task:
### Input:
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
### Response:
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None |
def _remove_debug_handlers(self):
"""Remove any handlers with an attribute of debug_only that is True and
remove the references to said handlers from any loggers that are
referencing them.
"""
remove = list()
for handler in self.config[self.HANDLERS]:
if self.config[self.HANDLERS][handler].get('debug_only'):
remove.append(handler)
for handler in remove:
del self.config[self.HANDLERS][handler]
for logger in self.config[self.LOGGERS].keys():
logger = self.config[self.LOGGERS][logger]
if handler in logger[self.HANDLERS]:
logger[self.HANDLERS].remove(handler)
self._remove_debug_only() | Remove any handlers with an attribute of debug_only that is True and
remove the references to said handlers from any loggers that are
referencing them. | Below is the the instruction that describes the task:
### Input:
Remove any handlers with an attribute of debug_only that is True and
remove the references to said handlers from any loggers that are
referencing them.
### Response:
def _remove_debug_handlers(self):
"""Remove any handlers with an attribute of debug_only that is True and
remove the references to said handlers from any loggers that are
referencing them.
"""
remove = list()
for handler in self.config[self.HANDLERS]:
if self.config[self.HANDLERS][handler].get('debug_only'):
remove.append(handler)
for handler in remove:
del self.config[self.HANDLERS][handler]
for logger in self.config[self.LOGGERS].keys():
logger = self.config[self.LOGGERS][logger]
if handler in logger[self.HANDLERS]:
logger[self.HANDLERS].remove(handler)
self._remove_debug_only() |
def tabbar_toggled(self, settings, key, user_data):
"""If the gconf var use_tabbar be changed, this method will be
called and will show/hide the tabbar.
"""
if settings.get_boolean(key):
for n in self.guake.notebook_manager.iter_notebooks():
n.set_property("show-tabs", True)
else:
for n in self.guake.notebook_manager.iter_notebooks():
n.set_property("show-tabs", False) | If the gconf var use_tabbar be changed, this method will be
called and will show/hide the tabbar. | Below is the the instruction that describes the task:
### Input:
If the gconf var use_tabbar be changed, this method will be
called and will show/hide the tabbar.
### Response:
def tabbar_toggled(self, settings, key, user_data):
"""If the gconf var use_tabbar be changed, this method will be
called and will show/hide the tabbar.
"""
if settings.get_boolean(key):
for n in self.guake.notebook_manager.iter_notebooks():
n.set_property("show-tabs", True)
else:
for n in self.guake.notebook_manager.iter_notebooks():
n.set_property("show-tabs", False) |