code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def is_filter_selected(self, selection_id, value):
"""
Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean.
"""
selected = self.dashboard_cookie.get(selection_id)
return selected == value | Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean. | Below is the instruction that describes the task:
### Input:
Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean.
### Response:
def is_filter_selected(self, selection_id, value):
"""
Compares whether the 'selection_id' parameter value saved in the
cookie is the same value as the "value" parameter.
:param selection_id: a string as a dashboard_cookie key.
:param value: The value to compare against the value from
dashboard_cookie key.
:return: Boolean.
"""
selected = self.dashboard_cookie.get(selection_id)
return selected == value |
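A minimal usage sketch for the is_filter_selected sample above; the DashboardView stub and the cookie contents are illustrative assumptions, not part of the original source:
class DashboardView:
    def __init__(self, dashboard_cookie):
        self.dashboard_cookie = dashboard_cookie
    def is_filter_selected(self, selection_id, value):
        # same comparison as the sample above
        selected = self.dashboard_cookie.get(selection_id)
        return selected == value

view = DashboardView({"review_state": "active"})
print(view.is_filter_selected("review_state", "active"))    # True
print(view.is_filter_selected("review_state", "inactive"))  # False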
def add_tweets(self, url, last_modified, tweets):
"""Adds new tweets to the cache."""
try:
self.cache[url] = {"last_modified": last_modified, "tweets": tweets}
self.mark_updated()
return True
except TypeError:
return False | Adds new tweets to the cache. | Below is the instruction that describes the task:
### Input:
Adds new tweets to the cache.
### Response:
def add_tweets(self, url, last_modified, tweets):
"""Adds new tweets to the cache."""
try:
self.cache[url] = {"last_modified": last_modified, "tweets": tweets}
self.mark_updated()
return True
except TypeError:
return False |
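A hedged sketch of exercising the add_tweets sample above; the TweetCache stub, the mark_updated behaviour, and the feed URL are assumptions made only for this illustration:
class TweetCache:
    def __init__(self):
        self.cache = {}
        self.updated = False
    def mark_updated(self):
        self.updated = True
    def add_tweets(self, url, last_modified, tweets):
        try:
            self.cache[url] = {"last_modified": last_modified, "tweets": tweets}
            self.mark_updated()
            return True
        except TypeError:
            return False

cache = TweetCache()
print(cache.add_tweets("https://example.com/feed", "Mon, 01 Jan 2024", ["hello"]))  # True
print(cache.add_tweets(["unhashable", "key"], None, []))  # False: a list key raises TypeError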
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
"""
if topology_only:
self.points = drp(self.points, eps)
else:
self.points = spt(self.points, max_dist_error, max_speed_error)
return self | In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment` | Below is the instruction that describes the task:
### Input:
In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
### Response:
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
"""
if topology_only:
self.points = drp(self.points, eps)
else:
self.points = spt(self.points, max_dist_error, max_speed_error)
return self |
def _build_document_scrapers(cls, session: AppSession):
'''Create the document scrapers.
Returns:
A list of document scrapers
'''
html_parser = session.factory['HTMLParser']
element_walker = session.factory.new('ElementWalker')
scrapers = [
session.factory.new(
'HTMLScraper',
html_parser,
element_walker,
followed_tags=session.args.follow_tags,
ignored_tags=session.args.ignore_tags,
only_relative=session.args.relative,
robots=session.args.robots,
encoding_override=session.args.remote_encoding,
),
]
if 'css' in session.args.link_extractors:
css_scraper = session.factory.new(
'CSSScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(css_scraper)
element_walker.css_scraper = css_scraper
if 'javascript' in session.args.link_extractors:
javascript_scraper = session.factory.new(
'JavaScriptScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(javascript_scraper)
element_walker.javascript_scraper = javascript_scraper
if session.args.sitemaps:
scrapers.append(session.factory.new(
'SitemapScraper', html_parser,
encoding_override=session.args.remote_encoding,
))
return scrapers | Create the document scrapers.
Returns:
A list of document scrapers | Below is the instruction that describes the task:
### Input:
Create the document scrapers.
Returns:
A list of document scrapers
### Response:
def _build_document_scrapers(cls, session: AppSession):
'''Create the document scrapers.
Returns:
A list of document scrapers
'''
html_parser = session.factory['HTMLParser']
element_walker = session.factory.new('ElementWalker')
scrapers = [
session.factory.new(
'HTMLScraper',
html_parser,
element_walker,
followed_tags=session.args.follow_tags,
ignored_tags=session.args.ignore_tags,
only_relative=session.args.relative,
robots=session.args.robots,
encoding_override=session.args.remote_encoding,
),
]
if 'css' in session.args.link_extractors:
css_scraper = session.factory.new(
'CSSScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(css_scraper)
element_walker.css_scraper = css_scraper
if 'javascript' in session.args.link_extractors:
javascript_scraper = session.factory.new(
'JavaScriptScraper',
encoding_override=session.args.remote_encoding,
)
scrapers.append(javascript_scraper)
element_walker.javascript_scraper = javascript_scraper
if session.args.sitemaps:
scrapers.append(session.factory.new(
'SitemapScraper', html_parser,
encoding_override=session.args.remote_encoding,
))
return scrapers |
def getColorName(c):
"""Find the name of a color.
.. hint:: |colorpalette| |colorpalette.py|_
"""
c = np.array(getColor(c)) # reformat to rgb
mdist = 99.0
kclosest = ""
for key in colors.keys():
ci = np.array(getColor(key))
d = np.linalg.norm(c - ci)
if d < mdist:
mdist = d
kclosest = str(key)
return kclosest | Find the name of a color.
.. hint:: |colorpalette| |colorpalette.py|_ | Below is the instruction that describes the task:
### Input:
Find the name of a color.
.. hint:: |colorpalette| |colorpalette.py|_
### Response:
def getColorName(c):
"""Find the name of a color.
.. hint:: |colorpalette| |colorpalette.py|_
"""
c = np.array(getColor(c)) # reformat to rgb
mdist = 99.0
kclosest = ""
for key in colors.keys():
ci = np.array(getColor(key))
d = np.linalg.norm(c - ci)
if d < mdist:
mdist = d
kclosest = str(key)
return kclosest |
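The getColorName sample is a nearest-neighbour lookup over a table of named colours; the miniature colour dictionary below is a made-up stand-in for the module's real colors mapping and getColor helper:
import numpy as np

colors = {"red": (1.0, 0.0, 0.0), "green": (0.0, 1.0, 0.0), "blue": (0.0, 0.0, 1.0)}

def nearest_color_name(rgb):
    # pick the named colour with the smallest Euclidean distance in RGB space
    c = np.array(rgb, dtype=float)
    best_name, best_dist = "", float("inf")
    for name, value in colors.items():
        d = np.linalg.norm(c - np.array(value))
        if d < best_dist:
            best_name, best_dist = name, d
    return best_name

print(nearest_color_name((0.9, 0.1, 0.1)))  # red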
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None | Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1]) | Below is the instruction that describes the task:
### Input:
Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
### Response:
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None |
def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False):
"""public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
if armored:
if force_compress:
command_stub = self.command_maxcompress_armored
elif force_nocompress:
command_stub = self.command_nocompress_armored
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default_armored
else:
command_stub = self.command_nocompress_armored
else:
if force_compress:
command_stub = self.command_maxcompress
elif force_nocompress:
command_stub = self.command_nocompress
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default
else:
command_stub = self.command_nocompress
encrypted_outpath = self._create_outfilepath(inpath)
system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)
try:
response = muterun(system_command)
# check returned status code
if response.exitcode == 0:
stdout(encrypted_outpath + " was generated from " + inpath)
if checksum: # add a SHA256 hash digest of the encrypted file - requested by user --hash flag in command
from crypto.library import hash
encrypted_file_hash = hash.generate_hash(encrypted_outpath)
if len(encrypted_file_hash) == 64:
stdout("SHA256 hash digest for " + encrypted_outpath + " :")
stdout(encrypted_file_hash)
else:
stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
else:
stderr(response.stderr, 0)
stderr("Encryption failed")
sys.exit(1)
except Exception as e:
stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
sys.exit(1) | public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation | Below is the instruction that describes the task:
### Input:
public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation
### Response:
def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False):
"""public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
if armored:
if force_compress:
command_stub = self.command_maxcompress_armored
elif force_nocompress:
command_stub = self.command_nocompress_armored
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default_armored
else:
command_stub = self.command_nocompress_armored
else:
if force_compress:
command_stub = self.command_maxcompress
elif force_nocompress:
command_stub = self.command_nocompress
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default
else:
command_stub = self.command_nocompress
encrypted_outpath = self._create_outfilepath(inpath)
system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)
try:
response = muterun(system_command)
# check returned status code
if response.exitcode == 0:
stdout(encrypted_outpath + " was generated from " + inpath)
if checksum: # add a SHA256 hash digest of the encrypted file - requested by user --hash flag in command
from crypto.library import hash
encrypted_file_hash = hash.generate_hash(encrypted_outpath)
if len(encrypted_file_hash) == 64:
stdout("SHA256 hash digest for " + encrypted_outpath + " :")
stdout(encrypted_file_hash)
else:
stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
else:
stderr(response.stderr, 0)
stderr("Encryption failed")
sys.exit(1)
except Exception as e:
stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
sys.exit(1) |
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist) | Regenerate the entry_points console_scripts for the named distribution. | Below is the instruction that describes the task:
### Input:
Regenerate the entry_points console_scripts for the named distribution.
### Response:
def install_scripts(distributions):
"""
Regenerate the entry_points console_scripts for the named distribution.
"""
try:
from setuptools.command import easy_install
import pkg_resources
except ImportError:
raise RuntimeError("'wheel install_scripts' needs setuptools.")
for dist in distributions:
pkg_resources_dist = pkg_resources.get_distribution(dist)
install = wheel.paths.get_install_command(dist)
command = easy_install.easy_install(install.distribution)
command.args = ['wheel'] # dummy argument
command.finalize_options()
command.install_egg_scripts(pkg_resources_dist) |
def run(command, parser, cl_args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
configcommand = cl_args.get('configcommand', None)
if configcommand == 'set':
return _set(cl_args)
elif configcommand == 'unset':
return _unset(cl_args)
else:
return _list(cl_args) | :param command:
:param parser:
:param args:
:param unknown_args:
:return: | Below is the instruction that describes the task:
### Input:
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
### Response:
def run(command, parser, cl_args, unknown_args):
'''
:param command:
:param parser:
:param args:
:param unknown_args:
:return:
'''
configcommand = cl_args.get('configcommand', None)
if configcommand == 'set':
return _set(cl_args)
elif configcommand == 'unset':
return _unset(cl_args)
else:
return _list(cl_args) |
def load_building_sample_data(bd):
"""
Sample data for the Building object
:param bd:
:return:
"""
number_of_storeys = 6
interstorey_height = 3.4 # m
masses = 40.0e3 # kg
bd.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
bd.floor_length = 18.0 # m
bd.floor_width = 16.0 # m
bd.storey_masses = masses * np.ones(number_of_storeys) | Sample data for the Building object
:param bd:
:return: | Below is the instruction that describes the task:
### Input:
Sample data for the Building object
:param bd:
:return:
### Response:
def load_building_sample_data(bd):
"""
Sample data for the Building object
:param bd:
:return:
"""
number_of_storeys = 6
interstorey_height = 3.4 # m
masses = 40.0e3 # kg
bd.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
bd.floor_length = 18.0 # m
bd.floor_width = 16.0 # m
bd.storey_masses = masses * np.ones(number_of_storeys) |
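A small sketch of what calling the load_building_sample_data sample produces; a types.SimpleNamespace stands in for the real Building object, which is an assumption for illustration only:
import numpy as np
from types import SimpleNamespace

bd = SimpleNamespace()  # stand-in for the real Building object
number_of_storeys = 6
bd.interstorey_heights = 3.4 * np.ones(number_of_storeys)  # m
bd.floor_length = 18.0  # m
bd.floor_width = 16.0  # m
bd.storey_masses = 40.0e3 * np.ones(number_of_storeys)  # kg

print(bd.interstorey_heights.sum())  # total height, 6 * 3.4 = 20.4 m
print(bd.storey_masses.sum())        # total mass, 240000.0 kg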
def speak(self):
'''
a function for the client to announce him or herself, depending
on the level specified. If you want your client to have additional
announced things here, then implement the class `_speak` for your
client.
'''
if self.quiet is False:
bot.info('[client|%s] [database|%s]' %(self.client_name,
self.database))
self._speak() | a function for the client to announce him or herself, depending
on the level specified. If you want your client to have additional
announced things here, then implement the class `_speak` for your
client. | Below is the instruction that describes the task:
### Input:
a function for the client to announce him or herself, depending
on the level specified. If you want your client to have additional
announced things here, then implement the class `_speak` for your
client.
### Response:
def speak(self):
'''
a function for the client to announce him or herself, depending
on the level specified. If you want your client to have additional
announced things here, then implement the class `_speak` for your
client.
'''
if self.quiet is False:
bot.info('[client|%s] [database|%s]' %(self.client_name,
self.database))
self._speak() |
def edit_miz( # noqa: C901
infile: str,
outfile: str = None,
metar: typing.Union[str, Metar] = None,
time: str = None,
min_wind: int = 0,
max_wind: int = 40
) -> str:
# noinspection SpellCheckingInspection
"""
Edit an opened MIZ file and sets the time and date and the weather
Args:
infile: source file
outfile: output file (will default to source file)
metar: metar string, ICAO or object to apply
time: time string to apply (YYYYMMDDHHMMSS)
min_wind: minimum wind
max_wind: maximum wind
Returns:
String containing error
"""
if outfile is None:
LOGGER.debug('editing in place: %s', infile)
outfile = infile
else:
LOGGER.debug('editing miz file: %s -> %s', infile, outfile)
mission_weather = mission_time = None
if metar:
error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar)
if error:
return error
mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)
if time:
try:
mission_time = MissionTime.from_string(time)
except ValueError:
return f'badly formatted time string: {time}'
if not mission_weather and not mission_time:
return 'nothing to do!'
with Miz(infile) as miz:
if mission_weather:
LOGGER.debug('applying MissionWeather')
if not mission_weather.apply_to_miz(miz):
return 'error while applying METAR to mission'
if mission_time:
LOGGER.debug('applying MissionTime')
if not mission_time.apply_to_miz(miz):
return 'error while setting time on mission'
try:
miz.zip(outfile)
return ''
except OSError:
return f'permission error: cannot edit "{outfile}"; maybe it is in use ?' | Edit an opened MIZ file and sets the time and date and the weather
Args:
infile: source file
outfile: output file (will default to source file)
metar: metar string, ICAO or object to apply
time: time string to apply (YYYYMMDDHHMMSS)
min_wind: minimum wind
max_wind: maximum wind
Returns:
String containing error | Below is the instruction that describes the task:
### Input:
Edit an opened MIZ file and sets the time and date and the weather
Args:
infile: source file
outfile: output file (will default to source file)
metar: metar string, ICAO or object to apply
time: time string to apply (YYYYMMDDHHMMSS)
min_wind: minimum wind
max_wind: maximum wind
Returns:
String containing error
### Response:
def edit_miz( # noqa: C901
infile: str,
outfile: str = None,
metar: typing.Union[str, Metar] = None,
time: str = None,
min_wind: int = 0,
max_wind: int = 40
) -> str:
# noinspection SpellCheckingInspection
"""
Edit an opened MIZ file and sets the time and date and the weather
Args:
infile: source file
outfile: output file (will default to source file)
metar: metar string, ICAO or object to apply
time: time string to apply (YYYYMMDDHHMMSS)
min_wind: minimum wind
max_wind: maximum wind
Returns:
String containing error
"""
if outfile is None:
LOGGER.debug('editing in place: %s', infile)
outfile = infile
else:
LOGGER.debug('editing miz file: %s -> %s', infile, outfile)
mission_weather = mission_time = None
if metar:
error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar)
if error:
return error
mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind)
if time:
try:
mission_time = MissionTime.from_string(time)
except ValueError:
return f'badly formatted time string: {time}'
if not mission_weather and not mission_time:
return 'nothing to do!'
with Miz(infile) as miz:
if mission_weather:
LOGGER.debug('applying MissionWeather')
if not mission_weather.apply_to_miz(miz):
return 'error while applying METAR to mission'
if mission_time:
LOGGER.debug('applying MissionTime')
if not mission_time.apply_to_miz(miz):
return 'error while setting time on mission'
try:
miz.zip(outfile)
return ''
except OSError:
return f'permission error: cannot edit "{outfile}"; maybe it is in use ?' |
def installProductOn(self, userstore):
"""
Creates an Installation in this user store for our collection
of powerups, and then install those powerups on the user's
store.
"""
def install():
i = Installation(store=userstore)
i.types = self.types
i.install()
userstore.transact(install) | Creates an Installation in this user store for our collection
of powerups, and then install those powerups on the user's
store. | Below is the instruction that describes the task:
### Input:
Creates an Installation in this user store for our collection
of powerups, and then install those powerups on the user's
store.
### Response:
def installProductOn(self, userstore):
"""
Creates an Installation in this user store for our collection
of powerups, and then install those powerups on the user's
store.
"""
def install():
i = Installation(store=userstore)
i.types = self.types
i.install()
userstore.transact(install) |
def runGetReference(self, id_):
"""
Runs a getReference request for the specified ID.
"""
compoundId = datamodel.ReferenceCompoundId.parse(id_)
referenceSet = self.getDataRepository().getReferenceSet(
compoundId.reference_set_id)
reference = referenceSet.getReference(id_)
return self.runGetRequest(reference) | Runs a getReference request for the specified ID. | Below is the instruction that describes the task:
### Input:
Runs a getReference request for the specified ID.
### Response:
def runGetReference(self, id_):
"""
Runs a getReference request for the specified ID.
"""
compoundId = datamodel.ReferenceCompoundId.parse(id_)
referenceSet = self.getDataRepository().getReferenceSet(
compoundId.reference_set_id)
reference = referenceSet.getReference(id_)
return self.runGetRequest(reference) |
def get_option_list(self, name, section=None, vars=None,
expect=None, separator=','):
"""Just like ``get_option`` but parse as a list using ``split``.
"""
val = self.get_option(name, section, vars, expect)
return val.split(separator) if val else [] | Just like ``get_option`` but parse as a list using ``split``. | Below is the instruction that describes the task:
### Input:
Just like ``get_option`` but parse as a list using ``split``.
### Response:
def get_option_list(self, name, section=None, vars=None,
expect=None, separator=','):
"""Just like ``get_option`` but parse as a list using ``split``.
"""
val = self.get_option(name, section, vars, expect)
return val.split(separator) if val else [] |
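The get_option_list sample just splits a comma-separated option string; a standalone illustration of that split behaviour, with made-up option values:
def split_option(val, separator=','):
    # mirrors the return expression of get_option_list above
    return val.split(separator) if val else []

print(split_option("alpha,beta,gamma"))  # ['alpha', 'beta', 'gamma']
print(split_option(""))                  # []
print(split_option(None))                # []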
def expand_zip(zip_fname,cwd=None):
"expand a zip"
unzip_path = '/usr/bin/unzip'
if not os.path.exists(unzip_path):
log.error('ERROR: {} does not exist'.format(unzip_path))
sys.exit(1)
args = [unzip_path, zip_fname]
# Does it have a top dir
res = subprocess.Popen(
[args[0], '-l', args[1]], cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
contents = []
for line in res.stdout.readlines()[3:-2]:
contents.append(line.split()[-1])
commonprefix = os.path.commonprefix(contents)
if not commonprefix:
extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4]))
args.extend(['-d', os.path.abspath(extdir)])
process_command(args, cwd=cwd) | expand a zip | Below is the instruction that describes the task:
### Input:
expand a zip
### Response:
def expand_zip(zip_fname,cwd=None):
"expand a zip"
unzip_path = '/usr/bin/unzip'
if not os.path.exists(unzip_path):
log.error('ERROR: {} does not exist'.format(unzip_path))
sys.exit(1)
args = [unzip_path, zip_fname]
# Does it have a top dir
res = subprocess.Popen(
[args[0], '-l', args[1]], cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
contents = []
for line in res.stdout.readlines()[3:-2]:
contents.append(line.split()[-1])
commonprefix = os.path.commonprefix(contents)
if not commonprefix:
extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4]))
args.extend(['-d', os.path.abspath(extdir)])
process_command(args, cwd=cwd) |
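The key decision in the expand_zip sample is whether the archive already has a single top-level directory; a standalone sketch of that commonprefix check, using hypothetical member listings:
import os

def needs_extraction_dir(member_names):
    # An empty common prefix means the zip would spill files into the working
    # directory, which is when expand_zip appends an explicit -d <dir> argument.
    return not os.path.commonprefix(member_names)

print(needs_extraction_dir(["pkg/", "pkg/a.py", "pkg/b.py"]))  # False
print(needs_extraction_dir(["a.py", "b.py", "README"]))        # True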
def setShowOptionsButton(self, state):
"""
Sets whether or not the option button is visible.
:param state | <bool>
"""
self._showOptionsButton = state
self._optionsButton.setVisible(state) | Sets whether or not the option button is visible.
:param state | <bool> | Below is the instruction that describes the task:
### Input:
Sets whether or not the option button is visible.
:param state | <bool>
### Response:
def setShowOptionsButton(self, state):
"""
Sets whether or not the option button is visible.
:param state | <bool>
"""
self._showOptionsButton = state
self._optionsButton.setVisible(state) |
def call():
"""Execute command line helper."""
args = get_arguments()
if args.debug:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.WARN
else:
log_level = logging.INFO
setup_logging(log_level)
lupusec = None
if not args.username or not args.password or not args.ip_address:
raise Exception("Please supply a username, password and ip.")
def _devicePrint(dev, append=''):
_LOGGER.info("%s%s", dev.desc, append)
try:
if args.username and args.password and args.ip_address:
lupusec = lupupy.Lupusec(ip_address=args.ip_address,
username=args.username,
password=args.password)
if args.arm:
if lupusec.get_alarm().set_away():
_LOGGER.info('Alarm mode changed to armed')
else:
_LOGGER.warning('Failed to change alarm mode to armed')
if args.disarm:
if lupusec.get_alarm().set_standby():
_LOGGER.info('Alarm mode changed to disarmed')
else:
_LOGGER.warning('Failed to change alarm mode to disarmed')
if args.home:
if lupusec.get_alarm().set_home():
_LOGGER.info('Alarm mode changed to home')
else:
_LOGGER.warning('Failed to change alarm mode to home')
if args.history:
_LOGGER.info(json.dumps(lupusec.get_history()['hisrows'], indent=4, sort_keys=True))
if args.status:
_LOGGER.info('Mode of panel: %s', lupusec.get_alarm().mode)
if args.devices:
for device in lupusec.get_devices():
_devicePrint(device)
except lupupy.LupusecException as exc:
_LOGGER.error(exc)
finally:
_LOGGER.info('--Finished running--') | Execute command line helper. | Below is the instruction that describes the task:
### Input:
Execute command line helper.
### Response:
def call():
"""Execute command line helper."""
args = get_arguments()
if args.debug:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.WARN
else:
log_level = logging.INFO
setup_logging(log_level)
lupusec = None
if not args.username or not args.password or not args.ip_address:
raise Exception("Please supply a username, password and ip.")
def _devicePrint(dev, append=''):
_LOGGER.info("%s%s", dev.desc, append)
try:
if args.username and args.password and args.ip_address:
lupusec = lupupy.Lupusec(ip_address=args.ip_address,
username=args.username,
password=args.password)
if args.arm:
if lupusec.get_alarm().set_away():
_LOGGER.info('Alarm mode changed to armed')
else:
_LOGGER.warning('Failed to change alarm mode to armed')
if args.disarm:
if lupusec.get_alarm().set_standby():
_LOGGER.info('Alarm mode changed to disarmed')
else:
_LOGGER.warning('Failed to change alarm mode to disarmed')
if args.home:
if lupusec.get_alarm().set_home():
_LOGGER.info('Alarm mode changed to home')
else:
_LOGGER.warning('Failed to change alarm mode to home')
if args.history:
_LOGGER.info(json.dumps(lupusec.get_history()['hisrows'], indent=4, sort_keys=True))
if args.status:
_LOGGER.info('Mode of panel: %s', lupusec.get_alarm().mode)
if args.devices:
for device in lupusec.get_devices():
_devicePrint(device)
except lupupy.LupusecException as exc:
_LOGGER.error(exc)
finally:
_LOGGER.info('--Finished running--') |
def facets(self, *args, **kwargs):
"""
Returns a dictionary with the requested facets.
The facets function supports string args, and keyword
args.
q.facets('field_1', 'field_2') will return facets for
field_1 and field_2.
q.facets(field_1={'limit': 0}, field_2={'limit': 10})
will return all facets for field_1 and 10 facets for field_2.
"""
# Combine args and kwargs into facet format.
facets = dict((a, {}) for a in args)
facets.update(kwargs)
if not facets:
raise AttributeError('Faceting requires at least one field')
for f in facets.keys():
if not isinstance(f, six.string_types):
raise AttributeError('Facet field arguments must be strings')
q = self._clone()
q._limit = 0
q.execute(offset=0, facets=facets)
return q._response.get('facets') | Returns a dictionary with the requested facets.
The facets function supports string args, and keyword
args.
q.facets('field_1', 'field_2') will return facets for
field_1 and field_2.
q.facets(field_1={'limit': 0}, field_2={'limit': 10})
will return all facets for field_1 and 10 facets for field_2. | Below is the instruction that describes the task:
### Input:
Returns a dictionary with the requested facets.
The facets function supports string args, and keyword
args.
q.facets('field_1', 'field_2') will return facets for
field_1 and field_2.
q.facets(field_1={'limit': 0}, field_2={'limit': 10})
will return all facets for field_1 and 10 facets for field_2.
### Response:
def facets(self, *args, **kwargs):
"""
Returns a dictionary with the requested facets.
The facets function supports string args, and keyword
args.
q.facets('field_1', 'field_2') will return facets for
field_1 and field_2.
q.facets(field_1={'limit': 0}, field_2={'limit': 10})
will return all facets for field_1 and 10 facets for field_2.
"""
# Combine args and kwargs into facet format.
facets = dict((a, {}) for a in args)
facets.update(kwargs)
if not facets:
raise AttributeError('Faceting requires at least one field')
for f in facets.keys():
if not isinstance(f, six.string_types):
raise AttributeError('Facet field arguments must be strings')
q = self._clone()
q._limit = 0
q.execute(offset=0, facets=facets)
return q._response.get('facets') |
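A small sketch of how the facets sample merges its positional and keyword arguments into a single facet specification; the field names here are made up:
def build_facets(*args, **kwargs):
    # mirrors the merge step at the top of facets() above
    facets = dict((a, {}) for a in args)
    facets.update(kwargs)
    return facets

print(build_facets('field_1', field_2={'limit': 10}))
# {'field_1': {}, 'field_2': {'limit': 10}}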
def addReward(self, r=None):
""" A filtered mapping towards performAction of the underlying
environment.
"""
r = self.getReward() if r is None else r
# by default, the cumulative reward is just the sum over the episode
if self.discount:
self.cumulativeReward += power(self.discount, self.samples) * r
else:
self.cumulativeReward += r | A filtered mapping towards performAction of the underlying
environment. | Below is the instruction that describes the task:
### Input:
A filtered mapping towards performAction of the underlying
environment.
### Response:
def addReward(self, r=None):
""" A filtered mapping towards performAction of the underlying
environment.
"""
r = self.getReward() if r is None else r
# by default, the cumulative reward is just the sum over the episode
if self.discount:
self.cumulativeReward += power(self.discount, self.samples) * r
else:
self.cumulativeReward += r |
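The addReward sample accumulates either a plain or a discounted sum of rewards; a self-contained sketch of the discounted case, with a made-up discount factor and reward stream:
# Discounted return: R = sum over t of discount**t * r_t,
# matching the power(self.discount, self.samples) term above.
discount = 0.9
rewards = [1.0, 1.0, 1.0]

cumulative = 0.0
for t, r in enumerate(rewards):
    cumulative += discount ** t * r

print(cumulative)  # ~2.71 (1 + 0.9 + 0.81)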
def TableDescriptionParser(table_description, depth=0):
"""Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
"""
# For the recursion step, we check for a scalar object (string or tuple)
if isinstance(table_description, (six.string_types, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col["depth"] = depth
parsed_col["container"] = "scalar"
return [parsed_col]
# Since it is not scalar, table_description must be iterable.
if not hasattr(table_description, "__iter__"):
raise DataTableException("Expected an iterable object, got %s" %
type(table_description))
if not isinstance(table_description, dict):
# We expect a non-dictionary iterable item.
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col["depth"] = depth
parsed_col["container"] = "iter"
columns.append(parsed_col)
if not columns:
raise DataTableException("Description iterable objects should not"
" be empty.")
return columns
# The other case is a dictionary
if not table_description:
raise DataTableException("Empty dictionaries are not allowed inside"
" description")
# To differentiate between the two cases of more levels below or this is
# the most inner dictionary, we consider the number of keys (more than one
# key is indication for most inner dictionary) and the type of the key and
# value in case of only 1 key (if the type of key is string and the type of
# the value is a tuple of 0-3 items, we assume this is the most inner
# dictionary).
# NOTE: this way of differentiating might create ambiguity. See docs.
if (len(table_description) != 1 or
(isinstance(next(six.iterkeys(table_description)), six.string_types) and
isinstance(next(six.itervalues(table_description)), tuple) and
len(next(six.itervalues(table_description))) < 4)):
# This is the most inner dictionary. Parsing types.
columns = []
# We sort the items, equivalent to sort the keys since they are unique
for key, value in sorted(table_description.items()):
# We parse the column type as (key, type) or (key, type, label) using
# ColumnTypeParser.
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
columns.append(parsed_col)
return columns
# This is an outer dictionary, must have at most one key.
parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
return ([parsed_col] + DataTable.TableDescriptionParser(
sorted(table_description.values())[0], depth=depth + 1)) | Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field. | Below is the instruction that describes the task:
### Input:
Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
### Response:
def TableDescriptionParser(table_description, depth=0):
"""Parses the table_description object for internal use.
Parses the user-submitted table description into an internal format used
by the Python DataTable class. Returns the flat list of parsed columns.
Args:
table_description: A description of the table which should comply
with one of the formats described below.
depth: Optional. The depth of the first level in the current description.
Used by recursive calls to this function.
Returns:
List of columns, where each column represented by a dictionary with the
keys: id, label, type, depth, container which means the following:
- id: the id of the column
- name: The name of the column
- type: The datatype of the elements in this column. Allowed types are
described in ColumnTypeParser().
- depth: The depth of this column in the table description
- container: 'dict', 'iter' or 'scalar' for parsing the format easily.
- custom_properties: The custom properties for this column.
The returned description is flattened regardless of how it was given.
Raises:
DataTableException: Error in a column description or in the description
structure.
Examples:
A column description can be of the following forms:
'id'
('id',)
('id', 'type')
('id', 'type', 'label')
('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
or as a dictionary:
'id': 'type'
'id': ('type',)
'id': ('type', 'label')
'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
If the type is not specified, we treat it as string.
If no specific label is given, the label is simply the id.
If no custom properties are given, we use an empty dictionary.
input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
output: [{'id': 'a', 'label': 'a', 'type': 'date',
'depth': 0, 'container': 'iter', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'timeofday',
'depth': 0, 'container': 'iter',
'custom_properties': {'foo': 'bar'}}]
input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
output: [{'id': 'a', 'label': 'a', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'iter', 'custom_properties': {}},
{'id': 'c', 'label': 'column c', 'type': 'string',
'depth': 1, 'container': 'iter', 'custom_properties': {}}]
input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
output: [{'id': 'a', 'label': 'column a', 'type': 'number',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'b', 'type': 'number',
'depth': 1, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'c', 'type': 'string',
'depth': 1, 'container': 'dict', 'custom_properties': {}}]
input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
output: [{'id': 'w', 'label': 'word', 'type': 'string',
'depth': 0, 'container': 'dict', 'custom_properties': {}},
{'id': 'c', 'label': 'count', 'type': 'number',
'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
'container': 'dict', 'custom_properties': {}},
{'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
'container': 'dict', 'custom_properties': {}}
NOTE: there might be ambiguity in the case of a dictionary representation
of a single column. For example, the following description can be parsed
in 2 different ways: {'a': ('b', 'c')} can be thought of a single column
with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
'a', and the other named 'b' of type 'c'. We choose the first option by
default, and in case the second option is the right one, it is possible to
make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
-- second 'b' is the label, and {} is the custom properties field.
"""
# For the recursion step, we check for a scalar object (string or tuple)
if isinstance(table_description, (six.string_types, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col["depth"] = depth
parsed_col["container"] = "scalar"
return [parsed_col]
# Since it is not scalar, table_description must be iterable.
if not hasattr(table_description, "__iter__"):
raise DataTableException("Expected an iterable object, got %s" %
type(table_description))
if not isinstance(table_description, dict):
# We expect a non-dictionary iterable item.
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col["depth"] = depth
parsed_col["container"] = "iter"
columns.append(parsed_col)
if not columns:
raise DataTableException("Description iterable objects should not"
" be empty.")
return columns
# The other case is a dictionary
if not table_description:
raise DataTableException("Empty dictionaries are not allowed inside"
" description")
# To differentiate between the two cases of more levels below or this is
# the most inner dictionary, we consider the number of keys (more than one
# key is indication for most inner dictionary) and the type of the key and
# value in case of only 1 key (if the type of key is string and the type of
# the value is a tuple of 0-3 items, we assume this is the most inner
# dictionary).
# NOTE: this way of differentiating might create ambiguity. See docs.
if (len(table_description) != 1 or
(isinstance(next(six.iterkeys(table_description)), six.string_types) and
isinstance(next(six.itervalues(table_description)), tuple) and
len(next(six.itervalues(table_description))) < 4)):
# This is the most inner dictionary. Parsing types.
columns = []
# We sort the items, equivalent to sort the keys since they are unique
for key, value in sorted(table_description.items()):
# We parse the column type as (key, type) or (key, type, label) using
# ColumnTypeParser.
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
columns.append(parsed_col)
return columns
# This is an outer dictionary, must have at most one key.
parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
parsed_col["depth"] = depth
parsed_col["container"] = "dict"
return ([parsed_col] + DataTable.TableDescriptionParser(
sorted(table_description.values())[0], depth=depth + 1)) |
def apps_installations_job_status_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/apps#get-requirements-install-status"
api_path = "/api/v2/apps/installations/job_statuses/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/apps#get-requirements-install-status | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/apps#get-requirements-install-status
### Response:
def apps_installations_job_status_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/apps#get-requirements-install-status"
api_path = "/api/v2/apps/installations/job_statuses/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def get_or_instantiate_db_state(working_dir):
"""
Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error
"""
# instantiates
new_db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
BlockstackDB.release_readwrite_instance(new_db, -1)
return get_db_state(working_dir) | Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error | Below is the instruction that describes the task:
### Input:
Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error
### Response:
def get_or_instantiate_db_state(working_dir):
"""
Get a read-only handle to the DB.
Instantiate it first if it doesn't exist.
DO NOT CALL WHILE INDEXING
Returns the handle on success
Raises on error
"""
# instantiates
new_db = BlockstackDB.borrow_readwrite_instance(working_dir, -1)
BlockstackDB.release_readwrite_instance(new_db, -1)
return get_db_state(working_dir) |
def _validate_player(player):
'''
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.
:param int player: player to be validated
:return: None
:raises NoSuchPlayerException: if the player is invalid
'''
valid_players = range(4)
if player not in valid_players:
valid_players = ', '.join(str(p) for p in valid_players)
raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players'
' are: {}'.format(player, valid_players)) | Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.
:param int player: player to be validated
:return: None
:raises NoSuchPlayerException: if the player is invalid | Below is the instruction that describes the task:
### Input:
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.
:param int player: player to be validated
:return: None
:raises NoSuchPlayerException: if the player is invalid
### Response:
def _validate_player(player):
'''
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3.
:param int player: player to be validated
:return: None
:raises NoSuchPlayerException: if the player is invalid
'''
valid_players = range(4)
if player not in valid_players:
valid_players = ', '.join(str(p) for p in valid_players)
raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players'
' are: {}'.format(player, valid_players)) |
def dependency_sort(dependency_tree):
"""
Sorts items 'dependencies first' in a given dependency tree.
    A dependency tree is a dictionary mapping an object to a collection of its
dependency objects.
Result is a properly sorted list of items, where each item is a 2-tuple
containing an object and its dependency list, as given in the input
dependency tree.
If B is directly or indirectly dependent on A and they are not both a part
of the same dependency cycle (i.e. then A is neither directly nor
indirectly dependent on B) then A needs to come before B.
If A and B are a part of the same dependency cycle, i.e. if they are both
directly or indirectly dependent on each other, then it does not matter
which comes first.
Any entries found listed as dependencies, but that do not have their own
dependencies listed as well, are logged & ignored.
@return: The sorted items.
@rtype: list
"""
sorted = []
processed = set()
for key, deps in dependency_tree.iteritems():
_sort_r(sorted, processed, key, deps, dependency_tree)
return sorted | Sorts items 'dependencies first' in a given dependency tree.
A dependency tree is a dictionary mapping an object to a collection of its
dependency objects.
Result is a properly sorted list of items, where each item is a 2-tuple
containing an object and its dependency list, as given in the input
dependency tree.
If B is directly or indirectly dependent on A and they are not both a part
of the same dependency cycle (i.e. then A is neither directly nor
indirectly dependent on B) then A needs to come before B.
If A and B are a part of the same dependency cycle, i.e. if they are both
directly or indirectly dependent on each other, then it does not matter
which comes first.
Any entries found listed as dependencies, but that do not have their own
dependencies listed as well, are logged & ignored.
@return: The sorted items.
@rtype: list | Below is the the instruction that describes the task:
### Input:
Sorts items 'dependencies first' in a given dependency tree.
A dependency tree is a dictionary mapping an object to a collection of its
dependency objects.
Result is a properly sorted list of items, where each item is a 2-tuple
containing an object and its dependency list, as given in the input
dependency tree.
If B is directly or indirectly dependent on A and they are not both a part
of the same dependency cycle (i.e. then A is neither directly nor
indirectly dependent on B) then A needs to come before B.
If A and B are a part of the same dependency cycle, i.e. if they are both
directly or indirectly dependent on each other, then it does not matter
which comes first.
Any entries found listed as dependencies, but that do not have their own
dependencies listed as well, are logged & ignored.
@return: The sorted items.
@rtype: list
### Response:
def dependency_sort(dependency_tree):
"""
Sorts items 'dependencies first' in a given dependency tree.
    A dependency tree is a dictionary mapping an object to a collection of its
dependency objects.
Result is a properly sorted list of items, where each item is a 2-tuple
containing an object and its dependency list, as given in the input
dependency tree.
If B is directly or indirectly dependent on A and they are not both a part
of the same dependency cycle (i.e. then A is neither directly nor
indirectly dependent on B) then A needs to come before B.
If A and B are a part of the same dependency cycle, i.e. if they are both
directly or indirectly dependent on each other, then it does not matter
which comes first.
Any entries found listed as dependencies, but that do not have their own
dependencies listed as well, are logged & ignored.
@return: The sorted items.
@rtype: list
"""
sorted = []
processed = set()
for key, deps in dependency_tree.iteritems():
_sort_r(sorted, processed, key, deps, dependency_tree)
return sorted |
def check_shastore_version(from_store, settings):
"""
This function gives us the option to emit errors or warnings
after sake upgrades
"""
sprint = settings["sprint"]
error = settings["error"]
sprint("checking .shastore version for potential incompatibilities",
level="verbose")
if not from_store or 'sake version' not in from_store:
errmes = ["Since you've used this project last, a new version of ",
"sake was installed that introduced backwards incompatible",
" changes. Run 'sake clean', and rebuild before continuing\n"]
errmes = " ".join(errmes)
error(errmes)
sys.exit(1) | This function gives us the option to emit errors or warnings
after sake upgrades | Below is the the instruction that describes the task:
### Input:
This function gives us the option to emit errors or warnings
after sake upgrades
### Response:
def check_shastore_version(from_store, settings):
"""
This function gives us the option to emit errors or warnings
after sake upgrades
"""
sprint = settings["sprint"]
error = settings["error"]
sprint("checking .shastore version for potential incompatibilities",
level="verbose")
if not from_store or 'sake version' not in from_store:
errmes = ["Since you've used this project last, a new version of ",
"sake was installed that introduced backwards incompatible",
" changes. Run 'sake clean', and rebuild before continuing\n"]
errmes = " ".join(errmes)
error(errmes)
sys.exit(1) |
def cmd(send, msg, args):
"""Checks if a website is up.
Syntax: {command} <website>
"""
if not msg:
send("What are you trying to get to?")
return
nick = args['nick']
isup = get("http://isup.me/%s" % msg).text
if "looks down from here" in isup:
send("%s: %s is down" % (nick, msg))
elif "like a site on the interwho" in isup:
send("%s: %s is not a valid url" % (nick, msg))
else:
send("%s: %s is up" % (nick, msg)) | Checks if a website is up.
Syntax: {command} <website> | Below is the the instruction that describes the task:
### Input:
Checks if a website is up.
Syntax: {command} <website>
### Response:
def cmd(send, msg, args):
"""Checks if a website is up.
Syntax: {command} <website>
"""
if not msg:
send("What are you trying to get to?")
return
nick = args['nick']
isup = get("http://isup.me/%s" % msg).text
if "looks down from here" in isup:
send("%s: %s is down" % (nick, msg))
elif "like a site on the interwho" in isup:
send("%s: %s is not a valid url" % (nick, msg))
else:
send("%s: %s is up" % (nick, msg)) |
def __mesh_coords(ax_type, coords, n, **kwargs):
'''Compute axis coordinates'''
if coords is not None:
if len(coords) < n:
raise ParameterError('Coordinate shape mismatch: '
'{}<{}'.format(len(coords), n))
return coords
coord_map = {'linear': __coord_fft_hz,
'hz': __coord_fft_hz,
'log': __coord_fft_hz,
'mel': __coord_mel_hz,
'cqt': __coord_cqt_hz,
'cqt_hz': __coord_cqt_hz,
'cqt_note': __coord_cqt_hz,
'chroma': __coord_chroma,
'time': __coord_time,
's': __coord_time,
'ms': __coord_time,
'lag': __coord_time,
'lag_s': __coord_time,
'lag_ms': __coord_time,
'tonnetz': __coord_n,
'off': __coord_n,
'tempo': __coord_tempo,
'frames': __coord_n,
None: __coord_n}
if ax_type not in coord_map:
raise ParameterError('Unknown axis type: {}'.format(ax_type))
return coord_map[ax_type](n, **kwargs) | Compute axis coordinates | Below is the the instruction that describes the task:
### Input:
Compute axis coordinates
### Response:
def __mesh_coords(ax_type, coords, n, **kwargs):
'''Compute axis coordinates'''
if coords is not None:
if len(coords) < n:
raise ParameterError('Coordinate shape mismatch: '
'{}<{}'.format(len(coords), n))
return coords
coord_map = {'linear': __coord_fft_hz,
'hz': __coord_fft_hz,
'log': __coord_fft_hz,
'mel': __coord_mel_hz,
'cqt': __coord_cqt_hz,
'cqt_hz': __coord_cqt_hz,
'cqt_note': __coord_cqt_hz,
'chroma': __coord_chroma,
'time': __coord_time,
's': __coord_time,
'ms': __coord_time,
'lag': __coord_time,
'lag_s': __coord_time,
'lag_ms': __coord_time,
'tonnetz': __coord_n,
'off': __coord_n,
'tempo': __coord_tempo,
'frames': __coord_n,
None: __coord_n}
if ax_type not in coord_map:
raise ParameterError('Unknown axis type: {}'.format(ax_type))
return coord_map[ax_type](n, **kwargs) |
def compute_efficiency(f_dist, m_dist, dbins):
'''
Compute the efficiency as a function of distance for the given sets of found
and missed injection distances.
Note that injections that do not fit into any dbin get lost :(
'''
efficiency = numpy.zeros(len(dbins) - 1)
error = numpy.zeros(len(dbins) - 1)
for j, dlow in enumerate(dbins[:-1]):
dhigh = dbins[j + 1]
found = numpy.sum((dlow <= f_dist) * (f_dist < dhigh))
missed = numpy.sum((dlow <= m_dist) * (m_dist < dhigh))
if found+missed == 0:
# avoid divide by 0 in empty bins
missed = 1.
efficiency[j] = float(found) / (found + missed)
error[j] = numpy.sqrt(efficiency[j] * (1 - efficiency[j]) /
(found + missed))
return efficiency, error | Compute the efficiency as a function of distance for the given sets of found
and missed injection distances.
Note that injections that do not fit into any dbin get lost :( | Below is the the instruction that describes the task:
### Input:
Compute the efficiency as a function of distance for the given sets of found
and missed injection distances.
Note that injections that do not fit into any dbin get lost :(
### Response:
def compute_efficiency(f_dist, m_dist, dbins):
'''
Compute the efficiency as a function of distance for the given sets of found
and missed injection distances.
Note that injections that do not fit into any dbin get lost :(
'''
efficiency = numpy.zeros(len(dbins) - 1)
error = numpy.zeros(len(dbins) - 1)
for j, dlow in enumerate(dbins[:-1]):
dhigh = dbins[j + 1]
found = numpy.sum((dlow <= f_dist) * (f_dist < dhigh))
missed = numpy.sum((dlow <= m_dist) * (m_dist < dhigh))
if found+missed == 0:
# avoid divide by 0 in empty bins
missed = 1.
efficiency[j] = float(found) / (found + missed)
error[j] = numpy.sqrt(efficiency[j] * (1 - efficiency[j]) /
(found + missed))
return efficiency, error |
def recv_exit_status(self, command, timeout=10, get_pty=False):
"""
Execute a command and get its return value
@param command: command to execute
@type command: str
@param timeout: command execution timeout
@type timeout: int
@param get_pty: get pty
@type get_pty: bool
@return: the exit code of the process or None in case of timeout
@rtype: int or None
"""
status = None
self.last_command = command
stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty)
if stdout and stderr and stdin:
for _ in range(timeout):
if stdout.channel.exit_status_ready():
status = stdout.channel.recv_exit_status()
break
time.sleep(1)
self.last_stdout = stdout.read()
self.last_stderr = stderr.read()
stdin.close()
stdout.close()
stderr.close()
return status | Execute a command and get its return value
@param command: command to execute
@type command: str
@param timeout: command execution timeout
@type timeout: int
@param get_pty: get pty
@type get_pty: bool
@return: the exit code of the process or None in case of timeout
@rtype: int or None | Below is the the instruction that describes the task:
### Input:
Execute a command and get its return value
@param command: command to execute
@type command: str
@param timeout: command execution timeout
@type timeout: int
@param get_pty: get pty
@type get_pty: bool
@return: the exit code of the process or None in case of timeout
@rtype: int or None
### Response:
def recv_exit_status(self, command, timeout=10, get_pty=False):
"""
Execute a command and get its return value
@param command: command to execute
@type command: str
@param timeout: command execution timeout
@type timeout: int
@param get_pty: get pty
@type get_pty: bool
@return: the exit code of the process or None in case of timeout
@rtype: int or None
"""
status = None
self.last_command = command
stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty)
if stdout and stderr and stdin:
for _ in range(timeout):
if stdout.channel.exit_status_ready():
status = stdout.channel.recv_exit_status()
break
time.sleep(1)
self.last_stdout = stdout.read()
self.last_stderr = stderr.read()
stdin.close()
stdout.close()
stderr.close()
return status |
def get_alias(self):
"""
Gets the alias for the field or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:return: The field alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
if self.table and self.table.prefix_fields:
field_prefix = self.table.get_field_prefix()
if alias:
alias = '{0}__{1}'.format(field_prefix, alias)
else:
alias = '{0}__{1}'.format(field_prefix, self.name)
return alias | Gets the alias for the field or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:return: The field alias, auto_alias, or None
:rtype: str or None | Below is the the instruction that describes the task:
### Input:
Gets the alias for the field or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:return: The field alias, auto_alias, or None
:rtype: str or None
### Response:
def get_alias(self):
"""
Gets the alias for the field or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:return: The field alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
if self.table and self.table.prefix_fields:
field_prefix = self.table.get_field_prefix()
if alias:
alias = '{0}__{1}'.format(field_prefix, alias)
else:
alias = '{0}__{1}'.format(field_prefix, self.name)
return alias |
def sort_item(iterable, number, reverse=False):
"""Sort the itertable according to the given number item."""
    return sorted(iterable, key=itemgetter(number), reverse=reverse) | Sort the iterable according to the given number item. | Below is the instruction that describes the task:
### Input:
Sort the iterable according to the given number item.
### Response:
def sort_item(iterable, number, reverse=False):
"""Sort the itertable according to the given number item."""
return sorted(iterable, key=itemgetter(number), reverse=reverse) |
def get_custom_renderer(self, dir_path, layout=True):
"""
Create a template renderer on templates in the directory specified, and returns it.
:param dir_path: the path to the template dir. If it is not absolute, it will be taken from the root of the inginious package.
:param layout: can either be True (use the base layout of the running app), False (use no layout at all), or the path to the layout to use.
If this path is relative, it is taken from the INGInious package root.
"""
        # if dir_path/base is an absolute path, os.path.join(something, an_absolute_path) returns an_absolute_path.
root_path = inginious.get_root_path()
if isinstance(layout, str):
layout_path = os.path.join(root_path, layout)
elif layout is True:
layout_path = os.path.join(root_path, self._layout)
else:
layout_path = None
return web.template.render(os.path.join(root_path, dir_path),
globals=self._template_globals,
base=layout_path) | Create a template renderer on templates in the directory specified, and returns it.
:param dir_path: the path to the template dir. If it is not absolute, it will be taken from the root of the inginious package.
:param layout: can either be True (use the base layout of the running app), False (use no layout at all), or the path to the layout to use.
If this path is relative, it is taken from the INGInious package root. | Below is the the instruction that describes the task:
### Input:
Create a template renderer on templates in the directory specified, and returns it.
:param dir_path: the path to the template dir. If it is not absolute, it will be taken from the root of the inginious package.
:param layout: can either be True (use the base layout of the running app), False (use no layout at all), or the path to the layout to use.
If this path is relative, it is taken from the INGInious package root.
### Response:
def get_custom_renderer(self, dir_path, layout=True):
"""
Create a template renderer on templates in the directory specified, and returns it.
:param dir_path: the path to the template dir. If it is not absolute, it will be taken from the root of the inginious package.
:param layout: can either be True (use the base layout of the running app), False (use no layout at all), or the path to the layout to use.
If this path is relative, it is taken from the INGInious package root.
"""
        # if dir_path/base is an absolute path, os.path.join(something, an_absolute_path) returns an_absolute_path.
root_path = inginious.get_root_path()
if isinstance(layout, str):
layout_path = os.path.join(root_path, layout)
elif layout is True:
layout_path = os.path.join(root_path, self._layout)
else:
layout_path = None
return web.template.render(os.path.join(root_path, dir_path),
globals=self._template_globals,
base=layout_path) |
def try_serialize_handler(handler):
"""Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
"""
if (isinstance(handler, types.InstanceType) or # old style class
(isinstance(handler, object) and # new style class
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None | Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None. | Below is the the instruction that describes the task:
### Input:
Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
### Response:
def try_serialize_handler(handler):
"""Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
"""
if (isinstance(handler, types.InstanceType) or # old style class
(isinstance(handler, object) and # new style class
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None |
def static_singleton(*args, **kwargs):
"""
STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop'
"""
def __static_singleton_wrapper(cls):
if cls not in __singleton_instances:
__singleton_instances[cls] = cls(*args, **kwargs)
return __singleton_instances[cls]
return __static_singleton_wrapper | STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop' | Below is the the instruction that describes the task:
### Input:
STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop'
### Response:
def static_singleton(*args, **kwargs):
"""
STATIC Singleton Design Pattern Decorator
Class is initialized with arguments passed into the decorator.
:Usage:
>>> @static_singleton('yop')
class Bob(Person):
def __init__(arg1):
self.info = arg1
def says(self):
print self.info
b1 = Bob #note that we call it by the name of the class, no instance created here, kind of static linking to an instance
b2 = Bob #here b1 is the same object as b2
Bob.says() # it will display 'yop'
"""
def __static_singleton_wrapper(cls):
if cls not in __singleton_instances:
__singleton_instances[cls] = cls(*args, **kwargs)
return __singleton_instances[cls]
return __static_singleton_wrapper |
def GetActiveComposition(self) -> TextRange:
"""
Call IUIAutomationTextEditPattern::GetActiveComposition.
Return `TextRange` or None, the active composition.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition
"""
textRange = self.pattern.GetActiveComposition()
if textRange:
return TextRange(textRange=textRange) | Call IUIAutomationTextEditPattern::GetActiveComposition.
Return `TextRange` or None, the active composition.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition | Below is the the instruction that describes the task:
### Input:
Call IUIAutomationTextEditPattern::GetActiveComposition.
Return `TextRange` or None, the active composition.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition
### Response:
def GetActiveComposition(self) -> TextRange:
"""
Call IUIAutomationTextEditPattern::GetActiveComposition.
Return `TextRange` or None, the active composition.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition
"""
textRange = self.pattern.GetActiveComposition()
if textRange:
return TextRange(textRange=textRange) |
def iter_conditions(condition):
"""Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found)."""
conditions = list()
visited = set()
# Has to be split out, since we don't want to visit the root (for cyclic conditions)
# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions
if condition.operation in {"and", "or"}:
conditions.extend(reversed(condition.values))
elif condition.operation == "not":
conditions.append(condition.values[0])
else:
conditions.append(condition)
while conditions:
condition = conditions.pop()
if condition in visited:
continue
visited.add(condition)
yield condition
if condition.operation in {"and", "or", "not"}:
conditions.extend(reversed(condition.values)) | Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found). | Below is the the instruction that describes the task:
### Input:
Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).
### Response:
def iter_conditions(condition):
"""Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found)."""
conditions = list()
visited = set()
# Has to be split out, since we don't want to visit the root (for cyclic conditions)
# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions
if condition.operation in {"and", "or"}:
conditions.extend(reversed(condition.values))
elif condition.operation == "not":
conditions.append(condition.values[0])
else:
conditions.append(condition)
while conditions:
condition = conditions.pop()
if condition in visited:
continue
visited.add(condition)
yield condition
if condition.operation in {"and", "or", "not"}:
conditions.extend(reversed(condition.values)) |
def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name
model: Model,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
self.add_train_scalar("parameter_mean/" + name, param.data.mean())
self.add_train_scalar("parameter_std/" + name, param.data.std())
if param.grad is not None:
if param.grad.is_sparse:
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm) | Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm. | Below is the the instruction that describes the task:
### Input:
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
### Response:
def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name
model: Model,
batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
self.add_train_scalar("parameter_mean/" + name, param.data.mean())
self.add_train_scalar("parameter_std/" + name, param.data.std())
if param.grad is not None:
if param.grad.is_sparse:
# pylint: disable=protected-access
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm) |
def get(self, url, instance, service_check_tags, run_check=False):
"Hit a given URL and return the parsed json"
self.log.debug('Fetching CouchDB stats at url: %s' % url)
auth = None
if 'user' in instance and 'password' in instance:
auth = (instance['user'], instance['password'])
# Override Accept request header so that failures are not redirected to the Futon web-ui
request_headers = headers(self.agentConfig)
request_headers['Accept'] = 'text/json'
try:
r = requests.get(
url, auth=auth, headers=request_headers, timeout=int(instance.get('timeout', self.TIMEOUT))
)
r.raise_for_status()
if run_check:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags,
message='Connection to %s was successful' % url,
)
except requests.exceptions.Timeout as e:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {0}, {1}".format(url, e),
)
raise
except requests.exceptions.HTTPError as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
return r.json() | Hit a given URL and return the parsed json | Below is the the instruction that describes the task:
### Input:
Hit a given URL and return the parsed json
### Response:
def get(self, url, instance, service_check_tags, run_check=False):
"Hit a given URL and return the parsed json"
self.log.debug('Fetching CouchDB stats at url: %s' % url)
auth = None
if 'user' in instance and 'password' in instance:
auth = (instance['user'], instance['password'])
# Override Accept request header so that failures are not redirected to the Futon web-ui
request_headers = headers(self.agentConfig)
request_headers['Accept'] = 'text/json'
try:
r = requests.get(
url, auth=auth, headers=request_headers, timeout=int(instance.get('timeout', self.TIMEOUT))
)
r.raise_for_status()
if run_check:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags,
message='Connection to %s was successful' % url,
)
except requests.exceptions.Timeout as e:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {0}, {1}".format(url, e),
)
raise
except requests.exceptions.HTTPError as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
return r.json() |
def role_show(self, role_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/roles#get-role"
api_path = "/api/v2/roles/{role_id}"
api_path = api_path.format(role_id=role_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/roles#get-role | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/roles#get-role
### Response:
def role_show(self, role_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/roles#get-role"
api_path = "/api/v2/roles/{role_id}"
api_path = api_path.format(role_id=role_id)
return self.call(api_path, **kwargs) |
def rotation_coefs(self):
""" get the rotation coefficents in radians
Returns
-------
rotation_coefs : list
the rotation coefficients implied by Vario2d.bearing
"""
return [np.cos(self.bearing_rads),
np.sin(self.bearing_rads),
-1.0*np.sin(self.bearing_rads),
                np.cos(self.bearing_rads)] | get the rotation coefficients in radians
Returns
-------
rotation_coefs : list
the rotation coefficients implied by Vario2d.bearing | Below is the the instruction that describes the task:
### Input:
get the rotation coefficients in radians
Returns
-------
rotation_coefs : list
the rotation coefficients implied by Vario2d.bearing
### Response:
def rotation_coefs(self):
""" get the rotation coefficents in radians
Returns
-------
rotation_coefs : list
the rotation coefficients implied by Vario2d.bearing
"""
return [np.cos(self.bearing_rads),
np.sin(self.bearing_rads),
-1.0*np.sin(self.bearing_rads),
np.cos(self.bearing_rads)] |
def load_kb_mappings_file(kbname, kbfile, separator):
"""Add KB values from file to given KB returning rows added."""
num_added = 0
with open(kbfile) as kb_fd:
for line in kb_fd:
if not line.strip():
continue
try:
key, value = line.split(separator)
except ValueError:
# bad split, pass
current_app.logger.error("Error splitting: {0}".format(line))
continue
add_kb_mapping(kbname, key, value)
num_added += 1
return num_added | Add KB values from file to given KB returning rows added. | Below is the the instruction that describes the task:
### Input:
Add KB values from file to given KB returning rows added.
### Response:
def load_kb_mappings_file(kbname, kbfile, separator):
"""Add KB values from file to given KB returning rows added."""
num_added = 0
with open(kbfile) as kb_fd:
for line in kb_fd:
if not line.strip():
continue
try:
key, value = line.split(separator)
except ValueError:
# bad split, pass
current_app.logger.error("Error splitting: {0}".format(line))
continue
add_kb_mapping(kbname, key, value)
num_added += 1
return num_added |
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
"""Write a single service declaration to a proto file."""
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
for method_name, method_info in method_info_map.items():
for line in textwrap.wrap(method_info.description,
printer.CalculateWidth() - 3):
printer('// %s', line)
printer('rpc %s (%s) returns (%s);',
method_name,
method_info.request_type_name,
method_info.response_type_name)
printer('}') | Write a single service declaration to a proto file. | Below is the the instruction that describes the task:
### Input:
Write a single service declaration to a proto file.
### Response:
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
"""Write a single service declaration to a proto file."""
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
for method_name, method_info in method_info_map.items():
for line in textwrap.wrap(method_info.description,
printer.CalculateWidth() - 3):
printer('// %s', line)
printer('rpc %s (%s) returns (%s);',
method_name,
method_info.request_type_name,
method_info.response_type_name)
printer('}') |
def build_c(self):
"""Calculates the total attenuation from the total absorption and total scattering
c = a + b
"""
lg.info('Building total attenuation C')
self.c = self.a + self.b | Calculates the total attenuation from the total absorption and total scattering
c = a + b | Below is the the instruction that describes the task:
### Input:
Calculates the total attenuation from the total absorption and total scattering
c = a + b
### Response:
def build_c(self):
"""Calculates the total attenuation from the total absorption and total scattering
c = a + b
"""
lg.info('Building total attenuation C')
self.c = self.a + self.b |
def read(self, offset, length):
"""
Return *length* bytes from this stream starting at *offset*.
"""
self._file.seek(offset)
return self._file.read(length) | Return *length* bytes from this stream starting at *offset*. | Below is the the instruction that describes the task:
### Input:
Return *length* bytes from this stream starting at *offset*.
### Response:
def read(self, offset, length):
"""
Return *length* bytes from this stream starting at *offset*.
"""
self._file.seek(offset)
return self._file.read(length) |
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind) | For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such. | Below is the the instruction that describes the task:
### Input:
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
### Response:
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind) |
def thickest(self, n=1, index=False):
"""
Returns the thickest interval(s) as a striplog.
Args:
n (int): The number of thickest intervals to return. Default: 1.
index (bool): If True, only the indices of the intervals are
returned. You can use this to index into the striplog.
Returns:
Interval. The thickest interval. Or, if ``index`` was ``True``,
the index of the thickest interval.
"""
s = sorted(range(len(self)), key=lambda k: self[k].thickness)
indices = s[-n:]
if index:
return indices
else:
if n == 1:
# Then return an interval.
i = indices[0]
return self[i]
else:
return self[indices] | Returns the thickest interval(s) as a striplog.
Args:
n (int): The number of thickest intervals to return. Default: 1.
index (bool): If True, only the indices of the intervals are
returned. You can use this to index into the striplog.
Returns:
Interval. The thickest interval. Or, if ``index`` was ``True``,
the index of the thickest interval. | Below is the the instruction that describes the task:
### Input:
Returns the thickest interval(s) as a striplog.
Args:
n (int): The number of thickest intervals to return. Default: 1.
index (bool): If True, only the indices of the intervals are
returned. You can use this to index into the striplog.
Returns:
Interval. The thickest interval. Or, if ``index`` was ``True``,
the index of the thickest interval.
### Response:
def thickest(self, n=1, index=False):
"""
Returns the thickest interval(s) as a striplog.
Args:
n (int): The number of thickest intervals to return. Default: 1.
index (bool): If True, only the indices of the intervals are
returned. You can use this to index into the striplog.
Returns:
Interval. The thickest interval. Or, if ``index`` was ``True``,
the index of the thickest interval.
"""
s = sorted(range(len(self)), key=lambda k: self[k].thickness)
indices = s[-n:]
if index:
return indices
else:
if n == 1:
# Then return an interval.
i = indices[0]
return self[i]
else:
return self[indices] |
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
This strips $(-$) and everything in between the string,
since those parts don't affect signatures.
"""
from SCons.Subst import SUBST_SIG
cmd = self.cmd_list
if is_List(cmd):
cmd = ' '.join(map(str, cmd))
else:
cmd = str(cmd)
if executor:
return env.subst_target_source(cmd, SUBST_SIG, executor=executor)
else:
return env.subst_target_source(cmd, SUBST_SIG, target, source) | Return the signature contents of this action's command line.
This strips $(-$) and everything in between the string,
since those parts don't affect signatures. | Below is the the instruction that describes the task:
### Input:
Return the signature contents of this action's command line.
This strips $(-$) and everything in between the string,
since those parts don't affect signatures.
### Response:
def get_presig(self, target, source, env, executor=None):
"""Return the signature contents of this action's command line.
This strips $(-$) and everything in between the string,
since those parts don't affect signatures.
"""
from SCons.Subst import SUBST_SIG
cmd = self.cmd_list
if is_List(cmd):
cmd = ' '.join(map(str, cmd))
else:
cmd = str(cmd)
if executor:
return env.subst_target_source(cmd, SUBST_SIG, executor=executor)
else:
return env.subst_target_source(cmd, SUBST_SIG, target, source) |
def resize_imgs(self, targ, new_path, resume=True, fn=None):
"""
resize all images in the dataset and save them to `new_path`
Arguments:
targ (int): the target size
new_path (string): the new folder to save the images
resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence
of individual images rather than the existence of the directory
fn (function): custom resizing function Img -> Img
"""
dest = resize_imgs(self.fnames, targ, self.path, new_path, resume, fn)
return self.__class__(self.fnames, self.y, self.transform, dest) | resize all images in the dataset and save them to `new_path`
Arguments:
targ (int): the target size
new_path (string): the new folder to save the images
resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence
of individual images rather than the existence of the directory
fn (function): custom resizing function Img -> Img | Below is the the instruction that describes the task:
### Input:
resize all images in the dataset and save them to `new_path`
Arguments:
targ (int): the target size
new_path (string): the new folder to save the images
resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence
of individual images rather than the existence of the directory
fn (function): custom resizing function Img -> Img
### Response:
def resize_imgs(self, targ, new_path, resume=True, fn=None):
"""
resize all images in the dataset and save them to `new_path`
Arguments:
targ (int): the target size
new_path (string): the new folder to save the images
resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence
of individual images rather than the existence of the directory
fn (function): custom resizing function Img -> Img
"""
dest = resize_imgs(self.fnames, targ, self.path, new_path, resume, fn)
return self.__class__(self.fnames, self.y, self.transform, dest) |
def bristow_campbell(tmin, tmax, pot_rad_daily, A, C):
"""calculates potential shortwave radiation based on minimum and maximum temperature
This routine calculates global radiation as described in:
Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
incoming solar radiation and daily maximum and minimum temperature.
Agricultural and forest meteorology 31.2 (1984): 159-166.
Args:
        daily_data: time series (daily data) including at least minimum and maximum temperature
pot_rad_daily: mean potential daily radiation
A: parameter A of the Bristow-Campbell model
C: parameter C of the Bristow-Campbell model
Returns:
series of potential shortwave radiation
"""
assert tmin.index.equals(tmax.index)
temp = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax))
temp = temp.reindex(pd.DatetimeIndex(start=temp.index[0], end=temp.index[-1], freq='D'))
temp['tmin_nextday'] = temp.tmin
temp.tmin_nextday.iloc[:-1] = temp.tmin.iloc[1:].values
temp = temp.loc[tmin.index]
pot_rad_daily = pot_rad_daily.loc[tmin.index]
dT = temp.tmax - (temp.tmin + temp.tmin_nextday) / 2
dT_m_avg = dT.groupby(dT.index.month).mean()
B = 0.036 * np.exp(-0.154 * dT_m_avg[temp.index.month])
B.index = temp.index
if isinstance(A, pd.Series):
months = temp.index.month
A = A.loc[months].values
C = C.loc[months].values
transmissivity = A * (1 - np.exp(-B * dT**C))
R0 = transmissivity * pot_rad_daily
return R0 | calculates potential shortwave radiation based on minimum and maximum temperature
This routine calculates global radiation as described in:
Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
incoming solar radiation and daily maximum and minimum temperature.
Agricultural and forest meteorology 31.2 (1984): 159-166.
Args:
    daily_data: time series (daily data) including at least minimum and maximum temperature
pot_rad_daily: mean potential daily radiation
A: parameter A of the Bristow-Campbell model
C: parameter C of the Bristow-Campbell model
Returns:
series of potential shortwave radiation | Below is the the instruction that describes the task:
### Input:
calculates potential shortwave radiation based on minimum and maximum temperature
This routine calculates global radiation as described in:
Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
incoming solar radiation and daily maximum and minimum temperature.
Agricultural and forest meteorology 31.2 (1984): 159-166.
Args:
    daily_data: time series (daily data) including at least minimum and maximum temperature
pot_rad_daily: mean potential daily radiation
A: parameter A of the Bristow-Campbell model
C: parameter C of the Bristow-Campbell model
Returns:
series of potential shortwave radiation
### Response:
def bristow_campbell(tmin, tmax, pot_rad_daily, A, C):
"""calculates potential shortwave radiation based on minimum and maximum temperature
This routine calculates global radiation as described in:
Bristow, Keith L., and Gaylon S. Campbell: On the relationship between
incoming solar radiation and daily maximum and minimum temperature.
Agricultural and forest meteorology 31.2 (1984): 159-166.
Args:
        daily_data: time series (daily data) including at least minimum and maximum temperature
pot_rad_daily: mean potential daily radiation
A: parameter A of the Bristow-Campbell model
C: parameter C of the Bristow-Campbell model
Returns:
series of potential shortwave radiation
"""
assert tmin.index.equals(tmax.index)
temp = pd.DataFrame(data=dict(tmin=tmin, tmax=tmax))
temp = temp.reindex(pd.DatetimeIndex(start=temp.index[0], end=temp.index[-1], freq='D'))
temp['tmin_nextday'] = temp.tmin
temp.tmin_nextday.iloc[:-1] = temp.tmin.iloc[1:].values
temp = temp.loc[tmin.index]
pot_rad_daily = pot_rad_daily.loc[tmin.index]
dT = temp.tmax - (temp.tmin + temp.tmin_nextday) / 2
dT_m_avg = dT.groupby(dT.index.month).mean()
B = 0.036 * np.exp(-0.154 * dT_m_avg[temp.index.month])
B.index = temp.index
if isinstance(A, pd.Series):
months = temp.index.month
A = A.loc[months].values
C = C.loc[months].values
transmissivity = A * (1 - np.exp(-B * dT**C))
R0 = transmissivity * pot_rad_daily
return R0 |
def select_valid_methods(self, T):
r'''Method to obtain a sorted list of methods which are valid at `T`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
Returns
-------
sorted_valid_methods : list
Sorted lists of methods valid at T according to
`test_method_validity`
'''
# Consider either only the user's methods or all methods
# Tabular data will be in both when inserted
if self.forced:
considered_methods = list(self.user_methods)
else:
considered_methods = list(self.all_methods)
# User methods (incl. tabular data); add back later, after ranking the rest
if self.user_methods:
[considered_methods.remove(i) for i in self.user_methods]
# Index the rest of the methods by ranked_methods, and add them to a list, sorted_methods
preferences = sorted([self.ranked_methods.index(i) for i in considered_methods])
sorted_methods = [self.ranked_methods[i] for i in preferences]
# Add back the user's methods to the top, in order.
if self.user_methods:
[sorted_methods.insert(0, i) for i in reversed(self.user_methods)]
sorted_valid_methods = []
for method in sorted_methods:
if self.test_method_validity(T, method):
sorted_valid_methods.append(method)
return sorted_valid_methods | r'''Method to obtain a sorted list of methods which are valid at `T`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
Returns
-------
sorted_valid_methods : list
Sorted lists of methods valid at T according to
`test_method_validity` | Below is the the instruction that describes the task:
### Input:
r'''Method to obtain a sorted list of methods which are valid at `T`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
Returns
-------
sorted_valid_methods : list
Sorted lists of methods valid at T according to
`test_method_validity`
### Response:
def select_valid_methods(self, T):
r'''Method to obtain a sorted list of methods which are valid at `T`
according to `test_method_validity`. Considers either only user methods
if forced is True, or all methods. User methods are first tested
according to their listed order, and unless forced is True, then all
methods are tested and sorted by their order in `ranked_methods`.
Parameters
----------
T : float
Temperature at which to test methods, [K]
Returns
-------
sorted_valid_methods : list
Sorted lists of methods valid at T according to
`test_method_validity`
'''
# Consider either only the user's methods or all methods
# Tabular data will be in both when inserted
if self.forced:
considered_methods = list(self.user_methods)
else:
considered_methods = list(self.all_methods)
# User methods (incl. tabular data); add back later, after ranking the rest
if self.user_methods:
[considered_methods.remove(i) for i in self.user_methods]
# Index the rest of the methods by ranked_methods, and add them to a list, sorted_methods
preferences = sorted([self.ranked_methods.index(i) for i in considered_methods])
sorted_methods = [self.ranked_methods[i] for i in preferences]
# Add back the user's methods to the top, in order.
if self.user_methods:
[sorted_methods.insert(0, i) for i in reversed(self.user_methods)]
sorted_valid_methods = []
for method in sorted_methods:
if self.test_method_validity(T, method):
sorted_valid_methods.append(method)
return sorted_valid_methods |
def call(self, **data):
"""Issue the call.
:param data: Data to pass in the *body* of the request.
"""
uri, body, headers = self.prepare(data)
return self.dispatch(uri, body, headers) | Issue the call.
:param data: Data to pass in the *body* of the request. | Below is the the instruction that describes the task:
### Input:
Issue the call.
:param data: Data to pass in the *body* of the request.
### Response:
def call(self, **data):
"""Issue the call.
:param data: Data to pass in the *body* of the request.
"""
uri, body, headers = self.prepare(data)
return self.dispatch(uri, body, headers) |
def has_delete_permission(self, request):
""" Can delete this object """
return request.user.is_authenticated and request.user.is_active and request.user.is_superuser | Can delete this object | Below is the the instruction that describes the task:
### Input:
Can delete this object
### Response:
def has_delete_permission(self, request):
""" Can delete this object """
return request.user.is_authenticated and request.user.is_active and request.user.is_superuser |
def _request_exc_message(exc):
"""
Return a reasonable exception message from a
:exc:`request.exceptions.RequestException` exception.
The approach is to dig deep to the original reason, if the original
exception is present, skipping irrelevant exceptions such as
`urllib3.exceptions.MaxRetryError`, and eliminating useless object
representations such as the connection pool object in
`urllib3.exceptions.NewConnectionError`.
Parameters:
exc (:exc:`~request.exceptions.RequestException`): Exception
Returns:
string: A reasonable exception message from the specified exception.
"""
if exc.args:
if isinstance(exc.args[0], Exception):
org_exc = exc.args[0]
if isinstance(org_exc, urllib3.exceptions.MaxRetryError):
reason_exc = org_exc.reason
message = str(reason_exc)
else:
message = str(org_exc.args[0])
else:
message = str(exc.args[0])
# Eliminate useless object repr at begin of the message
m = re.match(r'^(\(<[^>]+>, \'(.*)\'\)|<[^>]+>: (.*))$', message)
if m:
message = m.group(2) or m.group(3)
else:
message = ""
return message | Return a reasonable exception message from a
:exc:`request.exceptions.RequestException` exception.
The approach is to dig deep to the original reason, if the original
exception is present, skipping irrelevant exceptions such as
`urllib3.exceptions.MaxRetryError`, and eliminating useless object
representations such as the connection pool object in
`urllib3.exceptions.NewConnectionError`.
Parameters:
exc (:exc:`~request.exceptions.RequestException`): Exception
Returns:
string: A reasonable exception message from the specified exception. | Below is the the instruction that describes the task:
### Input:
Return a reasonable exception message from a
:exc:`request.exceptions.RequestException` exception.
The approach is to dig deep to the original reason, if the original
exception is present, skipping irrelevant exceptions such as
`urllib3.exceptions.MaxRetryError`, and eliminating useless object
representations such as the connection pool object in
`urllib3.exceptions.NewConnectionError`.
Parameters:
  exc (:exc:`~requests.exceptions.RequestException`): Exception
Returns:
string: A reasonable exception message from the specified exception.
### Response:
def _request_exc_message(exc):
"""
Return a reasonable exception message from a
    :exc:`requests.exceptions.RequestException` exception.
The approach is to dig deep to the original reason, if the original
exception is present, skipping irrelevant exceptions such as
`urllib3.exceptions.MaxRetryError`, and eliminating useless object
representations such as the connection pool object in
`urllib3.exceptions.NewConnectionError`.
Parameters:
      exc (:exc:`~requests.exceptions.RequestException`): Exception
Returns:
string: A reasonable exception message from the specified exception.
"""
if exc.args:
if isinstance(exc.args[0], Exception):
org_exc = exc.args[0]
if isinstance(org_exc, urllib3.exceptions.MaxRetryError):
reason_exc = org_exc.reason
message = str(reason_exc)
else:
message = str(org_exc.args[0])
else:
message = str(exc.args[0])
# Eliminate useless object repr at begin of the message
m = re.match(r'^(\(<[^>]+>, \'(.*)\'\)|<[^>]+>: (.*))$', message)
if m:
message = m.group(2) or m.group(3)
else:
message = ""
return message |
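A short usage sketch for _request_exc_message: it assumes the helper above is in scope (in its own library it is module-private) and uses only exception classes that requests really ships; the error text is made up.
import requests
try:
    raise requests.exceptions.ConnectionError("connection refused")  # stand-in for a failed request
except requests.exceptions.RequestException as exc:
    # exc.args[0] is a plain string here, so the helper returns it unchanged.
    print(_request_exc_message(exc))  # -> connection refused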
def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
"""
Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found.
"""
records = []
rrsets = zone.iterate_rdatasets() if zone else []
for rname, rdataset in rrsets:
rtype = dns.rdatatype.to_text(rdataset.rdtype)
if ((not rdtype or rdtype == rtype)
and (not name or name == rname.to_text())):
for rdata in rdataset:
rdata = rdata.to_text()
if not content or self._convert_content(rtype, content) == rdata:
raw_rdata = self._clean_TXT_record({'type': rtype,
'content': rdata})['content']
data = {
'type': rtype,
'name': rname.to_text(True),
'ttl': int(rdataset.ttl),
'content': raw_rdata,
'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
}
records.append(data)
return records | Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found. | Below is the the instruction that describes the task:
### Input:
Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found.
### Response:
def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
"""
Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found.
"""
records = []
rrsets = zone.iterate_rdatasets() if zone else []
for rname, rdataset in rrsets:
rtype = dns.rdatatype.to_text(rdataset.rdtype)
if ((not rdtype or rdtype == rtype)
and (not name or name == rname.to_text())):
for rdata in rdataset:
rdata = rdata.to_text()
if not content or self._convert_content(rtype, content) == rdata:
raw_rdata = self._clean_TXT_record({'type': rtype,
'content': rdata})['content']
data = {
'type': rtype,
'name': rname.to_text(True),
'ttl': int(rdataset.ttl),
'content': raw_rdata,
'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
}
records.append(data)
return records |
def wal_archive(self, wal_path, concurrency=1):
"""
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
"""
# Upload the segment expressly indicated. It's special
# relative to other uploads when parallel wal-push is enabled,
# in that it's not desirable to tweak its .ready/.done files
# in archive_status.
xlog_dir = os.path.dirname(wal_path)
segment = WalSegment(wal_path, explicit=True)
uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
group = WalTransferGroup(uploader)
group.start(segment)
# Upload any additional wal segments up to the specified
# concurrency by scanning the Postgres archive_status
# directory.
started = 1
seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
while started < concurrency:
try:
other_segment = next(seg_stream)
except StopIteration:
break
if other_segment.path != wal_path:
group.start(other_segment)
started += 1
try:
# Wait for uploads to finish.
group.join()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
print(e)
raise UserException(
msg='could not find file for wal-push',
detail=('The operating system reported: {0} {1}'
.format(e.strerror, repr(e.filename))))
raise | Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature. | Below is the the instruction that describes the task:
### Input:
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
### Response:
def wal_archive(self, wal_path, concurrency=1):
"""
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
"""
# Upload the segment expressly indicated. It's special
# relative to other uploads when parallel wal-push is enabled,
# in that it's not desirable to tweak its .ready/.done files
# in archive_status.
xlog_dir = os.path.dirname(wal_path)
segment = WalSegment(wal_path, explicit=True)
uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
group = WalTransferGroup(uploader)
group.start(segment)
# Upload any additional wal segments up to the specified
# concurrency by scanning the Postgres archive_status
# directory.
started = 1
seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
while started < concurrency:
try:
other_segment = next(seg_stream)
except StopIteration:
break
if other_segment.path != wal_path:
group.start(other_segment)
started += 1
try:
# Wait for uploads to finish.
group.join()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
print(e)
raise UserException(
msg='could not find file for wal-push',
detail=('The operating system reported: {0} {1}'
.format(e.strerror, repr(e.filename))))
raise |
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new Proof object
from the dictionary.
:param dict: the dictionary to convert
"""
key = hb_decode(dict['key'])
check_fraction = dict['check_fraction']
return Merkle(check_fraction, key) | Takes a dictionary as an argument and returns a new Proof object
from the dictionary.
:param dict: the dictionary to convert | Below is the the instruction that describes the task:
### Input:
Takes a dictionary as an argument and returns a new Proof object
from the dictionary.
:param dict: the dictionary to convert
### Response:
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new Proof object
from the dictionary.
:param dict: the dictionary to convert
"""
key = hb_decode(dict['key'])
check_fraction = dict['check_fraction']
return Merkle(check_fraction, key) |
def _add_assert(self, **kwargs):
"""
        if screenshot is None, only a failed case will take a screenshot
"""
# convert screenshot to relative path from <None|True|False|PIL.Image>
screenshot = kwargs.get('screenshot')
is_success = kwargs.get('success')
screenshot = (not is_success) if screenshot is None else screenshot
kwargs['screenshot'] = self._take_screenshot(screenshot=screenshot, name_prefix='assert')
action = kwargs.pop('action', 'assert')
self.add_step(action, **kwargs)
if not is_success:
message = kwargs.get('message')
frame, filename, line_number, function_name, lines, index = inspect.stack()[2]
print('Assert [%s: %d] WARN: %s' % (filename, line_number, message))
if not kwargs.get('safe', False):
            raise AssertionError(message) | if screenshot is None, only a failed case will take a screenshot | Below is the the instruction that describes the task:
### Input:
if screenshot is None, only a failed case will take a screenshot
### Response:
def _add_assert(self, **kwargs):
"""
        if screenshot is None, only a failed case will take a screenshot
"""
# convert screenshot to relative path from <None|True|False|PIL.Image>
screenshot = kwargs.get('screenshot')
is_success = kwargs.get('success')
screenshot = (not is_success) if screenshot is None else screenshot
kwargs['screenshot'] = self._take_screenshot(screenshot=screenshot, name_prefix='assert')
action = kwargs.pop('action', 'assert')
self.add_step(action, **kwargs)
if not is_success:
message = kwargs.get('message')
frame, filename, line_number, function_name, lines, index = inspect.stack()[2]
print('Assert [%s: %d] WARN: %s' % (filename, line_number, message))
if not kwargs.get('safe', False):
raise AssertionError(message) |
def _text_filter_input(self, input_gen):
"""
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
        :param input_gen: line generator
:output: generator of filtered lines
"""
for line in input_gen:
for item in self._text_filter_blacklist:
if item in line:
break
else:
# No blacklist matches, passing the line through
yield line | Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
:param input_gen: line generator
:output: generator of filtered lines | Below is the the instruction that describes the task:
### Input:
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
:param input_gen: line generator
:output: generator of filtered lines
### Response:
def _text_filter_input(self, input_gen):
"""
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
        :param input_gen: line generator
:output: generator of filtered lines
"""
for line in input_gen:
for item in self._text_filter_blacklist:
if item in line:
break
else:
# No blacklist matches, passing the line through
yield line |
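The filter above leans on Python's for/else: the else branch runs only when no blacklist entry matched the line. A self-contained sketch of the same pattern, with made-up lines and blacklist entries:
def filter_lines(lines, blacklist):
    for line in lines:
        for item in blacklist:
            if item in line:
                break
        else:
            # Reached only when the inner loop did not break, i.e. no blacklisted substring matched.
            yield line
sample = ['metric_a 1', 'container_fs_usage_bytes 2', 'metric_b 3']
print(list(filter_lines(sample, ['container_fs_usage_bytes'])))  # -> ['metric_a 1', 'metric_b 3']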
async def disconnect(self, sid, namespace=None):
"""Disconnect a client.
The only difference with the :func:`socketio.Server.disconnect` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
"""
return await self.server.disconnect(
sid, namespace=namespace or self.namespace) | Disconnect a client.
The only difference with the :func:`socketio.Server.disconnect` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine. | Below is the the instruction that describes the task:
### Input:
Disconnect a client.
The only difference with the :func:`socketio.Server.disconnect` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
### Response:
async def disconnect(self, sid, namespace=None):
"""Disconnect a client.
The only difference with the :func:`socketio.Server.disconnect` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
Note: this method is a coroutine.
"""
return await self.server.disconnect(
sid, namespace=namespace or self.namespace) |
def registerStatsHandler(app, serverName, prefix='/status/'):
"""Register the stats handler with a Flask app, serving routes
with a given prefix. The prefix defaults to '/status/', which is
generally what you want."""
if prefix[-1] != '/':
prefix += '/'
handler = functools.partial(statsHandler, serverName)
app.add_url_rule(prefix, 'statsHandler', handler, methods=['GET'])
app.add_url_rule(prefix + '<path:path>', 'statsHandler', handler, methods=['GET']) | Register the stats handler with a Flask app, serving routes
with a given prefix. The prefix defaults to '/status/', which is
generally what you want. | Below is the the instruction that describes the task:
### Input:
Register the stats handler with a Flask app, serving routes
with a given prefix. The prefix defaults to '/status/', which is
generally what you want.
### Response:
def registerStatsHandler(app, serverName, prefix='/status/'):
"""Register the stats handler with a Flask app, serving routes
with a given prefix. The prefix defaults to '/status/', which is
generally what you want."""
if prefix[-1] != '/':
prefix += '/'
handler = functools.partial(statsHandler, serverName)
app.add_url_rule(prefix, 'statsHandler', handler, methods=['GET'])
app.add_url_rule(prefix + '<path:path>', 'statsHandler', handler, methods=['GET']) |
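A minimal registration sketch, assuming registerStatsHandler (and the statsHandler it wraps) can be imported from the module above and that Flask is installed; the server name is made up.
from flask import Flask
app = Flask(__name__)
# A prefix without a trailing slash gets one appended before the two routes are added.
registerStatsHandler(app, 'my-server')  # serves GET /status/ and GET /status/<path>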
def samples_to_batches(samples: Iterable, batch_size: int):
"""Chunk a series of network inputs and outputs into larger batches"""
it = iter(samples)
while True:
with suppress(StopIteration):
batch_in, batch_out = [], []
for i in range(batch_size):
sample_in, sample_out = next(it)
batch_in.append(sample_in)
batch_out.append(sample_out)
if not batch_in:
raise StopIteration
yield np.array(batch_in), np.array(batch_out) | Chunk a series of network inputs and outputs into larger batches | Below is the the instruction that describes the task:
### Input:
Chunk a series of network inputs and outputs into larger batches
### Response:
def samples_to_batches(samples: Iterable, batch_size: int):
"""Chunk a series of network inputs and outputs into larger batches"""
it = iter(samples)
while True:
with suppress(StopIteration):
batch_in, batch_out = [], []
for i in range(batch_size):
sample_in, sample_out = next(it)
batch_in.append(sample_in)
batch_out.append(sample_out)
if not batch_in:
raise StopIteration
yield np.array(batch_in), np.array(batch_out) |
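A usage sketch for samples_to_batches, assuming the function above is in scope together with its own imports (numpy and contextlib.suppress); the sample pairs are made up. Only the complete batches are taken, since the generator is meant to be driven by a bounded training loop rather than run to exhaustion.
from itertools import islice
import numpy as np
pairs = [(np.array([i]), np.array([i * 10])) for i in range(4)]
for batch_in, batch_out in islice(samples_to_batches(pairs, 2), 2):
    print(batch_in.shape, batch_out.shape)  # -> (2, 1) (2, 1), printed twice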
def get_pk_descriptors(cls):
"""Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys."""
pk_fields = {
name: descriptor
for name, descriptor in vars_class(cls).items()
if isinstance(descriptor, ObjectField) and is_pk_descriptor(descriptor)
}
alt_pk_fields = defaultdict(list)
for name, descriptor in vars_class(cls).items():
if isinstance(descriptor, ObjectField):
if descriptor.alt_pk is True:
alt_pk_fields[0].append((name, descriptor))
elif type(descriptor.alt_pk) is int:
alt_pk_fields[descriptor.alt_pk].append((name, descriptor))
if len(pk_fields) == 1:
return ((pk_fields.popitem(),), (alt_pk_fields[0],))
elif len(pk_fields) > 1:
unique_pk_fields = {
name: descriptor
for name, descriptor in pk_fields.items()
if descriptor.pk is True
}
if unique_pk_fields:
raise AttributeError(
"more than one field is marked as unique primary key: %s" % (
', '.join(sorted(pk_fields))))
pk_descriptors = tuple(sorted((
(name, descriptor)
for name, descriptor in pk_fields.items()
), key=lambda item: item[1].pk))
alt_pk_descriptors = tuple(
alt_pk_fields[idx]
for idx, (name, descriptor) in enumerate(pk_descriptors)
)
return pk_descriptors, alt_pk_descriptors
else:
return tuple(), tuple() | Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys. | Below is the the instruction that describes the task:
### Input:
Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys.
### Response:
def get_pk_descriptors(cls):
"""Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys."""
pk_fields = {
name: descriptor
for name, descriptor in vars_class(cls).items()
if isinstance(descriptor, ObjectField) and is_pk_descriptor(descriptor)
}
alt_pk_fields = defaultdict(list)
for name, descriptor in vars_class(cls).items():
if isinstance(descriptor, ObjectField):
if descriptor.alt_pk is True:
alt_pk_fields[0].append((name, descriptor))
elif type(descriptor.alt_pk) is int:
alt_pk_fields[descriptor.alt_pk].append((name, descriptor))
if len(pk_fields) == 1:
return ((pk_fields.popitem(),), (alt_pk_fields[0],))
elif len(pk_fields) > 1:
unique_pk_fields = {
name: descriptor
for name, descriptor in pk_fields.items()
if descriptor.pk is True
}
if unique_pk_fields:
raise AttributeError(
"more than one field is marked as unique primary key: %s" % (
', '.join(sorted(pk_fields))))
pk_descriptors = tuple(sorted((
(name, descriptor)
for name, descriptor in pk_fields.items()
), key=lambda item: item[1].pk))
alt_pk_descriptors = tuple(
alt_pk_fields[idx]
for idx, (name, descriptor) in enumerate(pk_descriptors)
)
return pk_descriptors, alt_pk_descriptors
else:
return tuple(), tuple() |
def explode_filename(name: str, scheme: str) -> dict:
"""
Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return:
"""
if not scheme:
return split_filename(name)
replacements = {
'name': '(?P<name>.*)',
'ext': '(?P<extension>.+)$',
'index': '(?P<index>[0-9]{{{length}}})'
}
scheme_pattern = '^'
empty_scheme_pattern = ''
offset = 0
while offset < len(scheme):
char = scheme[offset]
next_char = scheme[offset + 1] if (offset + 1) < len(scheme) else None
if char in r'.()^$?*+\[]|':
addition = '\\{}'.format(char)
scheme_pattern += addition
empty_scheme_pattern += addition
offset += 1
continue
if char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
if next_char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
end_index = scheme.find('}}', offset)
contents = scheme[offset:end_index].strip('{}').lower()
if contents in replacements:
scheme_pattern += replacements[contents]
elif contents == ('#' * len(contents)):
addition = replacements['index'].format(length=len(contents))
scheme_pattern += addition
empty_scheme_pattern += addition
else:
addition = '{{{}}}'.format(contents)
scheme_pattern += addition
empty_scheme_pattern += addition
offset = end_index + 2
match = re.compile(scheme_pattern).match(name)
if not match:
parts = split_filename(name)
comparison = re.compile(empty_scheme_pattern.rstrip('-_: .\\'))
match = comparison.match(parts['name'])
if not match:
return parts
parts = match.groupdict()
index = parts.get('index')
index = int(index) if index else None
return dict(
index=index - 1,
name=parts.get('name', ''),
extension=parts.get('extension', 'py')
) | Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return: | Below is the the instruction that describes the task:
### Input:
Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return:
### Response:
def explode_filename(name: str, scheme: str) -> dict:
"""
Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return:
"""
if not scheme:
return split_filename(name)
replacements = {
'name': '(?P<name>.*)',
'ext': '(?P<extension>.+)$',
'index': '(?P<index>[0-9]{{{length}}})'
}
scheme_pattern = '^'
empty_scheme_pattern = ''
offset = 0
while offset < len(scheme):
char = scheme[offset]
next_char = scheme[offset + 1] if (offset + 1) < len(scheme) else None
if char in r'.()^$?*+\[]|':
addition = '\\{}'.format(char)
scheme_pattern += addition
empty_scheme_pattern += addition
offset += 1
continue
if char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
if next_char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
end_index = scheme.find('}}', offset)
contents = scheme[offset:end_index].strip('{}').lower()
if contents in replacements:
scheme_pattern += replacements[contents]
elif contents == ('#' * len(contents)):
addition = replacements['index'].format(length=len(contents))
scheme_pattern += addition
empty_scheme_pattern += addition
else:
addition = '{{{}}}'.format(contents)
scheme_pattern += addition
empty_scheme_pattern += addition
offset = end_index + 2
match = re.compile(scheme_pattern).match(name)
if not match:
parts = split_filename(name)
comparison = re.compile(empty_scheme_pattern.rstrip('-_: .\\'))
match = comparison.match(parts['name'])
if not match:
return parts
parts = match.groupdict()
index = parts.get('index')
index = int(index) if index else None
return dict(
index=index - 1,
name=parts.get('name', ''),
extension=parts.get('extension', 'py')
) |
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
client = action.client
exec_results = []
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
exec_results.append(create_result)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
if exec_results:
return exec_results
return None | Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType | Below is the the instruction that describes the task:
### Input:
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
### Response:
def exec_commands(self, action, c_name, run_cmds, **kwargs):
"""
Runs a single command inside a container.
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param c_name: Container name.
:type c_name: unicode | str
:param run_cmds: Commands to run.
:type run_cmds: list[dockermap.map.input.ExecCommand]
:return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
if either no commands have been run or no values have been returned from the API.
:rtype: list[dict] | NoneType
"""
client = action.client
exec_results = []
for run_cmd in run_cmds:
cmd = run_cmd.cmd
cmd_user = run_cmd.user
log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
create_result = client.exec_create(**ec_kwargs)
if create_result:
e_id = create_result['Id']
log.debug("Starting exec command with id %s.", e_id)
es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
client.exec_start(**es_kwargs)
exec_results.append(create_result)
else:
log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
if exec_results:
return exec_results
return None |
def sort_by_fields(items, fields):
"""
Sort a list of objects on the given fields. The field list works analogously to
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering.
"""
# To get the desired behaviour, we need to order by keys in reverse order
# See: https://docs.python.org/2/howto/sorting.html#sort-stability-and-complex-sorts
for key in reversed(fields):
# Check if this key has been reversed
reverse = False
if key[0] == '-':
reverse = True
key = key[1:]
# Sort
# Use a tuple of (v is not None, v) as the key, to ensure that None sorts before other values,
# as comparing directly with None breaks on python3
items.sort(key=lambda x: (getattr(x, key) is not None, getattr(x, key)), reverse=reverse) | Sort a list of objects on the given fields. The field list works analogously to
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering. | Below is the the instruction that describes the task:
### Input:
Sort a list of objects on the given fields. The field list works analogously to
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering.
### Response:
def sort_by_fields(items, fields):
"""
Sort a list of objects on the given fields. The field list works analogously to
queryset.order_by(*fields): each field is either a property of the object,
or is prefixed by '-' (e.g. '-name') to indicate reverse ordering.
"""
# To get the desired behaviour, we need to order by keys in reverse order
# See: https://docs.python.org/2/howto/sorting.html#sort-stability-and-complex-sorts
for key in reversed(fields):
# Check if this key has been reversed
reverse = False
if key[0] == '-':
reverse = True
key = key[1:]
# Sort
# Use a tuple of (v is not None, v) as the key, to ensure that None sorts before other values,
# as comparing directly with None breaks on python3
items.sort(key=lambda x: (getattr(x, key) is not None, getattr(x, key)), reverse=reverse) |
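A small usage sketch for sort_by_fields, assuming the function above is in scope; the record objects are made up for illustration.
from types import SimpleNamespace
people = [
    SimpleNamespace(name='bob', age=35),
    SimpleNamespace(name='alice', age=None),
    SimpleNamespace(name='carol', age=35),
]
sort_by_fields(people, ['-age', 'name'])  # in place: age descending first, name ascending as tie-breaker
print([p.name for p in people])  # -> ['bob', 'carol', 'alice']  (the None age sorts last when reversed)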
def set_network_settings(self, mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv):
"""Sets network configuration of the NAT engine.
in mtu of type int
MTU (maximum transmission unit) of the NAT engine in bytes.
in sock_snd of type int
Capacity of the socket send buffer in bytes when creating a new socket.
in sock_rcv of type int
Capacity of the socket receive buffer in bytes when creating a new socket.
in tcp_wnd_snd of type int
Initial size of the NAT engine's sending TCP window in bytes when
establishing a new TCP connection.
in tcp_wnd_rcv of type int
Initial size of the NAT engine's receiving TCP window in bytes when
establishing a new TCP connection.
"""
if not isinstance(mtu, baseinteger):
raise TypeError("mtu can only be an instance of type baseinteger")
if not isinstance(sock_snd, baseinteger):
raise TypeError("sock_snd can only be an instance of type baseinteger")
if not isinstance(sock_rcv, baseinteger):
raise TypeError("sock_rcv can only be an instance of type baseinteger")
if not isinstance(tcp_wnd_snd, baseinteger):
raise TypeError("tcp_wnd_snd can only be an instance of type baseinteger")
if not isinstance(tcp_wnd_rcv, baseinteger):
raise TypeError("tcp_wnd_rcv can only be an instance of type baseinteger")
self._call("setNetworkSettings",
in_p=[mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv]) | Sets network configuration of the NAT engine.
in mtu of type int
MTU (maximum transmission unit) of the NAT engine in bytes.
in sock_snd of type int
Capacity of the socket send buffer in bytes when creating a new socket.
in sock_rcv of type int
Capacity of the socket receive buffer in bytes when creating a new socket.
in tcp_wnd_snd of type int
Initial size of the NAT engine's sending TCP window in bytes when
establishing a new TCP connection.
in tcp_wnd_rcv of type int
Initial size of the NAT engine's receiving TCP window in bytes when
establishing a new TCP connection. | Below is the the instruction that describes the task:
### Input:
Sets network configuration of the NAT engine.
in mtu of type int
MTU (maximum transmission unit) of the NAT engine in bytes.
in sock_snd of type int
Capacity of the socket send buffer in bytes when creating a new socket.
in sock_rcv of type int
Capacity of the socket receive buffer in bytes when creating a new socket.
in tcp_wnd_snd of type int
Initial size of the NAT engine's sending TCP window in bytes when
establishing a new TCP connection.
in tcp_wnd_rcv of type int
Initial size of the NAT engine's receiving TCP window in bytes when
establishing a new TCP connection.
### Response:
def set_network_settings(self, mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv):
"""Sets network configuration of the NAT engine.
in mtu of type int
MTU (maximum transmission unit) of the NAT engine in bytes.
in sock_snd of type int
Capacity of the socket send buffer in bytes when creating a new socket.
in sock_rcv of type int
Capacity of the socket receive buffer in bytes when creating a new socket.
in tcp_wnd_snd of type int
Initial size of the NAT engine's sending TCP window in bytes when
establishing a new TCP connection.
in tcp_wnd_rcv of type int
Initial size of the NAT engine's receiving TCP window in bytes when
establishing a new TCP connection.
"""
if not isinstance(mtu, baseinteger):
raise TypeError("mtu can only be an instance of type baseinteger")
if not isinstance(sock_snd, baseinteger):
raise TypeError("sock_snd can only be an instance of type baseinteger")
if not isinstance(sock_rcv, baseinteger):
raise TypeError("sock_rcv can only be an instance of type baseinteger")
if not isinstance(tcp_wnd_snd, baseinteger):
raise TypeError("tcp_wnd_snd can only be an instance of type baseinteger")
if not isinstance(tcp_wnd_rcv, baseinteger):
raise TypeError("tcp_wnd_rcv can only be an instance of type baseinteger")
self._call("setNetworkSettings",
in_p=[mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv]) |
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname) | Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found. | Below is the the instruction that describes the task:
### Input:
Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
### Response:
def findall(self):
"""Find all files under the base and set ``allfiles`` to the absolute
pathnames of files found.
"""
from stat import S_ISREG, S_ISDIR, S_ISLNK
self.allfiles = allfiles = []
root = self.base
stack = [root]
pop = stack.pop
push = stack.append
while stack:
root = pop()
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
# Avoid excess stat calls -- just one will do, thank you!
stat = os.stat(fullname)
mode = stat.st_mode
if S_ISREG(mode):
allfiles.append(fsdecode(fullname))
elif S_ISDIR(mode) and not S_ISLNK(mode):
push(fullname) |
def pre_release(self):
""" Return true if version is a pre-release. """
label = self.version_info.get('label', None)
pre = self.version_info.get('pre', None)
return True if (label is not None and pre is not None) else False | Return true if version is a pre-release. | Below is the the instruction that describes the task:
### Input:
Return true if version is a pre-release.
### Response:
def pre_release(self):
""" Return true if version is a pre-release. """
label = self.version_info.get('label', None)
pre = self.version_info.get('pre', None)
return True if (label is not None and pre is not None) else False |
def get_assign_annotation(node):
"""Get the type annotation of the assignment of the given node.
:param node: The node to get the annotation for.
:type node: astroid.nodes.Assign or astroid.nodes.AnnAssign
:returns: The type annotation as a string, or None if one does not exist.
:type: str or None
"""
annotation = None
annotation_node = None
try:
annotation_node = node.annotation
except AttributeError:
# Python 2 has no support for type annotations, so use getattr
annotation_node = getattr(node, "type_annotation", None)
if annotation_node:
if isinstance(annotation_node, astroid.nodes.Const):
annotation = node.value
else:
annotation = annotation_node.as_string()
return annotation | Get the type annotation of the assignment of the given node.
:param node: The node to get the annotation for.
:type node: astroid.nodes.Assign or astroid.nodes.AnnAssign
:returns: The type annotation as a string, or None if one does not exist.
:type: str or None | Below is the the instruction that describes the task:
### Input:
Get the type annotation of the assignment of the given node.
:param node: The node to get the annotation for.
:type node: astroid.nodes.Assign or astroid.nodes.AnnAssign
:returns: The type annotation as a string, or None if one does not exist.
:type: str or None
### Response:
def get_assign_annotation(node):
"""Get the type annotation of the assignment of the given node.
:param node: The node to get the annotation for.
:type node: astroid.nodes.Assign or astroid.nodes.AnnAssign
:returns: The type annotation as a string, or None if one does not exist.
:type: str or None
"""
annotation = None
annotation_node = None
try:
annotation_node = node.annotation
except AttributeError:
# Python 2 has no support for type annotations, so use getattr
annotation_node = getattr(node, "type_annotation", None)
if annotation_node:
if isinstance(annotation_node, astroid.nodes.Const):
annotation = node.value
else:
annotation = annotation_node.as_string()
return annotation |
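A quick check of the helper using astroid's node-building API, assuming astroid is installed and get_assign_annotation is in scope; the snippets are made up.
import astroid
node = astroid.extract_node('x: int = 1')  # an AnnAssign node whose annotation is a Name
print(get_assign_annotation(node))         # -> int
node = astroid.extract_node('y = 2')       # a plain Assign node without an annotation
print(get_assign_annotation(node))         # -> None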
def field_dict_from_row(row, model,
field_names=None, ignore_fields=('id', 'pk'),
strip=True,
blank_none=True,
ignore_related=True,
ignore_values=(None,),
ignore_errors=True,
verbosity=0):
"""Construct a Mapping (dict) from field names to values from a row of data
Args:
row (list or dict): Data (values) to be assigned to field_names in the dict.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
        ignore_fields (list or tuple of str): The field names to ignore when placing the row values in the dict.
Returns:
dict: Mapping from fields to values compatible with a Django model constructor kwargs, `model(**kwargs)`
"""
errors = collections.Counter()
if not field_names:
field_classes = [f for f in model._meta._fields() if (not ignore_fields or (f.name not in ignore_fields))]
field_names = [f.name for f in field_classes]
else:
field_classes = [f for f in model._meta._fields() if (f.name in field_names and (not ignore_fields or (f.name not in ignore_fields)))]
field_dict = {}
if isinstance(row, collections.Mapping):
row = [row.get(field_name, None) for field_name in field_names]
# if most of the destination field names exist in the source object then
elif sum(hasattr(row, field_name) for field_name in field_names) / (len(field_names) / 2. + 1):
row = [getattr(row, field_name, None) for field_name in field_names]
for field_name, field_class, value in zip(field_names, field_classes, row):
clean_value = None
if verbosity >= 3:
print field_name, field_class, value
if isinstance(field_class, related.RelatedField):
if not ignore_related:
try:
clean_value = field_class.related.parent_model.objects.get(value)
except:
try:
clean_value = field_class.related.parent_model.objects.get_by_natural_key(value)
except:
errors += collections.Counter(['num_unlinked_fks'])
if verbosity > 1:
print 'Unable to connect related field %r using value %r' % (field_class, value)
# FIXME: lots of redundancy and potential for error here and below
if isinstance(value, basestring) and not value:
if verbosity >= 3:
print 'String field %r setting value %r to None' % (field_class, value)
value = None
if blank_none and (
not isinstance(field_class, related.RelatedField) or field_class.blank or not field_class.null):
try:
if isinstance(field_class.to_python(''), basestring):
value = ''
else:
value = None
except:
value = None
else:
value = None
if not clean_value:
try:
# get a clean python value from a string, etc
clean_value = field_class.to_python(value)
except: # ValidationError
try:
clean_value = str(field_class.to_python(util.clean_wiki_datetime(value)))
except:
try:
clean_value = field_class.to_python(util.make_float(value))
except:
try:
clean_value = field_class.to_python(value) # FIXME: this has already been tried!
except:
if verbosity > 0:
print
print "The row below has a value (%r) that can't be coerced by %r:" % (value, field_class.to_python)
print row
print_exc()
clean_value = None
errors += collections.Counter(['num_uncoercible'])
if not ignore_errors:
raise
if isinstance(clean_value, basestring):
if strip:
clean_value = clean_value.strip()
# don't forget to decode the utf8 before doing a max_length truncation!
clean_value = clean_utf8(clean_value, verbosity=verbosity).decode('utf8')
max_length = getattr(field_class, 'max_length')
if max_length:
try:
assert(len(clean_value) <= field_class.max_length)
except:
if verbosity > 0:
print
print "The row below has a string (%r) that is too long (> %d):" % (clean_value, max_length)
print row
print_exc()
errors += collections.Counter(['num_truncated'])
clean_value = clean_value[:max_length]
if not ignore_errors:
raise
if not ignore_values or clean_value not in ignore_values:
field_dict[field_name] = clean_value
return field_dict, errors | Construct a Mapping (dict) from field names to values from a row of data
Args:
row (list or dict): Data (values) to be assigned to field_names in the dict.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
    ignore_fields (list or tuple of str): The field names to ignore when placing the row values in the dict.
Returns:
dict: Mapping from fields to values compatible with a Django model constructor kwargs, `model(**kwargs)` | Below is the the instruction that describes the task:
### Input:
Construct a Mapping (dict) from field names to values from a row of data
Args:
row (list or dict): Data (values) to be assigned to field_names in the dict.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
    ignore_fields (list or tuple of str): The field names to ignore when placing the row values in the dict.
Returns:
dict: Mapping from fields to values compatible with a Django model constructor kwargs, `model(**kwargs)`
### Response:
def field_dict_from_row(row, model,
field_names=None, ignore_fields=('id', 'pk'),
strip=True,
blank_none=True,
ignore_related=True,
ignore_values=(None,),
ignore_errors=True,
verbosity=0):
"""Construct a Mapping (dict) from field names to values from a row of data
Args:
row (list or dict): Data (values) to be assigned to field_names in the dict.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
        ignore_fields (list or tuple of str): The field names to ignore when placing the row values in the dict.
Returns:
dict: Mapping from fields to values compatible with a Django model constructor kwargs, `model(**kwargs)`
"""
errors = collections.Counter()
if not field_names:
field_classes = [f for f in model._meta._fields() if (not ignore_fields or (f.name not in ignore_fields))]
field_names = [f.name for f in field_classes]
else:
field_classes = [f for f in model._meta._fields() if (f.name in field_names and (not ignore_fields or (f.name not in ignore_fields)))]
field_dict = {}
if isinstance(row, collections.Mapping):
row = [row.get(field_name, None) for field_name in field_names]
# if most of the destination field names exist in the source object then
elif sum(hasattr(row, field_name) for field_name in field_names) / (len(field_names) / 2. + 1):
row = [getattr(row, field_name, None) for field_name in field_names]
for field_name, field_class, value in zip(field_names, field_classes, row):
clean_value = None
if verbosity >= 3:
print field_name, field_class, value
if isinstance(field_class, related.RelatedField):
if not ignore_related:
try:
clean_value = field_class.related.parent_model.objects.get(value)
except:
try:
clean_value = field_class.related.parent_model.objects.get_by_natural_key(value)
except:
errors += collections.Counter(['num_unlinked_fks'])
if verbosity > 1:
print 'Unable to connect related field %r using value %r' % (field_class, value)
# FIXME: lots of redundancy and potential for error here and below
if isinstance(value, basestring) and not value:
if verbosity >= 3:
print 'String field %r setting value %r to None' % (field_class, value)
value = None
if blank_none and (
not isinstance(field_class, related.RelatedField) or field_class.blank or not field_class.null):
try:
if isinstance(field_class.to_python(''), basestring):
value = ''
else:
value = None
except:
value = None
else:
value = None
if not clean_value:
try:
# get a clean python value from a string, etc
clean_value = field_class.to_python(value)
except: # ValidationError
try:
clean_value = str(field_class.to_python(util.clean_wiki_datetime(value)))
except:
try:
clean_value = field_class.to_python(util.make_float(value))
except:
try:
clean_value = field_class.to_python(value) # FIXME: this has already been tried!
except:
if verbosity > 0:
print
print "The row below has a value (%r) that can't be coerced by %r:" % (value, field_class.to_python)
print row
print_exc()
clean_value = None
errors += collections.Counter(['num_uncoercible'])
if not ignore_errors:
raise
if isinstance(clean_value, basestring):
if strip:
clean_value = clean_value.strip()
# don't forget to decode the utf8 before doing a max_length truncation!
clean_value = clean_utf8(clean_value, verbosity=verbosity).decode('utf8')
max_length = getattr(field_class, 'max_length')
if max_length:
try:
assert(len(clean_value) <= field_class.max_length)
except:
if verbosity > 0:
print
print "The row below has a string (%r) that is too long (> %d):" % (clean_value, max_length)
print row
print_exc()
errors += collections.Counter(['num_truncated'])
clean_value = clean_value[:max_length]
if not ignore_errors:
raise
if not ignore_values or clean_value not in ignore_values:
field_dict[field_name] = clean_value
return field_dict, errors |
def AllBalancesZeroOrLess(self):
"""
Flag indicating if all balances are 0 or less.
Returns:
bool: True if all balances are <= 0. False, otherwise.
"""
for key, fixed8 in self.Balances.items():
if fixed8.value > 0:
return False
return True | Flag indicating if all balances are 0 or less.
Returns:
bool: True if all balances are <= 0. False, otherwise. | Below is the the instruction that describes the task:
### Input:
Flag indicating if all balances are 0 or less.
Returns:
bool: True if all balances are <= 0. False, otherwise.
### Response:
def AllBalancesZeroOrLess(self):
"""
Flag indicating if all balances are 0 or less.
Returns:
bool: True if all balances are <= 0. False, otherwise.
"""
for key, fixed8 in self.Balances.items():
if fixed8.value > 0:
return False
return True |
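The property above just scans Balances for any positive value; the same check against a plain dict of numbers, purely for illustration (the real Balances map asset ids to Fixed8 objects carrying a .value attribute).
balances = {'NEO': 0, 'GAS': -3}  # made-up figures
print(all(v <= 0 for v in balances.values()))  # -> True, the analogue of AllBalancesZeroOrLess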
def CheckHashes(self, hashes, external=True):
"""Checks a list of hashes for presence in the store.
Sub stores need to pass back the original HashDigest objects since they
carry state about the original file source.
Only unique hashes are checked, if there is duplication in the hashes input
it is the caller's responsibility to maintain any necessary mappings.
Args:
hashes: A list of Hash objects to check.
external: If true, attempt to check stores defined as EXTERNAL.
Yields:
Tuples of (RDFURN, hash object) that exist in the store.
"""
hashes = set(hashes)
for child in self.GetChildrenByPriority(allow_external=external):
for urn, hash_obj in child.CheckHashes(hashes):
yield urn, hash_obj
hashes.discard(hash_obj)
# Nothing to search for, we are done.
if not hashes:
break | Checks a list of hashes for presence in the store.
Sub stores need to pass back the original HashDigest objects since they
carry state about the original file source.
Only unique hashes are checked, if there is duplication in the hashes input
it is the caller's responsibility to maintain any necessary mappings.
Args:
hashes: A list of Hash objects to check.
external: If true, attempt to check stores defined as EXTERNAL.
Yields:
Tuples of (RDFURN, hash object) that exist in the store. | Below is the the instruction that describes the task:
### Input:
Checks a list of hashes for presence in the store.
Sub stores need to pass back the original HashDigest objects since they
carry state about the original file source.
Only unique hashes are checked, if there is duplication in the hashes input
it is the caller's responsibility to maintain any necessary mappings.
Args:
hashes: A list of Hash objects to check.
external: If true, attempt to check stores defined as EXTERNAL.
Yields:
Tuples of (RDFURN, hash object) that exist in the store.
### Response:
def CheckHashes(self, hashes, external=True):
"""Checks a list of hashes for presence in the store.
Sub stores need to pass back the original HashDigest objects since they
carry state about the original file source.
Only unique hashes are checked, if there is duplication in the hashes input
it is the caller's responsibility to maintain any necessary mappings.
Args:
hashes: A list of Hash objects to check.
external: If true, attempt to check stores defined as EXTERNAL.
Yields:
Tuples of (RDFURN, hash object) that exist in the store.
"""
hashes = set(hashes)
for child in self.GetChildrenByPriority(allow_external=external):
for urn, hash_obj in child.CheckHashes(hashes):
yield urn, hash_obj
hashes.discard(hash_obj)
# Nothing to search for, we are done.
if not hashes:
break |
def connect1(self, A, B, distance):
"Add a link from A to B of given distance, in one direction only."
self.dict.setdefault(A,{})[B] = distance | Add a link from A to B of given distance, in one direction only. | Below is the the instruction that describes the task:
### Input:
Add a link from A to B of given distance, in one direction only.
### Response:
def connect1(self, A, B, distance):
"Add a link from A to B of given distance, in one direction only."
self.dict.setdefault(A,{})[B] = distance |
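connect1 is a thin wrapper around dict.setdefault; the same idea with a plain dict, outside any Graph class:
links = {}
links.setdefault('A', {})['B'] = 5  # one-way link A -> B with distance 5
links.setdefault('A', {})['C'] = 9  # reuses the inner dict already created for 'A'
print(links)                        # -> {'A': {'B': 5, 'C': 9}}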
def timescales_samples(self):
r""" Samples of the timescales """
res = np.empty((self.nsamples, self.nstates-1), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].timescales
        return res | Samples of the timescales | Below is the the instruction that describes the task:
### Input:
r""" Samples of the timescales
### Response:
def timescales_samples(self):
r""" Samples of the timescales """
res = np.empty((self.nsamples, self.nstates-1), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].timescales
return res |
def __getVariables(self):
"""Parses the P4 env vars using 'set p4'"""
try:
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
output = subprocess.check_output(['p4', 'set'], startupinfo=startupinfo)
if six.PY3:
output = str(output, 'utf8')
except subprocess.CalledProcessError as err:
LOGGER.error(err)
return
p4vars = {}
for line in output.splitlines():
if not line:
continue
try:
k, v = line.split('=', 1)
except ValueError:
continue
p4vars[k.strip()] = v.strip().split(' (')[0]
if p4vars[k.strip()].startswith('(config'):
del p4vars[k.strip()]
self._port = self._port or os.getenv('P4PORT', p4vars.get('P4PORT'))
self._user = self._user or os.getenv('P4USER', p4vars.get('P4USER'))
        self._client = self._client or os.getenv('P4CLIENT', p4vars.get('P4CLIENT')) | Parses the P4 env vars using 'set p4' | Below is the the instruction that describes the task:
### Input:
Parses the P4 env vars using 'set p4'
### Response:
def __getVariables(self):
"""Parses the P4 env vars using 'set p4'"""
try:
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
output = subprocess.check_output(['p4', 'set'], startupinfo=startupinfo)
if six.PY3:
output = str(output, 'utf8')
except subprocess.CalledProcessError as err:
LOGGER.error(err)
return
p4vars = {}
for line in output.splitlines():
if not line:
continue
try:
k, v = line.split('=', 1)
except ValueError:
continue
p4vars[k.strip()] = v.strip().split(' (')[0]
if p4vars[k.strip()].startswith('(config'):
del p4vars[k.strip()]
self._port = self._port or os.getenv('P4PORT', p4vars.get('P4PORT'))
self._user = self._user or os.getenv('P4USER', p4vars.get('P4USER'))
self._client = self._client or os.getenv('P4CLIENT', p4vars.get('P4CLIENT')) |
def del_piper(self, piper, forced=False):
"""
Removes a ``Piper`` from the ``Dagger`` instance.
Arguments:
- piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
instance id.
- forced(bool) [default: ``False``] If "forced" is ``True``, will not
            raise a ``DaggerError`` if the ``Piper`` has outgoing pipes and
will also remove it.
"""
self.log.debug('%s trying to delete piper %s' % \
(repr(self), repr(piper)))
try:
piper = self.resolve(piper, forgive=False)
except DaggerError:
self.log.error('%s cannot resolve piper from %s' % \
(repr(self), repr(piper)))
raise DaggerError('%s cannot resolve piper from %s' % \
(repr(self), repr(piper)))
if self.incoming_edges(piper) and not forced:
self.log.error('%s piper %s has down-stream pipers (use forced =True to override)' % \
(repr(self), piper))
raise DaggerError('%s piper %s has down-stream pipers (use forced =True to override)' % \
(repr(self), piper))
self.del_node(piper)
self.log.debug('%s deleted piper %s' % (repr(self), piper)) | Removes a ``Piper`` from the ``Dagger`` instance.
Arguments:
- piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
instance id.
- forced(bool) [default: ``False``] If "forced" is ``True``, will not
    raise a ``DaggerError`` if the ``Piper`` has outgoing pipes and
will also remove it. | Below is the the instruction that describes the task:
### Input:
Removes a ``Piper`` from the ``Dagger`` instance.
Arguments:
- piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
instance id.
- forced(bool) [default: ``False``] If "forced" is ``True``, will not
    raise a ``DaggerError`` if the ``Piper`` has outgoing pipes and
will also remove it.
### Response:
def del_piper(self, piper, forced=False):
"""
Removes a ``Piper`` from the ``Dagger`` instance.
Arguments:
- piper(``Piper`` or id(``Piper``)) ``Piper`` instance or ``Piper``
instance id.
- forced(bool) [default: ``False``] If "forced" is ``True``, will not
            raise a ``DaggerError`` if the ``Piper`` has outgoing pipes and
will also remove it.
"""
self.log.debug('%s trying to delete piper %s' % \
(repr(self), repr(piper)))
try:
piper = self.resolve(piper, forgive=False)
except DaggerError:
self.log.error('%s cannot resolve piper from %s' % \
(repr(self), repr(piper)))
raise DaggerError('%s cannot resolve piper from %s' % \
(repr(self), repr(piper)))
if self.incoming_edges(piper) and not forced:
self.log.error('%s piper %s has down-stream pipers (use forced =True to override)' % \
(repr(self), piper))
raise DaggerError('%s piper %s has down-stream pipers (use forced =True to override)' % \
(repr(self), piper))
self.del_node(piper)
self.log.debug('%s deleted piper %s' % (repr(self), piper)) |
def unlink_f(path):
"""Unlink path but do not complain if file does not exist."""
try:
os.unlink(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise | Unlink path but do not complain if file does not exist. | Below is the the instruction that describes the task:
### Input:
Unlink path but do not complain if file does not exist.
### Response:
def unlink_f(path):
"""Unlink path but do not complain if file does not exist."""
try:
os.unlink(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise |
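A usage sketch for unlink_f, assuming the function above is in scope; the file name is made up and written to the system temp directory.
import os
import tempfile
path = os.path.join(tempfile.gettempdir(), 'unlink_f_demo.txt')
open(path, 'w').close()  # create the file
unlink_f(path)           # removes it
unlink_f(path)           # second call is a silent no-op instead of raising FileNotFoundError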
def grouping_delta_stats(old, new):
"""
Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df)
"""
import pandas as pd
import utool as ut
group_delta = ut.grouping_delta(old, new)
stats = ut.odict()
unchanged = group_delta['unchanged']
splits = group_delta['splits']
merges = group_delta['merges']
hybrid = group_delta['hybrid']
statsmap = ut.partial(lambda x: ut.stats_dict(map(len, x), size=True))
stats['unchanged'] = statsmap(unchanged)
stats['old_split'] = statsmap(splits['old'])
stats['new_split'] = statsmap(ut.flatten(splits['new']))
stats['old_merge'] = statsmap(ut.flatten(merges['old']))
stats['new_merge'] = statsmap(merges['new'])
stats['old_hybrid'] = statsmap(hybrid['old'])
stats['new_hybrid'] = statsmap(hybrid['new'])
df = pd.DataFrame.from_dict(stats, orient='index')
df = df.loc[list(stats.keys())]
return df | Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df) | Below is the the instruction that describes the task:
### Input:
Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df)
### Response:
def grouping_delta_stats(old, new):
"""
Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df)
"""
import pandas as pd
import utool as ut
group_delta = ut.grouping_delta(old, new)
stats = ut.odict()
unchanged = group_delta['unchanged']
splits = group_delta['splits']
merges = group_delta['merges']
hybrid = group_delta['hybrid']
statsmap = ut.partial(lambda x: ut.stats_dict(map(len, x), size=True))
stats['unchanged'] = statsmap(unchanged)
stats['old_split'] = statsmap(splits['old'])
stats['new_split'] = statsmap(ut.flatten(splits['new']))
stats['old_merge'] = statsmap(ut.flatten(merges['old']))
stats['new_merge'] = statsmap(merges['new'])
stats['old_hybrid'] = statsmap(hybrid['old'])
stats['new_hybrid'] = statsmap(hybrid['new'])
df = pd.DataFrame.from_dict(stats, orient='index')
df = df.loc[list(stats.keys())]
return df |
def create_trainer(self, username, team, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=True, daily_goal=None, total_goal=None, prefered=True, account=None, verified=False):
"""Add a trainer to the database"""
args = locals()
url = api_url+'trainers/'
payload = {
'username': username,
'faction': team,
'statistics': statistics,
'prefered': prefered,
'last_modified': maya.now().iso8601(),
'owner': account,
'verified': verified
}
for i in args:
if args[i] is not None and i not in ['self', 'username', 'team', 'account', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json()) | Add a trainer to the database | Below is the the instruction that describes the task:
### Input:
Add a trainer to the database
### Response:
def create_trainer(self, username, team, start_date=None, has_cheated=None, last_cheated=None, currently_cheats=None, statistics=True, daily_goal=None, total_goal=None, prefered=True, account=None, verified=False):
"""Add a trainer to the database"""
args = locals()
url = api_url+'trainers/'
payload = {
'username': username,
'faction': team,
'statistics': statistics,
'prefered': prefered,
'last_modified': maya.now().iso8601(),
'owner': account,
'verified': verified
}
for i in args:
if args[i] is not None and i not in ['self', 'username', 'team', 'account', 'start_date']:
payload[i] = args[i]
elif args[i] is not None and i=='start_date':
payload[i] = args[i].date().isoformat()
r = requests.post(url, data=json.dumps(payload), headers=self.headers)
print(request_status(r))
r.raise_for_status()
return Trainer(r.json()) |
def make_gettext_patterns():
"Strongly inspired from idlelib.ColorDelegator.make_pat"
kwstr = 'msgid msgstr'
kw = r"\b" + any("keyword", kwstr.split()) + r"\b"
fuzzy = any("builtin", [r"#,[^\n]*"])
links = any("normal", [r"#:[^\n]*"])
comment = any("comment", [r"#[^\n]*"])
number = any("number",
[r"\b[+-]?[0-9]+[lL]?\b",
r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"])
sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
string = any("string", [sqstring, dqstring])
return "|".join([kw, string, number, fuzzy, links, comment,
any("SYNC", [r"\n"])]) | Strongly inspired from idlelib.ColorDelegator.make_pat | Below is the the instruction that describes the task:
### Input:
Strongly inspired from idlelib.ColorDelegator.make_pat
### Response:
def make_gettext_patterns():
"Strongly inspired from idlelib.ColorDelegator.make_pat"
kwstr = 'msgid msgstr'
kw = r"\b" + any("keyword", kwstr.split()) + r"\b"
fuzzy = any("builtin", [r"#,[^\n]*"])
links = any("normal", [r"#:[^\n]*"])
comment = any("comment", [r"#[^\n]*"])
number = any("number",
[r"\b[+-]?[0-9]+[lL]?\b",
r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"])
sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
string = any("string", [sqstring, dqstring])
return "|".join([kw, string, number, fuzzy, links, comment,
any("SYNC", [r"\n"])]) |
def users(self):
"""
:class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
this Console.
"""
# We do here some lazy loading.
if not self._users:
self._users = UserManager(self)
return self._users | :class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
this Console. | Below is the the instruction that describes the task:
### Input:
:class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
this Console.
### Response:
def users(self):
"""
:class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
this Console.
"""
# We do here some lazy loading.
if not self._users:
self._users = UserManager(self)
return self._users |
async def close(self, code: int = 1006, reason: str = "Connection closed"):
"""
Closes the websocket.
"""
if self._closed:
return
self._closed = True
if self._scope is not None:
await self._scope.cancel()
# cancel any outstanding listeners
data = self._connection.send(CloseConnection(code=code, reason=reason))
await self._sock.send_all(data)
# No, we don't wait for the correct reply
await self._sock.close() | Closes the websocket. | Below is the the instruction that describes the task:
### Input:
Closes the websocket.
### Response:
async def close(self, code: int = 1006, reason: str = "Connection closed"):
"""
Closes the websocket.
"""
if self._closed:
return
self._closed = True
if self._scope is not None:
await self._scope.cancel()
# cancel any outstanding listeners
data = self._connection.send(CloseConnection(code=code, reason=reason))
await self._sock.send_all(data)
# No, we don't wait for the correct reply
await self._sock.close() |
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
access_lists = ET.SubElement(overlay_gateway, "access-lists")
ipv6 = ET.SubElement(access_lists, "ipv6")
out = ET.SubElement(ipv6, "out")
ipv6_acl_out_name = ET.SubElement(out, "ipv6-acl-out-name")
ipv6_acl_out_name.text = kwargs.pop('ipv6_acl_out_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
access_lists = ET.SubElement(overlay_gateway, "access-lists")
ipv6 = ET.SubElement(access_lists, "ipv6")
out = ET.SubElement(ipv6, "out")
ipv6_acl_out_name = ET.SubElement(out, "ipv6-acl-out-name")
ipv6_acl_out_name.text = kwargs.pop('ipv6_acl_out_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_pdf_from_html(html: str,
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> bytes:
"""
Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object
"""
result = make_pdf_from_html(
on_disk=False,
html=html,
header_html=header_html,
footer_html=footer_html,
wkhtmltopdf_filename=wkhtmltopdf_filename,
wkhtmltopdf_options=wkhtmltopdf_options,
file_encoding=file_encoding,
debug_options=debug_options,
debug_content=debug_content,
debug_wkhtmltopdf_args=debug_wkhtmltopdf_args,
fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug,
processor=processor,
) # type: bytes
return result | Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object | Below is the the instruction that describes the task:
### Input:
Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object
### Response:
def get_pdf_from_html(html: str,
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> bytes:
"""
Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object
"""
result = make_pdf_from_html(
on_disk=False,
html=html,
header_html=header_html,
footer_html=footer_html,
wkhtmltopdf_filename=wkhtmltopdf_filename,
wkhtmltopdf_options=wkhtmltopdf_options,
file_encoding=file_encoding,
debug_options=debug_options,
debug_content=debug_content,
debug_wkhtmltopdf_args=debug_wkhtmltopdf_args,
fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug,
processor=processor,
) # type: bytes
return result |
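A quick, hedged illustration of calling the wrapper above (the file names are assumptions; the surrounding make_pdf_from_html helper and a working wkhtmltopdf binary are required for this to actually run):
html = "<html><body><h1>Report</h1><p>Hello, PDF.</p></body></html>"
header = "<html><body><small>Confidential</small></body></html>"
pdf_bytes = get_pdf_from_html(html, header_html=header)
with open("report.pdf", "wb") as f:
    f.write(pdf_bytes)  # pdf_bytes is the raw PDF document as bytes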
def leftsibling(node):
"""
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
if idx:
return pchildren[idx - 1]
else:
return None
else:
return None | Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan') | Below is the the instruction that describes the task:
### Input:
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
### Response:
def leftsibling(node):
"""
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
if idx:
return pchildren[idx - 1]
else:
return None
else:
return None |
def imread(filename):
'''
Like cv2.imread
This function will make sure filename exists
'''
im = cv2.imread(filename)
if im is None:
raise RuntimeError("file: '%s' not exists" % filename)
return im | Like cv2.imread
This function will make sure filename exists | Below is the the instruction that describes the task:
### Input:
Like cv2.imread
This function will make sure filename exists
### Response:
def imread(filename):
'''
Like cv2.imread
This function will make sure filename exists
'''
im = cv2.imread(filename)
if im is None:
raise RuntimeError("file: '%s' not exists" % filename)
return im |
def get_line_flux(line_wave, wave, flux, **kwargs):
"""Interpolated flux at a given wavelength (calls np.interp)."""
return np.interp(line_wave, wave, flux, **kwargs) | Interpolated flux at a given wavelength (calls np.interp). | Below is the the instruction that describes the task:
### Input:
Interpolated flux at a given wavelength (calls np.interp).
### Response:
def get_line_flux(line_wave, wave, flux, **kwargs):
"""Interpolated flux at a given wavelength (calls np.interp)."""
return np.interp(line_wave, wave, flux, **kwargs) |
def _get_filepath(self, ext):
"""
Returns a file path relative to this page
"""
filename = ("%s%d.%s" % (self.FILE_PREFIX, self.page_nb + 1, ext))
return self.fs.join(self.doc.path, filename) | Returns a file path relative to this page | Below is the the instruction that describes the task:
### Input:
Returns a file path relative to this page
### Response:
def _get_filepath(self, ext):
"""
Returns a file path relative to this page
"""
filename = ("%s%d.%s" % (self.FILE_PREFIX, self.page_nb + 1, ext))
return self.fs.join(self.doc.path, filename) |
def has_header(self, hdrclass):
'''
Return True if the packet has a header of the given hdrclass,
False otherwise.
'''
if isinstance(hdrclass, str):
return self.get_header_by_name(hdrclass) is not None
return self.get_header(hdrclass) is not None | Return True if the packet has a header of the given hdrclass,
False otherwise. | Below is the the instruction that describes the task:
### Input:
Return True if the packet has a header of the given hdrclass,
False otherwise.
### Response:
def has_header(self, hdrclass):
'''
Return True if the packet has a header of the given hdrclass,
False otherwise.
'''
if isinstance(hdrclass, str):
return self.get_header_by_name(hdrclass) is not None
return self.get_header(hdrclass) is not None |
def does_fragment_condition_match(
self,
fragment: Union[FragmentDefinitionNode, InlineFragmentNode],
type_: GraphQLObjectType,
) -> bool:
"""Determine if a fragment is applicable to the given type."""
type_condition_node = fragment.type_condition
if not type_condition_node:
return True
conditional_type = type_from_ast(self.schema, type_condition_node)
if conditional_type is type_:
return True
if is_abstract_type(conditional_type):
return self.schema.is_possible_type(
cast(GraphQLAbstractType, conditional_type), type_
)
return False | Determine if a fragment is applicable to the given type. | Below is the the instruction that describes the task:
### Input:
Determine if a fragment is applicable to the given type.
### Response:
def does_fragment_condition_match(
self,
fragment: Union[FragmentDefinitionNode, InlineFragmentNode],
type_: GraphQLObjectType,
) -> bool:
"""Determine if a fragment is applicable to the given type."""
type_condition_node = fragment.type_condition
if not type_condition_node:
return True
conditional_type = type_from_ast(self.schema, type_condition_node)
if conditional_type is type_:
return True
if is_abstract_type(conditional_type):
return self.schema.is_possible_type(
cast(GraphQLAbstractType, conditional_type), type_
)
return False |
def _create_record(self, rtype, name, content):
"""
Create a resource record. If a record already exists with the same
content, do nothing.
"""
result = False
name = self._relative_name(name)
ttl = None
        # TODO: should assert that this is an int
if self.ttl:
ttl = self.ttl
with localzone.manage(self.filename, self.origin, autosave=True) as zone:
if zone.add_record(name, rtype, content, ttl=ttl): # pylint: disable=no-member
result = True
LOGGER.debug("create_record: %s", result)
return result | Create a resource record. If a record already exists with the same
content, do nothing. | Below is the the instruction that describes the task:
### Input:
Create a resource record. If a record already exists with the same
content, do nothing.
### Response:
def _create_record(self, rtype, name, content):
"""
Create a resource record. If a record already exists with the same
content, do nothing.
"""
result = False
name = self._relative_name(name)
ttl = None
        # TODO: should assert that this is an int
if self.ttl:
ttl = self.ttl
with localzone.manage(self.filename, self.origin, autosave=True) as zone:
if zone.add_record(name, rtype, content, ttl=ttl): # pylint: disable=no-member
result = True
LOGGER.debug("create_record: %s", result)
return result |
def get_vndmat_attr(d,keypath,attr,**kwargs):
'''
get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True)
'''
kt,vn = _d2kvmatrix(d)
kdmat = _scankm(kt)
ltree = elel.ListTree(vn)
vndmat = ltree.desc
loc = get_kdmat_loc(kdmat,keypath)
rslt = vndmat[loc[0]][loc[1]][attr]
if(rslt == None):
pass
elif(elel.is_matrix(rslt,mode='loose')):
if('path2loc' in kwargs):
rslt = elel.array_map(rslt,ltree.path2loc)
else:
pass
if('path2keypath' in kwargs):
nlocs = elel.array_map(rslt,ltree.path2loc)
def cond_func(ele,kdmat):
return(kdmat[ele[0]][ele[1]]['path'])
rslt = elel.array_map(nlocs,cond_func,kdmat)
else:
pass
else:
if('path2loc' in kwargs):
rslt = ltree.path2loc(rslt)
else:
pass
if('path2keypath' in kwargs):
nloc = ltree.path2loc(rslt)
rslt = kdmat[nloc[0]][nloc[1]]['path']
else:
pass
return(rslt) | get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True) | Below is the the instruction that describes the task:
### Input:
get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True)
### Response:
def get_vndmat_attr(d,keypath,attr,**kwargs):
'''
get_vndmat_attr(d,['x'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['t'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['u'],'lsib_path',path2keypath=True)
get_vndmat_attr(d,['y'],'lsib_path',path2keypath=True)
'''
kt,vn = _d2kvmatrix(d)
kdmat = _scankm(kt)
ltree = elel.ListTree(vn)
vndmat = ltree.desc
loc = get_kdmat_loc(kdmat,keypath)
rslt = vndmat[loc[0]][loc[1]][attr]
if(rslt == None):
pass
elif(elel.is_matrix(rslt,mode='loose')):
if('path2loc' in kwargs):
rslt = elel.array_map(rslt,ltree.path2loc)
else:
pass
if('path2keypath' in kwargs):
nlocs = elel.array_map(rslt,ltree.path2loc)
def cond_func(ele,kdmat):
return(kdmat[ele[0]][ele[1]]['path'])
rslt = elel.array_map(nlocs,cond_func,kdmat)
else:
pass
else:
if('path2loc' in kwargs):
rslt = ltree.path2loc(rslt)
else:
pass
if('path2keypath' in kwargs):
nloc = ltree.path2loc(rslt)
rslt = kdmat[nloc[0]][nloc[1]]['path']
else:
pass
return(rslt) |
def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
print('This model is exported as a custom Core ML model. In order to use it in your\n'
'application, you must also include "libRecommender.dylib". For additional\n'
'details see:\n'
'https://apple.github.io/turicreate/docs/userguide/recommender/coreml-deployment.html')
import turicreate as tc
self.__proxy__.export_to_coreml(filename) | Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel') | Below is the the instruction that describes the task:
### Input:
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
### Response:
def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
print('This model is exported as a custom Core ML model. In order to use it in your\n'
'application, you must also include "libRecommender.dylib". For additional\n'
'details see:\n'
'https://apple.github.io/turicreate/docs/userguide/recommender/coreml-deployment.html')
import turicreate as tc
self.__proxy__.export_to_coreml(filename) |
def _post(self, uri, data):
"""
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
"""
if not uri.startswith(self.remote):
uri = '{}/{}'.format(self.remote, uri)
self.logger.debug(uri)
return self._make_request(uri, data=data) | HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' }) | Below is the the instruction that describes the task:
### Input:
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
### Response:
def _post(self, uri, data):
"""
HTTP POST function
:param uri: REST endpoint to POST to
:param data: list of dicts to be passed to the endpoint
:return: list of dicts, usually will be a list of objects or id's
Example:
ret = cli.post('/indicators', { 'indicator': 'example.com' })
"""
if not uri.startswith(self.remote):
uri = '{}/{}'.format(self.remote, uri)
self.logger.debug(uri)
return self._make_request(uri, data=data) |
def press_keycode(self, keycode, metastate=None):
"""Sends a press of keycode to the device.
Android only.
Possible keycodes & meta states can be found in
http://developer.android.com/reference/android/view/KeyEvent.html
Meta state describe the pressed state of key modifiers such as
Shift, Ctrl & Alt keys. The Meta State is an integer in which each
bit set to 1 represents a pressed meta key.
For example
- META_SHIFT_ON = 1
- META_ALT_ON = 2
| metastate=1 --> Shift is pressed
| metastate=2 --> Alt is pressed
| metastate=3 --> Shift+Alt is pressed
        - _keycode_ - the keycode to be sent to the device
        - _metastate_ - status of the meta keys
"""
driver = self._current_application()
driver.press_keycode(keycode, metastate) | Sends a press of keycode to the device.
Android only.
Possible keycodes & meta states can be found in
http://developer.android.com/reference/android/view/KeyEvent.html
Meta state describe the pressed state of key modifiers such as
Shift, Ctrl & Alt keys. The Meta State is an integer in which each
bit set to 1 represents a pressed meta key.
For example
- META_SHIFT_ON = 1
- META_ALT_ON = 2
| metastate=1 --> Shift is pressed
| metastate=2 --> Alt is pressed
| metastate=3 --> Shift+Alt is pressed
        - _keycode_ - the keycode to be sent to the device
        - _metastate_ - status of the meta keys
### Input:
Sends a press of keycode to the device.
Android only.
Possible keycodes & meta states can be found in
http://developer.android.com/reference/android/view/KeyEvent.html
Meta state describe the pressed state of key modifiers such as
Shift, Ctrl & Alt keys. The Meta State is an integer in which each
bit set to 1 represents a pressed meta key.
For example
- META_SHIFT_ON = 1
- META_ALT_ON = 2
| metastate=1 --> Shift is pressed
| metastate=2 --> Alt is pressed
| metastate=3 --> Shift+Alt is pressed
        - _keycode_ - the keycode to be sent to the device
        - _metastate_ - status of the meta keys
### Response:
def press_keycode(self, keycode, metastate=None):
"""Sends a press of keycode to the device.
Android only.
Possible keycodes & meta states can be found in
http://developer.android.com/reference/android/view/KeyEvent.html
Meta state describe the pressed state of key modifiers such as
Shift, Ctrl & Alt keys. The Meta State is an integer in which each
bit set to 1 represents a pressed meta key.
For example
- META_SHIFT_ON = 1
- META_ALT_ON = 2
| metastate=1 --> Shift is pressed
| metastate=2 --> Alt is pressed
| metastate=3 --> Shift+Alt is pressed
        - _keycode_ - the keycode to be sent to the device
        - _metastate_ - status of the meta keys
"""
driver = self._current_application()
driver.press_keycode(keycode, metastate) |
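Because the meta state is a bit field, combined modifiers are simply the bitwise OR of the individual META_* constants. A small illustrative sketch (the `applib` instance name is an assumption; 29 is KEYCODE_A in android.view.KeyEvent):
META_SHIFT_ON = 1
META_ALT_ON = 2
KEYCODE_A = 29  # from android.view.KeyEvent
# `applib` is assumed to be a library instance with an application already open.
applib.press_keycode(KEYCODE_A)                                         # plain 'a'
applib.press_keycode(KEYCODE_A, metastate=META_SHIFT_ON)                # 'A'
applib.press_keycode(KEYCODE_A, metastate=META_SHIFT_ON | META_ALT_ON)  # Shift+Alt+A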
def _op_method(self, data, extra_factor=1.0):
"""Operator
This method returns the input data after the singular values have been
thresholded
Parameters
----------
data : np.ndarray
Input data array
extra_factor : float
Additional multiplication factor
Returns
-------
np.ndarray SVD thresholded data
"""
# Update threshold with extra factor.
threshold = self.thresh * extra_factor
if self.lowr_type == 'standard':
data_matrix = svd_thresh(cube2matrix(data), threshold,
thresh_type=self.thresh_type)
elif self.lowr_type == 'ngole':
data_matrix = svd_thresh_coef(cube2matrix(data), self.operator,
threshold,
thresh_type=self.thresh_type)
new_data = matrix2cube(data_matrix, data.shape[1:])
# Return updated data.
return new_data | Operator
This method returns the input data after the singular values have been
thresholded
Parameters
----------
data : np.ndarray
Input data array
extra_factor : float
Additional multiplication factor
Returns
-------
np.ndarray SVD thresholded data | Below is the the instruction that describes the task:
### Input:
Operator
This method returns the input data after the singular values have been
thresholded
Parameters
----------
data : np.ndarray
Input data array
extra_factor : float
Additional multiplication factor
Returns
-------
np.ndarray SVD thresholded data
### Response:
def _op_method(self, data, extra_factor=1.0):
"""Operator
This method returns the input data after the singular values have been
thresholded
Parameters
----------
data : np.ndarray
Input data array
extra_factor : float
Additional multiplication factor
Returns
-------
np.ndarray SVD thresholded data
"""
# Update threshold with extra factor.
threshold = self.thresh * extra_factor
if self.lowr_type == 'standard':
data_matrix = svd_thresh(cube2matrix(data), threshold,
thresh_type=self.thresh_type)
elif self.lowr_type == 'ngole':
data_matrix = svd_thresh_coef(cube2matrix(data), self.operator,
threshold,
thresh_type=self.thresh_type)
new_data = matrix2cube(data_matrix, data.shape[1:])
# Return updated data.
return new_data |
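For the 'standard' branch, the core operation is soft-thresholding the singular values of the unfolded data. A self-contained NumPy sketch of that step (assuming svd_thresh with a soft threshold type behaves like this; the real helper may differ):
import numpy as np
def svd_soft_thresh(matrix, threshold):
    # Soft-threshold the singular values of a 2D array.
    u, s, vt = np.linalg.svd(matrix, full_matrices=False)
    s_shrunk = np.maximum(s - threshold, 0.0)  # soft thresholding of singular values
    return (u * s_shrunk) @ vt                 # rebuild the (now low-rank) matrix
data_matrix = np.random.randn(20, 15)
low_rank = svd_soft_thresh(data_matrix, threshold=2.0)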
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping | Create validator for given mapping. | Below is the the instruction that describes the task:
### Input:
Create validator for given mapping.
### Response:
def _compile_mapping(self, schema, invalid_msg=None):
"""Create validator for given mapping."""
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set(key for key in schema
if key is not Extra and
((self.required and not isinstance(key, (Optional, Remove))) or
isinstance(key, Required)))
# Keys that may have defaults
all_default_keys = set(key for key in schema
if isinstance(key, Required) or
isinstance(key, Optional))
_compiled_schema = {}
for skey, svalue in iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
candidates = list(_iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case a ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, Undefined) and \
key.schema not in key_value_map:
# A default value has been specified for this missing
# key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
remove_key = False
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except er.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
# check if the key is marked for removal
is_remove = new_key is Remove
try:
cval = cvalue(key_path, value)
# include if it's not marked for removal
if not is_remove:
out[new_key] = cval
else:
remove_key = True
continue
except er.MultipleInvalid as e:
exception_errors.extend(e.errors)
except er.Invalid as e:
exception_errors.append(e)
if exception_errors:
if is_remove or remove_key:
continue
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if remove_key:
# remove key
continue
elif self.extra == ALLOW_EXTRA:
out[key] = value
elif self.extra != REMOVE_EXTRA:
errors.append(er.Invalid('extra keys not allowed', key_path))
# else REMOVE_EXTRA: ignore the key so it's removed from output
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
errors.append(er.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise er.MultipleInvalid(errors)
return out
return validate_mapping |
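The mapping behaviours compiled above (required keys, defaults, Remove markers, and the extra-keys policy) are easiest to see through the public API. A hedged usage sketch, assuming this is the standard voluptuous package:
from voluptuous import Schema, Required, Optional, Remove, ALLOW_EXTRA
schema = Schema(
    {
        Required("name"): str,
        Optional("retries", default=3): int,
        Remove("debug"): bool,  # validated, then dropped from the output
    },
    extra=ALLOW_EXTRA,          # unknown keys are copied through unchanged
)
result = schema({"name": "job-1", "debug": True, "owner": "alice"})
# result == {"name": "job-1", "retries": 3, "owner": "alice"}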
def basic_auth(self, username, password):
"""Set the Basic Auth credentials on this Session.
:param str username: Your GitHub username
:param str password: Your GitHub password
"""
if not (username and password):
return
self.auth = (username, password)
# Disable token authentication
self.headers.pop('Authorization', None) | Set the Basic Auth credentials on this Session.
:param str username: Your GitHub username
:param str password: Your GitHub password | Below is the the instruction that describes the task:
### Input:
Set the Basic Auth credentials on this Session.
:param str username: Your GitHub username
:param str password: Your GitHub password
### Response:
def basic_auth(self, username, password):
"""Set the Basic Auth credentials on this Session.
:param str username: Your GitHub username
:param str password: Your GitHub password
"""
if not (username and password):
return
self.auth = (username, password)
# Disable token authentication
self.headers.pop('Authorization', None) |
def kill(config, container, *args, **kwargs):
'''
Kill a running container
:type container: string
:param container: The container id to kill
:rtype: dict
:returns: boolean
'''
err = "Unknown"
client = _get_client(config)
try:
dcontainer = _get_container_infos(config, container)['Id']
if is_running(config, dcontainer):
client.kill(dcontainer)
if not is_running(config, dcontainer):
print "Container killed."
return True
else:
print "Container not running."
return True
except Exception as e:
err = e
utils.error("Unable to kill the container: %s"%err)
return False | Kill a running container
:type container: string
:param container: The container id to kill
:rtype: dict
:returns: boolean | Below is the the instruction that describes the task:
### Input:
Kill a running container
:type container: string
:param container: The container id to kill
:rtype: dict
:returns: boolean
### Response:
def kill(config, container, *args, **kwargs):
'''
Kill a running container
:type container: string
:param container: The container id to kill
:rtype: dict
:returns: boolean
'''
err = "Unknown"
client = _get_client(config)
try:
dcontainer = _get_container_infos(config, container)['Id']
if is_running(config, dcontainer):
client.kill(dcontainer)
if not is_running(config, dcontainer):
print "Container killed."
return True
else:
print "Container not running."
return True
except Exception as e:
err = e
utils.error("Unable to kill the container: %s"%err)
return False |