code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def _update_params(self, constants):
"""Update the params."""
constants = np.max(np.min(constants, 1))
self.params['r']['value'] = max([self.params['r']['value'],
constants])
epsilon = constants / self.params['r']['value']
influence = self._calculate_influence(epsilon)
# Account for learning rate
return influence * epsilon | Update the params. | Below is the instruction that describes the task:
### Input:
Update the params.
### Response:
def _update_params(self, constants):
"""Update the params."""
constants = np.max(np.min(constants, 1))
self.params['r']['value'] = max([self.params['r']['value'],
constants])
epsilon = constants / self.params['r']['value']
influence = self._calculate_influence(epsilon)
# Account for learning rate
return influence * epsilon |
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8') | There are a couple of template scripts in the package. This
function loads one of them and prepares it for use. | Below is the instruction that describes the task:
### Input:
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
### Response:
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8') |
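For context, a minimal standalone sketch of the same resource-loading pattern, assuming a hypothetical package named 'mypkg' that ships a 'script.tmpl' resource:

from pkg_resources import resource_string

def load_template(dev_path=False):
    name = 'script.tmpl'
    if dev_path:
        name = name.replace('.tmpl', ' (dev).tmpl')
    # resource_string returns bytes, so decode before use
    return resource_string('mypkg', name).decode('utf-8')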
def _setup_program(self):
"""
Create a temporary file containing the program code. The code is
fetched via :meth:`_get_program`.
"""
filename = self._get_program_filename()
path = os.path.join(self.get_temp_dir(), filename)
self.program_fp = open(path, 'wb')
self.program_fp.write(self._get_program())
self.program_fp.flush()
os.chmod(self.program_fp.name, int('0700', 8))
reopen_readonly(self.program_fp) | Create a temporary file containing the program code. The code is
fetched via :meth:`_get_program`. | Below is the instruction that describes the task:
### Input:
Create a temporary file containing the program code. The code is
fetched via :meth:`_get_program`.
### Response:
def _setup_program(self):
"""
Create a temporary file containing the program code. The code is
fetched via :meth:`_get_program`.
"""
filename = self._get_program_filename()
path = os.path.join(self.get_temp_dir(), filename)
self.program_fp = open(path, 'wb')
self.program_fp.write(self._get_program())
self.program_fp.flush()
os.chmod(self.program_fp.name, int('0700', 8))
reopen_readonly(self.program_fp) |
def _read_header(self):
'''
Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count |
'''
self._fh.seek(0)
buf = self._fh.read(4*2)
fc, dc = struct.unpack("<II", buf)
return fc, dc | Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count | | Below is the instruction that describes the task:
### Input:
Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count |
### Response:
def _read_header(self):
'''
Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count |
'''
self._fh.seek(0)
buf = self._fh.read(4*2)
fc, dc = struct.unpack("<II", buf)
return fc, dc |
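A quick, self-contained illustration of how that header layout maps onto the struct format string (io.BytesIO stands in for the real file handle here):

import io
import struct

# Fake header: frames count = 100, dimensions count = 3, packed as two
# little-endian unsigned 32-bit ints ("<II", 2 * 4 bytes).
fh = io.BytesIO(struct.pack("<II", 100, 3))
fh.seek(0)
frames, dims = struct.unpack("<II", fh.read(4 * 2))
print(frames, dims)  # 100 3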
def create_track(self, href=None, media_url=None, label=None,
audio_channel=None):
"""Add a new track to a bundle. Note that the total number of
allowable tracks is limited. See the API documentation for
details.
'href' the relative href to the tracks list. May not be None.
'media_url' public URL to media file. May not be None.
'label' short name for the track. May be None.
'audio_channel' the channel(s) to use in a stereo file. May be
None. For details see the API documentation.
Returns a data structure equivalent to the JSON returned by the
API.
If the response status is not 2xx, or if the maximum number of
tracks is exceeded, throws an APIException. If the JSON to
python data struct conversion fails, throws an
APIDataException."""
# Argument error checking.
assert href is not None
assert media_url is not None
# Prepare the data we're going to write.
data = None
fields = {}
fields['media_url'] = media_url
if label is not None:
fields['label'] = label
if audio_channel is not None:
fields['audio_channel'] = audio_channel
if len(fields) > 0:
data = fields
raw_result = self.post(href, data)
if raw_result.status < 200 or raw_result.status > 202:
raise APIException(raw_result.status, raw_result.json)
# Convert the JSON to a python data struct.
return self._parse_json(raw_result.json) | Add a new track to a bundle. Note that the total number of
allowable tracks is limited. See the API documentation for
details.
'href' the relative href to the tracks list. May not be None.
'media_url' public URL to media file. May not be None.
'label' short name for the track. May be None.
'audio_channel' the channel(s) to use in a stereo file. May be
None. For details see the API documentation.
Returns a data structure equivalent to the JSON returned by the
API.
If the response status is not 2xx, or if the maximum number of
tracks is exceeded, throws an APIException. If the JSON to
python data struct conversion fails, throws an
APIDataException. | Below is the instruction that describes the task:
### Input:
Add a new track to a bundle. Note that the total number of
allowable tracks is limited. See the API documentation for
details.
'href' the relative href to the tracks list. May not be None.
'media_url' public URL to media file. May not be None.
'label' short name for the track. May be None.
'audio_channel' the channel(s) to use in a stereo file. May be
None. For details see the API documentation.
Returns a data structure equivalent to the JSON returned by the
API.
If the response status is not 2xx, or if the maximum number of
tracks is exceeded, throws an APIException. If the JSON to
python data struct conversion fails, throws an
APIDataException.
### Response:
def create_track(self, href=None, media_url=None, label=None,
audio_channel=None):
"""Add a new track to a bundle. Note that the total number of
allowable tracks is limited. See the API documentation for
details.
'href' the relative href to the tracks list. May not be None.
'media_url' public URL to media file. May not be None.
'label' short name for the track. May be None.
'audio_channel' the channel(s) to use in a stereo file. May be
None. For details see the API documentation.
Returns a data structure equivalent to the JSON returned by the
API.
If the response status is not 2xx, or if the maximum number of
tracks is exceeded, throws an APIException. If the JSON to
python data struct conversion fails, throws an
APIDataException."""
# Argument error checking.
assert href is not None
assert media_url is not None
# Prepare the data we're going to write.
data = None
fields = {}
fields['media_url'] = media_url
if label is not None:
fields['label'] = label
if audio_channel is not None:
fields['audio_channel'] = audio_channel
if len(fields) > 0:
data = fields
raw_result = self.post(href, data)
if raw_result.status < 200 or raw_result.status > 202:
raise APIException(raw_result.status, raw_result.json)
# Convert the JSON to a python data struct.
return self._parse_json(raw_result.json) |
def _hash(self, hash_name):
""" Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'``
or ``'sha1'``) that's available in the :mod:`hashlib` module.
"""
m = hashlib.new(hash_name)
for chunk in self.chunks(8192, mode="rb"):
m.update(chunk)
return m | Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'``
or ``'sha1'``) that's available in the :mod:`hashlib` module. | Below is the instruction that describes the task:
### Input:
Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'``
or ``'sha1'``) that's available in the :mod:`hashlib` module.
### Response:
def _hash(self, hash_name):
""" Returns a hash object for the file at the current path.
`hash_name` should be a hash algo name (such as ``'md5'``
or ``'sha1'``) that's available in the :mod:`hashlib` module.
"""
m = hashlib.new(hash_name)
for chunk in self.chunks(8192, mode="rb"):
m.update(chunk)
return m |
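The same chunked-hashing idea as a standalone helper (a sketch: it takes a plain file path instead of the path object's chunks() method):

import hashlib

def file_hash(path, hash_name, chunk_size=8192):
    # hashlib.new accepts any algorithm name known to hashlib, e.g. 'md5' or 'sha1'
    m = hashlib.new(hash_name)
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            m.update(chunk)
    return m

# file_hash('setup.py', 'sha1').hexdigest()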
async def _download_text(url, session):
"""Asynchronously request a URL and get the encoded text content of the
body.
Parameters
----------
url : `str`
URL to download.
session : `aiohttp.ClientSession`
An open aiohttp session.
Returns
-------
content : `str`
Content downloaded from the URL.
"""
logger = logging.getLogger(__name__)
async with session.get(url) as response:
# aiohttp decodes the content to a Python string
logger.info('Downloading %r', url)
return await response.text() | Asynchronously request a URL and get the encoded text content of the
body.
Parameters
----------
url : `str`
URL to download.
session : `aiohttp.ClientSession`
An open aiohttp session.
Returns
-------
content : `str`
Content downloaded from the URL. | Below is the instruction that describes the task:
### Input:
Asynchronously request a URL and get the encoded text content of the
body.
Parameters
----------
url : `str`
URL to download.
session : `aiohttp.ClientSession`
An open aiohttp session.
Returns
-------
content : `str`
Content downloaded from the URL.
### Response:
async def _download_text(url, session):
"""Asynchronously request a URL and get the encoded text content of the
body.
Parameters
----------
url : `str`
URL to download.
session : `aiohttp.ClientSession`
An open aiohttp session.
Returns
-------
content : `str`
Content downloaded from the URL.
"""
logger = logging.getLogger(__name__)
async with session.get(url) as response:
# aiohttp decodes the content to a Python string
logger.info('Downloading %r', url)
return await response.text() |
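A hedged usage sketch showing how the coroutine could be driven; the URL is only a placeholder and aiohttp is assumed to be installed:

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # reuse the coroutine defined above
        text = await _download_text('https://example.com', session)
        print(len(text))

asyncio.run(main())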
def _reset_kvstore(self):
"""Reset kvstore."""
if self._kvstore and 'dist' in self._kvstore.type:
raise RuntimeError("Cannot reset distributed KVStore.")
self._kv_initialized = False
self._kvstore = None
self._distributed = None
self._update_on_kvstore = None
self._params_to_init = [param for param in self._params] | Reset kvstore. | Below is the instruction that describes the task:
### Input:
Reset kvstore.
### Response:
def _reset_kvstore(self):
"""Reset kvstore."""
if self._kvstore and 'dist' in self._kvstore.type:
raise RuntimeError("Cannot reset distributed KVStore.")
self._kv_initialized = False
self._kvstore = None
self._distributed = None
self._update_on_kvstore = None
self._params_to_init = [param for param in self._params] |
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
"""Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
if gpus == '':
devices = mx.cpu()
else:
devices = [mx.gpu(int(i)) for i in gpus.split(',')]
data_names = [data_shape[0] for data_shape in data_shapes]
mod = mx.mod.Module(
symbol=sym,
data_names=data_names,
context=devices,
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=data_shapes,
label_shapes=label_shapes
)
return mod | Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module | Below is the instruction that describes the task:
### Input:
Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
### Response:
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
"""Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
if gpus == '':
devices = mx.cpu()
else:
devices = [mx.gpu(int(i)) for i in gpus.split(',')]
data_names = [data_shape[0] for data_shape in data_shapes]
mod = mx.mod.Module(
symbol=sym,
data_names=data_names,
context=devices,
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=data_shapes,
label_shapes=label_shapes
)
return mod |
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent,
timeout, phantomjs_endpoint, puppeteer_endpoint, splash_endpoint, fetcher_cls,
async_mode=True, get_object=False, no_input=False):
"""
Run Fetcher.
"""
g = ctx.obj
Fetcher = load_cls(None, None, fetcher_cls)
if no_input:
inqueue = None
outqueue = None
else:
inqueue = g.scheduler2fetcher
outqueue = g.fetcher2processor
fetcher = Fetcher(inqueue=inqueue, outqueue=outqueue,
poolsize=poolsize, proxy=proxy, async_mode=async_mode)
fetcher.phantomjs_proxy = phantomjs_endpoint or g.phantomjs_proxy
fetcher.puppeteer_proxy = puppeteer_endpoint or g.puppeteer_proxy
fetcher.splash_endpoint = splash_endpoint
if user_agent:
fetcher.user_agent = user_agent
if timeout:
fetcher.default_options = copy.deepcopy(fetcher.default_options)
fetcher.default_options['timeout'] = timeout
g.instances.append(fetcher)
if g.get('testing_mode') or get_object:
return fetcher
if xmlrpc:
utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
fetcher.run() | Run Fetcher. | Below is the instruction that describes the task:
### Input:
Run Fetcher.
### Response:
def fetcher(ctx, xmlrpc, xmlrpc_host, xmlrpc_port, poolsize, proxy, user_agent,
timeout, phantomjs_endpoint, puppeteer_endpoint, splash_endpoint, fetcher_cls,
async_mode=True, get_object=False, no_input=False):
"""
Run Fetcher.
"""
g = ctx.obj
Fetcher = load_cls(None, None, fetcher_cls)
if no_input:
inqueue = None
outqueue = None
else:
inqueue = g.scheduler2fetcher
outqueue = g.fetcher2processor
fetcher = Fetcher(inqueue=inqueue, outqueue=outqueue,
poolsize=poolsize, proxy=proxy, async_mode=async_mode)
fetcher.phantomjs_proxy = phantomjs_endpoint or g.phantomjs_proxy
fetcher.puppeteer_proxy = puppeteer_endpoint or g.puppeteer_proxy
fetcher.splash_endpoint = splash_endpoint
if user_agent:
fetcher.user_agent = user_agent
if timeout:
fetcher.default_options = copy.deepcopy(fetcher.default_options)
fetcher.default_options['timeout'] = timeout
g.instances.append(fetcher)
if g.get('testing_mode') or get_object:
return fetcher
if xmlrpc:
utils.run_in_thread(fetcher.xmlrpc_run, port=xmlrpc_port, bind=xmlrpc_host)
fetcher.run() |
def any_embedded_linux(self):
"""Check whether the current board is any embedded Linux device."""
return self.any_raspberry_pi or self.any_beaglebone or \
self.any_orange_pi or self.any_giant_board or self.any_jetson_board | Check whether the current board is any embedded Linux device. | Below is the instruction that describes the task:
### Input:
Check whether the current board is any embedded Linux device.
### Response:
def any_embedded_linux(self):
"""Check whether the current board is any embedded Linux device."""
return self.any_raspberry_pi or self.any_beaglebone or \
self.any_orange_pi or self.any_giant_board or self.any_jetson_board |
def prependsitedir(projdir, *args):
"""
like sys.addsitedir() but gives the added directory preference
over system directories. The paths will be normalized for dots and
slash direction before being added to the path.
projdir: the path you want to add to sys.path. If it's a
file, the parent directory will be added
*args: additional directories relative to the projdir to add
to sys.path.
"""
# let the user be lazy and send a file, we will convert to parent directory
# of file
if path.isfile(projdir):
projdir = path.dirname(projdir)
projdir = path.abspath(projdir)
# any args are considered paths that need to be joined to the
# projdir to get to the correct directory.
libpaths = []
for lpath in args:
libpaths.append(path.join(projdir, path.normpath(lpath)))
# add the path to sys.path with preference over everything that currently
# exists in sys.path
syspath_orig = set(sys.path)
site.addsitedir(projdir)
for lpath in libpaths:
site.addsitedir(lpath)
syspath_after = set(sys.path)
new_paths = list(syspath_after.difference(syspath_orig))
sys.path = new_paths + sys.path | like sys.addsitedir() but gives the added directory preference
over system directories. The paths will be normalized for dots and
slash direction before being added to the path.
projdir: the path you want to add to sys.path. If it's a
file, the parent directory will be added
*args: additional directories relative to the projdir to add
to sys.path. | Below is the instruction that describes the task:
### Input:
like sys.addsitedir() but gives the added directory preference
over system directories. The paths will be normalized for dots and
slash direction before being added to the path.
projdir: the path you want to add to sys.path. If it's a
file, the parent directory will be added
*args: additional directories relative to the projdir to add
to sys.path.
### Response:
def prependsitedir(projdir, *args):
"""
like sys.addsitedir() but gives the added directory preference
over system directories. The paths will be normalized for dots and
slash direction before being added to the path.
projdir: the path you want to add to sys.path. If it's a
file, the parent directory will be added
*args: additional directories relative to the projdir to add
to sys.path.
"""
# let the user be lazy and send a file, we will convert to parent directory
# of file
if path.isfile(projdir):
projdir = path.dirname(projdir)
projdir = path.abspath(projdir)
# any args are considered paths that need to be joined to the
# projdir to get to the correct directory.
libpaths = []
for lpath in args:
libpaths.append(path.join(projdir, path.normpath(lpath)))
# add the path to sys.path with preference over everything that currently
# exists in sys.path
syspath_orig = set(sys.path)
site.addsitedir(projdir)
for lpath in libpaths:
site.addsitedir(lpath)
syspath_after = set(sys.path)
new_paths = list(syspath_after.difference(syspath_orig))
sys.path = new_paths + sys.path |
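A hypothetical call, assuming the current file lives next to 'libs' and 'vendor' directories that should win over system packages:

# The file path is reduced to its parent directory; 'libs' and 'vendor'
# are resolved relative to it and all three end up at the front of sys.path.
prependsitedir(__file__, 'libs', 'vendor')

import sys
print(sys.path[:3])  # the newly added directories come first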
def endswith(self, suffix, start=None, end=None):
"""Return whether the current bitstring ends with suffix.
suffix -- The bitstring to search for.
start -- The bit position to start from. Defaults to 0.
end -- The bit position to end at. Defaults to self.len.
"""
suffix = Bits(suffix)
start, end = self._validate_slice(start, end)
if start + suffix.len > end:
return False
start = end - suffix.len
return self._slice(start, end) == suffix | Return whether the current bitstring ends with suffix.
suffix -- The bitstring to search for.
start -- The bit position to start from. Defaults to 0.
end -- The bit position to end at. Defaults to self.len. | Below is the instruction that describes the task:
### Input:
Return whether the current bitstring ends with suffix.
suffix -- The bitstring to search for.
start -- The bit position to start from. Defaults to 0.
end -- The bit position to end at. Defaults to self.len.
### Response:
def endswith(self, suffix, start=None, end=None):
"""Return whether the current bitstring ends with suffix.
suffix -- The bitstring to search for.
start -- The bit position to start from. Defaults to 0.
end -- The bit position to end at. Defaults to self.len.
"""
suffix = Bits(suffix)
start, end = self._validate_slice(start, end)
if start + suffix.len > end:
return False
start = end - suffix.len
return self._slice(start, end) == suffix |
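A quick check of the behaviour, assuming the bitstring package (where this method lives on Bits):

from bitstring import Bits

s = Bits('0b110100')
print(s.endswith('0b100'))         # True: the last three bits are 100
print(s.endswith('0b110', end=3))  # True: only the first three bits are considered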
def __get_query_filters(cls, filters={}, inverse=False):
"""
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
"""
query_filters = []
for name in filters:
if name[0] == '*' and not inverse:
# An inverse filter and not inverse mode
continue
if name[0] != '*' and inverse:
# A direct filter and inverse mode
continue
field_name = name[1:] if name[0] == '*' else name
params = {field_name: filters[name]}
# trying to use es_dsl only and not creating hard coded queries
query_filters.append(Q('match_phrase', **params))
return query_filters | Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}} | Below is the instruction that describes the task:
### Input:
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
### Response:
def __get_query_filters(cls, filters={}, inverse=False):
"""
Convert a dict with the filters to be applied ({"name1":"value1", "name2":"value2"})
to a list of query objects which can be used together in a query using boolean
combination logic.
:param filters: dict with the filters to be applied
:param inverse: if True include all the inverse filters (the one starting with *)
:return: a list of es_dsl 'MatchPhrase' Query objects
Ex: [MatchPhrase(name1="value1"), MatchPhrase(name2="value2"), ..]
Dict representation of the object: {'match_phrase': {'field': 'home'}}
"""
query_filters = []
for name in filters:
if name[0] == '*' and not inverse:
# An inverse filter and not inverse mode
continue
if name[0] != '*' and inverse:
# A direct filter and inverse mode
continue
field_name = name[1:] if name[0] == '*' else name
params = {field_name: filters[name]}
# trying to use es_dsl only and not creating hard coded queries
query_filters.append(Q('match_phrase', **params))
return query_filters |
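A small sketch of the filter-dict convention, assuming elasticsearch_dsl's Q is available; field names and values are made up:

from elasticsearch_dsl import Q

filters = {"author": "jane", "*state": "closed"}

# Direct filters (no leading '*') become MatchPhrase queries as-is;
# inverse filters drop the leading '*' before building the query.
direct = [Q('match_phrase', **{name: value})
          for name, value in filters.items() if not name.startswith('*')]
inverse = [Q('match_phrase', **{name[1:]: value})
           for name, value in filters.items() if name.startswith('*')]

print(direct[0].to_dict())   # {'match_phrase': {'author': 'jane'}}
print(inverse[0].to_dict())  # {'match_phrase': {'state': 'closed'}}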
def generate_cdef():
"""Generate the cdef output file"""
include_libc_path = path.join(HERE, 'fake_libc_include')
include_vulkan_path = path.join(HERE, 'vulkan_include')
out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
header = path.join(include_vulkan_path, 'vulkan.h')
command = ['cpp',
'-std=c99',
'-P',
'-nostdinc',
'-I' + include_libc_path,
'-I' + include_vulkan_path,
'-o' + out_file,
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_WAYLAND_KHR',
'-DVK_USE_PLATFORM_ANDROID_KHR',
'-DVK_USE_PLATFORM_WIN32_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
header]
subprocess.run(command, check=True) | Generate the cdef output file | Below is the instruction that describes the task:
### Input:
Generate the cdef output file
### Response:
def generate_cdef():
"""Generate the cdef output file"""
include_libc_path = path.join(HERE, 'fake_libc_include')
include_vulkan_path = path.join(HERE, 'vulkan_include')
out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
header = path.join(include_vulkan_path, 'vulkan.h')
command = ['cpp',
'-std=c99',
'-P',
'-nostdinc',
'-I' + include_libc_path,
'-I' + include_vulkan_path,
'-o' + out_file,
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_WAYLAND_KHR',
'-DVK_USE_PLATFORM_ANDROID_KHR',
'-DVK_USE_PLATFORM_WIN32_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
header]
subprocess.run(command, check=True) |
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
""" Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text.
"""
if not current_app.testing: # pragma: no cover
# Prepare email message
from flask_sendmail import Message
message = Message(
subject,
recipients=[recipient],
html=html_message,
body=text_message)
# Send email message
self.mail.send(message) | Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text. | Below is the instruction that describes the task:
### Input:
Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text.
### Response:
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
""" Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text.
"""
if not current_app.testing: # pragma: no cover
# Prepare email message
from flask_sendmail import Message
message = Message(
subject,
recipients=[recipient],
html=html_message,
body=text_message)
# Send email message
self.mail.send(message) |
def estimate_duration(self):
"""
Estimate duration (timedelta) for this task.
Estimate the average length of time we expect between this task's
start and end times.
:returns: deferred that when fired returns a timedelta object for the
estimated timedelta, or the actual timedelta, or None if we
could not estimate a time for this task method.
"""
if self.completion_ts:
# Task is already complete. Return the exact duration timedelta.
return defer.succeed(self.duration)
if not self.package:
# TODO: estimate duration some other way besides
# getAverageBuildDuration. For example, we could estimate
# completion time for newRepo/createrepo tasks by looking back at
# the past couple of tasks for this tag.
return defer.succeed(None)
if self.method == 'tagBuild':
# These are pretty short. Haphazardly guess the max SLEEPTIME plus
# a few seconds.
tag_build_time = SLEEPTIME + timedelta(seconds=15)
return defer.succeed(tag_build_time)
return average_build_duration(self.connection, self.package) | Estimate duration (timedelta) for this task.
Estimate the average length of time we expect between this task's
start and end times.
:returns: deferred that when fired returns a timedelta object for the
estimated timedelta, or the actual timedelta, or None if we
could not estimate a time for this task method. | Below is the instruction that describes the task:
### Input:
Estimate duration (timedelta) for this task.
Estimate the average length of time we expect between this task's
start and end times.
:returns: deferred that when fired returns a timedelta object for the
estimated timedelta, or the actual timedelta, or None if we
could not estimate a time for this task method.
### Response:
def estimate_duration(self):
"""
Estimate duration (timedelta) for this task.
Estimate the average length of time we expect between this task's
start and end times.
:returns: deferred that when fired returns a timedelta object for the
estimated timedelta, or the actual timedelta, or None if we
could not estimate a time for this task method.
"""
if self.completion_ts:
# Task is already complete. Return the exact duration timedelta.
return defer.succeed(self.duration)
if not self.package:
# TODO: estimate duration some other way besides
# getAverageBuildDuration. For example, we could estimate
# completion time for newRepo/createrepo tasks by looking back at
# the past couple of tasks for this tag.
return defer.succeed(None)
if self.method == 'tagBuild':
# These are pretty short. Haphazardly guess the max SLEEPTIME plus
# a few seconds.
tag_build_time = SLEEPTIME + timedelta(seconds=15)
return defer.succeed(tag_build_time)
return average_build_duration(self.connection, self.package) |
def _parse_conference(self, stats):
"""
Parse the conference abbreviation for the player's team.
The conference abbreviation is embedded within the conference name tag
and should be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the conference abbreviation, such as 'big-12'.
"""
conference_tag = stats(PLAYER_SCHEME['conference'])
conference = re.sub(r'.*/cbb/conferences/',
'',
str(conference_tag('a')))
conference = re.sub(r'/.*', '', conference)
return conference | Parse the conference abbreviation for the player's team.
The conference abbreviation is embedded within the conference name tag
and should be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the conference abbreviation, such as 'big-12'. | Below is the instruction that describes the task:
### Input:
Parse the conference abbreviation for the player's team.
The conference abbreviation is embedded within the conference name tag
and should be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the conference abbreviation, such as 'big-12'.
### Response:
def _parse_conference(self, stats):
"""
Parse the conference abbreviation for the player's team.
The conference abbreviation is embedded within the conference name tag
and should be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the conference abbreviation, such as 'big-12'.
"""
conference_tag = stats(PLAYER_SCHEME['conference'])
conference = re.sub(r'.*/cbb/conferences/',
'',
str(conference_tag('a')))
conference = re.sub(r'/.*', '', conference)
return conference |
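The two chained re.sub calls simply strip everything before and after the conference slug in the link; a standalone run on a made-up anchor tag:

import re

# Hypothetical anchor tag as it might appear in the parsed stats page
tag = '<a href="/cbb/conferences/big-12/2020.html">Big 12 Conference</a>'

conference = re.sub(r'.*/cbb/conferences/', '', tag)  # drop everything up to the slug
conference = re.sub(r'/.*', '', conference)           # drop everything after it
print(conference)  # big-12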
def serialize(self, sw, **kw):
'''
sw -- SoapWriter instance, add WS-Address header.
'''
for pyobj in self.header_pyobjs:
if hasattr(pyobj, 'typecode') is False:
raise RuntimeError('all header pyobjs must have a typecode attribute')
sw.serialize_header(pyobj, **kw) | sw -- SoapWriter instance, add WS-Address header. | Below is the instruction that describes the task:
### Input:
sw -- SoapWriter instance, add WS-Address header.
### Response:
def serialize(self, sw, **kw):
'''
sw -- SoapWriter instance, add WS-Address header.
'''
for pyobj in self.header_pyobjs:
if hasattr(pyobj, 'typecode') is False:
raise RuntimeError('all header pyobjs must have a typecode attribute')
sw.serialize_header(pyobj, **kw) |
def flexifunction_read_req_encode(self, target_system, target_component, read_req_type, data_index):
'''
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
'''
return MAVLink_flexifunction_read_req_message(target_system, target_component, read_req_type, data_index) | Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t) | Below is the instruction that describes the task:
### Input:
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
### Response:
def flexifunction_read_req_encode(self, target_system, target_component, read_req_type, data_index):
'''
Request reading of flexifunction data
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
read_req_type : Type of flexifunction data requested (int16_t)
data_index : index into data where needed (int16_t)
'''
return MAVLink_flexifunction_read_req_message(target_system, target_component, read_req_type, data_index) |
def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
dup_index[inst_row] = 1
else:
dup_index[inst_row] = dup_index[inst_row] + 1
new_row = inst_row + '_' + str(dup_index[inst_row])
else:
new_row = inst_row
genes.append(new_row)
# barcodes
filename = inst_path + 'barcodes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
cell_barcodes = []
for inst_bc in lines:
inst_bc = inst_bc.strip().split('\t')
# remove dash from barcodes if necessary
if '-' in inst_bc[0]:
inst_bc[0] = inst_bc[0].split('-')[0]
cell_barcodes.append(inst_bc[0])
# parse tuples if necessary
try:
cell_barcodes = [make_tuple(x) for x in cell_barcodes]
except:
pass
try:
genes = [make_tuple(x) for x in genes]
except:
pass
# make dataframe
df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)
return df | Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe | Below is the instruction that describes the task:
### Input:
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
### Response:
def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')
mat = Matrix.todense()
# genes
filename = inst_path + 'genes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
# # add unique id to all genes
# genes = []
# unique_id = 0
# for inst_line in lines:
# inst_line = inst_line.strip().split()
# if len(inst_line) > 1:
# inst_gene = inst_line[1]
# else:
# inst_gene = inst_line[0]
# genes.append(inst_gene + '_' + str(unique_id))
# unique_id = unique_id + 1
# add unique id only to duplicate genes
ini_genes = []
for inst_line in lines:
inst_line = inst_line.strip().split()
if len(inst_line) > 1:
inst_gene = inst_line[1]
else:
inst_gene = inst_line[0]
ini_genes.append(inst_gene)
gene_name_count = pd.Series(ini_genes).value_counts()
duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()
dup_index = {}
genes = []
for inst_row in ini_genes:
# add index to non-unique genes
if inst_row in duplicate_genes:
# calc non-unique index
if inst_row not in dup_index:
dup_index[inst_row] = 1
else:
dup_index[inst_row] = dup_index[inst_row] + 1
new_row = inst_row + '_' + str(dup_index[inst_row])
else:
new_row = inst_row
genes.append(new_row)
# barcodes
filename = inst_path + 'barcodes.tsv'
f = open(filename, 'r')
lines = f.readlines()
f.close()
cell_barcodes = []
for inst_bc in lines:
inst_bc = inst_bc.strip().split('\t')
# remove dash from barcodes if necessary
if '-' in inst_bc[0]:
inst_bc[0] = inst_bc[0].split('-')[0]
cell_barcodes.append(inst_bc[0])
# parse tuples if necessary
try:
cell_barcodes = [make_tuple(x) for x in cell_barcodes]
except:
pass
try:
genes = [make_tuple(x) for x in genes]
except:
pass
# make dataframe
df = pd.DataFrame(mat, index=genes, columns=cell_barcodes)
return df |
def is_tone(char, strict=True):
"""
Check whether the character is a tone or word accent symbol. In strict mode
return True only for the symbols listed in the last group of the chart. If
strict=False, also accept symbols that belong to the Modifier Tone Letters
Unicode block [1].
[1]: http://www.unicode.org/charts/PDF/UA700.pdf
"""
if char in chart.tones:
return True
if not strict:
return 0xA700 <= ord(char) <= 0xA71F
return False | Check whether the character is a tone or word accent symbol. In strict mode
return True only for the symbols listed in the last group of the chart. If
strict=False, also accept symbols that belong to the Modifier Tone Letters
Unicode block [1].
[1]: http://www.unicode.org/charts/PDF/UA700.pdf | Below is the instruction that describes the task:
### Input:
Check whether the character is a tone or word accent symbol. In strict mode
return True only for the symbols listed in the last group of the chart. If
strict=False, also accept symbols that belong to the Modifier Tone Letters
Unicode block [1].
[1]: http://www.unicode.org/charts/PDF/UA700.pdf
### Response:
def is_tone(char, strict=True):
"""
Check whether the character is a tone or word accent symbol. In strict mode
return True only for the symbols listed in the last group of the chart. If
strict=False, also accept symbols that belong to the Modifier Tone Letters
Unicode block [1].
[1]: http://www.unicode.org/charts/PDF/UA700.pdf
"""
if char in chart.tones:
return True
if not strict:
return 0xA700 <= ord(char) <= 0xA71F
return False |
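The non-strict branch is just a code-point range test against the Modifier Tone Letters block; a minimal sketch of that check on its own (chart.tones comes from the surrounding module and is not needed here):

def in_modifier_tone_letters_block(char):
    # U+A700..U+A71F is the Modifier Tone Letters block
    return 0xA700 <= ord(char) <= 0xA71F

print(in_modifier_tone_letters_block('\ua708'))  # True: code point inside the block
print(in_modifier_tone_letters_block('a'))       # False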
def rename(self, new_name):
"""
Renames this NIO
:param new_name: new NIO name
"""
yield from self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name))
log.info("NIO {name} renamed to {new_name}".format(name=self._name, new_name=new_name))
self._name = new_name | Renames this NIO
:param new_name: new NIO name | Below is the instruction that describes the task:
### Input:
Renames this NIO
:param new_name: new NIO name
### Response:
def rename(self, new_name):
"""
Renames this NIO
:param new_name: new NIO name
"""
yield from self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name))
log.info("NIO {name} renamed to {new_name}".format(name=self._name, new_name=new_name))
self._name = new_name |
def _extract_word(self, source, position):
"""
Extracts the word that falls at or around a specific position
:param source:
The template source as a unicode string
:param position:
The position where the word falls
:return:
The word
"""
boundry = re.search('{{|{|\s|$', source[:position][::-1])
start_offset = boundry.end() if boundry.group(0).startswith('{') else boundry.start()
boundry = re.search('}}|}|\s|$', source[position:])
end_offset = boundry.end() if boundry.group(0).startswith('}') else boundry.start()
return source[position - start_offset:position + end_offset] | Extracts the word that falls at or around a specific position
:param source:
The template source as a unicode string
:param position:
The position where the word falls
:return:
The word | Below is the instruction that describes the task:
### Input:
Extracts the word that falls at or around a specific position
:param source:
The template source as a unicode string
:param position:
The position where the word falls
:return:
The word
### Response:
def _extract_word(self, source, position):
"""
Extracts the word that falls at or around a specific position
:param source:
The template source as a unicode string
:param position:
The position where the word falls
:return:
The word
"""
boundry = re.search('{{|{|\s|$', source[:position][::-1])
start_offset = boundry.end() if boundry.group(0).startswith('{') else boundry.start()
boundry = re.search('}}|}|\s|$', source[position:])
end_offset = boundry.end() if boundry.group(0).startswith('}') else boundry.start()
return source[position - start_offset:position + end_offset] |
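A worked call of the same boundary logic on a hypothetical template string (raw strings are used for the patterns; the position points into 'user.name'):

import re

source = 'Hello {{ user.name }} and {{ site }}'
position = 12  # inside 'user.name'

# Search the reversed prefix for the nearest opening brace or whitespace ...
boundry = re.search(r'{{|{|\s|$', source[:position][::-1])
start_offset = boundry.end() if boundry.group(0).startswith('{') else boundry.start()
# ... and the suffix for the nearest closing brace or whitespace.
boundry = re.search(r'}}|}|\s|$', source[position:])
end_offset = boundry.end() if boundry.group(0).startswith('}') else boundry.start()

print(source[position - start_offset:position + end_offset])  # user.name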
def visit(self, url=''):
"""Visit the url, checking for rr errors in the response
@param url: URL
@return: Visit result
"""
result = super(CoyoteDriver, self).visit(url)
source = self.page_source()
return result | Visit the url, checking for rr errors in the response
@param url: URL
@return: Visit result | Below is the instruction that describes the task:
### Input:
Visit the url, checking for rr errors in the response
@param url: URL
@return: Visit result
### Response:
def visit(self, url=''):
"""Visit the url, checking for rr errors in the response
@param url: URL
@return: Visit result
"""
result = super(CoyoteDriver, self).visit(url)
source = self.page_source()
return result |
def find_cumulative_indices(list_of_numbers, search_sum):
"""
find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]]
"""
u = 0
y = 0
result = []
for idx, val in enumerate(list_of_numbers):
y += list_of_numbers[idx]
while y >= search_sum:
if y == search_sum:
result.append(range(u, idx+1))
y -= list_of_numbers[u]
u += 1
# if matches are not found, empty string is returned
# for easier cell data handling on pandas dataframe
return result or '' | find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]] | Below is the instruction that describes the task:
### Input:
find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]]
### Response:
def find_cumulative_indices(list_of_numbers, search_sum):
"""
find_cumulative_indices([70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898], 285) ->
[[4, 5],[5, 6]]
"""
u = 0
y = 0
result = []
for idx, val in enumerate(list_of_numbers):
y += list_of_numbers[idx]
while y >= search_sum:
if y == search_sum:
result.append(range(u, idx+1))
y -= list_of_numbers[u]
u += 1
# if matches are not found, empty string is returned
# for easier cell data handling on pandas dataframe
return result or '' |
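A quick check against the docstring example; note the function returns range objects (or an empty string when nothing matches), so they are converted to lists for display:

nums = [70, 58, 81, 909, 70, 215, 70, 1022, 580, 930, 898]
matches = find_cumulative_indices(nums, 285)
print([list(r) for r in matches])              # [[4, 5], [5, 6]]
print(repr(find_cumulative_indices(nums, 7)))  # '' -- no consecutive run sums to 7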
def MakeSelfExtractingZip(self, payload_data, output_path):
"""Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file
"""
context = self.context + ["Client Context"]
src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(
zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
config_file_name = config.CONFIG.Get(
"ClientBuilder.config_filename", context=context)
# Copy the rest of the files from the package to the new zip.
for template_file in src_zip.namelist():
if template_file != config_file_name:
# Avoid writing the config file twice if we're repacking a binary that
# has already been run through deployment. We write it in the next step,
# so no need to copy over from the original here.
CopyFileInZip(src_zip, template_file, output_zip)
client_config_content = self.GetClientConfig(context)
output_zip.writestr(
config_file_name,
client_config_content.encode("utf-8"),
compress_type=zipfile.ZIP_STORED)
# The zip file comment is used by the self extractor to run the installation
# script. Comment has to be `bytes` object because `zipfile` module is not
# smart enough to properly handle `unicode` objects. We use the `encode`
# method instead of `SmartStr` because we expect this option to be an
# `unicode` object and in case it is not, we want it to blow up.
output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
"ClientBuilder.autorun_command_line", context=context).encode("utf-8")
output_zip.close()
utils.EnsureDirExists(os.path.dirname(output_path))
with open(output_path, "wb") as fd:
# First write the installer stub
stub_data = io.BytesIO()
unzipsfx_stub = config.CONFIG.Get(
"ClientBuilder.unzipsfx_stub", context=context)
stub_raw = open(unzipsfx_stub, "rb").read()
# Check stub has been compiled with the requireAdministrator manifest.
if b"level=\"requireAdministrator" not in stub_raw:
raise RuntimeError("Bad unzip binary in use. Not compiled with the "
"requireAdministrator manifest option.")
stub_data.write(stub_raw)
# If in verbose mode, modify the unzip bins PE header to run in console
# mode for easier debugging.
SetPeSubsystem(
stub_data,
console=config.CONFIG.Get("ClientBuilder.console", context=context))
# Now patch up the .rsrc section to contain the payload.
end_of_file = zip_data.tell() + stub_data.tell()
# This is the IMAGE_SECTION_HEADER.Name which is also the start of
# IMAGE_SECTION_HEADER.
offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
# IMAGE_SECTION_HEADER.PointerToRawData is a 32 bit int.
stub_data.seek(offset_to_rsrc + 20)
start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
# Adjust IMAGE_SECTION_HEADER.SizeOfRawData to span from the old start to
# the end of file.
stub_data.seek(offset_to_rsrc + 16)
stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))
# Concatenate stub and zip file.
out_data = io.BytesIO()
out_data.write(stub_data.getvalue())
out_data.write(zip_data.getvalue())
# Then write the actual output file.
fd.write(out_data.getvalue())
if self.signer:
self.signer.SignFile(output_path)
logging.info("Deployable binary generated at %s", output_path)
return output_path | Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file | Below is the instruction that describes the task:
### Input:
Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file
### Response:
def MakeSelfExtractingZip(self, payload_data, output_path):
"""Repack the installer into the payload.
Args:
payload_data: data payload for zip file
output_path: filename for the zip output
Raises:
RuntimeError: if the ClientBuilder.unzipsfx_stub doesn't require admin.
Returns:
output_path: filename string of zip output file
"""
context = self.context + ["Client Context"]
src_zip = zipfile.ZipFile(io.BytesIO(payload_data), mode="r")
zip_data = io.BytesIO()
output_zip = zipfile.ZipFile(
zip_data, mode="w", compression=zipfile.ZIP_DEFLATED)
config_file_name = config.CONFIG.Get(
"ClientBuilder.config_filename", context=context)
# Copy the rest of the files from the package to the new zip.
for template_file in src_zip.namelist():
if template_file != config_file_name:
# Avoid writing the config file twice if we're repacking a binary that
# has already been run through deployment. We write it in the next step,
# so no need to copy over from the original here.
CopyFileInZip(src_zip, template_file, output_zip)
client_config_content = self.GetClientConfig(context)
output_zip.writestr(
config_file_name,
client_config_content.encode("utf-8"),
compress_type=zipfile.ZIP_STORED)
# The zip file comment is used by the self extractor to run the installation
# script. Comment has to be `bytes` object because `zipfile` module is not
# smart enough to properly handle `unicode` objects. We use the `encode`
# method instead of `SmartStr` because we expect this option to be an
# `unicode` object and in case it is not, we want it to blow up.
output_zip.comment = b"$AUTORUN$>%s" % config.CONFIG.Get(
"ClientBuilder.autorun_command_line", context=context).encode("utf-8")
output_zip.close()
utils.EnsureDirExists(os.path.dirname(output_path))
with open(output_path, "wb") as fd:
# First write the installer stub
stub_data = io.BytesIO()
unzipsfx_stub = config.CONFIG.Get(
"ClientBuilder.unzipsfx_stub", context=context)
stub_raw = open(unzipsfx_stub, "rb").read()
# Check stub has been compiled with the requireAdministrator manifest.
if b"level=\"requireAdministrator" not in stub_raw:
raise RuntimeError("Bad unzip binary in use. Not compiled with the "
"requireAdministrator manifest option.")
stub_data.write(stub_raw)
# If in verbose mode, modify the unzip bins PE header to run in console
# mode for easier debugging.
SetPeSubsystem(
stub_data,
console=config.CONFIG.Get("ClientBuilder.console", context=context))
# Now patch up the .rsrc section to contain the payload.
end_of_file = zip_data.tell() + stub_data.tell()
# This is the IMAGE_SECTION_HEADER.Name which is also the start of
# IMAGE_SECTION_HEADER.
offset_to_rsrc = stub_data.getvalue().find(b".rsrc")
# IMAGE_SECTION_HEADER.PointerToRawData is a 32 bit int.
stub_data.seek(offset_to_rsrc + 20)
start_of_rsrc_section = struct.unpack("<I", stub_data.read(4))[0]
# Adjust IMAGE_SECTION_HEADER.SizeOfRawData to span from the old start to
# the end of file.
stub_data.seek(offset_to_rsrc + 16)
stub_data.write(struct.pack("<I", end_of_file - start_of_rsrc_section))
# Concatenate stub and zip file.
out_data = io.BytesIO()
out_data.write(stub_data.getvalue())
out_data.write(zip_data.getvalue())
# Then write the actual output file.
fd.write(out_data.getvalue())
if self.signer:
self.signer.SignFile(output_path)
logging.info("Deployable binary generated at %s", output_path)
return output_path |
def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
for idx, dt in enumerate(df.dtypes):
if dt == 'object' or is_category(dt):
obj_cols.append(df.columns.values[idx])
return obj_cols | Returns names of 'object' columns in the DataFrame. | Below is the the instruction that describes the task:
### Input:
Returns names of 'object' columns in the DataFrame.
### Response:
def get_obj_cols(df):
"""
Returns names of 'object' columns in the DataFrame.
"""
obj_cols = []
for idx, dt in enumerate(df.dtypes):
if dt == 'object' or is_category(dt):
obj_cols.append(df.columns.values[idx])
return obj_cols |
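A small check with a toy frame (this assumes is_category is the module's helper that recognises pandas Categorical dtypes):

import pandas as pd

df = pd.DataFrame({
    'name': ['a', 'b'],                   # object dtype
    'score': [1.0, 2.0],                  # float64, skipped
    'grade': pd.Categorical(['x', 'y']),  # categorical dtype
})
print(get_obj_cols(df))  # ['name', 'grade']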
def rollforward(self, dt):
"""
Roll provided date forward to next offset only if not on offset.
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt | Roll provided date forward to next offset only if not on offset. | Below is the instruction that describes the task:
### Input:
Roll provided date forward to next offset only if not on offset.
### Response:
def rollforward(self, dt):
"""
Roll provided date forward to next offset only if not on offset.
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt |
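The same rollforward idea can be seen with a stock pandas offset; this is only an illustration of the behaviour rather than this exact class: dates already on the offset are returned unchanged, all others are pushed to the next offset date.

import pandas as pd

offset = pd.offsets.MonthEnd()
print(offset.rollforward(pd.Timestamp('2021-03-15')))  # 2021-03-31 00:00:00, rolled forward
print(offset.rollforward(pd.Timestamp('2021-03-31')))  # 2021-03-31 00:00:00, already on offset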
def _render(self):
"""
Render the text.
Avoid using this function too many times, as it is slow to render text and blit it.
"""
self._last_text = self.text
self._surface = self.font.render(self.text, True, self.color, self.bg_color)
rect = self._surface.get_rect()
self.size = rect.size | Render the text.
Avoid using this function too many times, as it is slow to render text and blit it. | Below is the instruction that describes the task:
### Input:
Render the text.
Avoid using this function too many times, as it is slow to render text and blit it.
### Response:
def _render(self):
"""
Render the text.
Avoid using this function too many times, as it is slow to render text and blit it.
"""
self._last_text = self.text
self._surface = self.font.render(self.text, True, self.color, self.bg_color)
rect = self._surface.get_rect()
self.size = rect.size |
def config_get(args):
""" Retrieve a method config from a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
# Setting ensure_ascii to False ensures unicode string returns
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False) | Retrieve a method config from a workspace, send stdout | Below is the instruction that describes the task:
### Input:
Retrieve a method config from a workspace, send stdout
### Response:
def config_get(args):
""" Retrieve a method config from a workspace, send stdout """
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
# Setting ensure_ascii to False ensures unicode string returns
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False) |
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result | given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements. | Below is the the instruction that describes the task:
### Input:
given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
### Response:
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result |
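The breadth-first traversal with a `queued` set can be illustrated on a toy dependency table; the packages below are made up and nothing here touches pip or pkg_resources:

from collections import deque

# toy dependency graph: package -> direct requirements
DEPS = {
    'app': ['requests', 'click'],
    'requests': ['urllib3', 'idna'],
    'click': [],
    'urllib3': [],
    'idna': [],
}

def trace(roots):
    queue = deque(roots)
    seen = set(roots)
    order = []
    while queue:
        pkg = queue.popleft()
        order.append(pkg)
        for dep in DEPS.get(pkg, []):
            if dep not in seen:   # skip already-queued packages, as above
                seen.add(dep)
                queue.append(dep)
    return order

print(trace(['app']))  # ['app', 'requests', 'click', 'urllib3', 'idna']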
def keep_segments(self, segments_to_keep, preserve_segmentation=True):
'''
Keep the faces and vertices for given segments, discarding all others.
        When preserve_segmentation is False, self.segm is discarded for speed.
'''
v_ind, f_ind = self.vertex_indices_in_segments(segments_to_keep, ret_face_indices=True)
self.segm = {name: self.segm[name] for name in segments_to_keep}
if not preserve_segmentation:
self.segm = None
self.f = self.f[f_ind]
if self.ft is not None:
self.ft = self.ft[f_ind]
self.keep_vertices(v_ind) | Keep the faces and vertices for given segments, discarding all others.
        When preserve_segmentation is False, self.segm is discarded for speed. | Below is the the instruction that describes the task:
### Input:
Keep the faces and vertices for given segments, discarding all others.
        When preserve_segmentation is False, self.segm is discarded for speed.
### Response:
def keep_segments(self, segments_to_keep, preserve_segmentation=True):
'''
Keep the faces and vertices for given segments, discarding all others.
        When preserve_segmentation is False, self.segm is discarded for speed.
'''
v_ind, f_ind = self.vertex_indices_in_segments(segments_to_keep, ret_face_indices=True)
self.segm = {name: self.segm[name] for name in segments_to_keep}
if not preserve_segmentation:
self.segm = None
self.f = self.f[f_ind]
if self.ft is not None:
self.ft = self.ft[f_ind]
self.keep_vertices(v_ind) |
def to_graphml(graph: BELGraph, path: Union[str, BinaryIO]) -> None:
"""Write this graph to GraphML XML file using :func:`networkx.write_graphml`.
The .graphml file extension is suggested so Cytoscape can recognize it.
"""
rv = nx.MultiDiGraph()
for node in graph:
rv.add_node(node.as_bel(), function=node.function)
for u, v, key, edge_data in graph.edges(data=True, keys=True):
rv.add_edge(
u.as_bel(),
v.as_bel(),
interaction=edge_data[RELATION],
bel=graph.edge_to_bel(u, v, edge_data),
key=key,
)
nx.write_graphml(rv, path) | Write this graph to GraphML XML file using :func:`networkx.write_graphml`.
The .graphml file extension is suggested so Cytoscape can recognize it. | Below is the the instruction that describes the task:
### Input:
Write this graph to GraphML XML file using :func:`networkx.write_graphml`.
The .graphml file extension is suggested so Cytoscape can recognize it.
### Response:
def to_graphml(graph: BELGraph, path: Union[str, BinaryIO]) -> None:
"""Write this graph to GraphML XML file using :func:`networkx.write_graphml`.
The .graphml file extension is suggested so Cytoscape can recognize it.
"""
rv = nx.MultiDiGraph()
for node in graph:
rv.add_node(node.as_bel(), function=node.function)
for u, v, key, edge_data in graph.edges(data=True, keys=True):
rv.add_edge(
u.as_bel(),
v.as_bel(),
interaction=edge_data[RELATION],
bel=graph.edge_to_bel(u, v, edge_data),
key=key,
)
nx.write_graphml(rv, path) |
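A small networkx-only usage sketch of the same export shape, assuming only that networkx is installed; the node and edge attributes are invented for illustration and are kept as plain strings because GraphML can only store scalar values:

import networkx as nx

rv = nx.MultiDiGraph()
rv.add_node('p(HGNC:A)', function='Protein')
rv.add_node('p(HGNC:B)', function='Protein')
rv.add_edge('p(HGNC:A)', 'p(HGNC:B)', key=0, interaction='increases',
            bel='p(HGNC:A) increases p(HGNC:B)')

# GraphML only supports scalar attribute values, which is why the
# exporter above flattens nodes and edges to strings first.
nx.write_graphml(rv, 'example.graphml')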
def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
"""
return self._call_method('set_vm_status', device, boot_option,
                                 write_protect) | Sets the Virtual Media drive status and configures the
    boot options for booting from the virtual media. | Below is the the instruction that describes the task:
### Input:
Sets the Virtual Media drive status and configures the
    boot options for booting from the virtual media.
### Response:
def set_vm_status(self, device='FLOPPY',
boot_option='BOOT_ONCE', write_protect='YES'):
"""Sets the Virtual Media drive status and allows the
boot options for booting from the virtual media.
"""
return self._call_method('set_vm_status', device, boot_option,
write_protect) |
def bulk_write(self, requests, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
Warning: this is wrapped in mongo_retry, and is therefore potentially unsafe if the write you want to execute
isn't idempotent.
"""
self._arctic_lib.check_quota()
return self._collection.bulk_write(requests, **kwargs) | See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
Warning: this is wrapped in mongo_retry, and is therefore potentially unsafe if the write you want to execute
isn't idempotent. | Below is the the instruction that describes the task:
### Input:
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
Warning: this is wrapped in mongo_retry, and is therefore potentially unsafe if the write you want to execute
isn't idempotent.
### Response:
def bulk_write(self, requests, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
Warning: this is wrapped in mongo_retry, and is therefore potentially unsafe if the write you want to execute
isn't idempotent.
"""
self._arctic_lib.check_quota()
return self._collection.bulk_write(requests, **kwargs) |
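For context, a plain pymongo bulk_write call looks like the sketch below; the connection string, database and collection names are placeholders, and a reachable MongoDB server is required for it to run:

from pymongo import MongoClient, InsertOne, UpdateOne

collection = MongoClient('mongodb://localhost:27017')['mydb']['mycol']

requests = [
    InsertOne({'symbol': 'ABC', 'price': 1.0}),
    # an upsert with a fixed filter stays closer to idempotent under retries
    UpdateOne({'symbol': 'XYZ'}, {'$set': {'price': 2.0}}, upsert=True),
]
result = collection.bulk_write(requests, ordered=False)
print(result.inserted_count, result.modified_count)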
def whois(self, record):
"""
Logs the WHOIS record if needed.
:param record: The record to log.
:type record: str
"""
if PyFunceble.CONFIGURATION["debug"] and PyFunceble.CONFIGURATION["logs"]:
# The debug and the logs subsystem are activated.
if PyFunceble.INTERN["referer"]:
referer = PyFunceble.INTERN["referer"]
else:
referer = None
to_write = {
self.current_time: {
"domain": PyFunceble.INTERN["to_test"],
"record": record,
"referer": referer,
}
}
if self.output:
output = self.output
else:
output = PyFunceble.OUTPUT_DIRECTORY
output += PyFunceble.OUTPUTS["parent_directory"]
output += PyFunceble.OUTPUTS["logs"]["directories"]["parent"]
output += PyFunceble.OUTPUTS["logs"]["filenames"]["whois"]
current_content = self._get_content(output)
current_content.update(to_write)
self._write_content(current_content, output) | Logs the WHOIS record if needed.
:param record: The record to log.
:type record: str | Below is the the instruction that describes the task:
### Input:
Logs the WHOIS record if needed.
:param record: The record to log.
:type record: str
### Response:
def whois(self, record):
"""
Logs the WHOIS record if needed.
:param record: The record to log.
:type record: str
"""
if PyFunceble.CONFIGURATION["debug"] and PyFunceble.CONFIGURATION["logs"]:
# The debug and the logs subsystem are activated.
if PyFunceble.INTERN["referer"]:
referer = PyFunceble.INTERN["referer"]
else:
referer = None
to_write = {
self.current_time: {
"domain": PyFunceble.INTERN["to_test"],
"record": record,
"referer": referer,
}
}
if self.output:
output = self.output
else:
output = PyFunceble.OUTPUT_DIRECTORY
output += PyFunceble.OUTPUTS["parent_directory"]
output += PyFunceble.OUTPUTS["logs"]["directories"]["parent"]
output += PyFunceble.OUTPUTS["logs"]["filenames"]["whois"]
current_content = self._get_content(output)
current_content.update(to_write)
self._write_content(current_content, output) |
def request(self, url, post=None, method="GET"):
""" Make the request"""
dsid = self.get_dsid()
baseurl = "https://auth.api.swedbank.se/TDE_DAP_Portal_REST_WEB/api/v1/%s?dsid=%s" % (
url, dsid)
if self.pch is None:
self.pch = build_opener(HTTPCookieProcessor(self.cj))
if post:
post = bytearray(post, "utf-8")
request = Request(baseurl, data=post)
request.add_header("Content-Type", "application/json")
else:
request = Request(baseurl)
request.add_header("User-Agent", self.useragent)
request.add_header("Authorization", self.get_authkey())
request.add_header("Accept", "*/*")
request.add_header("Accept-Language", "sv-se")
request.add_header("Connection", "keep-alive")
request.add_header("Proxy-Connection", "keep-alive")
self.cj.set_cookie(
Cookie(version=0, name='dsid', value=dsid, port=None,
port_specified=False, domain='.api.swedbank.se',
domain_specified=False, domain_initial_dot=False,
path='/',
path_specified=True, secure=False, expires=None,
discard=True, comment=None, comment_url=None,
rest={'HttpsOnly': None}, rfc2109=False))
request.get_method = lambda: method
tmp = self.pch.open(request)
self.data = tmp.read().decode("utf8") | Make the request | Below is the the instruction that describes the task:
### Input:
Make the request
### Response:
def request(self, url, post=None, method="GET"):
""" Make the request"""
dsid = self.get_dsid()
baseurl = "https://auth.api.swedbank.se/TDE_DAP_Portal_REST_WEB/api/v1/%s?dsid=%s" % (
url, dsid)
if self.pch is None:
self.pch = build_opener(HTTPCookieProcessor(self.cj))
if post:
post = bytearray(post, "utf-8")
request = Request(baseurl, data=post)
request.add_header("Content-Type", "application/json")
else:
request = Request(baseurl)
request.add_header("User-Agent", self.useragent)
request.add_header("Authorization", self.get_authkey())
request.add_header("Accept", "*/*")
request.add_header("Accept-Language", "sv-se")
request.add_header("Connection", "keep-alive")
request.add_header("Proxy-Connection", "keep-alive")
self.cj.set_cookie(
Cookie(version=0, name='dsid', value=dsid, port=None,
port_specified=False, domain='.api.swedbank.se',
domain_specified=False, domain_initial_dot=False,
path='/',
path_specified=True, secure=False, expires=None,
discard=True, comment=None, comment_url=None,
rest={'HttpsOnly': None}, rfc2109=False))
request.get_method = lambda: method
tmp = self.pch.open(request)
self.data = tmp.read().decode("utf8") |
def pyquil_to_image(program: pyquil.Program) -> PIL.Image: # pragma: no cover
"""Returns an image of a pyquil circuit.
See circuit_to_latex() for more details.
"""
circ = pyquil_to_circuit(program)
latex = circuit_to_latex(circ)
img = render_latex(latex)
return img | Returns an image of a pyquil circuit.
See circuit_to_latex() for more details. | Below is the the instruction that describes the task:
### Input:
Returns an image of a pyquil circuit.
See circuit_to_latex() for more details.
### Response:
def pyquil_to_image(program: pyquil.Program) -> PIL.Image: # pragma: no cover
"""Returns an image of a pyquil circuit.
See circuit_to_latex() for more details.
"""
circ = pyquil_to_circuit(program)
latex = circuit_to_latex(circ)
img = render_latex(latex)
return img |
def check_exists(name, path):
'''
Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_exists name path
'''
cmd = [_get_cmd(), '--display', name]
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] > 0 and out['stderr'] != '':
return False
return any((line.startswith(path) for line in out['stdout'].splitlines())) | Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_exists name path | Below is the the instruction that describes the task:
### Input:
Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_exists name path
### Response:
def check_exists(name, path):
'''
Check if the given path is an alternative for a name.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' alternatives.check_exists name path
'''
cmd = [_get_cmd(), '--display', name]
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] > 0 and out['stderr'] != '':
return False
return any((line.startswith(path) for line in out['stdout'].splitlines())) |
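The core of the check is a prefix match over the `--display` output; a self-contained sketch with canned output (the sample text is illustrative, not real command output):

SAMPLE_DISPLAY_OUTPUT = """\
editor - auto mode
  link currently points to /usr/bin/vim.basic
/bin/nano - priority 40
/usr/bin/vim.basic - priority 30
"""

def path_is_alternative(output, path):
    # True if any line of `update-alternatives --display <name>` starts with the path
    return any(line.startswith(path) for line in output.splitlines())

print(path_is_alternative(SAMPLE_DISPLAY_OUTPUT, '/bin/nano'))        # True
print(path_is_alternative(SAMPLE_DISPLAY_OUTPUT, '/usr/bin/emacs'))   # False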
def get_templates(self):
"""
Return a list of template_dicts based on the config.yaml in
$self.base_path. Keys correspond to templates and values represent
further settings regarding each template. A pystache object containing
the parsed corresponding mustache file is added to the sub-dictionary.
"""
config_path = rel_to_cwd(self.base_path, 'templates', 'config.yaml')
templates = get_yaml_dict(config_path)
for temp, sub in templates.items():
mustache_path = os.path.join(get_parent_dir(config_path),
'{}.mustache'.format(temp))
sub['parsed'] = get_pystache_parsed(mustache_path)
return templates | Return a list of template_dicts based on the config.yaml in
$self.base_path. Keys correspond to templates and values represent
further settings regarding each template. A pystache object containing
the parsed corresponding mustache file is added to the sub-dictionary. | Below is the the instruction that describes the task:
### Input:
Return a list of template_dicts based on the config.yaml in
$self.base_path. Keys correspond to templates and values represent
further settings regarding each template. A pystache object containing
the parsed corresponding mustache file is added to the sub-dictionary.
### Response:
def get_templates(self):
"""
Return a list of template_dicts based on the config.yaml in
$self.base_path. Keys correspond to templates and values represent
further settings regarding each template. A pystache object containing
the parsed corresponding mustache file is added to the sub-dictionary.
"""
config_path = rel_to_cwd(self.base_path, 'templates', 'config.yaml')
templates = get_yaml_dict(config_path)
for temp, sub in templates.items():
mustache_path = os.path.join(get_parent_dir(config_path),
'{}.mustache'.format(temp))
sub['parsed'] = get_pystache_parsed(mustache_path)
return templates |
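A reduced sketch of the config-loading step using PyYAML alone; the file layout and helper below are assumptions, and the mustache-parsing step is omitted:

import os
import yaml  # PyYAML

def get_yaml_dict(path):
    # Return the parsed YAML mapping, or an empty dict if the file is missing.
    if not os.path.isfile(path):
        return {}
    with open(path) as f:
        return yaml.safe_load(f) or {}

templates = get_yaml_dict('templates/config.yaml')
for name, settings in templates.items():
    print(name, '->', 'templates/{}.mustache'.format(name), settings)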
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True | If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot | Below is the the instruction that describes the task:
### Input:
If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot
### Response:
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True |
def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call | Return the training job info associated with job_name and print CloudWatch logs | Below is the the instruction that describes the task:
### Input:
Return the training job info associated with job_name and print CloudWatch logs
### Response:
def describe_training_job_with_log(self, job_name, positions, stream_names,
instance_count, state, last_description,
last_describe_job_call):
"""
Return the training job info associated with job_name and print CloudWatch logs
"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.get_log_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update([(s, Position(timestamp=0, skip=0))
for s in stream_names if s not in positions])
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.time() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.time()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call |
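The Position bookkeeping above deduplicates log events that share a timestamp by counting how many have already been emitted per stream. A stand-alone sketch of just that piece (the stream name is invented):

import collections

Position = collections.namedtuple('Position', ['timestamp', 'skip'])

# Events that share a timestamp are deduplicated by counting how many of
# them have already been emitted for that stream (the `skip` field).
positions = {'job/algo-1': Position(timestamp=0, skip=0)}

def advance(stream, event_timestamp):
    ts, count = positions[stream]
    if event_timestamp == ts:
        positions[stream] = Position(timestamp=ts, skip=count + 1)
    else:
        positions[stream] = Position(timestamp=event_timestamp, skip=1)

advance('job/algo-1', 1700000000000)
advance('job/algo-1', 1700000000000)
print(positions['job/algo-1'])  # Position(timestamp=1700000000000, skip=2)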
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
st_ = salt.client.ssh.state.SSHHighState(
opts,
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
mods = _parse_mods(mods)
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
else:
high_data['__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
if errors:
return errors
high_data, req_in_errors = st_.state.requisite_in(high_data)
errors += req_in_errors
high_data = st_.state.apply_exclude(high_data)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = st_.state.compile_high_data(high_data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
opts.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
opts['thin_dir'],
test,
trans_tar_sum,
opts['hash_type'])
single = salt.client.ssh.Single(
opts,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(opts['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | Create the seed file for a state.sls run | Below is the the instruction that describes the task:
### Input:
Create the seed file for a state.sls run
### Response:
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
st_ = salt.client.ssh.state.SSHHighState(
opts,
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
mods = _parse_mods(mods)
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
else:
high_data['__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
if errors:
return errors
high_data, req_in_errors = st_.state.requisite_in(high_data)
errors += req_in_errors
high_data = st_.state.apply_exclude(high_data)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = st_.state.compile_high_data(high_data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
opts.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
opts['thin_dir'],
test,
trans_tar_sum,
opts['hash_type'])
single = salt.client.ssh.Single(
opts,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(opts['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout |
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal() | master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead. | Below is the the instruction that describes the task:
### Input:
master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead.
### Response:
def master_open():
"""master_open() -> (master_fd, slave_name)
Open a pty master and return the fd, and the filename of the slave end.
Deprecated, use openpty() instead."""
try:
master_fd, slave_fd = os.openpty()
except (AttributeError, OSError):
pass
else:
slave_name = os.ttyname(slave_fd)
os.close(slave_fd)
return master_fd, slave_name
return _open_terminal() |
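The os.openpty() path that this wrapper prefers can be used directly; a short POSIX-only sketch:

import os

# POSIX only: open a new pseudo-terminal pair.
master_fd, slave_fd = os.openpty()
print('slave device:', os.ttyname(slave_fd))

os.write(master_fd, b'hello\n')   # data written to the master ...
print(os.read(slave_fd, 1024))    # ... can be read from the slave

os.close(slave_fd)
os.close(master_fd)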
def binary(self, new_binary):
"""Sets location of the browser binary, either by string or
``FirefoxBinary`` instance.
"""
if not isinstance(new_binary, FirefoxBinary):
new_binary = FirefoxBinary(new_binary)
self._binary = new_binary | Sets location of the browser binary, either by string or
``FirefoxBinary`` instance. | Below is the the instruction that describes the task:
### Input:
Sets location of the browser binary, either by string or
``FirefoxBinary`` instance.
### Response:
def binary(self, new_binary):
"""Sets location of the browser binary, either by string or
``FirefoxBinary`` instance.
"""
if not isinstance(new_binary, FirefoxBinary):
new_binary = FirefoxBinary(new_binary)
self._binary = new_binary |
def show_description(self):
""" Prints the formatted response for the matching return type """
def printit(c, v):
underline = "-" * len(dr.get_name(c))
resp = self.responses[v["type"]]
name = "%s[%s] %s%s" % (resp.color, resp.label, dr.get_name(c), Style.RESET_ALL)
print(name, file=self.stream)
print(underline, file=self.stream)
print(render(c, v), file=self.stream)
print(file=self.stream)
for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
v = self.broker[c]
_type = v.get('type')
if _type in self.responses:
self.counts[_type] += 1
if (_type and ((self.fail_only and _type == 'rule') or
((self.missing and _type == 'skip') or
(not self.fail_only and _type != 'skip')))):
printit(c, v)
print(file=self.stream)
self.print_header("Rule Execution Summary", Fore.CYAN)
for c in self.counts:
print(self.responses[c].color + self.responses[c].title + str(self.counts[c]) + Style.RESET_ALL, file=self.stream) | Prints the formatted response for the matching return type | Below is the the instruction that describes the task:
### Input:
Prints the formatted response for the matching return type
### Response:
def show_description(self):
""" Prints the formatted response for the matching return type """
def printit(c, v):
underline = "-" * len(dr.get_name(c))
resp = self.responses[v["type"]]
name = "%s[%s] %s%s" % (resp.color, resp.label, dr.get_name(c), Style.RESET_ALL)
print(name, file=self.stream)
print(underline, file=self.stream)
print(render(c, v), file=self.stream)
print(file=self.stream)
for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
v = self.broker[c]
_type = v.get('type')
if _type in self.responses:
self.counts[_type] += 1
if (_type and ((self.fail_only and _type == 'rule') or
((self.missing and _type == 'skip') or
(not self.fail_only and _type != 'skip')))):
printit(c, v)
print(file=self.stream)
self.print_header("Rule Execution Summary", Fore.CYAN)
for c in self.counts:
print(self.responses[c].color + self.responses[c].title + str(self.counts[c]) + Style.RESET_ALL, file=self.stream) |
def cmd_startstop(options):
"""Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
    performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parser.
"""
statelu = {"start": "stopped", "stop": "running"}
options.inst_state = statelu[options.command]
debg.dprint("toggle set state: ", options.inst_state)
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
response = awsc.startstop(tar_inst, options.command)
responselu = {"start": "StartingInstances", "stop": "StoppingInstances"}
filt = responselu[options.command]
resp = {}
state_term = ('CurrentState', 'PreviousState')
for i, j in enumerate(state_term):
resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name']
print("Current State: {}{}{} - Previous State: {}{}{}\n".
format(C_STAT[resp[0]], resp[0], C_NORM,
C_STAT[resp[1]], resp[1], C_NORM)) | Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
    performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parser. | Below is the the instruction that describes the task:
### Input:
Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
    performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parser.
### Response:
def cmd_startstop(options):
"""Start or Stop the specified instance.
Finds instances that match args and instance-state expected by the
command. Then, the target instance is determined, the action is
    performed on the instance, and the return information is displayed.
Args:
options (object): contains args and data from parser.
"""
statelu = {"start": "stopped", "stop": "running"}
options.inst_state = statelu[options.command]
debg.dprint("toggle set state: ", options.inst_state)
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
response = awsc.startstop(tar_inst, options.command)
responselu = {"start": "StartingInstances", "stop": "StoppingInstances"}
filt = responselu[options.command]
resp = {}
state_term = ('CurrentState', 'PreviousState')
for i, j in enumerate(state_term):
resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name']
print("Current State: {}{}{} - Previous State: {}{}{}\n".
format(C_STAT[resp[0]], resp[0], C_NORM,
C_STAT[resp[1]], resp[1], C_NORM)) |
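The call that `awsc.startstop` presumably wraps resembles the boto3 sketch below; the instance id and region are placeholders and valid AWS credentials are required:

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Stop a single instance and report the state transition.
response = ec2.stop_instances(InstanceIds=['i-0123456789abcdef0'])
change = response['StoppingInstances'][0]
print(change['PreviousState']['Name'], '->', change['CurrentState']['Name'])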
def __fetch_issues(self, from_date, to_date):
"""Fetch the issues"""
issues_groups = self.client.issues(from_date=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)
for issue in issues:
if str_to_datetime(issue['updated_at']) > to_date:
return
self.__init_extra_issue_fields(issue)
for field in TARGET_ISSUE_FIELDS:
if not issue[field]:
continue
if field == 'user':
issue[field + '_data'] = self.__get_user(issue[field]['login'])
elif field == 'assignee':
issue[field + '_data'] = self.__get_issue_assignee(issue[field])
elif field == 'assignees':
issue[field + '_data'] = self.__get_issue_assignees(issue[field])
elif field == 'comments':
issue[field + '_data'] = self.__get_issue_comments(issue['number'])
elif field == 'reactions':
issue[field + '_data'] = \
self.__get_issue_reactions(issue['number'], issue['reactions']['total_count'])
yield issue | Fetch the issues | Below is the the instruction that describes the task:
### Input:
Fetch the issues
### Response:
def __fetch_issues(self, from_date, to_date):
"""Fetch the issues"""
issues_groups = self.client.issues(from_date=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)
for issue in issues:
if str_to_datetime(issue['updated_at']) > to_date:
return
self.__init_extra_issue_fields(issue)
for field in TARGET_ISSUE_FIELDS:
if not issue[field]:
continue
if field == 'user':
issue[field + '_data'] = self.__get_user(issue[field]['login'])
elif field == 'assignee':
issue[field + '_data'] = self.__get_issue_assignee(issue[field])
elif field == 'assignees':
issue[field + '_data'] = self.__get_issue_assignees(issue[field])
elif field == 'comments':
issue[field + '_data'] = self.__get_issue_comments(issue['number'])
elif field == 'reactions':
issue[field + '_data'] = \
self.__get_issue_reactions(issue['number'], issue['reactions']['total_count'])
yield issue |
def _get_grouped_dicoms(dicom_input):
"""
    Search all dicoms in the dicom directory, sort and validate them.
    fast_read = True will only read the headers, not the data.
"""
# if all dicoms have an instance number try sorting by instance number else by position
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
# now group per stack
grouped_dicoms = [[]] # list with first element a list
timepoint_index = 0
previous_stack_position = -1
# loop over all sorted dicoms
stack_position_tag = Tag(0x2001, 0x100a) # put this there as this is a slow step and used a lot
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
stack_position = 0
if stack_position_tag in dicom_:
stack_position = common.get_is_value(dicom_[stack_position_tag])
if previous_stack_position == stack_position:
# if the stack number is the same we move to the next timepoint
timepoint_index += 1
if len(grouped_dicoms) <= timepoint_index:
grouped_dicoms.append([])
else:
# if it changes move back to the first timepoint
timepoint_index = 0
grouped_dicoms[timepoint_index].append(dicom_)
previous_stack_position = stack_position
    return grouped_dicoms | Search all dicoms in the dicom directory, sort and validate them.
    fast_read = True will only read the headers, not the data. | Below is the the instruction that describes the task:
### Input:
Search all dicoms in the dicom directory, sort and validate them.
    fast_read = True will only read the headers, not the data.
### Response:
def _get_grouped_dicoms(dicom_input):
"""
    Search all dicoms in the dicom directory, sort and validate them.
    fast_read = True will only read the headers, not the data.
"""
# if all dicoms have an instance number try sorting by instance number else by position
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
# now group per stack
grouped_dicoms = [[]] # list with first element a list
timepoint_index = 0
previous_stack_position = -1
# loop over all sorted dicoms
stack_position_tag = Tag(0x2001, 0x100a) # put this there as this is a slow step and used a lot
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
stack_position = 0
if stack_position_tag in dicom_:
stack_position = common.get_is_value(dicom_[stack_position_tag])
if previous_stack_position == stack_position:
# if the stack number is the same we move to the next timepoint
timepoint_index += 1
if len(grouped_dicoms) <= timepoint_index:
grouped_dicoms.append([])
else:
# if it changes move back to the first timepoint
timepoint_index = 0
grouped_dicoms[timepoint_index].append(dicom_)
previous_stack_position = stack_position
return grouped_dicoms |
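The stack-position grouping can be shown on plain tuples; each fake slice below is (stack_position, instance_number) and the numbers are invented:

# Two timepoints of a three-slice stack, already sorted by instance number.
slices = [(1, 1), (1, 2), (2, 3), (2, 4), (3, 5), (3, 6)]

grouped = [[]]          # grouped[t] holds the slices of timepoint t
timepoint = 0
previous = None
for stack_position, instance in slices:
    if previous == stack_position:
        timepoint += 1              # same slice location again -> next timepoint
        if len(grouped) <= timepoint:
            grouped.append([])
    else:
        timepoint = 0               # location changed -> back to first timepoint
    grouped[timepoint].append((stack_position, instance))
    previous = stack_position

print(grouped)  # [[(1, 1), (2, 3), (3, 5)], [(1, 2), (2, 4), (3, 6)]]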
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute | Generates is_X_attribute function for given parents and attrs. | Below is the the instruction that describes the task:
### Input:
Generates is_X_attribute function for given parents and attrs.
### Response:
def generic_is_view_attribute(parents, attrs):
"""Generates is_X_attribute function for given parents and attrs."""
def is_attribute(node):
return _attribute_is_magic(node, attrs, parents)
return is_attribute |
def export_events(self, filename, evt_type):
"""Export events to CSV
Parameters
----------
filename : str
path of export file
evt_type : list of str
event types to export
"""
filename = splitext(filename)[0] + '.csv'
headings_row = ['Index',
'Start time',
'End time',
'Stitches',
'Stage',
'Cycle',
'Event type',
'Channel']
events = []
for et in evt_type:
events.extend(self.get_events(name=et))
events = sorted(events, key=lambda evt: evt['start'])
if events is None:
lg.info('No events found.')
return
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(headings_row)
for i, ev in enumerate(events):
csv_file.writerow([i + 1,
ev['start'],
ev['end'],
0,
ev['stage'],
'',
ev['name'],
', '.join(ev['chan']),
]) | Export events to CSV
Parameters
----------
filename : str
path of export file
evt_type : list of str
event types to export | Below is the the instruction that describes the task:
### Input:
Export events to CSV
Parameters
----------
filename : str
path of export file
evt_type : list of str
event types to export
### Response:
def export_events(self, filename, evt_type):
"""Export events to CSV
Parameters
----------
filename : str
path of export file
evt_type : list of str
event types to export
"""
filename = splitext(filename)[0] + '.csv'
headings_row = ['Index',
'Start time',
'End time',
'Stitches',
'Stage',
'Cycle',
'Event type',
'Channel']
events = []
for et in evt_type:
events.extend(self.get_events(name=et))
events = sorted(events, key=lambda evt: evt['start'])
if events is None:
lg.info('No events found.')
return
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(headings_row)
for i, ev in enumerate(events):
csv_file.writerow([i + 1,
ev['start'],
ev['end'],
0,
ev['stage'],
'',
ev['name'],
', '.join(ev['chan']),
]) |
def query(self, sql: str, args: tuple = None):
"""Execute a SQL query with a return value."""
with self._cursor() as cursor:
log.debug('Running SQL: ' + str((sql, args)))
cursor.execute(sql, args)
return cursor.fetchall() | Execute a SQL query with a return value. | Below is the the instruction that describes the task:
### Input:
Execute a SQL query with a return value.
### Response:
def query(self, sql: str, args: tuple = None):
"""Execute a SQL query with a return value."""
with self._cursor() as cursor:
log.debug('Running SQL: ' + str((sql, args)))
cursor.execute(sql, args)
return cursor.fetchall() |
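A self-contained sqlite3 version of the same helper shape; note that sqlite3 uses `?` placeholders rather than the `%s` style above, and the `_cursor` context manager here is an assumption about how the class is wired:

import sqlite3
from contextlib import contextmanager

class DB:
    def __init__(self, path=':memory:'):
        self.conn = sqlite3.connect(path)

    @contextmanager
    def _cursor(self):
        cursor = self.conn.cursor()
        try:
            yield cursor
            self.conn.commit()
        finally:
            cursor.close()

    def query(self, sql, args=()):
        """Execute a SQL query and return all rows."""
        with self._cursor() as cursor:
            cursor.execute(sql, args)
            return cursor.fetchall()

db = DB()
db.query('CREATE TABLE t (x INTEGER)')
db.query('INSERT INTO t VALUES (?)', (42,))
print(db.query('SELECT x FROM t'))   # [(42,)]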
def Chisholm(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1,
rough_correction=False):
r'''Calculates two-phase pressure drop with the Chisholm (1973) correlation
from [1]_, also in [2]_ and [3]_.
.. math::
\frac{\Delta P_{tp}}{\Delta P_{lo}} = \phi_{ch}^2
.. math::
\phi_{ch}^2 = 1 + (\Gamma^2 -1)\left\{B x^{(2-n)/2} (1-x)^{(2-n)/2}
+ x^{2-n} \right\}
.. math::
\Gamma ^2 = \frac{\left(\frac{\Delta P}{L}\right)_{go}}{\left(\frac{
\Delta P}{L}\right)_{lo}}
For Gamma < 9.5:
.. math::
B = \frac{55}{G_{tp}^{0.5}} \text{ for } G_{tp} > 1900
.. math::
B = \frac{2400}{G_{tp}} \text{ for } 500 < G_{tp} < 1900
.. math::
B = 4.8 \text{ for } G_{tp} < 500
For 9.5 < Gamma < 28:
.. math::
B = \frac{520}{\Gamma G_{tp}^{0.5}} \text{ for } G_{tp} < 600
.. math::
B = \frac{21}{\Gamma} \text{ for } G_{tp} > 600
For Gamma > 28:
.. math::
B = \frac{15000}{\Gamma^2 G_{tp}^{0.5}}
If `rough_correction` is True, the following correction to B is applied:
.. math::
\frac{B_{rough}}{B_{smooth}} = \left[0.5\left\{1+ \left(\frac{\mu_g}
{\mu_l}\right)^2 + 10^{-600\epsilon/D}\right\}\right]^{\frac{0.25-n}
{0.25}}
.. math::
n = \frac{\log \frac{f_{d,lo}}{f_{d,go}}}{\log \frac{Re_{go}}{Re_{lo}}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
rough_correction : bool, optional
Whether or not to use the roughness correction proposed in the 1968
version of the correlation
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
    Applicable for 0 < x < 1. n = 0.25, the exponent in the Blasius equation.
    Originally developed for smooth pipes, a roughness correction is included
    as well from Chisholm's 1968 work [4]_. Neither [2]_ nor [3]_ has any
    mention of the correction, however.
Examples
--------
>>> Chisholm(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
1084.1489922923738
References
----------
.. [1] Chisholm, D. "Pressure Gradients due to Friction during the Flow of
Evaporating Two-Phase Mixtures in Smooth Tubes and Channels."
International Journal of Heat and Mass Transfer 16, no. 2 (February
1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
.. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
State University, 2013. https://shareok.org/handle/11244/11109.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
.. [4] Chisholm, D. "Research Note: Influence of Pipe Surface Roughness on
Friction Pressure Gradient during Two-Phase Flow." Journal of Mechanical
Engineering Science 20, no. 6 (December 1, 1978): 353-354.
doi:10.1243/JMES_JOUR_1978_020_061_02.
'''
G_tp = m/(pi/4*D**2)
n = 0.25 # Blasius friction factor exponent
# Liquid-only properties, for calculation of dP_lo
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
fd_lo = friction_factor(Re=Re_lo, eD=roughness/D)
dP_lo = fd_lo*L/D*(0.5*rhol*v_lo**2)
# Gas-only properties, for calculation of dP_go
v_go = m/rhog/(pi/4*D**2)
Re_go = Reynolds(V=v_go, rho=rhog, mu=mug, D=D)
fd_go = friction_factor(Re=Re_go, eD=roughness/D)
dP_go = fd_go*L/D*(0.5*rhog*v_go**2)
Gamma = (dP_go/dP_lo)**0.5
if Gamma <= 9.5:
if G_tp <= 500:
B = 4.8
elif G_tp < 1900:
B = 2400./G_tp
else:
B = 55*G_tp**-0.5
elif Gamma <= 28:
if G_tp <= 600:
B = 520.*G_tp**-0.5/Gamma
else:
B = 21./Gamma
else:
B = 15000.*G_tp**-0.5/Gamma**2
if rough_correction:
n = log(fd_lo/fd_go)/log(Re_go/Re_lo)
B_ratio = (0.5*(1 + (mug/mul)**2 + 10**(-600*roughness/D)))**((0.25-n)/0.25)
B = B*B_ratio
phi2_ch = 1 + (Gamma**2-1)*(B*x**((2-n)/2.)*(1-x)**((2-n)/2.) + x**(2-n))
return phi2_ch*dP_lo | r'''Calculates two-phase pressure drop with the Chisholm (1973) correlation
from [1]_, also in [2]_ and [3]_.
.. math::
\frac{\Delta P_{tp}}{\Delta P_{lo}} = \phi_{ch}^2
.. math::
\phi_{ch}^2 = 1 + (\Gamma^2 -1)\left\{B x^{(2-n)/2} (1-x)^{(2-n)/2}
+ x^{2-n} \right\}
.. math::
\Gamma ^2 = \frac{\left(\frac{\Delta P}{L}\right)_{go}}{\left(\frac{
\Delta P}{L}\right)_{lo}}
For Gamma < 9.5:
.. math::
B = \frac{55}{G_{tp}^{0.5}} \text{ for } G_{tp} > 1900
.. math::
B = \frac{2400}{G_{tp}} \text{ for } 500 < G_{tp} < 1900
.. math::
B = 4.8 \text{ for } G_{tp} < 500
For 9.5 < Gamma < 28:
.. math::
B = \frac{520}{\Gamma G_{tp}^{0.5}} \text{ for } G_{tp} < 600
.. math::
B = \frac{21}{\Gamma} \text{ for } G_{tp} > 600
For Gamma > 28:
.. math::
B = \frac{15000}{\Gamma^2 G_{tp}^{0.5}}
If `rough_correction` is True, the following correction to B is applied:
.. math::
\frac{B_{rough}}{B_{smooth}} = \left[0.5\left\{1+ \left(\frac{\mu_g}
{\mu_l}\right)^2 + 10^{-600\epsilon/D}\right\}\right]^{\frac{0.25-n}
{0.25}}
.. math::
n = \frac{\log \frac{f_{d,lo}}{f_{d,go}}}{\log \frac{Re_{go}}{Re_{lo}}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
rough_correction : bool, optional
Whether or not to use the roughness correction proposed in the 1968
version of the correlation
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
    Applicable for 0 < x < 1. n = 0.25, the exponent in the Blasius equation.
    Originally developed for smooth pipes, a roughness correction is included
    as well from Chisholm's 1968 work [4]_. Neither [2]_ nor [3]_ has any
    mention of the correction, however.
Examples
--------
>>> Chisholm(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
1084.1489922923738
References
----------
.. [1] Chisholm, D. "Pressure Gradients due to Friction during the Flow of
Evaporating Two-Phase Mixtures in Smooth Tubes and Channels."
International Journal of Heat and Mass Transfer 16, no. 2 (February
1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
.. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
State University, 2013. https://shareok.org/handle/11244/11109.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
.. [4] Chisholm, D. "Research Note: Influence of Pipe Surface Roughness on
Friction Pressure Gradient during Two-Phase Flow." Journal of Mechanical
Engineering Science 20, no. 6 (December 1, 1978): 353-354.
doi:10.1243/JMES_JOUR_1978_020_061_02. | Below is the the instruction that describes the task:
### Input:
r'''Calculates two-phase pressure drop with the Chisholm (1973) correlation
from [1]_, also in [2]_ and [3]_.
.. math::
\frac{\Delta P_{tp}}{\Delta P_{lo}} = \phi_{ch}^2
.. math::
\phi_{ch}^2 = 1 + (\Gamma^2 -1)\left\{B x^{(2-n)/2} (1-x)^{(2-n)/2}
+ x^{2-n} \right\}
.. math::
\Gamma ^2 = \frac{\left(\frac{\Delta P}{L}\right)_{go}}{\left(\frac{
\Delta P}{L}\right)_{lo}}
For Gamma < 9.5:
.. math::
B = \frac{55}{G_{tp}^{0.5}} \text{ for } G_{tp} > 1900
.. math::
B = \frac{2400}{G_{tp}} \text{ for } 500 < G_{tp} < 1900
.. math::
B = 4.8 \text{ for } G_{tp} < 500
For 9.5 < Gamma < 28:
.. math::
B = \frac{520}{\Gamma G_{tp}^{0.5}} \text{ for } G_{tp} < 600
.. math::
B = \frac{21}{\Gamma} \text{ for } G_{tp} > 600
For Gamma > 28:
.. math::
B = \frac{15000}{\Gamma^2 G_{tp}^{0.5}}
If `rough_correction` is True, the following correction to B is applied:
.. math::
\frac{B_{rough}}{B_{smooth}} = \left[0.5\left\{1+ \left(\frac{\mu_g}
{\mu_l}\right)^2 + 10^{-600\epsilon/D}\right\}\right]^{\frac{0.25-n}
{0.25}}
.. math::
n = \frac{\log \frac{f_{d,lo}}{f_{d,go}}}{\log \frac{Re_{go}}{Re_{lo}}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
rough_correction : bool, optional
Whether or not to use the roughness correction proposed in the 1968
version of the correlation
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
    Applicable for 0 < x < 1. n = 0.25, the exponent in the Blasius equation.
    Originally developed for smooth pipes, a roughness correction is included
    as well from Chisholm's 1968 work [4]_. Neither [2]_ nor [3]_ has any
    mention of the correction, however.
Examples
--------
>>> Chisholm(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
1084.1489922923738
References
----------
.. [1] Chisholm, D. "Pressure Gradients due to Friction during the Flow of
Evaporating Two-Phase Mixtures in Smooth Tubes and Channels."
International Journal of Heat and Mass Transfer 16, no. 2 (February
1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
.. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
State University, 2013. https://shareok.org/handle/11244/11109.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
.. [4] Chisholm, D. "Research Note: Influence of Pipe Surface Roughness on
Friction Pressure Gradient during Two-Phase Flow." Journal of Mechanical
Engineering Science 20, no. 6 (December 1, 1978): 353-354.
doi:10.1243/JMES_JOUR_1978_020_061_02.
### Response:
def Chisholm(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1,
rough_correction=False):
r'''Calculates two-phase pressure drop with the Chisholm (1973) correlation
from [1]_, also in [2]_ and [3]_.
.. math::
\frac{\Delta P_{tp}}{\Delta P_{lo}} = \phi_{ch}^2
.. math::
\phi_{ch}^2 = 1 + (\Gamma^2 -1)\left\{B x^{(2-n)/2} (1-x)^{(2-n)/2}
+ x^{2-n} \right\}
.. math::
\Gamma ^2 = \frac{\left(\frac{\Delta P}{L}\right)_{go}}{\left(\frac{
\Delta P}{L}\right)_{lo}}
For Gamma < 9.5:
.. math::
B = \frac{55}{G_{tp}^{0.5}} \text{ for } G_{tp} > 1900
.. math::
B = \frac{2400}{G_{tp}} \text{ for } 500 < G_{tp} < 1900
.. math::
B = 4.8 \text{ for } G_{tp} < 500
For 9.5 < Gamma < 28:
.. math::
B = \frac{520}{\Gamma G_{tp}^{0.5}} \text{ for } G_{tp} < 600
.. math::
B = \frac{21}{\Gamma} \text{ for } G_{tp} > 600
For Gamma > 28:
.. math::
B = \frac{15000}{\Gamma^2 G_{tp}^{0.5}}
If `rough_correction` is True, the following correction to B is applied:
.. math::
\frac{B_{rough}}{B_{smooth}} = \left[0.5\left\{1+ \left(\frac{\mu_g}
{\mu_l}\right)^2 + 10^{-600\epsilon/D}\right\}\right]^{\frac{0.25-n}
{0.25}}
.. math::
n = \frac{\log \frac{f_{d,lo}}{f_{d,go}}}{\log \frac{Re_{go}}{Re_{lo}}}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
rough_correction : bool, optional
Whether or not to use the roughness correction proposed in the 1968
version of the correlation
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
    Applicable for 0 < x < 1. n = 0.25, the exponent in the Blasius equation.
    Originally developed for smooth pipes, a roughness correction is included
    as well from Chisholm's 1968 work [4]_. Neither [2]_ nor [3]_ has any
    mention of the correction, however.
Examples
--------
>>> Chisholm(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
1084.1489922923738
References
----------
.. [1] Chisholm, D. "Pressure Gradients due to Friction during the Flow of
Evaporating Two-Phase Mixtures in Smooth Tubes and Channels."
International Journal of Heat and Mass Transfer 16, no. 2 (February
1973): 347-58. doi:10.1016/0017-9310(73)90063-X.
.. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
Correlations for Isothermal Two-Phase Horizontal Flow." Thesis, Oklahoma
State University, 2013. https://shareok.org/handle/11244/11109.
.. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
(2004). http://www.wlv.com/heat-transfer-databook/
.. [4] Chisholm, D. "Research Note: Influence of Pipe Surface Roughness on
Friction Pressure Gradient during Two-Phase Flow." Journal of Mechanical
Engineering Science 20, no. 6 (December 1, 1978): 353-354.
doi:10.1243/JMES_JOUR_1978_020_061_02.
'''
G_tp = m/(pi/4*D**2)
n = 0.25 # Blasius friction factor exponent
# Liquid-only properties, for calculation of dP_lo
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
fd_lo = friction_factor(Re=Re_lo, eD=roughness/D)
dP_lo = fd_lo*L/D*(0.5*rhol*v_lo**2)
# Gas-only properties, for calculation of dP_go
v_go = m/rhog/(pi/4*D**2)
Re_go = Reynolds(V=v_go, rho=rhog, mu=mug, D=D)
fd_go = friction_factor(Re=Re_go, eD=roughness/D)
dP_go = fd_go*L/D*(0.5*rhog*v_go**2)
Gamma = (dP_go/dP_lo)**0.5
if Gamma <= 9.5:
if G_tp <= 500:
B = 4.8
elif G_tp < 1900:
B = 2400./G_tp
else:
B = 55*G_tp**-0.5
elif Gamma <= 28:
if G_tp <= 600:
B = 520.*G_tp**-0.5/Gamma
else:
B = 21./Gamma
else:
B = 15000.*G_tp**-0.5/Gamma**2
if rough_correction:
n = log(fd_lo/fd_go)/log(Re_go/Re_lo)
B_ratio = (0.5*(1 + (mug/mul)**2 + 10**(-600*roughness/D)))**((0.25-n)/0.25)
B = B*B_ratio
phi2_ch = 1 + (Gamma**2-1)*(B*x**((2-n)/2.)*(1-x)**((2-n)/2.) + x**(2-n))
return phi2_ch*dP_lo |
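The piecewise B coefficient is the easiest part of the correlation to get subtly wrong, so here it is isolated as a stand-alone helper mirroring the branches above; the sample inputs are arbitrary:

def chisholm_B(Gamma, G_tp):
    # Piecewise B coefficient from the Chisholm (1973) correlation,
    # mirroring the branches in the function above.
    if Gamma <= 9.5:
        if G_tp <= 500:
            return 4.8
        elif G_tp < 1900:
            return 2400./G_tp
        return 55.*G_tp**-0.5
    elif Gamma <= 28:
        if G_tp <= 600:
            return 520.*G_tp**-0.5/Gamma
        return 21./Gamma
    return 15000.*G_tp**-0.5/Gamma**2

print(chisholm_B(Gamma=5.0, G_tp=300.0))    # 4.8
print(chisholm_B(Gamma=20.0, G_tp=1000.0))  # 21/20 = 1.05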
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
if m.get_type() == 'PPP' and self.ppp_fd != -1:
print("got ppp mavlink pkt len=%u" % m.length)
os.write(self.ppp_fd, m.data[:m.length]) | handle an incoming mavlink packet | Below is the the instruction that describes the task:
### Input:
handle an incoming mavlink packet
### Response:
def mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
if m.get_type() == 'PPP' and self.ppp_fd != -1:
print("got ppp mavlink pkt len=%u" % m.length)
os.write(self.ppp_fd, m.data[:m.length]) |
def _prepare_axes(node, sort_key):
"""
Sort axes and combine those that point to the same target and go
in the same direction.
"""
links = node.links
o_links = node._overlapping_links
overlap = {ax2 for ax in links for ax2 in o_links.get(ax, [])}
axes = []
for axis in sorted(links.keys(), key=sort_key):
if axis in overlap: continue
tgt = links[axis]
if axis in o_links:
s, e = axis[0], axis[-1]
axis = '%s%s%s' % (
s, '&'.join(a[1:-1] for a in [axis] + o_links[axis]), e
)
axes.append((axis, tgt))
return axes | Sort axes and combine those that point to the same target and go
in the same direction. | Below is the the instruction that describes the task:
### Input:
Sort axes and combine those that point to the same target and go
in the same direction.
### Response:
def _prepare_axes(node, sort_key):
"""
Sort axes and combine those that point to the same target and go
in the same direction.
"""
links = node.links
o_links = node._overlapping_links
overlap = {ax2 for ax in links for ax2 in o_links.get(ax, [])}
axes = []
for axis in sorted(links.keys(), key=sort_key):
if axis in overlap: continue
tgt = links[axis]
if axis in o_links:
s, e = axis[0], axis[-1]
axis = '%s%s%s' % (
s, '&'.join(a[1:-1] for a in [axis] + o_links[axis]), e
)
axes.append((axis, tgt))
return axes |
def add_cache(self, namespace, key, query_hash, length, cache):
"""Add cached values for the specified date range and query"""
start = 0
bulk_insert = self.bulk_insert
cache_len = len(cache)
row = '(%s,%s,%s,%s,%s,%s)'
query = 'INSERT INTO gauged_cache ' \
'(namespace, key, "hash", length, start, value) VALUES '
execute = self.cursor.execute
query_hash = self.psycopg2.Binary(query_hash)
while start < cache_len:
rows = cache[start:start+bulk_insert]
params = []
for timestamp, value in rows:
params.extend((namespace, key, query_hash, length,
timestamp, value))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert, params)
start += bulk_insert
self.db.commit() | Add cached values for the specified date range and query | Below is the the instruction that describes the task:
### Input:
Add cached values for the specified date range and query
### Response:
def add_cache(self, namespace, key, query_hash, length, cache):
"""Add cached values for the specified date range and query"""
start = 0
bulk_insert = self.bulk_insert
cache_len = len(cache)
row = '(%s,%s,%s,%s,%s,%s)'
query = 'INSERT INTO gauged_cache ' \
'(namespace, key, "hash", length, start, value) VALUES '
execute = self.cursor.execute
query_hash = self.psycopg2.Binary(query_hash)
while start < cache_len:
rows = cache[start:start+bulk_insert]
params = []
for timestamp, value in rows:
params.extend((namespace, key, query_hash, length,
timestamp, value))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert, params)
start += bulk_insert
self.db.commit() |
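To make the batching easier to follow, here is a standalone illustration of just the VALUES-clause string building (no database connection; the '%s' placeholders are psycopg2-style parameters).

row = '(%s,%s,%s,%s,%s,%s)'
rows = [(100, 1.5), (200, 2.5), (300, 3.5)]      # (timestamp, value) pairs
insert = (row + ',') * (len(rows) - 1) + row     # one placeholder group per row
print(insert)
# (%s,%s,%s,%s,%s,%s),(%s,%s,%s,%s,%s,%s),(%s,%s,%s,%s,%s,%s)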
def PopAttributeContainer(self):
"""Pops a serialized attribute container from the list.
Returns:
bytes: serialized attribute container data.
"""
try:
serialized_data = self._list.pop(0)
self.data_size -= len(serialized_data)
return serialized_data
except IndexError:
return None | Pops a serialized attribute container from the list.
Returns:
bytes: serialized attribute container data. | Below is the the instruction that describes the task:
### Input:
Pops a serialized attribute container from the list.
Returns:
bytes: serialized attribute container data.
### Response:
def PopAttributeContainer(self):
"""Pops a serialized attribute container from the list.
Returns:
bytes: serialized attribute container data.
"""
try:
serialized_data = self._list.pop(0)
self.data_size -= len(serialized_data)
return serialized_data
except IndexError:
return None |
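A minimal stand-in showing the FIFO pop and the data_size bookkeeping; the real class is a serialized attribute container list, reduced here to the two attributes the method uses.

class SerializedList:
    # Hypothetical reduced container for illustration only.
    def __init__(self, items):
        self._list = list(items)
        self.data_size = sum(len(item) for item in items)

SerializedList.PopAttributeContainer = PopAttributeContainer  # method shown above

queue = SerializedList([b'event-1', b'event-2'])
print(queue.PopAttributeContainer())  # b'event-1'
print(queue.data_size)                # 7 -- only b'event-2' remains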
def commajoin_as_strings(iterable):
""" Join the given iterable with ',' """
return _(u',').join((six.text_type(i) for i in iterable)) | Join the given iterable with ',' | Below is the the instruction that describes the task:
### Input:
Join the given iterable with ','
### Response:
def commajoin_as_strings(iterable):
""" Join the given iterable with ',' """
return _(u',').join((six.text_type(i) for i in iterable)) |
def plate_exchanger_identifier(self):
'''Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places.
'''
s = ('L' + str(round(self.wavelength*1000, 2))
+ 'A' + str(round(self.amplitude*1000, 2))
+ 'B' + '-'.join([str(i) for i in self.chevron_angles]))
return s | Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places. | Below is the the instruction that describes the task:
### Input:
Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places.
### Response:
def plate_exchanger_identifier(self):
'''Method to create an identifying string in format 'L' + wavelength +
'A' + amplitude + 'B' + chevron angle-chevron angle. Wavelength and
amplitude are specified in units of mm and rounded to two decimal places.
'''
s = ('L' + str(round(self.wavelength*1000, 2))
+ 'A' + str(round(self.amplitude*1000, 2))
+ 'B' + '-'.join([str(i) for i in self.chevron_angles]))
return s |
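The identifier format can be checked standalone; the plate dimensions below are hypothetical (12.6 mm wavelength, 1.45 mm amplitude, 45/35 degree chevrons).

wavelength, amplitude, chevron_angles = 0.0126, 0.00145, (45, 35)  # m, m, degrees
s = ('L' + str(round(wavelength * 1000, 2))
     + 'A' + str(round(amplitude * 1000, 2))
     + 'B' + '-'.join(str(i) for i in chevron_angles))
print(s)  # L12.6A1.45B45-35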
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id) | By default, it must be in the same thread to be executed | Below is the the instruction that describes the task:
### Input:
By default, it must be in the same thread to be executed
### Response:
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id) |
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity) | Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2] | Below is the the instruction that describes the task:
### Input:
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
### Response:
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity) |
def _configure_iam_role(config):
"""Setup a gcp service account with IAM roles.
Creates a gcp service account and binds IAM roles which allow it to control
storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
"""
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
account_id=DEFAULT_SERVICE_ACCOUNT_ID,
project_id=config["provider"]["project_id"])
service_account = _get_service_account(email, config)
if service_account is None:
logger.info("_configure_iam_role: "
"Creating new service account {}".format(
DEFAULT_SERVICE_ACCOUNT_ID))
service_account = _create_service_account(
DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)
assert service_account is not None, "Failed to create service account"
_add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
config["head_node"]["serviceAccounts"] = [{
"email": service_account["email"],
# NOTE: The amount of access is determined by the scope + IAM
# role of the service account. Even if the cloud-platform scope
# gives (scope) access to the whole cloud-platform, the service
# account is limited by the IAM rights specified below.
"scopes": ["https://www.googleapis.com/auth/cloud-platform"]
}]
return config | Setup a gcp service account with IAM roles.
Creates a gcp service account and binds IAM roles which allow it to control
storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured | Below is the the instruction that describes the task:
### Input:
Setup a gcp service account with IAM roles.
Creates a gcp service account and binds IAM roles which allow it to control
storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
### Response:
def _configure_iam_role(config):
"""Setup a gcp service account with IAM roles.
Creates a gcp service account and binds IAM roles which allow it to control
storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
"""
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
account_id=DEFAULT_SERVICE_ACCOUNT_ID,
project_id=config["provider"]["project_id"])
service_account = _get_service_account(email, config)
if service_account is None:
logger.info("_configure_iam_role: "
"Creating new service account {}".format(
DEFAULT_SERVICE_ACCOUNT_ID))
service_account = _create_service_account(
DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)
assert service_account is not None, "Failed to create service account"
_add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
config["head_node"]["serviceAccounts"] = [{
"email": service_account["email"],
# NOTE: The amount of access is determined by the scope + IAM
# role of the service account. Even if the cloud-platform scope
# gives (scope) access to the whole cloud-platform, the service
# account is limited by the IAM rights specified below.
"scopes": ["https://www.googleapis.com/auth/cloud-platform"]
}]
return config |
def sanitize_resources(resource: dict):
"""Cleans up incoming scene data
:param resource: The dict with scene data to be sanitized.
:returns: Cleaned up scene dict.
"""
try:
for shade in resource[ATTR_SHADE_DATA]:
_name = shade.get(ATTR_NAME)
if _name:
shade[ATTR_NAME_UNICODE] = base64_to_unicode(_name)
return resource
except (KeyError, TypeError):
LOGGER.debug("no shade data available")
return None | Cleans up incoming scene data
:param resource: The dict with scene data to be sanitized.
:returns: Cleaned up scene dict. | Below is the the instruction that describes the task:
### Input:
Cleans up incoming scene data
:param resource: The dict with scene data to be sanitized.
:returns: Cleaned up scene dict.
### Response:
def sanitize_resources(resource: dict):
"""Cleans up incoming scene data
:param resource: The dict with scene data to be sanitized.
:returns: Cleaned up scene dict.
"""
try:
for shade in resource[ATTR_SHADE_DATA]:
_name = shade.get(ATTR_NAME)
if _name:
shade[ATTR_NAME_UNICODE] = base64_to_unicode(_name)
return resource
except (KeyError, TypeError):
LOGGER.debug("no shade data available")
return None |
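A self-contained sketch of the decoding step; the ATTR_* constants and the base64_to_unicode helper belong to the surrounding module, so the definitions below are assumptions made only to keep the example runnable.

import base64

ATTR_SHADE_DATA, ATTR_NAME, ATTR_NAME_UNICODE = 'shadeData', 'name', 'name_unicode'  # assumed values

def base64_to_unicode(value):
    # Assumed helper: decode a base64 string into unicode text.
    return base64.b64decode(value).decode('utf-8')

resource = {ATTR_SHADE_DATA: [{ATTR_NAME: base64.b64encode(b'Living room').decode()}]}
print(sanitize_resources(resource))
# {'shadeData': [{'name': 'TGl2aW5nIHJvb20=', 'name_unicode': 'Living room'}]}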
def _get_prioritized_parameters(plugins_dict, is_using_default_value_map, prefer_default=True):
"""
:type plugins_dict: dict(plugin_name => plugin_params)
:param plugins_dict: mapping of plugin name to all plugin params
:type is_using_default_value_map: dict(str => bool)
:param is_using_default_value_map: mapping of parameter name to whether its value is derived
from a default value.
:param prefer_default: if True, will yield if plugin parameters are from default values.
Otherwise, will yield if plugin parameters are *not* from default values.
"""
for plugin_name, plugin_params in plugins_dict.items():
for param_name, param_value in plugin_params.items():
is_using_default = is_using_default_value_map.get(param_name, False)
if is_using_default == prefer_default:
yield plugin_name, param_name, param_value | :type plugins_dict: dict(plugin_name => plugin_params)
:param plugins_dict: mapping of plugin name to all plugin params
:type is_using_default_value_map: dict(str => bool)
:param is_using_default_value_map: mapping of parameter name to whether its value is derived
from a default value.
:param prefer_default: if True, will yield if plugin parameters are from default values.
Otherwise, will yield if plugin parameters are *not* from default values. | Below is the the instruction that describes the task:
### Input:
:type plugins_dict: dict(plugin_name => plugin_params)
:param plugins_dict: mapping of plugin name to all plugin params
:type is_using_default_value_map: dict(str => bool)
:param is_using_default_value_map: mapping of parameter name to whether its value is derived
from a default value.
:param prefer_default: if True, will yield if plugin parameters are from default values.
Otherwise, will yield if plugin parameters are *not* from default values.
### Response:
def _get_prioritized_parameters(plugins_dict, is_using_default_value_map, prefer_default=True):
"""
:type plugins_dict: dict(plugin_name => plugin_params)
:param plugins_dict: mapping of plugin name to all plugin params
:type is_using_default_value_map: dict(str => bool)
:param is_using_default_value_map: mapping of parameter name to whether its value is derived
from a default value.
:param prefer_default: if True, will yield if plugin parameters are from default values.
Otherwise, will yield if plugin parameters are *not* from default values.
"""
for plugin_name, plugin_params in plugins_dict.items():
for param_name, param_value in plugin_params.items():
is_using_default = is_using_default_value_map.get(param_name, False)
if is_using_default == prefer_default:
yield plugin_name, param_name, param_value |
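A quick illustration with made-up plugin data (the plugin and parameter names are hypothetical):

plugins = {
    'HexHighEntropyString': {'hex_limit': 3.0},
    'Base64HighEntropyString': {'base64_limit': 4.5},
}
is_default = {'hex_limit': True, 'base64_limit': False}

print(list(_get_prioritized_parameters(plugins, is_default, prefer_default=True)))
# [('HexHighEntropyString', 'hex_limit', 3.0)]       -- still at its default value
print(list(_get_prioritized_parameters(plugins, is_default, prefer_default=False)))
# [('Base64HighEntropyString', 'base64_limit', 4.5)] -- explicitly overridden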
def prepare_url(self, uri, kwargs):
"""Convert dict for URL params
"""
params = dict()
for key in kwargs:
if key in ('include', 'exclude', 'fields'):
params.update({
key: ','.join(kwargs.get(key))
})
elif key in ('search', 'kind'):
params.update({
key: kwargs.get(key)
})
if params:
params = urlencode(params)
uri = '%s?%s' % (uri, params)
return uri | Convert dict for URL params | Below is the the instruction that describes the task:
### Input:
Convert dict for URL params
### Response:
def prepare_url(self, uri, kwargs):
"""Convert dict for URL params
"""
params = dict()
for key in kwargs:
if key in ('include', 'exclude', 'fields'):
params.update({
key: ','.join(kwargs.get(key))
})
elif key in ('search', 'kind'):
params.update({
key: kwargs.get(key)
})
if params:
params = urlencode(params)
uri = '%s?%s' % (uri, params)
return uri |
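Because self is never used, the method can be exercised as a plain function; the URI and parameters below are hypothetical.

uri = prepare_url(None, '/api/v1/pages', {'include': ['title', 'body'], 'search': 'python'})
print(uri)  # /api/v1/pages?include=title%2Cbody&search=python (the comma is percent-encoded)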
def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None):
"""Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.
Example:
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
array(0.6366637382215669)
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
array([ 0.40594394, 0.69868851, 0.61394099, 0.65266318])
:param x: {expression}
:param y: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:return: {return_stat_scalar}
"""
@delayed
def corr(cov):
with np.errstate(divide='ignore', invalid='ignore'): # these are fine, we are ok with nan's in vaex
return cov[..., 0, 1] / (cov[..., 0, 0] * cov[..., 1, 1])**0.5
if y is None:
if not isinstance(x, (tuple, list)):
raise ValueError("if y not given, x is expected to be a list or tuple, not %r" % x)
if _issequence(x) and not _issequence(x[0]) and len(x) == 2:
x = [x]
if not(_issequence(x) and all([_issequence(k) and len(k) == 2 for k in x])):
raise ValueError("if y not given, x is expected to be a list of lists with length 2, not %r" % x)
# waslist, [xlist,ylist] = vaex.utils.listify(*x)
waslist = True
xlist, ylist = zip(*x)
# print xlist, ylist
else:
waslist, [xlist, ylist] = vaex.utils.listify(x, y)
limits = self.limits(binby, limits, selection=selection, delay=True)
@delayed
def echo(limits):
logger.debug(">>>>>>>>: %r %r", limits, np.array(limits).shape)
echo(limits)
@delayed
def calculate(limits):
results = []
for x, y in zip(xlist, ylist):
task = self.cov(x, y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True,
progress=progressbar)
results.append(corr(task))
return results
progressbar = vaex.utils.progressbars(progress)
correlations = calculate(limits)
@delayed
def finish(correlations):
if sort:
correlations = np.array(correlations)
indices = np.argsort(sort_key(correlations) if sort_key else correlations)[::-1]
sorted_x = list([x[k] for k in indices])
return correlations[indices], sorted_x
value = np.array(vaex.utils.unlistify(waslist, correlations))
return value
return self._delay(delay, finish(delayed_list(correlations))) | Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.
Example:
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
array(0.6366637382215669)
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
array([ 0.40594394, 0.69868851, 0.61394099, 0.65266318])
:param x: {expression}
:param y: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:return: {return_stat_scalar} | Below is the the instruction that describes the task:
### Input:
Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.
Example:
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
array(0.6366637382215669)
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
array([ 0.40594394, 0.69868851, 0.61394099, 0.65266318])
:param x: {expression}
:param y: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:return: {return_stat_scalar}
### Response:
def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None):
"""Calculate the correlation coefficient cov[x,y]/(std[x]*std[y]) between x and y, possibly on a grid defined by binby.
Example:
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)")
array(0.6366637382215669)
>>> df.correlation("x**2+y**2+z**2", "-log(-E+1)", binby="Lz", shape=4)
array([ 0.40594394, 0.69868851, 0.61394099, 0.65266318])
:param x: {expression}
:param y: {expression}
:param binby: {binby}
:param limits: {limits}
:param shape: {shape}
:param selection: {selection}
:param delay: {delay}
:param progress: {progress}
:return: {return_stat_scalar}
"""
@delayed
def corr(cov):
with np.errstate(divide='ignore', invalid='ignore'): # these are fine, we are ok with nan's in vaex
return cov[..., 0, 1] / (cov[..., 0, 0] * cov[..., 1, 1])**0.5
if y is None:
if not isinstance(x, (tuple, list)):
raise ValueError("if y not given, x is expected to be a list or tuple, not %r" % x)
if _issequence(x) and not _issequence(x[0]) and len(x) == 2:
x = [x]
if not(_issequence(x) and all([_issequence(k) and len(k) == 2 for k in x])):
raise ValueError("if y not given, x is expected to be a list of lists with length 2, not %r" % x)
# waslist, [xlist,ylist] = vaex.utils.listify(*x)
waslist = True
xlist, ylist = zip(*x)
# print xlist, ylist
else:
waslist, [xlist, ylist] = vaex.utils.listify(x, y)
limits = self.limits(binby, limits, selection=selection, delay=True)
@delayed
def echo(limits):
logger.debug(">>>>>>>>: %r %r", limits, np.array(limits).shape)
echo(limits)
@delayed
def calculate(limits):
results = []
for x, y in zip(xlist, ylist):
task = self.cov(x, y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True,
progress=progressbar)
results.append(corr(task))
return results
progressbar = vaex.utils.progressbars(progress)
correlations = calculate(limits)
@delayed
def finish(correlations):
if sort:
correlations = np.array(correlations)
indices = np.argsort(sort_key(correlations) if sort_key else correlations)[::-1]
sorted_x = list([x[k] for k in indices])
return correlations[indices], sorted_x
value = np.array(vaex.utils.unlistify(waslist, correlations))
return value
return self._delay(delay, finish(delayed_list(correlations))) |
def register_macro(name: str, func: Callable, allow_overwrite: bool = False) -> None:
"""Register new macro to Circuit.
Args:
name (str): The name of macro.
func (callable): The function to be called.
allow_overwrite (bool, optional): If True, allow to overwrite the existing macro.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing macro, gate or method.
When `allow_overwrite=True`, this error is not raised.
"""
if hasattr(Circuit, name):
if allow_overwrite:
warnings.warn(f"Circuit has attribute `{name}`.")
else:
raise ValueError(f"Circuit has attribute `{name}`.")
if name.startswith("run_with_"):
if allow_overwrite:
warnings.warn(f"Gate name `{name}` may conflict with run of backend.")
else:
raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.")
if not allow_overwrite:
if name in GATE_SET:
raise ValueError(f"Gate '{name}' already exists in gate set.")
if name in GLOBAL_MACROS:
raise ValueError(f"Macro '{name}' already exists.")
GLOBAL_MACROS[name] = func | Register new macro to Circuit.
Args:
name (str): The name of macro.
func (callable): The function to be called.
allow_overwrite (bool, optional): If True, allow to overwrite the existing macro.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing macro, gate or method.
When `allow_overwrite=True`, this error is not raised. | Below is the the instruction that describes the task:
### Input:
Register new macro to Circuit.
Args:
name (str): The name of macro.
func (callable): The function to be called.
allow_overwrite (bool, optional): If True, allow to overwrite the existing macro.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing macro, gate or method.
When `allow_overwrite=True`, this error is not raised.
### Response:
def register_macro(name: str, func: Callable, allow_overwrite: bool = False) -> None:
"""Register new macro to Circuit.
Args:
name (str): The name of macro.
func (callable): The function to be called.
allow_overwrite (bool, optional): If True, allow to overwrite the existing macro.
Otherwise, raise the ValueError.
Raises:
ValueError: The name is duplicated with existing macro, gate or method.
When `allow_overwrite=True`, this error is not raised.
"""
if hasattr(Circuit, name):
if allow_overwrite:
warnings.warn(f"Circuit has attribute `{name}`.")
else:
raise ValueError(f"Circuit has attribute `{name}`.")
if name.startswith("run_with_"):
if allow_overwrite:
warnings.warn(f"Gate name `{name}` may conflict with run of backend.")
else:
raise ValueError(f"Gate name `{name}` shall not start with 'run_with_'.")
if not allow_overwrite:
if name in GATE_SET:
raise ValueError(f"Gate '{name}' already exists in gate set.")
if name in GLOBAL_MACROS:
raise ValueError(f"Macro '{name}' already exists.")
GLOBAL_MACROS[name] = func |
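A hedged sketch of registering a macro, assuming a blueqat-style Circuit that exposes gates as h[...] / cx[...] and looks registered names up in GLOBAL_MACROS when an attribute is missing:

def bell(c, q0, q1):
    # Prepare a Bell pair on qubits q0 and q1 (gate syntax assumed from blueqat).
    return c.h[q0].cx[q0, q1]

register_macro('bell', bell)
# Circuit().bell(0, 1) would then expand to H on q0 followed by CNOT(q0, q1).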
def save(cls, network, phases=[], filename=''):
r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
network = network[0]
# Write to file
if filename == '':
filename = project.name
filename = cls._parse_filename(filename=filename, ext='mat')
d = Dict.to_dict(network=network, phases=phases, interleave=True)
d = FlatDict(d, delimiter='|')
d = sanitize_dict(d)
new_d = {}
for key in list(d.keys()):
new_key = key.replace('|', '_').replace('.', '_')
new_d[new_key] = d.pop(key)
spio.savemat(file_name=filename, mdict=new_d) | r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file | Below is the the instruction that describes the task:
### Input:
r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file
### Response:
def save(cls, network, phases=[], filename=''):
r"""
Write Network to a Mat file for exporting to Matlab.
Parameters
----------
network : OpenPNM Network Object
filename : string
Desired file name, defaults to network name if not given
phases : list of phase objects ([])
Phases that have properties we want to write to file
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
network = network[0]
# Write to file
if filename == '':
filename = project.name
filename = cls._parse_filename(filename=filename, ext='mat')
d = Dict.to_dict(network=network, phases=phases, interleave=True)
d = FlatDict(d, delimiter='|')
d = sanitize_dict(d)
new_d = {}
for key in list(d.keys()):
new_key = key.replace('|', '_').replace('.', '_')
new_d[new_key] = d.pop(key)
spio.savemat(file_name=filename, mdict=new_d) |
def handle_close(self, header, payload):
"""
Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame.
"""
if not payload:
self.close(1000, None)
return
if len(payload) < 2:
raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload))
rv = payload[:2]
if six.PY2:
code = struct.unpack('!H', str(rv))[0]
else:
code = struct.unpack('!H', bytes(rv))[0]
payload = payload[2:]
if payload:
validator = Utf8Validator()
val = validator.validate(payload)
if not val[0]:
raise UnicodeError
if not self._is_valid_close_code(code):
raise WebSocketError('Invalid close code {0}'.format(code))
self.close(code, payload) | Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame. | Below is the the instruction that describes the task:
### Input:
Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame.
### Response:
def handle_close(self, header, payload):
"""
Called when a close frame has been decoded from the stream.
:param header: The decoded `Header`.
:param payload: The bytestring payload associated with the close frame.
"""
if not payload:
self.close(1000, None)
return
if len(payload) < 2:
raise WebSocketError('Invalid close frame: {0} {1}'.format(header, payload))
rv = payload[:2]
if six.PY2:
code = struct.unpack('!H', str(rv))[0]
else:
code = struct.unpack('!H', bytes(rv))[0]
payload = payload[2:]
if payload:
validator = Utf8Validator()
val = validator.validate(payload)
if not val[0]:
raise UnicodeError
if not self._is_valid_close_code(code):
raise WebSocketError('Invalid close code {0}'.format(code))
self.close(code, payload) |
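The close-frame payload layout (a network-order unsigned short status code followed by an optional UTF-8 reason) can be reproduced standalone:

import struct

payload = struct.pack('!H', 1001) + u'going away'.encode('utf-8')
code = struct.unpack('!H', payload[:2])[0]
reason = payload[2:].decode('utf-8')
print(code, reason)  # 1001 going away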
def on_tblFunctions1_itemSelectionChanged(self):
"""Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
# Clear the selection on the 2nd matrix
self.parent.step_fc_functions2.tblFunctions2.clearContents()
self.parent.step_fc_functions2.lblAvailableFunctions2.clear()
self.parent.pbnNext.setEnabled(True)
# Put a dot to the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
selection = self.tblFunctions1.selectedItems()
selItem = (len(selection) == 1) and selection[0] or None
for row in range(self.tblFunctions1.rowCount()):
for column in range(self.tblFunctions1.columnCount()):
item = self.tblFunctions1.item(row, column)
item.setText((item == selItem) and '•' or '') | Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes. | Below is the the instruction that describes the task:
### Input:
Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
### Response:
def on_tblFunctions1_itemSelectionChanged(self):
"""Choose selected hazard x exposure combination.
.. note:: This is an automatic Qt slot
executed when the category selection changes.
"""
# Clear the selection on the 2nd matrix
self.parent.step_fc_functions2.tblFunctions2.clearContents()
self.parent.step_fc_functions2.lblAvailableFunctions2.clear()
self.parent.pbnNext.setEnabled(True)
# Put a dot to the selected cell - note there is no way
# to center an icon without using a custom ItemDelegate
selection = self.tblFunctions1.selectedItems()
selItem = (len(selection) == 1) and selection[0] or None
for row in range(self.tblFunctions1.rowCount()):
for column in range(self.tblFunctions1.columnCount()):
item = self.tblFunctions1.item(row, column)
item.setText((item == selItem) and '•' or '') |
def setRows(self, rows):
""" Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well) """
self._raster[0] = rows
if self._raster[1] == 0:
self._raster[1] = 1 | Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well) | Below is the the instruction that describes the task:
### Input:
Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well)
### Response:
def setRows(self, rows):
""" Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well) """
self._raster[0] = rows
if self._raster[1] == 0:
self._raster[1] = 1 |
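A tiny stand-in makes the behaviour concrete; the real class keeps its raster as a [rows, columns] list, which is the only attribute assumed here.

class FakeGrid:
    def __init__(self):
        self._raster = [0, 0]  # [rows, columns]

FakeGrid.setRows = setRows  # method shown above

g = FakeGrid()
g.setRows(4)
print(g._raster)  # [4, 1] -- columns default to 1 when still unset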
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is often slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network
'''
infoblox = _get_infoblox(**api_opts)
return infoblox.get_network(ipv4addr=ipv4addr, network=network, return_fields=return_fields) | Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is often slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network | Below is the the instruction that describes the task:
### Input:
Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is often slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network
### Response:
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts):
'''
Get list of all networks. This is helpful when looking up subnets to use
with func:nextavailableip
This call is often slow and not cached!
some return_fields
comment,network,network_view,ddns_domainname,disable,enable_ddns
CLI Example:
.. code-block:: bash
salt-call infoblox.get_network
'''
infoblox = _get_infoblox(**api_opts)
return infoblox.get_network(ipv4addr=ipv4addr, network=network, return_fields=return_fields) |
def provideObjectsToLearn(self, learningConfig, plot=False):
"""
Returns the objects in a canonical format to be sent to an experiment.
The input, learningConfig, should have the following format. It is a
mapping from object to a list of features to sample locations from, and
the number of points to sample from each feature. Note that these objects
should be first added with .addObjects().
These features can be either hard-coded with their key or accessed
with .getFeatures.
An other possibility is to directly specify locations. The machine will
use the object to find the corresponding feature (an empty feature will
be sent if the location is not on the object's surface).
learningConfig = {
# hard-coded keys and number of points
"cube": [("face", 5), ("edge", 5), ("vertex", 3)],
# programmatically-accessed keys and number of points
"cylinder": [(feature, 5) for feature in cylinder.getFeatures()],
# specific locations
"sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
}
The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
Parameters:
----------------------------
@param learningConfig (dict)
Configuration for learning, as described above.
"""
objects = {}
for objectName, locationList in learningConfig.iteritems():
sensationList = []
physicalObject = self.objects[objectName]
if plot:
fig, ax = physicalObject.plot()
for element in locationList:
# location name and number of points
if len(element) == 2:
featureName, numLocations = element
for _ in xrange(numLocations):
location = physicalObject.sampleLocationFromFeature(featureName)
sensationList.append(
self._getSDRPairs(
[(location,
physicalObject.getFeatureID(location))] * self.numColumns
)
)
if plot:
x, y, z = tuple(location)
ax.scatter(x, y, z, marker="v", s=100, c="r")
# explicit location
elif len(element) == 3:
location = list(element)
sensationList.append(
self._getSDRPairs(
[(location,
physicalObject.getFeatureID(location))] * self.numColumns
)
)
if plot:
x, y, z = tuple(location)
ax.scatter(x, y, z, marker="v", s=100, c="r")
else:
raise ValueError("Unsupported type for location spec")
objects[objectName] = sensationList
if plot:
plt.title("Learning points for object {}".format(objectName))
plt.savefig("learn_{}.png".format(objectName))
plt.close()
self._checkObjectsToLearn(objects)
return objects | Returns the objects in a canonical format to be sent to an experiment.
The input, learningConfig, should have the following format. It is a
mapping from object to a list of features to sample locations from, and
the number of points to sample from each feature. Note that these objects
should be first added with .addObjects().
These features can be either hard-coded with their key or accessed
with .getFeatures.
An other possibility is to directly specify locations. The machine will
use the object to find the corresponding feature (an empty feature will
be sent if the location is not on the object's surface).
learningConfig = {
# hard-coded keys and number of points
"cube": [("face", 5), ("edge", 5), ("vertex", 3)],
# programmatically-accessed keys and number of points
"cylinder": [(feature, 5) for feature in cylinder.getFeatures()],
# specific locations
"sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
}
The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
Parameters:
----------------------------
@param learningConfig (dict)
Configuration for learning, as described above. | Below is the the instruction that describes the task:
### Input:
Returns the objects in a canonical format to be sent to an experiment.
The input, learningConfig, should have the following format. It is a
mapping from object to a list of features to sample locations from, and
the number of points to sample from each feature. Note that these objects
should be first added with .addObjects().
These features can be either hard-coded with their key or accessed
with .getFeatures.
An other possibility is to directly specify locations. The machine will
use the object to find the corresponding feature (an empty feature will
be sent if the location is not on the object's surface).
learningConfig = {
# hard-coded keys and number of points
"cube": [("face", 5), ("edge", 5), ("vertex", 3)],
# programmatically-accessed keys and number of points
"cylinder": [(feature, 5) for feature in cylinder.getFeatures()],
# specific locations
"sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
}
The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
Parameters:
----------------------------
@param learningConfig (dict)
Configuration for learning, as described above.
### Response:
def provideObjectsToLearn(self, learningConfig, plot=False):
"""
Returns the objects in a canonical format to be sent to an experiment.
The input, learningConfig, should have the following format. It is a
mapping from object to a list of features to sample locations from, and
the number of points to sample from each feature. Note that these objects
should be first added with .addObjects().
These features can be either hard-coded with their key or accessed
with .getFeatures.
Another possibility is to directly specify locations. The machine will
use the object to find the corresponding feature (an empty feature will
be sent if the location is not on the object's surface).
learningConfig = {
# hard-coded keys and number of points
"cube": [("face", 5), ("edge", 5), ("vertex", 3)],
# programmatically-accessed keys and number of points
"cylinder": [(feature, 5) for feature in cylinder.getFeatures()],
# specific locations
"sphere": [(10, 5, 3), (12, 45, 32), (12, 5, 46)],
}
The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
Parameters:
----------------------------
@param learningConfig (dict)
Configuration for learning, as described above.
"""
objects = {}
for objectName, locationList in learningConfig.iteritems():
sensationList = []
physicalObject = self.objects[objectName]
if plot:
fig, ax = physicalObject.plot()
for element in locationList:
# location name and number of points
if len(element) == 2:
featureName, numLocations = element
for _ in xrange(numLocations):
location = physicalObject.sampleLocationFromFeature(featureName)
sensationList.append(
self._getSDRPairs(
[(location,
physicalObject.getFeatureID(location))] * self.numColumns
)
)
if plot:
x, y, z = tuple(location)
ax.scatter(x, y, z, marker="v", s=100, c="r")
# explicit location
elif len(element) == 3:
location = list(element)
sensationList.append(
self._getSDRPairs(
[(location,
physicalObject.getFeatureID(location))] * self.numColumns
)
)
if plot:
x, y, z = tuple(location)
ax.scatter(x, y, z, marker="v", s=100, c="r")
else:
raise ValueError("Unsupported type for location spec")
objects[objectName] = sensationList
if plot:
plt.title("Learning points for object {}".format(objectName))
plt.savefig("learn_{}.png".format(objectName))
plt.close()
self._checkObjectsToLearn(objects)
return objects |
def parametrize_peaks(self, intervals, max_peakwidth=50, min_peakwidth=25, symmetric_bounds=True):
"""
Computes and stores the intonation profile of an audio recording.
:param intervals: these will be the reference set of intervals to which peak positions
correspond. For each interval, the properties of the corresponding peak, if it exists,
will be computed and stored as intonation profile.
:param max_peakwidth: the maximum allowed width of the peak at the base for computing
parameters of the distribution.
:param min_peakwidth: the minimum allowed width of the peak at the base for computing
parameters of the distribution.
"""
assert isinstance(self.pitch_obj.pitch, np.ndarray)
valid_pitch = self.pitch_obj.pitch
valid_pitch = [i for i in valid_pitch if i > -10000]
valid_pitch = np.array(valid_pitch)
parameters = {}
for i in xrange(len(self.histogram.peaks["peaks"][0])):
peak_pos = self.histogram.peaks["peaks"][0][i]
#Set left and right bounds of the distribution.
max_leftbound = peak_pos - max_peakwidth
max_rightbound = peak_pos + max_peakwidth
leftbound = max_leftbound
rightbound = max_rightbound
nearest_valleyindex = utils.find_nearest_index(self.histogram.peaks["valleys"][0], peak_pos)
if peak_pos > self.histogram.peaks["valleys"][0][nearest_valleyindex]:
leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
if len(self.histogram.peaks["valleys"][0][nearest_valleyindex + 1:]) == 0:
rightbound = peak_pos + max_peakwidth
else:
offset = nearest_valleyindex + 1
nearest_valleyindex = utils.find_nearest_index(
self.histogram.peaks["valleys"][0][offset:], peak_pos)
rightbound = self.histogram.peaks["valleys"][0][offset + nearest_valleyindex]
else:
rightbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
if len(self.histogram.peaks["valleys"][0][:nearest_valleyindex]) == 0:
leftbound = peak_pos - max_peakwidth
else:
nearest_valleyindex = utils.find_nearest_index(
self.histogram.peaks["valleys"][0][:nearest_valleyindex], peak_pos)
leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
#In terms of x-axis, leftbound should be at least min_peakwidth
# less than peak_pos, and at max max_peakwidth less than peak_pos,
# and vice versa for the rightbound.
if leftbound < max_leftbound:
leftbound = max_leftbound
elif leftbound > peak_pos - min_peakwidth:
leftbound = peak_pos - min_peakwidth
if rightbound > max_rightbound:
rightbound = max_rightbound
elif rightbound < peak_pos + min_peakwidth:
rightbound = peak_pos + min_peakwidth
#If symmetric bounds are asked for, then make the bounds symmetric
if symmetric_bounds:
if peak_pos - leftbound < rightbound - peak_pos:
imbalance = (rightbound - peak_pos) - (peak_pos - leftbound)
rightbound -= imbalance
else:
imbalance = (peak_pos - leftbound) - (rightbound - peak_pos)
leftbound += imbalance
#extract the distribution and estimate the parameters
distribution = valid_pitch[valid_pitch >= leftbound]
distribution = distribution[distribution <= rightbound]
#print peak_pos, "\t", len(distribution), "\t", leftbound, "\t", rightbound
interval_index = utils.find_nearest_index(intervals, peak_pos)
interval = intervals[interval_index]
_mean = float(np.mean(distribution))
_variance = float(variation(distribution))
_skew = float(skew(distribution))
_kurtosis = float(kurtosis(distribution))
pearson_skew = float(3.0 * (_mean - peak_pos) / np.sqrt(abs(_variance)))
parameters[interval] = {"position": float(peak_pos),
"mean": _mean,
"amplitude": float(self.histogram.peaks["peaks"][1][i]),
"variance": _variance,
"skew1": _skew,
"skew2": pearson_skew,
"kurtosis": _kurtosis}
self.intonation_profile = parameters | Computes and stores the intonation profile of an audio recording.
:param intervals: these will be the reference set of intervals to which peak positions
correspond. For each interval, the properties of the corresponding peak, if it exists,
will be computed and stored as intonation profile.
:param max_peakwidth: the maximum allowed width of the peak at the base for computing
parameters of the distribution.
:param min_peakwidth: the minimum allowed width of the peak at the base for computing
parameters of the distribution. | Below is the the instruction that describes the task:
### Input:
Computes and stores the intonation profile of an audio recording.
:param intervals: these will be the reference set of intervals to which peak positions
correspond. For each interval, the properties of the corresponding peak, if it exists,
will be computed and stored as intonation profile.
:param max_peakwidth: the maximum allowed width of the peak at the base for computing
parameters of the distribution.
:param min_peakwidth: the minimum allowed width of the peak at the base for computing
parameters of the distribution.
### Response:
def parametrize_peaks(self, intervals, max_peakwidth=50, min_peakwidth=25, symmetric_bounds=True):
"""
Computes and stores the intonation profile of an audio recording.
:param intervals: these will be the reference set of intervals to which peak positions
correspond. For each interval, the properties of the corresponding peak, if it exists,
will be computed and stored as intonation profile.
:param max_peakwidth: the maximum allowed width of the peak at the base for computing
parameters of the distribution.
:param min_peakwidth: the minimum allowed width of the peak at the base for computing
parameters of the distribution.
"""
assert isinstance(self.pitch_obj.pitch, np.ndarray)
valid_pitch = self.pitch_obj.pitch
valid_pitch = [i for i in valid_pitch if i > -10000]
valid_pitch = np.array(valid_pitch)
parameters = {}
for i in xrange(len(self.histogram.peaks["peaks"][0])):
peak_pos = self.histogram.peaks["peaks"][0][i]
#Set left and right bounds of the distribution.
max_leftbound = peak_pos - max_peakwidth
max_rightbound = peak_pos + max_peakwidth
leftbound = max_leftbound
rightbound = max_rightbound
nearest_valleyindex = utils.find_nearest_index(self.histogram.peaks["valleys"][0], peak_pos)
if peak_pos > self.histogram.peaks["valleys"][0][nearest_valleyindex]:
leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
if len(self.histogram.peaks["valleys"][0][nearest_valleyindex + 1:]) == 0:
rightbound = peak_pos + max_peakwidth
else:
offset = nearest_valleyindex + 1
nearest_valleyindex = utils.find_nearest_index(
self.histogram.peaks["valleys"][0][offset:], peak_pos)
rightbound = self.histogram.peaks["valleys"][0][offset + nearest_valleyindex]
else:
rightbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
if len(self.histogram.peaks["valleys"][0][:nearest_valleyindex]) == 0:
leftbound = peak_pos - max_peakwidth
else:
nearest_valleyindex = utils.find_nearest_index(
self.histogram.peaks["valleys"][0][:nearest_valleyindex], peak_pos)
leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
#In terms of x-axis, leftbound should be at least min_peakwidth
# less than peak_pos, and at max max_peakwidth less than peak_pos,
# and vice versa for the rightbound.
if leftbound < max_leftbound:
leftbound = max_leftbound
elif leftbound > peak_pos - min_peakwidth:
leftbound = peak_pos - min_peakwidth
if rightbound > max_rightbound:
rightbound = max_rightbound
elif rightbound < peak_pos + min_peakwidth:
rightbound = peak_pos + min_peakwidth
#If symmetric bounds are asked for, then make the bounds symmetric
if symmetric_bounds:
if peak_pos - leftbound < rightbound - peak_pos:
imbalance = (rightbound - peak_pos) - (peak_pos - leftbound)
rightbound -= imbalance
else:
imbalance = (peak_pos - leftbound) - (rightbound - peak_pos)
leftbound += imbalance
#extract the distribution and estimate the parameters
distribution = valid_pitch[valid_pitch >= leftbound]
distribution = distribution[distribution <= rightbound]
#print peak_pos, "\t", len(distribution), "\t", leftbound, "\t", rightbound
interval_index = utils.find_nearest_index(intervals, peak_pos)
interval = intervals[interval_index]
_mean = float(np.mean(distribution))
_variance = float(variation(distribution))
_skew = float(skew(distribution))
_kurtosis = float(kurtosis(distribution))
pearson_skew = float(3.0 * (_mean - peak_pos) / np.sqrt(abs(_variance)))
parameters[interval] = {"position": float(peak_pos),
"mean": _mean,
"amplitude": float(self.histogram.peaks["peaks"][1][i]),
"variance": _variance,
"skew1": _skew,
"skew2": pearson_skew,
"kurtosis": _kurtosis}
self.intonation_profile = parameters |
def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
log_id = log[:-len("_trimlog.txt")]  # strip the suffix; str.rstrip would remove characters, not a suffix
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log)
write_report(log_storage, "trimmomatic_report.csv", log_id) | Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files. | Below is the the instruction that describes the task:
### Input:
Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
### Response:
def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
log_id = log[:-len("_trimlog.txt")]  # strip the suffix; str.rstrip would remove characters, not a suffix
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log)
write_report(log_storage, "trimmomatic_report.csv", log_id) |
def main():
"""
NAME
umich_magic.py
DESCRIPTION
converts UMICH .mag format files to magic_measurements format files
SYNTAX
umich_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
-fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none
-F FILE: specify output file, default is magic_measurements.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize yourself
or e-mail [email protected] for help.
Format of UMICH .mag files:
Spec Treat CSD Intensity Declination Inclination metadata string
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
Intensity assumed to be total moment in 10^3 Am^2 (emu)
Declination: Declination in specimen coordinate system
Inclination: Inclination in specimen coordinate system
metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
"""
# initialize some stuff
dir_path='.'
infile_type="mag"
noave=0
methcode,inst="",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
methcode="LP-NO"
samp_file,ErSamps='',[]
specnum=0
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
magfile=dir_path+'/'+args[ind+1]
try:
input=open(magfile,'r')
except:
print("bad mag file name")
sys.exit()
else:
print("mag_file field is required option")
print(main.__doc__)
sys.exit()
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
samp_file=dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(samp_file)
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
samp_con=sys.argv[ind+1]
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
MagRecs,specs=[],[]
version_num=pmag.get_version()
if infile_type=="mag":
for line in input.readlines():
instcode=""
if len(line)>2:
MagRec={}
MagRec['er_location_name']=er_location_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
rec=line.split()
labfield=0
code1=rec[6].split(';')
date=code1[0].split('/') # break date into mon/day/year
yy=int(date[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(date[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(date[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else: min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]=''
instcode=''
if len(code1)>1:
MagRec["measurement_positions"]=code1[6][2]
else:
MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
if user=="":user=code1[5]
if code1[2][-1]=='C': demag="T"
if code1[2]=='mT': demag="AF"
treat=rec[1].split('.')
if len(treat)==1:treat.append('0')
if demag=='T' and treat!=0:
meas_type="LT-T-Z"
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if demag=="AF":
meas_type="LT-AF-Z"
MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # Af field in T
MagRec["treatment_dc_field"]='0'
MagRec["er_specimen_name"]=rec[0]
if rec[0] not in specs:specs.append(rec[0]) # get a list of specimen names
experiment=rec[0]+":"
MagRec["er_site_name"]=""
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
if "-fsa" in args:
for samp in Samps:
if samp["er_sample_name"] == MagRec["er_sample_name"]:
MagRec["er_location_name"]=samp["er_location_name"]
MagRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6:
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
if MagRec['er_site_name']=="":
print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
if MagRec["er_location_name"]=="":
print('no location name for: ',MagRec["er_specimen_name"])
if rec[1]==".00":rec[1]="0.00"
MagRec["measurement_csd"]=rec[2]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
MagRec["measurement_dec"]=rec[4]
MagRec["measurement_inc"]=rec[5]
MagRec["magic_instrument_codes"]=instcode
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["er_specimen_name"]=rec[0]
MagRec["measurement_number"]='1'
MagRecs.append(MagRec)
MagOuts=[]
for spec in specs: # gather all demag types for this specimen
SpecRecs,meths,measnum=[],[],1
for rec in MagRecs:
if rec['er_specimen_name']==spec:
rec['measurement_number']=str(measnum)
measnum+=1
if rec['magic_method_codes'] not in meths:meths.append(rec['magic_method_codes'])
SpecRecs.append(rec)
expname=spec
if "LT-AF-Z" in meths:expname=expname+ ':LP-DIR-AF'
if "LT-T-Z" in meths:expname=expname+ ':LP-DIR-T'
for rec in SpecRecs:
rec['magic_experiment_name']=expname
MagOuts.append(rec)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file) | NAME
umich_magic.py
DESCRIPTION
converts UMICH .mag format files to magic_measurements format files
SYNTAX
umich_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
-fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none
-F FILE: specify output file, default is magic_measurements.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
Format of UMICH .mag files:
Spec Treat CSD Intensity Declination Inclination metadata string
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
Intensity assumed to be total moment in 10^3 Am^2 (emu)
Declination: Declination in specimen coordinate system
Inclination: Declination in specimen coordinate system
metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...) | Below is the the instruction that describes the task:
### Input:
NAME
umich_magic.py
DESCRIPTION
converts UMICH .mag format files to magic_measurements format files
SYNTAX
umich_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
-fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none
-F FILE: specify output file, default is magic_measurements.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
Format of UMICH .mag files:
Spec Treat CSD Intensity Declination Inclination metadata string
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
Intensity assumed to be total moment in 10^3 Am^2 (emu)
Declination: Declination in specimen coordinate system
Inclination: Declination in specimen coordinate system
metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
### Response:
def main():
"""
NAME
umich_magic.py
DESCRIPTION
converts UMICH .mag format files to magic_measurements format files
SYNTAX
umich_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
-fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none
-F FILE: specify output file, default is magic_measurements.txt
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
Format of UMICH .mag files:
Spec Treat CSD Intensity Declination Inclination metadata string
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
Intensity assumed to be total moment in 10^3 Am^2 (emu)
Declination: Declination in specimen coordinate system
Inclination: Declination in specimen coordinate system
metatdata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
"""
# initialize some stuff
dir_path='.'
infile_type="mag"
noave=0
methcode,inst="",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
methcode="LP-NO"
samp_file,ErSamps='',[]
specnum=0
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
magfile=dir_path+'/'+args[ind+1]
try:
input=open(magfile,'r')
except:
print("bad mag file name")
sys.exit()
else:
print("mag_file field is required option")
print(main.__doc__)
sys.exit()
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
samp_file=dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(samp_file)
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
samp_con=sys.argv[ind+1]
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
MagRecs,specs=[],[]
version_num=pmag.get_version()
if infile_type=="mag":
for line in input.readlines():
instcode=""
if len(line)>2:
MagRec={}
MagRec['er_location_name']=er_location_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
rec=line.split()
labfield=0
code1=rec[6].split(';')
date=code1[0].split('/') # break date into mon/day/year
yy=int(date[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(date[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(date[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else: min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]=''
instcode=''
if len(code1)>1:
MagRec["measurement_positions"]=code1[6][2]
else:
MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
if user=="":user=code1[5]
if code1[2][-1]=='C': demag="T"
if code1[2]=='mT': demag="AF"
treat=rec[1].split('.')
if len(treat)==1:treat.append('0')
if demag=='T' and treat!=0:
meas_type="LT-T-Z"
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if demag=="AF":
meas_type="LT-AF-Z"
MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # Af field in T
MagRec["treatment_dc_field"]='0'
MagRec["er_specimen_name"]=rec[0]
if rec[0] not in specs:specs.append(rec[0]) # get a list of specimen names
experiment=rec[0]+":"
MagRec["er_site_name"]=""
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
if "-fsa" in args:
for samp in Samps:
if samp["er_sample_name"] == MagRec["er_sample_name"]:
MagRec["er_location_name"]=samp["er_location_name"]
MagRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6:
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
if MagRec['er_site_name']=="":
print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
if MagRec["er_location_name"]=="":
print('no location name for: ',MagRec["er_specimen_name"])
if rec[1]==".00":rec[1]="0.00"
MagRec["measurement_csd"]=rec[2]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
MagRec["measurement_dec"]=rec[4]
MagRec["measurement_inc"]=rec[5]
MagRec["magic_instrument_codes"]=instcode
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["er_specimen_name"]=rec[0]
MagRec["measurement_number"]='1'
MagRecs.append(MagRec)
MagOuts=[]
for spec in specs: # gather all demag types for this specimen
SpecRecs,meths,measnum=[],[],1
for rec in MagRecs:
if rec['er_specimen_name']==spec:
rec['measurement_number']=str(measnum)
measnum+=1
if rec['magic_method_codes'] not in meths:meths.append(rec['magic_method_codes'])
SpecRecs.append(rec)
expname=spec
if "LT-AF-Z" in meths:expname=expname+ ':LP-DIR-AF'
if "LT-T-Z" in meths:expname=expname+ ':LP-DIR-T'
for rec in SpecRecs:
rec['magic_experiment_name']=expname
MagOuts.append(rec)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file) |
def has_no_title(self, title, **kwargs):
"""
Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match.
"""
try:
self.assert_no_title(title, **kwargs)
return True
except ExpectationNotMet:
return False | Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match. | Below is the the instruction that describes the task:
### Input:
Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match.
### Response:
def has_no_title(self, title, **kwargs):
"""
Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match.
"""
try:
self.assert_no_title(title, **kwargs)
return True
except ExpectationNotMet:
return False |
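The predicate above simply inverts an assertion by swallowing the expectation error. A self-contained sketch of that pattern, with a stub page object standing in for the real session class (the stub and its title check are assumptions, not the library's implementation):

class ExpectationNotMet(Exception):
    pass

class StubPage:
    def __init__(self, title):
        self.title = title

    def assert_no_title(self, title, **kwargs):
        # Raise when the title unexpectedly matches, mirroring the assert/has pairing.
        if self.title == title:
            raise ExpectationNotMet("title matched: %r" % title)
        return True

    def has_no_title(self, title, **kwargs):
        try:
            self.assert_no_title(title, **kwargs)
            return True
        except ExpectationNotMet:
            return False

page = StubPage("Home | Example")
print(page.has_no_title("Login"))           # True
print(page.has_no_title("Home | Example"))  # False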
def __normalize_progress(self):
"""
Adjust the funding progress filter to be a factor of 10
"""
progress = self['funding_progress']
if progress % 10 != 0:
progress = round(float(progress) / 10)
progress = int(progress) * 10
self['funding_progress'] = progress | Adjust the funding progress filter to be a factor of 10 | Below is the the instruction that describes the task:
### Input:
Adjust the funding progress filter to be a factor of 10
### Response:
def __normalize_progress(self):
"""
Adjust the funding progress filter to be a factor of 10
"""
progress = self['funding_progress']
if progress % 10 != 0:
progress = round(float(progress) / 10)
progress = int(progress) * 10
self['funding_progress'] = progress |
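The rounding arithmetic in isolation, showing how an arbitrary percentage snaps to the nearest multiple of 10 (the sample values are illustrative):

def normalize_progress(progress):
    # Snap a funding-progress percentage to the nearest factor of 10.
    if progress % 10 != 0:
        progress = int(round(float(progress) / 10)) * 10
    return progress

print(normalize_progress(47))  # 50
print(normalize_progress(44))  # 40
print(normalize_progress(70))  # 70 (already a multiple of 10)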
def process_incoming_tuples(self):
"""Should be called when tuple was buffered into in_stream
This method is equivalent to ``addBoltTasks()`` but
is designed for event-driven single-thread bolt.
"""
# back-pressure
if self.output_helper.is_out_queue_available():
self._read_tuples_and_execute()
self.output_helper.send_out_tuples()
else:
# update outqueue full count
self.bolt_metrics.update_out_queue_full_count() | Should be called when tuple was buffered into in_stream
This method is equivalent to ``addBoltTasks()`` but
is designed for event-driven single-thread bolt. | Below is the the instruction that describes the task:
### Input:
Should be called when tuple was buffered into in_stream
This method is equivalent to ``addBoltTasks()`` but
is designed for event-driven single-thread bolt.
### Response:
def process_incoming_tuples(self):
"""Should be called when tuple was buffered into in_stream
This method is equivalent to ``addBoltTasks()`` but
is designed for event-driven single-thread bolt.
"""
# back-pressure
if self.output_helper.is_out_queue_available():
self._read_tuples_and_execute()
self.output_helper.send_out_tuples()
else:
# update outqueue full count
self.bolt_metrics.update_out_queue_full_count() |
def share(self, name, item):
'''
Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath.
'''
try:
if isinstance(item, s_telepath.Aware):
item.onTeleShare(self, name)
self.shared[name] = item
except Exception:
logger.exception(f'onTeleShare() error for: {name}') | Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath. | Below is the the instruction that describes the task:
### Input:
Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath.
### Response:
def share(self, name, item):
'''
Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath.
'''
try:
if isinstance(item, s_telepath.Aware):
item.onTeleShare(self, name)
self.shared[name] = item
except Exception:
logger.exception(f'onTeleShare() error for: {name}') |
def scencd(sc, sclkch, MXPART=None):
"""
Encode character representation of spacecraft clock time into a
double precision number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scencd_c.html
:param sc: NAIF spacecraft identification code.
:type sc: int
:param sclkch: Character representation of a spacecraft clock.
:type sclkch: str
:param MXPART: Maximum number of spacecraft clock partitions.
:type MXPART: int
:return: Encoded representation of the clock count.
:rtype: float
"""
sc = ctypes.c_int(sc)
sclkch = stypes.stringToCharP(sclkch)
sclkdp = ctypes.c_double()
libspice.scencd_c(sc, sclkch, ctypes.byref(sclkdp))
return sclkdp.value | Encode character representation of spacecraft clock time into a
double precision number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scencd_c.html
:param sc: NAIF spacecraft identification code.
:type sc: int
:param sclkch: Character representation of a spacecraft clock.
:type sclkch: str
:param MXPART: Maximum number of spacecraft clock partitions.
:type MXPART: int
:return: Encoded representation of the clock count.
:rtype: float | Below is the the instruction that describes the task:
### Input:
Encode character representation of spacecraft clock time into a
double precision number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scencd_c.html
:param sc: NAIF spacecraft identification code.
:type sc: int
:param sclkch: Character representation of a spacecraft clock.
:type sclkch: str
:param MXPART: Maximum number of spacecraft clock partitions.
:type MXPART: int
:return: Encoded representation of the clock count.
:rtype: float
### Response:
def scencd(sc, sclkch, MXPART=None):
"""
Encode character representation of spacecraft clock time into a
double precision number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scencd_c.html
:param sc: NAIF spacecraft identification code.
:type sc: int
:param sclkch: Character representation of a spacecraft clock.
:type sclkch: str
:param MXPART: Maximum number of spacecraft clock partitions.
:type MXPART: int
:return: Encoded representation of the clock count.
:rtype: float
"""
sc = ctypes.c_int(sc)
sclkch = stypes.stringToCharP(sclkch)
sclkdp = ctypes.c_double()
libspice.scencd_c(sc, sclkch, ctypes.byref(sclkdp))
return sclkdp.value |
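A usage sketch against the SPICE toolkit, assuming spiceypy is installed and that a leapseconds kernel plus the spacecraft's SCLK kernel have been furnished first; the kernel filenames and the clock string are placeholders, and -82 is Cassini's NAIF ID.

import spiceypy

# Kernel names are assumptions; any valid LSK + SCLK pair for the spacecraft works.
spiceypy.furnsh("naif0012.tls")   # leapseconds kernel
spiceypy.furnsh("cas00172.tsc")   # Cassini SCLK kernel

sclkdp = spiceypy.scencd(-82, "1/1465644281.165")  # -82 = Cassini
print(sclkdp)  # encoded clock ticks as a double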
def is_iq_handler(type_, payload_cls, coro, *, with_send_reply=False):
"""
Return true if `coro` has been decorated with :func:`iq_handler` for the
given `type_` and `payload_cls` and the specified keyword arguments.
"""
try:
handlers = get_magic_attr(coro)
except AttributeError:
return False
hs = HandlerSpec(
(_apply_iq_handler, (type_, payload_cls)),
)
try:
return handlers[hs] == dict(with_send_reply=with_send_reply)
except KeyError:
return False | Return true if `coro` has been decorated with :func:`iq_handler` for the
given `type_` and `payload_cls` and the specified keyword arguments. | Below is the the instruction that describes the task:
### Input:
Return true if `coro` has been decorated with :func:`iq_handler` for the
given `type_` and `payload_cls` and the specified keyword arguments.
### Response:
def is_iq_handler(type_, payload_cls, coro, *, with_send_reply=False):
"""
Return true if `coro` has been decorated with :func:`iq_handler` for the
given `type_` and `payload_cls` and the specified keyword arguments.
"""
try:
handlers = get_magic_attr(coro)
except AttributeError:
return False
hs = HandlerSpec(
(_apply_iq_handler, (type_, payload_cls)),
)
try:
return handlers[hs] == dict(with_send_reply=with_send_reply)
except KeyError:
return False |
def _add_chart_graphicFrame(self, rId, x, y, cx, cy):
"""Return new `p:graphicFrame` element appended to this shape tree.
The `p:graphicFrame` element has the specified position and size and
refers to the chart part identified by *rId*.
"""
shape_id = self._next_shape_id
name = 'Chart %d' % (shape_id-1)
graphicFrame = CT_GraphicalObjectFrame.new_chart_graphicFrame(
shape_id, name, rId, x, y, cx, cy
)
self._spTree.append(graphicFrame)
return graphicFrame | Return new `p:graphicFrame` element appended to this shape tree.
The `p:graphicFrame` element has the specified position and size and
refers to the chart part identified by *rId*. | Below is the the instruction that describes the task:
### Input:
Return new `p:graphicFrame` element appended to this shape tree.
The `p:graphicFrame` element has the specified position and size and
refers to the chart part identified by *rId*.
### Response:
def _add_chart_graphicFrame(self, rId, x, y, cx, cy):
"""Return new `p:graphicFrame` element appended to this shape tree.
The `p:graphicFrame` element has the specified position and size and
refers to the chart part identified by *rId*.
"""
shape_id = self._next_shape_id
name = 'Chart %d' % (shape_id-1)
graphicFrame = CT_GraphicalObjectFrame.new_chart_graphicFrame(
shape_id, name, rId, x, y, cx, cy
)
self._spTree.append(graphicFrame)
return graphicFrame |
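This helper sits behind python-pptx's public chart API; a sketch of the documented entry point that ends up appending the p:graphicFrame, assuming python-pptx is installed (layout index and data values are illustrative):

from pptx import Presentation
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[5])  # "Title Only" layout

chart_data = CategoryChartData()
chart_data.categories = ["East", "West", "Midwest"]
chart_data.add_series("Sales", (19.2, 21.4, 16.7))

# add_chart() builds the chart part and appends the graphicFrame that references it.
x, y, cx, cy = Inches(1), Inches(1), Inches(8), Inches(5)
slide.shapes.add_chart(XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data)
prs.save("chart.pptx")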
def get_curie(self, uri):
'''Get a CURIE from a URI '''
prefix = self.get_curie_prefix(uri)
if prefix is not None:
key = self.curie_map[prefix]
return '%s:%s' % (prefix, uri[len(key):len(uri)])
return None | Get a CURIE from a URI | Below is the the instruction that describes the task:
### Input:
Get a CURIE from a URI
### Response:
def get_curie(self, uri):
'''Get a CURIE from a URI '''
prefix = self.get_curie_prefix(uri)
if prefix is not None:
key = self.curie_map[prefix]
return '%s:%s' % (prefix, uri[len(key):len(uri)])
return None |
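A self-contained sketch of the prefix-to-CURIE compaction, with a toy curie_map and a longest-prefix lookup standing in for get_curie_prefix (both are assumptions for illustration):

curie_map = {
    "GO": "http://purl.obolibrary.org/obo/GO_",
    "OBO": "http://purl.obolibrary.org/obo/",
}

def get_curie_prefix(uri):
    # Prefer the longest matching namespace so GO_ wins over the bare OBO prefix.
    best = None
    for prefix, ns in curie_map.items():
        if uri.startswith(ns) and (best is None or len(ns) > len(curie_map[best])):
            best = prefix
    return best

def get_curie(uri):
    prefix = get_curie_prefix(uri)
    if prefix is not None:
        key = curie_map[prefix]
        return "%s:%s" % (prefix, uri[len(key):])
    return None

print(get_curie("http://purl.obolibrary.org/obo/GO_0008150"))  # GO:0008150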
def notify(self, event_id):
"""Let the FlowControl system know that there is an event."""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event") | Let the FlowControl system know that there is an event. | Below is the the instruction that describes the task:
### Input:
Let the FlowControl system know that there is an event.
### Response:
def notify(self, event_id):
"""Let the FlowControl system know that there is an event."""
self._event_buffer.extend([event_id])
self._event_count += 1
if self._event_count >= self.threshold:
logger.debug("Eventcount >= threshold")
self.make_callback(kind="event") |
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
"""
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0 | r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`. | Below is the the instruction that describes the task:
### Input:
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
### Response:
def obfn_dfd(self):
r"""Compute data fidelity term :math:`(1/2) \| W \left( \sum_m
\mathbf{d}_m * \mathbf{x}_m - \mathbf{s} \right) \|_2^2`.
"""
Ef = sl.inner(self.Zf, self.obfn_fvarf(), axis=self.cri.axisM) \
- self.Sf
return (np.linalg.norm(self.W * sl.irfftn(Ef, self.cri.Nv,
self.cri.axisN))**2) / 2.0 |
def is_almost_full(self):
"""Return whether the activity is almost full (>90%)."""
capacity = self.get_true_capacity()
if capacity != -1:
num_signed_up = self.eighthsignup_set.count()
return num_signed_up >= (0.9 * capacity)
return False | Return whether the activity is almost full (>90%). | Below is the the instruction that describes the task:
### Input:
Return whether the activity is almost full (>90%).
### Response:
def is_almost_full(self):
"""Return whether the activity is almost full (>90%)."""
capacity = self.get_true_capacity()
if capacity != -1:
num_signed_up = self.eighthsignup_set.count()
return num_signed_up >= (0.9 * capacity)
return False |
def lemmatize(self, tokens):
"""define list of lemmas"""
entries = self.entries
forms_and_lemmas = self.forms_and_lemmas
lemma_list = [x[0] for x in entries]
"""Provide a lemma for each token"""
lemmatized = []
for token in tokens:
"""check for a match between token and list of lemmas"""
if token in lemma_list:
lemmed = (token, token)
lemmatized.append(lemmed)
else:
"""if no match check for a match between token and list of lemma forms"""
lemma = [k for k, v in forms_and_lemmas.items() if token in v]
if lemma != []:
lemmed = (token, lemma)
lemmatized.append(lemmed)
elif lemma == []:
"""if no match apply regular expressions and check for a match against the list of lemmas again"""
regexed = regex(token)
if regexed in lemma_list:
lemmed = (token, regexed)
lemmatized.append(lemmed)
else:
lemmed = (token, "None")
lemmatized.append(lemmed)
return lemmatized | define list of lemmas | Below is the the instruction that describes the task:
### Input:
define list of lemmas
### Response:
def lemmatize(self, tokens):
"""define list of lemmas"""
entries = self.entries
forms_and_lemmas = self.forms_and_lemmas
lemma_list = [x[0] for x in entries]
"""Provide a lemma for each token"""
lemmatized = []
for token in tokens:
"""check for a match between token and list of lemmas"""
if token in lemma_list:
lemmed = (token, token)
lemmatized.append(lemmed)
else:
"""if no match check for a match between token and list of lemma forms"""
lemma = [k for k, v in forms_and_lemmas.items() if token in v]
if lemma != []:
lemmed = (token, lemma)
lemmatized.append(lemmed)
elif lemma == []:
"""if no match apply regular expressions and check for a match against the list of lemmas again"""
regexed = regex(token)
if regexed in lemma_list:
lemmed = (token, regexed)
lemmatized.append(lemmed)
else:
lemmed = (token, "None")
lemmatized.append(lemmed)
return lemmatized |
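A self-contained sketch of the three-stage lookup (exact lemma, form table, regex fallback) with toy data; the sample entries and the simplified regex() stand in for the real dictionaries and normalisation rules:

entries = [("amor", "love"), ("bellum", "war")]
forms_and_lemmas = {"amor": ["amorem", "amoris"], "bellum": ["bella", "bello"]}

def regex(token):
    # Stand-in for the real rules: just strip an enclitic -que.
    return token[:-3] if token.endswith("que") else token

def lemmatize(tokens):
    lemma_list = [x[0] for x in entries]
    lemmatized = []
    for token in tokens:
        if token in lemma_list:
            lemmatized.append((token, token))
            continue
        lemma = [k for k, v in forms_and_lemmas.items() if token in v]
        if lemma:
            lemmatized.append((token, lemma))
        elif regex(token) in lemma_list:
            lemmatized.append((token, regex(token)))
        else:
            lemmatized.append((token, "None"))
    return lemmatized

print(lemmatize(["amor", "bella", "bellumque", "ignotum"]))
# [('amor', 'amor'), ('bella', ['bellum']), ('bellumque', 'bellum'), ('ignotum', 'None')]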
def update_resource(self, resource, filename, change=None):
"""Update resource from uri to filename on local system.
Update means three things:
1. GET resources
2. set mtime in local time to be equal to timestamp in UTC (should perhaps
or at least warn if different from LastModified from the GET response instead
but maybe warn if different (or just earlier than) the lastmod we expected
from the resource list
3. check that resource matches expected information
Also update self.last_timestamp if the timestamp (in source frame) of this
resource is later and the current value.
Returns the number of resources updated/created (0 or 1)
"""
path = os.path.dirname(filename)
distutils.dir_util.mkpath(path)
num_updated = 0
if (self.dryrun):
self.logger.info(
"dryrun: would GET %s --> %s" %
(resource.uri, filename))
else:
# 1. GET
for try_i in range(1, self.tries + 1):
try:
r = requests.get(resource.uri, timeout=self.timeout, stream=True)
# Fail on 4xx or 5xx
r.raise_for_status()
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
num_updated += 1
break
except requests.Timeout as e:
if try_i < self.tries:
msg = 'Download timed out, retrying...'
self.logger.info(msg)
# Continue loop
else:
# No more tries left, so fail
msg = "Failed to GET %s after %s tries -- %s" % (resource.uri, self.tries, str(e))
if (self.ignore_failures):
self.logger.warning(msg)
return(num_updated)
else:
raise ClientFatalError(msg)
except (requests.RequestException, IOError) as e:
msg = "Failed to GET %s -- %s" % (resource.uri, str(e))
if (self.ignore_failures):
self.logger.warning(msg)
return(num_updated)
else:
raise ClientFatalError(msg)
# 2. set timestamp if we have one
if (resource.timestamp is not None):
unixtime = int(resource.timestamp) # no fractional
os.utime(filename, (unixtime, unixtime))
if (resource.timestamp > self.last_timestamp):
self.last_timestamp = resource.timestamp
self.log_event(Resource(resource=resource, change=change))
# 3. sanity check
length = os.stat(filename).st_size
if (resource.length is not None and resource.length != length):
self.logger.info(
"Downloaded size for %s of %d bytes does not match expected %d bytes" %
(resource.uri, length, resource.length))
if (len(self.hashes) > 0):
self.check_hashes(filename, resource)
return(num_updated) | Update resource from uri to filename on local system.
Update means three things:
1. GET resources
2. set mtime in local time to be equal to timestamp in UTC (should perhaps
or at least warn if different from LastModified from the GET response instead
but maybe warn if different (or just earlier than) the lastmod we expected
from the resource list
3. check that resource matches expected information
Also update self.last_timestamp if the timestamp (in source frame) of this
resource is later and the current value.
Returns the number of resources updated/created (0 or 1) | Below is the the instruction that describes the task:
### Input:
Update resource from uri to filename on local system.
Update means three things:
1. GET resources
2. set mtime in local time to be equal to timestamp in UTC (should perhaps
or at least warn if different from LastModified from the GET response instead
but maybe warn if different (or just earlier than) the lastmod we expected
from the resource list
3. check that resource matches expected information
Also update self.last_timestamp if the timestamp (in source frame) of this
resource is later and the current value.
Returns the number of resources updated/created (0 or 1)
### Response:
def update_resource(self, resource, filename, change=None):
"""Update resource from uri to filename on local system.
Update means three things:
1. GET resources
2. set mtime in local time to be equal to timestamp in UTC (should perhaps
or at least warn if different from LastModified from the GET response instead
but maybe warn if different (or just earlier than) the lastmod we expected
from the resource list
3. check that resource matches expected information
Also update self.last_timestamp if the timestamp (in source frame) of this
resource is later and the current value.
Returns the number of resources updated/created (0 or 1)
"""
path = os.path.dirname(filename)
distutils.dir_util.mkpath(path)
num_updated = 0
if (self.dryrun):
self.logger.info(
"dryrun: would GET %s --> %s" %
(resource.uri, filename))
else:
# 1. GET
for try_i in range(1, self.tries + 1):
try:
r = requests.get(resource.uri, timeout=self.timeout, stream=True)
# Fail on 4xx or 5xx
r.raise_for_status()
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
num_updated += 1
break
except requests.Timeout as e:
if try_i < self.tries:
msg = 'Download timed out, retrying...'
self.logger.info(msg)
# Continue loop
else:
# No more tries left, so fail
msg = "Failed to GET %s after %s tries -- %s" % (resource.uri, self.tries, str(e))
if (self.ignore_failures):
self.logger.warning(msg)
return(num_updated)
else:
raise ClientFatalError(msg)
except (requests.RequestException, IOError) as e:
msg = "Failed to GET %s -- %s" % (resource.uri, str(e))
if (self.ignore_failures):
self.logger.warning(msg)
return(num_updated)
else:
raise ClientFatalError(msg)
# 2. set timestamp if we have one
if (resource.timestamp is not None):
unixtime = int(resource.timestamp) # no fractional
os.utime(filename, (unixtime, unixtime))
if (resource.timestamp > self.last_timestamp):
self.last_timestamp = resource.timestamp
self.log_event(Resource(resource=resource, change=change))
# 3. sanity check
length = os.stat(filename).st_size
if (resource.length is not None and resource.length != length):
self.logger.info(
"Downloaded size for %s of %d bytes does not match expected %d bytes" %
(resource.uri, length, resource.length))
if (len(self.hashes) > 0):
self.check_hashes(filename, resource)
return(num_updated) |
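The download-and-stamp core on its own: a streamed requests GET with retries on timeout, followed by os.utime so the file mtime matches the resource timestamp (URL, filename and timestamp below are placeholders):

import os
import requests

def fetch(uri, filename, timestamp=None, tries=3, timeout=30):
    # Stream the resource to disk, retrying only on timeouts.
    for try_i in range(1, tries + 1):
        try:
            r = requests.get(uri, timeout=timeout, stream=True)
            r.raise_for_status()
            with open(filename, "wb") as fd:
                for chunk in r.iter_content(chunk_size=1024):
                    fd.write(chunk)
            break
        except requests.Timeout:
            if try_i == tries:
                raise
    if timestamp is not None:
        unixtime = int(timestamp)
        os.utime(filename, (unixtime, unixtime))  # atime and mtime in source frame
    return os.stat(filename).st_size

# fetch("https://example.org/resources/1.dat", "local/1.dat", timestamp=1420070400)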
def bbox(self):
bbox = self._df[['id', 'x1', 'y1', 'x2', 'y2']].copy()
# TODO: Fix this to become x, y, w, h
if self.bbox_with_size:
bbox['y2'] -= bbox['y1']
bbox['x2'] -= bbox['x1']
"""Converts a dataframe to a list of arrays
:param df:
:param length:
:return:
"""
return to_array_list(bbox) | Converts a dataframe to a list of arrays
:param df:
:param length:
:return: | Below is the the instruction that describes the task:
### Input:
Converts a dataframe to a list of arrays
:param df:
:param length:
:return:
### Response:
def bbox(self):
bbox = self._df[['id', 'x1', 'y1', 'x2', 'y2']].copy()
# TODO: Fix this to become x, y, w, h
if self.bbox_with_size:
bbox['y2'] -= bbox['y1']
bbox['x2'] -= bbox['x1']
"""Converts a dataframe to a list of arrays
:param df:
:param length:
:return:
"""
return to_array_list(bbox) |
def available_repositories(self, **kwargs):
"""Lists available repositories for the repository set
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
if 'data' not in kwargs:
kwargs['data'] = dict()
kwargs['data']['product_id'] = self.product.id
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.get(self.path('available_repositories'), **kwargs)
return _handle_response(response, self._server_config) | Lists available repositories for the repository set
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message. | Below is the the instruction that describes the task:
### Input:
Lists available repositories for the repository set
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
### Response:
def available_repositories(self, **kwargs):
"""Lists available repositories for the repository set
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
if 'data' not in kwargs:
kwargs['data'] = dict()
kwargs['data']['product_id'] = self.product.id
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
response = client.get(self.path('available_repositories'), **kwargs)
return _handle_response(response, self._server_config) |
def mps_device_name():
"""
Returns name of MPS device that will be used, else None.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
n = 256
c_name = (_ctypes.c_char * n)()
ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
if ret == 0:
return _decode_bytes_to_native_string(c_name.value)
else:
return None | Returns name of MPS device that will be used, else None. | Below is the the instruction that describes the task:
### Input:
Returns name of MPS device that will be used, else None.
### Response:
def mps_device_name():
"""
Returns name of MPS device that will be used, else None.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
n = 256
c_name = (_ctypes.c_char * n)()
ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
if ret == 0:
return _decode_bytes_to_native_string(c_name.value)
else:
return None |
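The ctypes pattern in isolation — allocate a fixed char buffer, let native code fill it, then decode on a zero return — with ctypes.memmove standing in for the real TCMPSMetalDeviceName call (the device string is made up):

import ctypes

n = 256
c_name = (ctypes.c_char * n)()  # zero-initialised output buffer

# Stand-in for lib.TCMPSMetalDeviceName(ctypes.byref(c_name), ctypes.c_int32(n)):
ctypes.memmove(c_name, b"AMD Radeon Pro 560", 18)
ret = 0

print(c_name.value.decode("utf-8") if ret == 0 else None)  # AMD Radeon Pro 560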
def MI_associators(self,
env,
objectName,
assocClassName,
resultClassName,
role,
resultRole,
propertyList):
# pylint: disable=invalid-name
"""Return instances associated to a given object.
Implements the WBEM operation Associators in terms
of the references method. A derived class will not normally
override this method.
"""
# NOTE: This should honor the parameters resultClassName, role,
# resultRole, and propertyList
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_associators called. ' \
'assocClass: %s' % (assocClassName))
if not assocClassName:
raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
"Empty assocClassName passed to Associators")
ch = env.get_cimom_handle()
model = pywbem.CIMInstance(classname=assocClassName)
model.path = pywbem.CIMInstanceName(classname=assocClassName,
namespace=objectName.namespace)
gen = self.references(env=env,
object_name=objectName,
model=model,
result_class_name=resultClassName,
role=role,
result_role=None,
keys_only=False)
if gen is None:
logger.log_debug('references() returned None instead of ' \
'generator object')
return
for inst in gen:
for prop in inst.properties.values():
lpname = prop.name.lower()
if prop.type != 'reference':
continue
if role and role.lower() == lpname:
continue
if resultRole and resultRole.lower() != lpname:
continue
if self.paths_equal(prop.value, objectName):
continue
if resultClassName and \
resultClassName.lower() != prop.value.classname.lower():
continue
try:
if prop.value.namespace is None:
prop.value.namespace = objectName.namespace
inst = ch.GetInstance(prop.value, propertyList)
except pywbem.CIMError as exc:
num, msg = exc.args
if num == pywbem.CIM_ERR_NOT_FOUND:
continue
else:
raise
if inst.path is None:
inst.path = prop.value
yield inst
logger.log_debug('CIMProvider2 MI_associators returning') | Return instances associated to a given object.
Implements the WBEM operation Associators in terms
of the references method. A derived class will not normally
override this method. | Below is the the instruction that describes the task:
### Input:
Return instances associated to a given object.
Implements the WBEM operation Associators in terms
of the references method. A derived class will not normally
override this method.
### Response:
def MI_associators(self,
env,
objectName,
assocClassName,
resultClassName,
role,
resultRole,
propertyList):
# pylint: disable=invalid-name
"""Return instances associated to a given object.
Implements the WBEM operation Associators in terms
of the references method. A derived class will not normally
override this method.
"""
# NOTE: This should honor the parameters resultClassName, role,
# resultRole, and propertyList
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_associators called. ' \
'assocClass: %s' % (assocClassName))
if not assocClassName:
raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
"Empty assocClassName passed to Associators")
ch = env.get_cimom_handle()
model = pywbem.CIMInstance(classname=assocClassName)
model.path = pywbem.CIMInstanceName(classname=assocClassName,
namespace=objectName.namespace)
gen = self.references(env=env,
object_name=objectName,
model=model,
result_class_name=resultClassName,
role=role,
result_role=None,
keys_only=False)
if gen is None:
logger.log_debug('references() returned None instead of ' \
'generator object')
return
for inst in gen:
for prop in inst.properties.values():
lpname = prop.name.lower()
if prop.type != 'reference':
continue
if role and role.lower() == lpname:
continue
if resultRole and resultRole.lower() != lpname:
continue
if self.paths_equal(prop.value, objectName):
continue
if resultClassName and \
resultClassName.lower() != prop.value.classname.lower():
continue
try:
if prop.value.namespace is None:
prop.value.namespace = objectName.namespace
inst = ch.GetInstance(prop.value, propertyList)
except pywbem.CIMError as exc:
num, msg = exc.args
if num == pywbem.CIM_ERR_NOT_FOUND:
continue
else:
raise
if inst.path is None:
inst.path = prop.value
yield inst
logger.log_debug('CIMProvider2 MI_associators returning') |
def get_cipher(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string).
"""
legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({6: 'AES-128-CMAC', 8: 'GCMP', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3])) | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string). | Below is the the instruction that describes the task:
### Input:
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string).
### Response:
def get_cipher(data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n336.
Positional arguments:
data -- bytearray data to read.
Returns:
WiFi stream cipher used by the access point (string).
"""
legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104', }
key = data[3]
if ieee80211_oui == bytes(data[:3]):
legend.update({6: 'AES-128-CMAC', 8: 'GCMP', })
elif ms_oui != bytes(data[:3]):
key = None
return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3])) |
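A self-contained sketch with the two OUI constants defined locally — 00-0f-ac for the IEEE 802.11 RSN suites and 00-50-f2 for the Microsoft/WPA suites, both stated here as assumptions — and a few sample suite selectors:

ieee80211_oui = b"\x00\x0f\xac"  # RSN cipher-suite OUI (assumption)
ms_oui = b"\x00\x50\xf2"         # Microsoft/WPA cipher-suite OUI (assumption)

def get_cipher(data):
    legend = {0: 'Use group cipher suite', 1: 'WEP-40', 2: 'TKIP', 4: 'CCMP', 5: 'WEP-104'}
    key = data[3]
    if ieee80211_oui == bytes(data[:3]):
        legend.update({6: 'AES-128-CMAC', 8: 'GCMP'})
    elif ms_oui != bytes(data[:3]):
        key = None
    return legend.get(key, '{0:02x}-{1:02x}-{2:02x}:{3}'.format(data[0], data[1], data[2], data[3]))

print(get_cipher(bytearray(b"\x00\x0f\xac\x04")))  # CCMP
print(get_cipher(bytearray(b"\x00\x50\xf2\x02")))  # TKIP
print(get_cipher(bytearray(b"\x12\x34\x56\x07")))  # 12-34-56:7 (vendor-specific fallback)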
def expand_env_variables(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discusssion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line | Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discusssion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore). | Below is the the instruction that describes the task:
### Input:
Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discusssion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
### Response:
def expand_env_variables(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discusssion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line |
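A self-contained sketch with a compatible two-group regex (the pattern is an assumption that matches the documented ${MY_VARIABLE_1} form, not necessarily pip's exact ENV_VAR_RE):

import os
import re

ENV_VAR_RE = re.compile(r"(\$\{([A-Z0-9_]+)\})")  # group 1: full token, group 2: name

def expand_env_variables(lines_enum):
    for line_number, line in lines_enum:
        for env_var, var_name in ENV_VAR_RE.findall(line):
            value = os.getenv(var_name)
            if not value:
                continue
            line = line.replace(env_var, value)
        yield line_number, line

os.environ["PIP_INDEX_HOST"] = "pypi.example.org"
lines = enumerate(["--index-url https://${PIP_INDEX_HOST}/simple", "requests==2.20.0"], start=1)
for num, line in expand_env_variables(lines):
    print(num, line)
# 1 --index-url https://pypi.example.org/simple
# 2 requests==2.20.0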
def get(cls, whitelist_id, whitelist_result_id,
note_text_whitelist_result_id, monetary_account_id=None,
custom_headers=None):
"""
:type api_context: context.ApiContext
:type user_id: int
:type monetary_account_id: int
:type whitelist_id: int
:type whitelist_result_id: int
:type note_text_whitelist_result_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNoteTextWhitelistResult
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_READ.format(cls._determine_user_id(),
cls._determine_monetary_account_id(
monetary_account_id),
whitelist_id,
whitelist_result_id,
note_text_whitelist_result_id)
response_raw = api_client.get(endpoint_url, {}, custom_headers)
return BunqResponseNoteTextWhitelistResult.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_GET)
) | :type api_context: context.ApiContext
:type user_id: int
:type monetary_account_id: int
:type whitelist_id: int
:type whitelist_result_id: int
:type note_text_whitelist_result_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNoteTextWhitelistResult | Below is the the instruction that describes the task:
### Input:
:type api_context: context.ApiContext
:type user_id: int
:type monetary_account_id: int
:type whitelist_id: int
:type whitelist_result_id: int
:type note_text_whitelist_result_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNoteTextWhitelistResult
### Response:
def get(cls, whitelist_id, whitelist_result_id,
note_text_whitelist_result_id, monetary_account_id=None,
custom_headers=None):
"""
:type api_context: context.ApiContext
:type user_id: int
:type monetary_account_id: int
:type whitelist_id: int
:type whitelist_result_id: int
:type note_text_whitelist_result_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNoteTextWhitelistResult
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_READ.format(cls._determine_user_id(),
cls._determine_monetary_account_id(
monetary_account_id),
whitelist_id,
whitelist_result_id,
note_text_whitelist_result_id)
response_raw = api_client.get(endpoint_url, {}, custom_headers)
return BunqResponseNoteTextWhitelistResult.cast_from_bunq_response(
cls._from_json(response_raw, cls._OBJECT_TYPE_GET)
) |
def parse_readme():
"""Parse contents of the README."""
# Get the long description from the relevant file
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.md')
with codecs.open(readme_path, encoding='utf-8') as handle:
long_description = handle.read()
return long_description | Parse contents of the README. | Below is the the instruction that describes the task:
### Input:
Parse contents of the README.
### Response:
def parse_readme():
"""Parse contents of the README."""
# Get the long description from the relevant file
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.md')
with codecs.open(readme_path, encoding='utf-8') as handle:
long_description = handle.read()
return long_description |
def cached_tree(self, subject_ids=None, visit_ids=None, fill=False):
"""
Access the repository tree and caches it for subsequent
accesses
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
fill : bool
Create empty sessions for any that are missing in the
subject_id x visit_id block. Typically only used if all
the inputs to the study are coming from different repositories
to the one that the derived products are stored in
Returns
-------
tree : arcana.repository.Tree
A hierarchical tree of subject, vist, session information and that
of the filesets and fields they contain
"""
if subject_ids is not None:
subject_ids = frozenset(subject_ids)
if visit_ids is not None:
visit_ids = frozenset(visit_ids)
try:
tree = self._cache[subject_ids][visit_ids]
except KeyError:
if fill:
fill_subjects = subject_ids
fill_visits = visit_ids
else:
fill_subjects = fill_visits = None
tree = self.tree(
subject_ids=subject_ids, visit_ids=visit_ids,
fill_visits=fill_visits, fill_subjects=fill_subjects)
# Save the tree within the cache under the given subject/
# visit ID filters and the IDs that were actually returned
self._cache[subject_ids][visit_ids] = self._cache[
frozenset(tree.subject_ids)][frozenset(tree.visit_ids)] = tree
return tree | Access the repository tree and caches it for subsequent
accesses
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
fill : bool
Create empty sessions for any that are missing in the
subject_id x visit_id block. Typically only used if all
the inputs to the study are coming from different repositories
to the one that the derived products are stored in
Returns
-------
tree : arcana.repository.Tree
A hierarchical tree of subject, vist, session information and that
of the filesets and fields they contain | Below is the the instruction that describes the task:
### Input:
Access the repository tree and caches it for subsequent
accesses
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
fill : bool
Create empty sessions for any that are missing in the
subject_id x visit_id block. Typically only used if all
the inputs to the study are coming from different repositories
to the one that the derived products are stored in
Returns
-------
tree : arcana.repository.Tree
A hierarchical tree of subject, vist, session information and that
of the filesets and fields they contain
### Response:
def cached_tree(self, subject_ids=None, visit_ids=None, fill=False):
"""
Access the repository tree and caches it for subsequent
accesses
Parameters
----------
subject_ids : list(str)
List of subject IDs with which to filter the tree with. If
None all are returned
visit_ids : list(str)
List of visit IDs with which to filter the tree with. If
None all are returned
fill : bool
Create empty sessions for any that are missing in the
subject_id x visit_id block. Typically only used if all
the inputs to the study are coming from different repositories
to the one that the derived products are stored in
Returns
-------
tree : arcana.repository.Tree
A hierarchical tree of subject, vist, session information and that
of the filesets and fields they contain
"""
if subject_ids is not None:
subject_ids = frozenset(subject_ids)
if visit_ids is not None:
visit_ids = frozenset(visit_ids)
try:
tree = self._cache[subject_ids][visit_ids]
except KeyError:
if fill:
fill_subjects = subject_ids
fill_visits = visit_ids
else:
fill_subjects = fill_visits = None
tree = self.tree(
subject_ids=subject_ids, visit_ids=visit_ids,
fill_visits=fill_visits, fill_subjects=fill_subjects)
# Save the tree within the cache under the given subject/
# visit ID filters and the IDs that were actually returned
self._cache[subject_ids][visit_ids] = self._cache[
frozenset(tree.subject_ids)][frozenset(tree.visit_ids)] = tree
return tree |
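The caching idiom on its own — frozenset keys in a two-level dictionary so that different orderings of the same ID filters hit the same entry — shown with a stand-in tree builder (builder and IDs are illustrative):

from collections import defaultdict

class TreeCache:
    def __init__(self):
        self._cache = defaultdict(dict)
        self.builds = 0

    def _build_tree(self, subject_ids, visit_ids):
        self.builds += 1  # pretend this is the expensive repository query
        return {"subjects": subject_ids, "visits": visit_ids}

    def cached_tree(self, subject_ids=None, visit_ids=None):
        if subject_ids is not None:
            subject_ids = frozenset(subject_ids)
        if visit_ids is not None:
            visit_ids = frozenset(visit_ids)
        try:
            return self._cache[subject_ids][visit_ids]
        except KeyError:
            tree = self._build_tree(subject_ids, visit_ids)
            self._cache[subject_ids][visit_ids] = tree
            return tree

cache = TreeCache()
cache.cached_tree(["S1", "S2"], ["V1"])
cache.cached_tree(["S2", "S1"], ["V1"])  # same frozensets -> cache hit
print(cache.builds)  # 1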
def latex2png(snippet, outfile):
"""Compiles a LaTeX snippet to png"""
pngimage = os.path.join(IMAGEDIR, outfile + '.png')
texdocument = os.path.join(IMAGEDIR, 'tmp.tex')
with open(texdocument, 'w') as doc:
doc.write(LATEX_DOC % (snippet))
environment = os.environ
environment['shell_escape_commands'] = \
"bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf," + \
','.join(
os.path.basename(n) for n in chain.from_iterable(
iglob(os.path.join(chemin, 'gregorio*'))
for chemin in os.environ["PATH"].split(os.pathsep)
)
)
proc = Popen(
["lualatex", '-output-directory=' + IMAGEDIR, texdocument],
stdin=PIPE,
stdout=STDERR,
env=environment
)
proc.communicate()
proc.stdin.close()
call(["pdfcrop", os.path.join(IMAGEDIR, "tmp.pdf")], stdout=STDERR)
call(
[
"gs",
"-sDEVICE=pngalpha",
"-r144",
"-sOutputFile=" + pngimage,
os.path.join(IMAGEDIR, "tmp-crop.pdf"),
],
stdout=STDERR,
) | Compiles a LaTeX snippet to png | Below is the the instruction that describes the task:
### Input:
Compiles a LaTeX snippet to png
### Response:
def latex2png(snippet, outfile):
"""Compiles a LaTeX snippet to png"""
pngimage = os.path.join(IMAGEDIR, outfile + '.png')
texdocument = os.path.join(IMAGEDIR, 'tmp.tex')
with open(texdocument, 'w') as doc:
doc.write(LATEX_DOC % (snippet))
environment = os.environ
environment['shell_escape_commands'] = \
"bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf," + \
','.join(
os.path.basename(n) for n in chain.from_iterable(
iglob(os.path.join(chemin, 'gregorio*'))
for chemin in os.environ["PATH"].split(os.pathsep)
)
)
proc = Popen(
["lualatex", '-output-directory=' + IMAGEDIR, texdocument],
stdin=PIPE,
stdout=STDERR,
env=environment
)
proc.communicate()
proc.stdin.close()
call(["pdfcrop", os.path.join(IMAGEDIR, "tmp.pdf")], stdout=STDERR)
call(
[
"gs",
"-sDEVICE=pngalpha",
"-r144",
"-sOutputFile=" + pngimage,
os.path.join(IMAGEDIR, "tmp-crop.pdf"),
],
stdout=STDERR,
) |