def parse_csr():
"""
Parse certificate signing request for domains
"""
LOGGER.info("Parsing CSR...")
cmd = [
'openssl', 'req',
'-in', os.path.join(gettempdir(), 'domain.csr'),
'-noout',
'-text'
]
devnull = open(os.devnull, 'wb')
out = subprocess.check_output(cmd, stderr=devnull)
domains = set([])
common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
    return domains
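# A self-contained sketch of the same regex extraction, run against a
# hard-coded sample of `openssl req -noout -text` output (the sample below is
# illustrative, not taken from the original project).
import re

sample = (
    "Certificate Request:\n"
    "    Data:\n"
    "        Subject: C = US, CN = example.com\n"
    "        Requested Extensions:\n"
    "            X509v3 Subject Alternative Name: \n"   # note the trailing space
    "                DNS:example.com, DNS:www.example.com\n"
)
domains = set()
cn = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", sample)
if cn is not None:
    domains.add(cn.group(1))
sans = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", sample,
                 re.MULTILINE | re.DOTALL)
if sans is not None:
    for san in sans.group(1).split(", "):
        if san.startswith("DNS:"):
            domains.add(san[4:])
print(domains)  # {'example.com', 'www.example.com'}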
def appendRandomLenPadding(data, blocksize=AES_blocksize):
    'ISO 10126 Padding (withdrawn, 2007): Pad with random bytes + last byte equal to the number of padding bytes'
    pad_len = paddingLength(len(data), blocksize) - 1
    from os import urandom
    # On Python 3 the input and the padding must both be bytes; chr() would
    # produce str and make the concatenation fail.
    padding = urandom(pad_len) + bytes([pad_len])
    return data + padding
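# A hedged, standalone round-trip of the padding scheme above with a 16-byte
# block size. `paddingLength` is a local stand-in for the module helper the
# function assumes; the last padding byte stores the count of random bytes,
# mirroring the convention used above.
from os import urandom

def paddingLength(data_len, blocksize=16):
    # Bytes needed to reach the next block boundary (a full block when aligned).
    return blocksize - (data_len % blocksize)

def pad(data, blocksize=16):
    pad_len = paddingLength(len(data), blocksize) - 1
    return data + urandom(pad_len) + bytes([pad_len])

def unpad(padded):
    return padded[:-(padded[-1] + 1)]

msg = b'attack at dawn'
assert unpad(pad(msg)) == msg
assert len(pad(msg)) % 16 == 0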
def render(self, *args, **kwargs):
'''
fun(uid_with_str)
fun(uid_with_str, slug = val1, glyph = val2)
'''
uid_with_str = args[0]
slug = kwargs.get('slug', False)
with_title = kwargs.get('with_title', False)
glyph = kwargs.get('glyph', '')
kwd = {
'glyph': glyph
}
curinfo = MCategory.get_by_uid(uid_with_str)
sub_cats = MCategory.query_sub_cat(uid_with_str)
if slug:
tmpl = 'modules/info/catalog_slug.html'
else:
tmpl = 'modules/info/catalog_of.html'
return self.render_string(tmpl,
pcatinfo=curinfo,
sub_cats=sub_cats,
recs=sub_cats,
with_title=with_title,
                              kwd=kwd)
def _win32_is_hardlinked(fpath1, fpath2):
"""
Test if two hard links point to the same location
CommandLine:
python -m ubelt._win32_links _win32_is_hardlinked
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> root = ub.ensure_app_cache_dir('ubelt', 'win32_hardlink')
>>> ub.delete(root)
>>> ub.ensuredir(root)
>>> fpath1 = join(root, 'fpath1')
>>> fpath2 = join(root, 'fpath2')
>>> ub.touch(fpath1)
>>> ub.touch(fpath2)
>>> fjunc1 = _win32_junction(fpath1, join(root, 'fjunc1'))
>>> fjunc2 = _win32_junction(fpath2, join(root, 'fjunc2'))
>>> assert _win32_is_hardlinked(fjunc1, fpath1)
>>> assert _win32_is_hardlinked(fjunc2, fpath2)
>>> assert not _win32_is_hardlinked(fjunc2, fpath1)
>>> assert not _win32_is_hardlinked(fjunc1, fpath2)
"""
# NOTE: jwf.samefile(fpath1, fpath2) seems to behave differently
def get_read_handle(fpath):
if os.path.isdir(fpath):
dwFlagsAndAttributes = jwfs.api.FILE_FLAG_BACKUP_SEMANTICS
else:
dwFlagsAndAttributes = 0
hFile = jwfs.api.CreateFile(fpath, jwfs.api.GENERIC_READ,
jwfs.api.FILE_SHARE_READ, None,
jwfs.api.OPEN_EXISTING,
dwFlagsAndAttributes, None)
return hFile
def get_unique_id(hFile):
info = jwfs.api.BY_HANDLE_FILE_INFORMATION()
res = jwfs.api.GetFileInformationByHandle(hFile, info)
jwfs.handle_nonzero_success(res)
unique_id = (info.volume_serial_number, info.file_index_high,
info.file_index_low)
return unique_id
hFile1 = get_read_handle(fpath1)
hFile2 = get_read_handle(fpath2)
try:
are_equal = (get_unique_id(hFile1) == get_unique_id(hFile2))
except Exception:
raise
finally:
jwfs.api.CloseHandle(hFile1)
jwfs.api.CloseHandle(hFile2)
    return are_equal
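# For comparison, a minimal sketch of the same "do these paths reference one
# file?" question on POSIX-style filesystems, answered from (st_dev, st_ino);
# this is not part of the Windows implementation above.
import os
import tempfile

def is_hardlinked_posix(fpath1, fpath2):
    s1, s2 = os.stat(fpath1), os.stat(fpath2)
    return (s1.st_dev, s1.st_ino) == (s2.st_dev, s2.st_ino)

with tempfile.TemporaryDirectory() as root:
    a = os.path.join(root, 'a')
    b = os.path.join(root, 'b')
    open(a, 'w').close()
    os.link(a, b)                      # create a hard link to the same inode
    assert is_hardlinked_posix(a, b)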
def cover(self):
"""
album cover as :class:`Picture` object
"""
if not self._cover:
self._cover = Picture(self._cover_url, self._connection)
    return self._cover
def build(self, recipe=None,
image=None,
isolated=False,
sandbox=False,
writable=False,
build_folder=None,
robot_name=False,
ext='simg',
sudo=True,
stream=False):
'''build a singularity image, optionally for an isolated build
(requires sudo). If you specify to stream, expect the image name
and an iterator to be returned.
image, builder = Client.build(...)
Parameters
==========
recipe: the path to the recipe file (or source to build from). If not
defined, we look for "Singularity" file in $PWD
    image: the image to build (if None, an arbitrary name will be used)
isolated: if True, run build with --isolated flag
sandbox: if True, create a writable sandbox
writable: if True, use writable ext3 (sandbox takes preference)
build_folder: where the container should be built.
ext: the image extension to use.
robot_name: boolean, default False. if you don't give your image a
name (with "image") then a fun robot name will be generated
instead. Highly recommended :)
sudo: give sudo to the command (or not) default is True for build
'''
from spython.utils import check_install
check_install()
cmd = self._init_command('build')
if 'version 3' in self.version():
ext = 'sif'
# No image provided, default to use the client's loaded image
if recipe is None:
recipe = self._get_uri()
# If it's still None, try default build recipe
if recipe is None:
recipe = 'Singularity'
if not os.path.exists(recipe):
        bot.exit('Cannot find %s, exiting.' % recipe)
if image is None:
if re.search('(docker|shub)://', recipe) and robot_name is False:
image = self._get_filename(recipe, ext)
else:
image = "%s.%s" %(self.RobotNamer.generate(), ext)
# Does the user want a custom build folder?
if build_folder is not None:
if not os.path.exists(build_folder):
bot.exit('%s does not exist!' % build_folder)
image = os.path.join(build_folder, image)
# The user wants to run an isolated build
if isolated is True:
cmd.append('--isolated')
if sandbox is True:
cmd.append('--sandbox')
    elif writable is True:
cmd.append('--writable')
cmd = cmd + [image, recipe]
if stream is False:
output = self._run_command(cmd, sudo=sudo, capture=False)
else:
# Here we return the expected image, and an iterator!
# The caller must iterate over
return image, stream_command(cmd, sudo=sudo)
if os.path.exists(image):
        return image
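# Hedged usage sketch for the method above. It assumes the spython package and
# a local Singularity/Apptainer install, so it is left commented out; the
# recipe and image names are illustrative only.
# from spython.main import Client
# image = Client.build(recipe='Singularity', image='container.sif')
# image, builder = Client.build(recipe='Singularity', stream=True)
# for line in builder:
#     print(line)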
def _expectation(p, mean1, none1, mean2, none2, nghp=None):
"""
Compute the expectation:
expectation[n] = <m1(x_n)^T m2(x_n)>_p(x_n)
- m1(.) :: Linear mean function
- m2(.) :: Identity mean function
:return: NxQxD
"""
with params_as_tensors_for(mean1):
N = tf.shape(p.mu)[0]
e_xxt = p.cov + (p.mu[:, :, None] * p.mu[:, None, :]) # NxDxD
e_A_xxt = tf.matmul(tf.tile(mean1.A[None, ...], (N, 1, 1)), e_xxt, transpose_a=True) # NxQxD
e_b_xt = mean1.b[None, :, None] * p.mu[:, None, :] # NxQxD
        return e_A_xxt + e_b_xt
def _on_send_complete(self, handle, error):
"""Callback used with handle.send()."""
assert handle is self._handle
self._write_buffer_size -= 1
assert self._write_buffer_size >= 0
if self._error:
self._log.debug('ignore sendto status {} after error', error)
# See note in _on_write_complete() about UV_ECANCELED
elif error and error != pyuv.errno.UV_ECANCELED:
self._log.warning('pyuv error {} in sendto callback', error)
self._protocol.error_received(TransportError.from_errno(error))
self._maybe_resume_protocol()
    self._maybe_close()
def set_access_cookies(response, encoded_access_token, max_age=None):
"""
Takes a flask response object, and an encoded access token, and configures
    the response to set the access token in a cookie. If `JWT_CSRF_IN_COOKIES`
is `True` (see :ref:`Configuration Options`), this will also set the CSRF
double submit values in a separate cookie.
:param response: The Flask response object to set the access cookies in.
:param encoded_access_token: The encoded access token to set in the cookies.
:param max_age: The max age of the cookie. If this is None, it will use the
`JWT_SESSION_COOKIE` option (see :ref:`Configuration Options`).
Otherwise, it will use this as the cookies `max-age` and the
JWT_SESSION_COOKIE option will be ignored. Values should be
the number of seconds (as an integer).
"""
if not config.jwt_in_cookies:
raise RuntimeWarning("set_access_cookies() called without "
"'JWT_TOKEN_LOCATION' configured to use cookies")
# Set the access JWT in the cookie
response.set_cookie(config.access_cookie_name,
value=encoded_access_token,
max_age=max_age or config.cookie_max_age,
secure=config.cookie_secure,
httponly=True,
domain=config.cookie_domain,
path=config.access_cookie_path,
samesite=config.cookie_samesite)
# If enabled, set the csrf double submit access cookie
if config.csrf_protect and config.csrf_in_cookies:
response.set_cookie(config.access_csrf_cookie_name,
value=get_csrf_token(encoded_access_token),
max_age=max_age or config.cookie_max_age,
secure=config.cookie_secure,
httponly=False,
domain=config.cookie_domain,
path=config.access_csrf_cookie_path,
                            samesite=config.cookie_samesite)
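# Hedged usage sketch for the helper above: a minimal Flask login view that
# issues a JWT and stores it in a cookie. Assumes Flask and flask_jwt_extended
# are installed and that cookies are enabled as a token location; the secret
# key and identity are illustrative.
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager, create_access_token, set_access_cookies

app = Flask(__name__)
app.config["JWT_TOKEN_LOCATION"] = ["cookies"]
app.config["JWT_SECRET_KEY"] = "change-me"  # illustrative only
jwt = JWTManager(app)

@app.route("/login", methods=["POST"])
def login():
    response = jsonify(login=True)
    token = create_access_token(identity="example-user")
    set_access_cookies(response, token)
    return response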
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'credential_id') and self.credential_id is not None:
_dict['credential_id'] = self.credential_id
if hasattr(self, 'schedule') and self.schedule is not None:
_dict['schedule'] = self.schedule._to_dict()
if hasattr(self, 'options') and self.options is not None:
_dict['options'] = self.options._to_dict()
    return _dict
def process(self, request, response, environ):
"""
Generates a new authorization token.
A form to authorize the access of the application can be displayed with
the help of `oauth2.web.SiteAdapter`.
"""
data = self.authorize(request, response, environ,
self.scope_handler.scopes)
if isinstance(data, Response):
return data
code = self.token_generator.generate()
expires = int(time.time()) + self.token_expiration
auth_code = AuthorizationCode(client_id=self.client.identifier,
code=code, expires_at=expires,
redirect_uri=self.client.redirect_uri,
scopes=self.scope_handler.scopes,
data=data[0], user_id=data[1])
self.auth_code_store.save_code(auth_code)
response.add_header("Location", self._generate_location(code))
response.body = ""
response.status_code = 302
    return response
def clone(self):
""" This method clones AttributeMap object.
Returns AttributeMap object that has the same values with the
original one.
"""
cloned_filters = [f.clone() for f in self.filters]
    return self.__class__(cloned_filters, self.attr_type, self.attr_value)
def consul_fetch(client, path):
'''
Query consul for all keys/values within base path
'''
# Unless the root path is blank, it needs a trailing slash for
# the kv get from Consul to work as expected
    return client.kv.get('' if not path else path.rstrip('/') + '/', recurse=True)
def can_user_update_settings(request, view, obj=None):
""" Only staff can update shared settings, otherwise user has to be an owner of the settings."""
if obj is None:
return
# TODO [TM:3/21/17] clean it up after WAL-634. Clean up service settings update tests as well.
if obj.customer and not obj.shared:
return permissions.is_owner(request, view, obj)
else:
        return permissions.is_staff(request, view, obj)
def attach(self, to_linode, config=None):
"""
Attaches this Volume to the given Linode
"""
result = self._client.post('{}/attach'.format(Volume.api_endpoint), model=self,
data={
"linode_id": to_linode.id if issubclass(type(to_linode), Base) else to_linode,
"config": None if not config else config.id if issubclass(type(config), Base) else config,
})
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when attaching volume!', json=result)
self._populate(result)
    return True
def raise_check_result(self):
"""Raise ACTIVE CHECK RESULT entry
Example : "ACTIVE HOST CHECK: server;DOWN;HARD;1;I don't know what to say..."
:return: None
"""
if not self.__class__.log_active_checks:
return
log_level = 'info'
if self.state == 'DOWN':
log_level = 'error'
elif self.state == 'UNREACHABLE':
log_level = 'warning'
brok = make_monitoring_log(
log_level, 'ACTIVE HOST CHECK: %s;%s;%d;%s' % (self.get_name(), self.state,
self.attempt, self.output)
)
    self.broks.append(brok)
def get_mask_from_prob(self, cloud_probs, threshold=None):
"""
Returns cloud mask by applying morphological operations -- convolution and dilation --
to input cloud probabilities.
:param cloud_probs: cloud probability map
:type cloud_probs: numpy array of cloud probabilities (shape n_images x n x m)
:param threshold: A float from [0,1] specifying threshold
:type threshold: float
:return: raster cloud mask
:rtype: numpy array (shape n_images x n x m)
"""
threshold = self.threshold if threshold is None else threshold
if self.average_over:
cloud_masks = np.asarray([convolve(cloud_prob, self.conv_filter) > threshold
for cloud_prob in cloud_probs], dtype=np.int8)
else:
cloud_masks = (cloud_probs > threshold).astype(np.int8)
if self.dilation_size:
cloud_masks = np.asarray([dilation(cloud_mask, self.dilation_filter) for cloud_mask in cloud_masks],
dtype=np.int8)
    return cloud_masks
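# A standalone illustration of the thresholding step above (without the
# optional convolution and dilation): probabilities above the threshold
# become 1 in the mask. Requires only numpy; the values are made up.
import numpy as np

cloud_probs = np.array([[[0.1, 0.7],
                         [0.4, 0.9]]])          # shape: 1 image x 2 x 2
threshold = 0.5
cloud_masks = (cloud_probs > threshold).astype(np.int8)
print(cloud_masks)                              # [[[0 1], [0 1]]]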
def cancel_download_task(self, task_id, expires=None, **kwargs):
"""取消离线下载任务.
:param task_id: 要取消的任务ID号。
:type task_id: str
:param expires: 请求失效时间,如果有,则会校验。
:type expires: int
:return: requests.Response
"""
data = {
'expires': expires,
'task_id': task_id,
}
url = 'http://{0}/rest/2.0/services/cloud_dl'.format(BAIDUPAN_SERVER)
return self._request('services/cloud_dl', 'cancel_task', url=url,
                         data=data, **kwargs)
def place2thing(self, name, location):
"""Turn a Place into a Thing with the given location.
It will keep all its attached Portals.
"""
self.engine._set_thing_loc(
self.name, name, location
)
if (self.name, name) in self.engine._node_objs:
obj = self.engine._node_objs[self.name, name]
thing = Thing(self, name)
for port in obj.portals():
port.origin = thing
for port in obj.preportals():
port.destination = thing
        self.engine._node_objs[self.name, name] = thing
def _show_menu(self):
"""Show the overlay menu."""
# If the current widget in the TabbedWindowWidget has a menu,
# overlay it on the TabbedWindowWidget.
current_widget = self._tabbed_window.get_current_widget()
if hasattr(current_widget, 'get_menu_widget'):
menu_widget = current_widget.get_menu_widget(self._hide_menu)
overlay = urwid.Overlay(menu_widget, self._tabbed_window,
align='center', width=('relative', 80),
valign='middle', height=('relative', 80))
        self._urwid_loop.widget = overlay
def nvim_io_recover(self, io: NvimIORecover[A]) -> NvimIO[B]:
'''calls `map` to shift the recover execution to flat_map_nvim_io
'''
    return eval_step(self.vim)(io.map(lambda a: a))
def _get_adc_value(self, channel, average=None):
'''Read ADC
'''
conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1))
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf)))
def read_data():
ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2)
ret.reverse()
ret[1] = ret[1] & 0x0f # 12-bit ADC
return unpack_from('H', ret)[0]
if average:
raw = 0
for _ in range(average):
raw += read_data()
raw /= average
else:
raw = read_data()
    return raw
def furtherArgsProcessing(args):
"""
Converts args, and deals with incongruities that argparse couldn't handle
"""
if isinstance(args, str):
unprocessed = args.strip().split(' ')
if unprocessed[0] == 'cyther':
del unprocessed[0]
args = parser.parse_args(unprocessed).__dict__
elif isinstance(args, argparse.Namespace):
args = args.__dict__
elif isinstance(args, dict):
pass
else:
raise CytherError(
"Args must be a instance of str or argparse.Namespace, not '{}'".format(
str(type(args))))
if args['watch']:
args['timestamp'] = True
args['watch_stats'] = {'counter': 0, 'errors': 0, 'compiles': 0,
'polls': 0}
args['print_args'] = True
    return args
def _mutect_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Mutect-specific variant calling fields
Returns
-------
VariantStats
"""
# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]
ref_depth, alt_depth = sample_info["AD"]
depth = int(ref_depth) + int(alt_depth)
vaf = float(alt_depth) / depth
    return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
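# Standalone sketch of the AD -> depth/VAF arithmetic used above, with an
# illustrative sample_info dictionary (not real variant data).
sample_info = {"AD": [18, 6]}            # [REF_DEPTH, ALT_DEPTH]
ref_depth, alt_depth = sample_info["AD"]
depth = int(ref_depth) + int(alt_depth)  # 24
vaf = float(alt_depth) / depth           # 0.25
print(depth, vaf)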
def FromFile(cls, path, actions_dict, resources_dict, file_format="yaml", name=None):
"""Create a RecipeObject from a file.
The file should be a specially constructed yaml file that describes
the recipe as well as the actions that it performs.
Args:
path (str): The path to the recipe file that we wish to load
actions_dict (dict): A dictionary of named RecipeActionObject
types that is used to look up all of the steps listed in
the recipe file.
resources_dict (dict): A dictionary of named RecipeResource types
that is used to look up all of the shared resources listed in
the recipe file.
file_format (str): The file format of the recipe file. Currently
we only support yaml.
name (str): The name of this recipe if we created it originally from an
archive.
"""
format_map = {
"yaml": cls._process_yaml
}
format_handler = format_map.get(file_format)
if format_handler is None:
raise ArgumentError("Unknown file format or file extension", file_format=file_format, \
known_formats=[x for x in format_map if format_map[x] is not None])
recipe_info = format_handler(path)
if name is None:
name, _ext = os.path.splitext(os.path.basename(path))
# Validate that the recipe file is correctly formatted
try:
recipe_info = RecipeSchema.verify(recipe_info)
except ValidationError as exc:
raise RecipeFileInvalid("Recipe file does not match expected schema", file=path, error_message=exc.msg, **exc.params)
description = recipe_info.get('description')
# Parse out global default and shared resource information
try:
resources = cls._parse_resource_declarations(recipe_info.get('resources', []), resources_dict)
defaults = cls._parse_variable_defaults(recipe_info.get("defaults", []))
steps = []
for i, action in enumerate(recipe_info.get('actions', [])):
            action_name = action.pop('name', None)  # default needed so the missing-name check below can fire
if action_name is None:
raise RecipeFileInvalid("Action is missing required name parameter", \
parameters=action, path=path)
action_class = actions_dict.get(action_name)
if action_class is None:
raise UnknownRecipeActionType("Unknown step specified in recipe", \
action=action_name, step=i + 1, path=path)
# Parse out any resource usage in this step and make sure we only
# use named resources
step_resources = cls._parse_resource_usage(action, declarations=resources)
fixed_files, _variable_files = cls._parse_file_usage(action_class, action)
step = RecipeStep(action_class, action, step_resources, fixed_files)
steps.append(step)
return RecipeObject(name, description, steps, resources, defaults, path)
except RecipeFileInvalid as exc:
cls._future_raise(RecipeFileInvalid, RecipeFileInvalid(exc.msg, recipe=name, **exc.params),
                              sys.exc_info()[2])
def link_page_filter(self, page, modelview_name):
"""
Arguments are passed like: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
new_args = request.view_args.copy()
args = request.args.copy()
args["page_" + modelview_name] = page
return url_for(
request.endpoint,
**dict(list(new_args.items()) + list(args.to_dict().items()))
    )
def format_struct(struct_def):
'''Returns a cython struct from a :attr:`StructSpec` instance.
'''
text = []
text.append('cdef struct {}:'.format(struct_def.tp_name))
text.extend(
['{}{}'.format(tab, format_variable(var))
for var in struct_def.members]
)
for name in struct_def.names:
text.append('ctypedef {} {}'.format(struct_def.tp_name, name))
    return text
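# A standalone sketch of the text this helper emits, with local stand-ins for
# StructSpec and format_variable (both names and members are illustrative).
from collections import namedtuple

StructSpec = namedtuple('StructSpec', ['tp_name', 'names', 'members'])

def format_variable(var):
    return var  # stand-in: members are already formatted as "ctype name"

tab = '    '
spec = StructSpec(tp_name='_point', names=['Point'], members=['int x', 'int y'])
text = ['cdef struct {}:'.format(spec.tp_name)]
text.extend(['{}{}'.format(tab, format_variable(var)) for var in spec.members])
for name in spec.names:
    text.append('ctypedef {} {}'.format(spec.tp_name, name))
print('\n'.join(text))
# cdef struct _point:
#     int x
#     int y
# ctypedef _point Point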
def read_memory(self):
"""
        This function reads the mean value of the target `d`
        and the input vector `x` from history.
"""
if self.mem_empty == True:
if self.mem_idx == 0:
m_x = np.zeros(self.n)
m_d = 0
else:
m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)
m_d = np.mean(self.mem_d[:self.mem_idx])
else:
m_x = np.mean(self.mem_x, axis=0)
m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
self.mem_idx += 1
if self.mem_idx > len(self.mem_x)-1:
self.mem_idx = 0
self.mem_empty = False
    return m_d, m_x
def method(self, quote_id, payment_data, store_view=None):
"""
Allows you to set a payment method for a shopping cart (quote).
:param quote_id: Shopping cart ID (quote ID)
:param payment_data, dict of payment details, example
{
'po_number': '',
'method': 'checkmo',
'cc_cid': '',
'cc_owner': '',
'cc_number': '',
'cc_type': '',
'cc_exp_year': '',
'cc_exp_month': ''
}
:param store_view: Store view ID or code
:return: boolean, True on success
"""
return bool(
self.call('cart_payment.method',
[quote_id, payment_data, store_view])
    )
def make_library(**kwargs):
"""Build and return a ModelManager object and fill the associated model library
"""
library_yaml = kwargs.pop('library', 'models/library.yaml')
comp_yaml = kwargs.pop('comp', 'config/binning.yaml')
basedir = kwargs.pop('basedir', os.path.abspath('.'))
model_man = kwargs.get('ModelManager', ModelManager(basedir=basedir))
model_comp_dict = model_man.make_library(library_yaml, library_yaml, comp_yaml)
return dict(model_comp_dict=model_comp_dict,
                ModelManager=model_man)
def publish_proto_in_ipfs(self):
""" Publish proto files in ipfs and print hash """
ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir)
    self._printout(ipfs_hash_base58)
def parse_delete_zone(prs, conn):
"""Delete zone.
Arguments:
prs: parser object of argparse
conn: dictionary of connection information
"""
prs_zone_delete = prs.add_parser('zone_delete', help='delete zone')
prs_zone_delete.add_argument('--domain', action='store', required=True,
help='specify zone')
conn_options(prs_zone_delete, conn)
    prs_zone_delete.set_defaults(func=delete_zone)
def AddMapping(self, filename, new_mapping):
"""Adds an entry to the list of known filenames.
Args:
filename: The filename whose mapping is being added.
new_mapping: A dictionary with the mapping to add. Must contain all
fields in _REQUIRED_MAPPING_FIELDS.
Raises:
DuplicateMapping if the filename already exists in the mapping
InvalidMapping if not all required fields are present
"""
for field in self._REQUIRED_MAPPING_FIELDS:
if field not in new_mapping:
raise problems.InvalidMapping(field)
if filename in self.GetKnownFilenames():
raise problems.DuplicateMapping(filename)
    self._file_mapping[filename] = new_mapping
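# A standalone sketch of the same validation pattern (required fields plus
# duplicate detection), using plain exceptions instead of the `problems`
# module; the field names are illustrative.
class MappingRegistry:
    _REQUIRED_MAPPING_FIELDS = ('loading_order', 'classes')

    def __init__(self):
        self._file_mapping = {}

    def AddMapping(self, filename, new_mapping):
        for field in self._REQUIRED_MAPPING_FIELDS:
            if field not in new_mapping:
                raise ValueError('missing required field: %s' % field)
        if filename in self._file_mapping:
            raise ValueError('duplicate mapping for %s' % filename)
        self._file_mapping[filename] = new_mapping

reg = MappingRegistry()
reg.AddMapping('stops.txt', {'loading_order': 1, 'classes': ['Stop']})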
def replace_namespaced_pod(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_pod # noqa: E501
replace the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_pod(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Pod body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.replace_namespaced_pod_with_http_info(name, namespace, body, **kwargs) # noqa: E501
        return data
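# Hedged usage sketch for the generated client method above. It assumes the
# official `kubernetes` package, a reachable cluster, and a valid kubeconfig,
# so it is left commented out; names are illustrative.
# from kubernetes import client, config
# config.load_kube_config()
# v1 = client.CoreV1Api()
# pod = v1.read_namespaced_pod(name="my-pod", namespace="default")
# pod.metadata.labels = {"tier": "backend"}
# updated = v1.replace_namespaced_pod(name="my-pod", namespace="default", body=pod)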
def spin(self, use_thread=False):
'''call callback for all data forever (until \C-c)
:param use_thread: use thread for spin (do not block)
'''
if use_thread:
if self._thread is not None:
            raise RuntimeError('spin called twice')  # raising a bare string is invalid in Python 3
self._thread = threading.Thread(target=self._spin_internal)
self._thread.setDaemon(True)
self._thread.start()
else:
        self._spin_internal()
def send(self):
"""
Send the message.
First, a message is constructed, then a session with the email
servers is created, finally the message is sent and the session
is stopped.
"""
self._generate_email()
if self.verbose:
print(
"Debugging info"
"\n--------------"
"\n{} Message created.".format(timestamp())
)
recipients = []
for i in (self.to, self.cc, self.bcc):
if i:
if isinstance(i, MutableSequence):
recipients += i
else:
recipients.append(i)
session = self._get_session()
if self.verbose:
print(timestamp(), "Login successful.")
session.sendmail(self.from_, recipients, self.message.as_string())
session.quit()
if self.verbose:
print(timestamp(), "Logged out.")
if self.verbose:
print(
timestamp(),
type(self).__name__ + " info:",
self.__str__(indentation="\n * "),
)
print("Message sent.") | Send the message.
First, a message is constructed, then a session with the email
servers is created, finally the message is sent and the session
is stopped. |
def pretty_print(self):
"""
Pretty print representation of this fragment,
as ``(identifier, begin, end, text)``.
:rtype: string
.. versionadded:: 1.7.0
"""
return u"%s\t%.3f\t%.3f\t%s" % (
(self.identifier or u""),
(self.begin if self.begin is not None else TimeValue("-2.000")),
(self.end if self.end is not None else TimeValue("-1.000")),
(self.text or u"")
    )
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'content') and self.content is not None:
_dict['content'] = self.content
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = self.created
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = self.updated
if hasattr(self, 'contenttype') and self.contenttype is not None:
_dict['contenttype'] = self.contenttype
if hasattr(self, 'language') and self.language is not None:
_dict['language'] = self.language
if hasattr(self, 'parentid') and self.parentid is not None:
_dict['parentid'] = self.parentid
if hasattr(self, 'reply') and self.reply is not None:
_dict['reply'] = self.reply
if hasattr(self, 'forward') and self.forward is not None:
_dict['forward'] = self.forward
    return _dict
def depth(self):
"""
Returns the number of ancestors of this directory.
"""
    return len(self.path.rstrip(os.sep).split(os.sep))
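# Quick standalone check of the same path-depth arithmetic.
import os

path = os.sep + os.path.join('usr', 'local', 'bin') + os.sep
print(len(path.rstrip(os.sep).split(os.sep)))  # 4: '', 'usr', 'local', 'bin'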
def update(self, name, color):
"""Update this label.
:param str name: (required), new name of the label
:param str color: (required), color code, e.g., 626262, no leading '#'
:returns: bool
"""
json = None
if name and color:
if color[0] == '#':
color = color[1:]
json = self._json(self._patch(self._api, data=dumps({
'name': name, 'color': color})), 200)
if json:
self._update_(json)
return True
    return False
def rename(self, container, name):
"""
Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
    self._raise_for_status(res)
def min_base_size_mask(self, size, hs_dims=None, prune=False):
"""Returns MinBaseSizeMask object with correct row, col and table masks.
The returned object stores the necessary information about the base size, as
    well as about the base values. It can create corresponding masks in the row,
column, and table directions, based on the corresponding base values
(the values of the unweighted margins).
Usage:
>>> cube_slice = CrunchCube(response).slices[0] # obtain a valid cube slice
>>> cube_slice.min_base_size_mask(30).row_mask
>>> cube_slice.min_base_size_mask(50).column_mask
>>> cube_slice.min_base_size_mask(22).table_mask
"""
    return MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune)
def check_bitdepth_rescale(
palette, bitdepth, transparent, alpha, greyscale):
"""
Returns (bitdepth, rescale) pair.
"""
if palette:
if len(bitdepth) != 1:
raise ProtocolError(
"with palette, only a single bitdepth may be used")
(bitdepth, ) = bitdepth
if bitdepth not in (1, 2, 4, 8):
raise ProtocolError(
"with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ProtocolError("transparent and palette not compatible")
if alpha:
raise ProtocolError("alpha and palette not compatible")
if greyscale:
raise ProtocolError("greyscale and palette not compatible")
return bitdepth, None
# No palette, check for sBIT chunk generation.
if greyscale and not alpha:
# Single channel, L.
(bitdepth,) = bitdepth
if bitdepth in (1, 2, 4, 8, 16):
return bitdepth, None
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5, 6, 7)
targetbitdepth = 8
return targetbitdepth, [(bitdepth, targetbitdepth)]
assert alpha or not greyscale
depth_set = tuple(set(bitdepth))
if depth_set in [(8,), (16,)]:
# No sBIT required.
(bitdepth, ) = depth_set
return bitdepth, None
targetbitdepth = (8, 16)[max(bitdepth) > 8]
    return targetbitdepth, [(b, targetbitdepth) for b in bitdepth]
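# Illustrative calls, assuming the function above is importable: a 5-bit
# greyscale image is rescaled up to 8 bits with an sBIT hint, while 8-bit RGB
# needs no rescaling.
print(check_bitdepth_rescale(None, (5,), None, False, True))        # (8, [(5, 8)])
print(check_bitdepth_rescale(None, (8, 8, 8), None, False, False))  # (8, None)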
def frompsl(args):
"""
%prog frompsl old.new.psl old.fasta new.fasta
    Generate chain file from psl file. The pipeline is described in:
<http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver>
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(frompsl.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pslfile, oldfasta, newfasta = args
pf = oldfasta.split(".")[0]
# Chain together alignments from using axtChain
chainfile = pf + ".chain"
twobitfiles = []
for fastafile in (oldfasta, newfasta):
tbfile = faToTwoBit(fastafile)
twobitfiles.append(tbfile)
oldtwobit, newtwobit = twobitfiles
if need_update(pslfile, chainfile):
cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile)
cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile)
sh(cmd)
# Sort chain files
sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain"
if need_update(chainfile, sortedchain):
cmd = "chainSort {0} {1}".format(chainfile, sortedchain)
sh(cmd)
# Make alignment nets from chains
netfile = pf + ".net"
oldsizes = Sizes(oldfasta).filename
newsizes = Sizes(newfasta).filename
if need_update((sortedchain, oldsizes, newsizes), netfile):
cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes)
cmd += " {0} /dev/null".format(netfile)
sh(cmd)
# Create liftOver chain file
liftoverfile = pf + ".liftover.chain"
if need_update((netfile, sortedchain), liftoverfile):
cmd = "netChainSubset {0} {1} {2}".\
format(netfile, sortedchain, liftoverfile)
        sh(cmd)
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
) -> 'QubitOrder':
"""Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
"""
    if isinstance(val, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
return QubitOrder.explicit(val)
if isinstance(val, QubitOrder):
return val
raise ValueError(
"Don't know how to interpret <{}> as a Basis.".format(val)) | Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value. |
def add_double_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex:
"""
Creates a proxy vertex for the given label and adds to the sequence item
"""
if shape is None:
return Vertex._from_java_vertex(self.unwrap().addDoubleProxyFor(_VertexLabel(label).unwrap()))
else:
        return Vertex._from_java_vertex(self.unwrap().addDoubleProxyFor(_VertexLabel(label).unwrap(), shape))
def get_object(cls, abbr):
'''
This particular model needs its own constructor in order to take
advantage of the metadata cache in billy.util, which would otherwise
return unwrapped objects.
'''
obj = get_metadata(abbr)
if obj is None:
msg = 'No metadata found for abbreviation %r' % abbr
raise DoesNotExist(msg)
    return cls(obj)
def daemon_mode(self, args, options):
"""
        Open a ControlWebSocket to the SushiBar server and listen for remote commands.
Args:
args (dict): chef command line arguments
options (dict): additional compatibility mode options given on command line
"""
cws = ControlWebSocket(self, args, options)
cws.start()
if 'cmdsock' in args and args['cmdsock']:
lcs = LocalControlSocket(self, args, options)
lcs.start()
lcs.join()
    cws.join()
def set_vcard(self, vcard):
"""
Store the vCard `vcard` for the connected entity.
:param vcard: the vCard to store.
.. note::
`vcard` should always be derived from the result of
`get_vcard` to preserve the elements of the vcard the
client does not modify.
.. warning::
It is in the responsibility of the user to supply valid
vcard data as per :xep:`0054`.
"""
iq = aioxmpp.IQ(
type_=aioxmpp.IQType.SET,
payload=vcard,
)
    yield from self.client.send(iq)
def swapon(name, priority=None):
'''
Activate a swap disk
.. versionchanged:: 2016.3.2
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile
'''
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
if __grains__['kernel'] == 'SunOS':
if __grains__['virtual'] != 'zone':
__salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
else:
return False
else:
cmd = 'swapon {0}'.format(name)
if priority and 'AIX' not in __grains__['kernel']:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd, python_shell=False)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
    return ret
def destinations(stop):
"""Get destination information."""
from pyruter.api import Departures
async def get_destinations():
"""Get departure information."""
async with aiohttp.ClientSession() as session:
data = Departures(LOOP, stop, session=session)
result = await data.get_final_destination()
print(json.dumps(result, indent=4, sort_keys=True,
ensure_ascii=False))
    LOOP.run_until_complete(get_destinations())
def read_anchors(ac, qorder, sorder, minsize=0):
"""
anchors file are just (geneA, geneB) pairs (with possible deflines)
"""
all_anchors = defaultdict(list)
nanchors = 0
anchor_to_block = {}
for a, b, idx in ac.iter_pairs(minsize=minsize):
if a not in qorder or b not in sorder:
continue
qi, q = qorder[a]
si, s = sorder[b]
pair = (qi, si)
all_anchors[(q.seqid, s.seqid)].append(pair)
anchor_to_block[pair] = idx
nanchors += 1
logging.debug("A total of {0} anchors imported.".format(nanchors))
assert nanchors == len(anchor_to_block)
    return all_anchors, anchor_to_block
def jeffreys(logu, name=None):
"""The Jeffreys Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The Jeffreys Csiszar-function is:
```none
f(u) = 0.5 ( u log(u) - log(u) )
= 0.5 kl_forward + 0.5 kl_reverse
= symmetrized_csiszar_function(kl_reverse)
= symmetrized_csiszar_function(kl_forward)
```
This Csiszar-function induces a symmetric f-Divergence, i.e.,
`D_f[p, q] = D_f[q, p]`.
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
name: Python `str` name prefixed to Ops created by this function.
Returns:
jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "jeffreys", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
        return 0.5 * tf.math.expm1(logu) * logu
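# A standalone numeric check of the identity used above,
# 0.5 * expm1(log u) * log u == 0.5 * (u * log(u) - log(u)), without TensorFlow.
import math

logu = 0.7
u = math.exp(logu)
lhs = 0.5 * math.expm1(logu) * logu
rhs = 0.5 * (u * math.log(u) - math.log(u))
assert abs(lhs - rhs) < 1e-12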
def dataset_search(q=None, type=None, keyword=None,
owningOrg=None, publishingOrg=None, hostingOrg=None, decade=None,
publishingCountry = None, facet = None, facetMincount=None,
facetMultiselect = None, hl = False, limit = 100, offset = None,
**kwargs):
'''
Full text search across all datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter
can be a simple word or a phrase. Wildcards can be added to the simple word
parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which
you can search on. The search is done on the merged collection of tags, the
dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING
ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage
broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,
etc, and will return datasets wholly contained in the decade as well as those
that cover the entire decade or more. Facet by decade to get the break down,
e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,
and publishingCountry. Additionally subtype and country are legal values but not
yet implemented, so data will not yet be returned for them.
:param facetMincount: [str] Used in combination with the facet parameter. Set
facetMincount={#} to exclude facets with a count less than {#}, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000
only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have
counts less than 10000.
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
facetMultiselect=True to still return counts for values that are not currently
filtered, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true
still shows type values 'OCCURRENCE' and 'METADATA' even though type is being
filtered by type=CHECKLIST
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.
http://api.gbif.org/v1/dataset/search?q=plant&hl=true
Fulltext search fields include: title, keyword, country, publishing country,
publishing organization title, hosting organization title, and description. One
additional full text field is searched which includes information from metadata
documents, but the text of this field is not returned in the response.
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:note: Note that you can pass in additional faceting parameters on a per field basis.
For example, if you want to limit the numbef of facets returned from a field ``foo`` to
3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,
but does allow some. See also examples.
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
# Gets all datasets of type "OCCURRENCE".
registry.dataset_search(type="OCCURRENCE", limit = 10)
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_search(q="amsterdam", limit = 10)
# Limited search
registry.dataset_search(type="OCCURRENCE", limit=2)
registry.dataset_search(type="OCCURRENCE", limit=2, offset=10)
# Search by decade
registry.dataset_search(decade=1980, limit = 10)
# Faceting
## just facets
registry.dataset_search(facet="decade", facetMincount=10, limit=0)
## data and facets
registry.dataset_search(facet="decade", facetMincount=10, limit=2)
## many facet variables
registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0)
## facet vars
### per variable paging
x = registry.dataset_search(
facet = ["decade", "type"],
decade_facetLimit = 3,
type_facetLimit = 3,
limit = 0
)
## highlight
x = registry.dataset_search(q="plant", hl=True, limit = 10)
[ z['description'] for z in x['results'] ]
'''
url = gbif_baseurl + 'dataset/search'
args = {'q': q, 'type': type, 'keyword': keyword,
'owningOrg': owningOrg, 'publishingOrg': publishingOrg,
'hostingOrg': hostingOrg, 'decade': decade,
'publishingCountry': publishingCountry, 'facet': facet,
'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': hl, 'limit': limit, 'offset': offset}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
    return out
def hget(self, name, key):
"""
Returns the value stored in the field, None if the field doesn't exist.
:param name: str the name of the redis key
:param key: the member of the hash
:return: Future()
"""
with self.pipe as pipe:
f = Future()
res = pipe.hget(self.redis_key(name),
self.memberparse.encode(key))
def cb():
f.set(self._value_decode(key, res.result))
pipe.on_execute(cb)
        return f
def init(self):
"""Init the connection to the ES server."""
if not self.export_enable:
return None
self.index='{}-{}'.format(self.index, datetime.utcnow().strftime("%Y.%m.%d"))
template_body = {
"mappings": {
"glances": {
"dynamic_templates": [
{
"integers": {
"match_mapping_type": "long",
"mapping": {
"type": "integer"
}
}
},
{
"strings": {
"match_mapping_type": "string",
"mapping": {
"type": "text",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
]
}
}
}
try:
es = Elasticsearch(hosts=['{}:{}'.format(self.host, self.port)])
except Exception as e:
logger.critical("Cannot connect to ElasticSearch server %s:%s (%s)" % (self.host, self.port, e))
sys.exit(2)
else:
logger.info("Connected to the ElasticSearch server %s:%s" % (self.host, self.port))
try:
index_count = es.count(index=self.index)['count']
except Exception as e:
# Index did not exist, it will be created at the first write
# Create it...
es.indices.create(index=self.index,body=template_body)
else:
logger.info("The index %s exists and holds %s entries." % (self.index, index_count))
    return es
def apply_T8(word):
'''Split /ie/, /uo/, or /yö/ sequences in syllables that do not take
primary stress.'''
WORD = word
offset = 0
for vv in tail_diphthongs(WORD):
i = vv.start(1) + 1 + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
RULE = ' T8' if word != WORD else ''
return WORD, RULE | Split /ie/, /uo/, or /yö/ sequences in syllables that do not take
primary stress. |
def _get_one_pending_job(self):
"""
Retrieve a pending job.
:return: A CFGJob instance or None
"""
pending_job_key, pending_job = self._pending_jobs.popitem()
pending_job_state = pending_job.state
pending_job_call_stack = pending_job.call_stack
pending_job_src_block_id = pending_job.src_block_id
pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx
self._deregister_analysis_job(pending_job.caller_func_addr, pending_job)
# Let's check whether this address has been traced before.
if pending_job_key in self._nodes:
node = self._nodes[pending_job_key]
if node in self.graph:
pending_exit_addr = self._block_id_addr(pending_job_key)
# That block has been traced before. Let's forget about it
l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr)
# However, we should still create the FakeRet edge
self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet",
stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr)
return None
pending_job_state.history.jumpkind = 'Ijk_FakeRet'
job = CFGJob(pending_job_state.addr,
pending_job_state,
self._context_sensitivity_level,
src_block_id=pending_job_src_block_id,
src_exit_stmt_idx=pending_job_src_exit_stmt_idx,
src_ins_addr=pending_job.src_exit_ins_addr,
call_stack=pending_job_call_stack,
)
l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key))
return job | Retrieve a pending job.
:return: A CFGJob instance or None |
def ensure_schema(self):
"""Create file and schema if it does not exist yet."""
self._ensure_filename()
if not os.path.isfile(self.filename):
self.create_schema() | Create file and schema if it does not exist yet. |
def regroup_commands(commands):
"""
Returns a list of tuples:
[(command_to_run, [list, of, commands])]
If the list of commands has a single item, the command was not grouped.
"""
grouped = []
pending = []
def group_pending():
if not pending:
return
new_command = grouped_command(pending)
result = []
while pending:
result.append(pending.pop(0))
grouped.append((new_command, result))
for command, next_command in peek(commands):
# if the previous command was a get, and this is a set we must execute
# any pending commands
# TODO: unless this command is a get_multi and it matches the same option
# signature
if can_group_commands(command, next_command):
# if previous command does not match this command
if pending and not can_group_commands(pending[0], command):
group_pending()
pending.append(command)
else:
# if pending exists for this command, group it
if pending and can_group_commands(pending[0], command):
pending.append(command)
else:
grouped.append((command.clone(), [command]))
# We couldn't group with previous command, so ensure we bubble up
group_pending()
group_pending()
return grouped | Returns a list of tuples:
[(command_to_run, [list, of, commands])]
If the list of commands has a single item, the command was not grouped. |
def straight_throat(target, throat_centroid='throat.centroid',
throat_vector='throat.vector',
throat_length='throat.length'):
r"""
Calculate the coordinates of throat endpoints given a central coordinate,
unit vector along the throat direction and a length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_centroid : string
Dictionary key of the throat center coordinates.
throat_vector : string
Dictionary key of the throat vector pointing along the length of the
throats.
throat_length : string
Dictionary key of the throat length.
Returns
-------
EP : dictionary
Coordinates of throat endpoints stored in Dict form. Can be accessed
via the dict keys 'head' and 'tail'.
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
center = network[throat_centroid][throats]
vector = network[throat_vector][throats]
length = network[throat_length][throats]
EP1 = center - 0.5 * length[:, _sp.newaxis] * vector
EP2 = center + 0.5 * length[:, _sp.newaxis] * vector
return {'head': EP1, 'tail': EP2} | r"""
Calculate the coordinates of throat endpoints given a central coordinate,
unit vector along the throat direction and a length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_centroid : string
Dictionary key of the throat center coordinates.
throat_vector : string
Dictionary key of the throat vector pointing along the length of the
throats.
throat_length : string
Dictionary key of the throat length.
Returns
-------
EP : dictionary
Coordinates of throat endpoints stored in Dict form. Can be accessed
via the dict keys 'head' and 'tail'. |
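A minimal sketch of the endpoint arithmetic documented above, assuming plain NumPy arrays stand in for the OpenPNM network; all values are illustrative only.
import numpy as np
# One hypothetical throat centred at the origin, pointing along x, length 2.
center = np.array([[0.0, 0.0, 0.0]])
vector = np.array([[1.0, 0.0, 0.0]])
length = np.array([2.0])
# Same rule as straight_throat: endpoints lie half a length from the centre.
EP1 = center - 0.5 * length[:, np.newaxis] * vector
EP2 = center + 0.5 * length[:, np.newaxis] * vector
print(EP1, EP2)  # [[-1. 0. 0.]] [[1. 0. 0.]]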
def get_par_css_dataframe(self):
""" get a dataframe of composite scaled sensitivities. Includes both
PEST-style and Hill-style.
Returns
-------
css : pandas.DataFrame
"""
assert self.jco is not None
assert self.pst is not None
jco = self.jco.to_dataframe()
weights = self.pst.observation_data.loc[jco.index,"weight"].copy().values
jco = (jco.T * weights).T
dss_sum = jco.apply(np.linalg.norm)
css = (dss_sum / float(self.pst.nnz_obs)).to_frame()
css.columns = ["pest_css"]
# log transform stuff
self.pst.add_transform_columns()
parval1 = self.pst.parameter_data.loc[dss_sum.index,"parval1_trans"].values
css.loc[:,"hill_css"] = (dss_sum * parval1) / (float(self.pst.nnz_obs)**2)
return css | get a dataframe of composite scaled sensitivities. Includes both
PEST-style and Hill-style.
Returns
-------
css : pandas.DataFrame |
def clear(self):
"""T.clear() -> None. Remove all items from T."""
def _clear(node):
if node is not None:
_clear(node.left)
_clear(node.right)
node.free()
_clear(self._root)
self._count = 0
self._root = None | T.clear() -> None. Remove all items from T. |
def StatFSFromClient(args):
"""Call os.statvfs for a given list of paths.
Args:
args: An `rdf_client_action.StatFSRequest`.
Yields:
`rdf_client_fs.UnixVolume` instances.
Raises:
RuntimeError: if called on a Windows system.
"""
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdf_paths.PathSpec(path=path, pathtype=args.pathtype))
st = fd.StatFS()
mount_point = fd.GetMountPoint()
except (IOError, OSError):
continue
unix = rdf_client_fs.UnixVolume(mount_point=mount_point)
# On linux pre 2.6 kernels don't have frsize, so we fall back to bsize.
# The actual_available_allocation_units attribute is set to blocks
# available to the unprivileged user, root may have some additional
# reserved space.
yield rdf_client_fs.Volume(
bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unixvolume=unix) | Call os.statvfs for a given list of paths.
Args:
args: An `rdf_client_action.StatFSRequest`.
Yields:
`rdf_client_fs.UnixVolume` instances.
Raises:
RuntimeError: if called on a Windows system. |
def show_vcs_output_vcs_guid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_guid = ET.SubElement(output, "vcs-guid")
vcs_guid.text = kwargs.pop('vcs_guid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def OpenFile(self, filepath):
"""open()-replacement that automatically handles zip files.
This assumes there is at most one .zip in the file path.
Args:
filepath: the path to the file to open.
Returns:
An open file-like object.
"""
archive = False
if '.zip/' in filepath:
archive = True
archive_type = '.zip'
if '.par/' in filepath:
archive = True
archive_type = '.par'
if archive:
path, archived_file = filepath.split(archive_type)
path += archive_type
zip_file = zipfile.ZipFile(path)
return zip_file.open(archived_file.strip('/'))
return open(filepath) | open()-replacement that automatically handles zip files.
This assumes there is at most one .zip in the file path.
Args:
filepath: the path to the file to open.
Returns:
An open file-like object. |
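A small sketch of the path-splitting step used above, on a hypothetical path; no archive needs to exist to see how the archive path and the inner member are separated.
filepath = '/tmp/bundle.zip/data/config.txt'  # hypothetical path
path, archived_file = filepath.split('.zip')
path += '.zip'
print(path)                      # /tmp/bundle.zip
print(archived_file.strip('/'))  # data/config.txt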
def udf_signature(input_type, pin, klass):
"""Compute the appropriate signature for a
:class:`~ibis.expr.operations.Node` from a list of input types
`input_type`.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of :class:`~ibis.expr.datatypes.DataType` instances representing
the signature of a UDF/UDAF.
pin : Optional[int]
If this is not None, pin the `pin`-th argument type to `klass`
klass : Union[Type[pd.Series], Type[SeriesGroupBy]]
The pandas object that every argument type should contain
Returns
-------
Tuple[Type]
A tuple of types appropriate for use in a multiple dispatch signature.
Examples
--------
>>> from pprint import pprint
>>> import pandas as pd
>>> from pandas.core.groupby import SeriesGroupBy
>>> import ibis.expr.datatypes as dt
>>> input_type = [dt.string, dt.double]
>>> sig = udf_signature(input_type, pin=None, klass=pd.Series)
>>> pprint(sig) # doctest: +ELLIPSIS
((<class '...Series'>, <... '...str...'>, <... 'NoneType'>),
(<class '...Series'>,
<... 'float'>,
<... 'numpy.floating'>,
<... 'NoneType'>))
>>> not_nullable_types = [
... dt.String(nullable=False), dt.Double(nullable=False)]
>>> sig = udf_signature(not_nullable_types, pin=None, klass=pd.Series)
>>> pprint(sig) # doctest: +ELLIPSIS
((<class '...Series'>, <... '...str...'>),
(<class '...Series'>,
<... 'float'>,
<... 'numpy.floating'>))
>>> sig0 = udf_signature(input_type, pin=0, klass=SeriesGroupBy)
>>> sig1 = udf_signature(input_type, pin=1, klass=SeriesGroupBy)
>>> pprint(sig0) # doctest: +ELLIPSIS
(<class '...SeriesGroupBy'>,
(<class '...SeriesGroupBy'>,
<... 'float'>,
<... 'numpy.floating'>,
<... 'NoneType'>))
>>> pprint(sig1) # doctest: +ELLIPSIS
((<class '...SeriesGroupBy'>,
<... '...str...'>,
<... 'NoneType'>),
<class '...SeriesGroupBy'>)
"""
nargs = len(input_type)
if not nargs:
return ()
if nargs == 1:
r, = input_type
result = (klass,) + rule_to_python_type(r) + nullable(r)
return (result,)
return tuple(
klass
if pin is not None and pin == i
else ((klass,) + rule_to_python_type(r) + nullable(r))
for i, r in enumerate(input_type)
) | Compute the appropriate signature for a
:class:`~ibis.expr.operations.Node` from a list of input types
`input_type`.
Parameters
----------
input_type : List[ibis.expr.datatypes.DataType]
A list of :class:`~ibis.expr.datatypes.DataType` instances representing
the signature of a UDF/UDAF.
pin : Optional[int]
If this is not None, pin the `pin`-th argument type to `klass`
klass : Union[Type[pd.Series], Type[SeriesGroupBy]]
The pandas object that every argument type should contain
Returns
-------
Tuple[Type]
A tuple of types appropriate for use in a multiple dispatch signature.
Examples
--------
>>> from pprint import pprint
>>> import pandas as pd
>>> from pandas.core.groupby import SeriesGroupBy
>>> import ibis.expr.datatypes as dt
>>> input_type = [dt.string, dt.double]
>>> sig = udf_signature(input_type, pin=None, klass=pd.Series)
>>> pprint(sig) # doctest: +ELLIPSIS
((<class '...Series'>, <... '...str...'>, <... 'NoneType'>),
(<class '...Series'>,
<... 'float'>,
<... 'numpy.floating'>,
<... 'NoneType'>))
>>> not_nullable_types = [
... dt.String(nullable=False), dt.Double(nullable=False)]
>>> sig = udf_signature(not_nullable_types, pin=None, klass=pd.Series)
>>> pprint(sig) # doctest: +ELLIPSIS
((<class '...Series'>, <... '...str...'>),
(<class '...Series'>,
<... 'float'>,
<... 'numpy.floating'>))
>>> sig0 = udf_signature(input_type, pin=0, klass=SeriesGroupBy)
>>> sig1 = udf_signature(input_type, pin=1, klass=SeriesGroupBy)
>>> pprint(sig0) # doctest: +ELLIPSIS
(<class '...SeriesGroupBy'>,
(<class '...SeriesGroupBy'>,
<... 'float'>,
<... 'numpy.floating'>,
<... 'NoneType'>))
>>> pprint(sig1) # doctest: +ELLIPSIS
((<class '...SeriesGroupBy'>,
<... '...str...'>,
<... 'NoneType'>),
<class '...SeriesGroupBy'>) |
def down_ec2(instance_id, region, access_key_id, secret_access_key):
""" shutdown of an existing EC2 instance """
conn = connect_to_ec2(region, access_key_id, secret_access_key)
# get the instance_id from the state file, and stop the instance
instance = conn.stop_instances(instance_ids=instance_id)[0]
while instance.state != "stopped":
log_yellow("Instance state: %s" % instance.state)
sleep(10)
instance.update()
log_green('Instance state: %s' % instance.state) | shutdown of an existing EC2 instance |
def writecc (listoflists,file,writetype='w',extra=2):
"""
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print,col)
items = [pstat.makestr(_) for _ in items]
maxsize[col] = max(map(len,items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes,maxsize))
else:
outfile.write(pstat.lineincustcols(row,maxsize))
outfile.write('\n')
outfile.close()
return None | Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None |
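A sketch of the column-sizing rule applied above, without the pstat dependency; the sample rows are made up.
listoflists = [['name', 'value'], ['alpha', '3.14159'], ['b', '2']]
extra = 2
maxsize = [max(len(str(row[col])) for row in listoflists) + extra
           for col in range(len(listoflists[0]))]
print(maxsize)  # [7, 9]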
def stringIO(value,
allow_empty = False,
**kwargs):
"""Validate that ``value`` is a :class:`StringIO <python:io.StringIO>` object.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`StringIO <python:io.StringIO>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises NotStringIOError: if ``value`` is not a :class:`StringIO <python:io.StringIO>`
object
"""
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if not isinstance(value, io.StringIO):
raise ValueError('value (%s) is not an io.StringIO object, '
'is a %s' % (value, type(value)))
return value | Validate that ``value`` is a :class:`StringIO <python:io.StringIO>` object.
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`StringIO <python:io.StringIO>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises NotStringIOError: if ``value`` is not a :class:`StringIO <python:io.StringIO>`
object |
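Assuming the validator above is in scope, a minimal usage sketch:
import io
buf = io.StringIO('hello')
assert stringIO(buf) is buf                      # StringIO passes through
assert stringIO(None, allow_empty=True) is None  # empty value tolerated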
def main():
"""
NAME
dir_redo.py
DESCRIPTION
converts the Cogne DIR format to PmagPy redo file
SYNTAX
dir_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify input file
-F FILE: specify output file, default is 'zeq_redo'
"""
dir_path='.'
zfile='zeq_redo'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=sys.argv[ind+1]
inspec=dir_path+"/"+inspec
zfile=dir_path+"/"+zfile
zredo=open(zfile,"w")
#
# read in DIR file
#
specs=[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
line=line.replace("Dir"," Dir")
line=line.replace("OKir"," OKir")
line=line.replace("Fish"," Fish")
line=line.replace("Man"," Man")
line=line.replace("GC"," GC")
line=line.replace("-T"," - T")
line=line.replace("-M"," - M")
rec=line.split()
if len(rec)<2:
sys.exit()
if rec[1]=='Dir' or rec[1]=='GC': # skip all the other stuff
spec=rec[0]
specs.append(spec)
comp_name=string.uppercase[specs.count(spec)-1] # assign component names
calculation_type="DE-FM"
if rec[1]=='Dir' and rec[2]=="Kir": calculation_type="DE-BFL" # assume default calculation type is best-fit line
if rec[1]=='Dir' and rec[2]=="OKir": calculation_type="DE-BFL-A" # anchored best-fit line
if rec[1]=='Dir' and rec[2]=="Fish": calculation_type="DE-FM" # fisher mean
if rec[1]=='GC' : calculation_type="DE-BFP" # best-fit plane
min,max=rec[3],rec[5]
beg,end="",""
if min=="NRM": beg=0
if min[0]=='M':
beg=float(min[1:])*1e-3 # convert to T from mT
elif min[0]=='T':
beg=float(min[1:])+273 # convert from C to kelvin
if max[0]=='M':
end=float(max[1:])*1e-3 # convert to T from mT
elif max[0]=='T':
end=float(max[1:])+273 # convert from C to kelvin
if beg==0:beg=273
outstring='%s %s %s %s %s \n'%(spec,calculation_type,beg,end,comp_name)
zredo.write(outstring) | NAME
dir_redo.py
DESCRIPTION
converts the Cogne DIR format to PmagPy redo file
SYNTAX
dir_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify input file
-F FILE: specify output file, default is 'zeq_redo' |
def cee_map_priority_table_map_cos4_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos4_pgid = ET.SubElement(priority_table, "map-cos4-pgid")
map_cos4_pgid.text = kwargs.pop('map_cos4_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def modify(self, current_modified_line, anchors, file_path, file_lines=None,
index=None):
"""
Removes the trailing AnchorHub tag from the end of the line being
examined.
:param current_modified_line: string representing the line at
file_lines[index] _after_ any previous modifications from other
WriterStrategy objects
:param anchors: Dictionary mapping string file paths to inner
dictionaries. These inner dictionaries map string AnchorHub tags
to string generated anchors
:param file_path: string representing the file_path of the current
file being examined by this WriterStrategy
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: string. A version of current_modified_line that has the
AnchorHub tag removed from the end of it
"""
open_wrapper_index = current_modified_line.rfind(self._open)
# '- 1' removes trailing space. May want to modify to completely
# strip whitespace at the end, instead of only working for a single
# space
return current_modified_line[:open_wrapper_index - 1] + "\n" | Removes the trailing AnchorHub tag from the end of the line being
examined.
:param current_modified_line: string representing the line at
file_lines[index] _after_ any previous modifications from other
WriterStrategy objects
:param anchors: Dictionary mapping string file paths to inner
dictionaries. These inner dictionaries map string AnchorHub tags
to string generated anchors
:param file_path: string representing the file_path of the current
file being examined by this WriterStrategy
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: string. A version of current_modified_line that has the
AnchorHub tag removed from the end of it |
def setArg(self, namespace, key, value):
"""Set a single argument in this namespace"""
assert key is not None
assert value is not None
namespace = self._fixNS(namespace)
self.args[(namespace, key)] = value
if not (namespace is BARE_NS):
self.namespaces.add(namespace) | Set a single argument in this namespace |
def create_token(self, request, refresh_token=False):
"""Create a JWT Token, using requestvalidator method."""
if callable(self.expires_in):
expires_in = self.expires_in(request)
else:
expires_in = self.expires_in
request.expires_in = expires_in
return self.request_validator.get_jwt_bearer_token(None, None, request) | Create a JWT Token, using requestvalidator method. |
def remove_acl(cursor, uuid_, permissions):
"""Given a ``uuid`` and a set of permissions given as a tuple
of ``uid`` and ``permission``, remove these entries from the database.
"""
if not isinstance(permissions, (list, set, tuple,)):
raise TypeError("``permissions`` is an invalid type: {}"
.format(type(permissions)))
permissions = set(permissions)
# Remove the entries.
for uid, permission in permissions:
cursor.execute("""\
DELETE FROM document_acl
WHERE uuid = %s AND user_id = %s AND permission = %s""",
(uuid_, uid, permission,)) | Given a ``uuid`` and a set of permissions given as a tuple
of ``uid`` and ``permission``, remove these entries from the database. |
def sqlite_default():
'''
Prepend default scheme if none is specified. This helps provide backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database.
'''
def parse_url(url):
# TODO: need separate option for a config file
if url.endswith('.db') or url.endswith('.sqlite'):
if not url.startswith('sqlite:///'):
url = 'sqlite:///' + url
elif url.endswith('.cfg') or url.endswith('.conf'):
conf = configparser.SafeConfigParser(allow_no_value=True)
conf.optionxform = str # options are case-sensitive
conf.read(url)
url = conf.get('sqlalchemy', 'url')
return url
return parse_url | Prepend default scheme if none is specified. This helps provide backwards
compatibility with old versions of taxtastic where sqlite was the automatic
default database. |
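Assuming the factory above is in scope, the returned parser behaves roughly as follows (paths are illustrative):
parse_url = sqlite_default()
print(parse_url('taxonomy.db'))            # sqlite:///taxonomy.db
print(parse_url('sqlite:///taxonomy.db'))  # unchanged
print(parse_url('postgresql://host/db'))   # unchanged, no .db/.sqlite/.cfg suffix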
def sin(duration: int, amp: complex, freq: float = None,
phase: float = 0, name: str = None) -> SamplePulse:
"""Generates sine wave `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse.
"""
if freq is None:
freq = 1/duration
return _sampled_sin_pulse(duration, amp, freq, phase=phase, name=name) | Generates sine wave `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse. |
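A hedged sketch of the sampling that `_sampled_sin_pulse` is assumed to perform: amp * sin(2*pi*freq*t + phase) evaluated at integer sample times; the exact Qiskit implementation may differ in detail.
import numpy as np
duration, amp, phase = 8, 0.5, 0.0
freq = 1 / duration                 # default: a single cycle over the pulse
t = np.arange(duration)
samples = amp * np.sin(2 * np.pi * freq * t + phase)
print(samples.shape)                # (8,)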
def datagram_handler(name, logname, host, port):
"""
A Bark logging handler logging output to a datagram (UDP) socket.
The server listening at the given 'host' and 'port' will be sent a
pickled dictionary.
Similar to logging.handlers.DatagramHandler.
"""
return wrap_log_handler(logging.handlers.DatagramHandler(host, port)) | A Bark logging handler logging output to a datagram (UDP) socket.
The server listening at the given 'host' and 'port' will be sent a
pickled dictionary.
Similar to logging.handlers.DatagramHandler. |
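A plain-stdlib sketch of the handler being wrapped; the host, port, and logger name below are illustrative.
import logging
import logging.handlers
handler = logging.handlers.DatagramHandler('localhost', 9021)
log = logging.getLogger('bark.example')
log.addHandler(handler)
log.warning('sent as a pickled dict over UDP')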
def sanitize_ep(endpoint, plural=False):
"""
Sanitize an endpoint to a singular or plural form.
Used mostly for convenience in the `_parse` method to grab the raw
data from queried datasets.
XXX: this is el cheapo (not all that good)
"""
# if we need a plural endpoint (acessing lists)
if plural:
if endpoint.endswith('y'):
endpoint = endpoint[:-1] + 'ies'
elif not endpoint.endswith('s'):
endpoint += 's'
else:
# otherwise make sure it's singular form
if endpoint.endswith('ies'):
endpoint = endpoint[:-3] + 'y'
elif endpoint.endswith('s'):
endpoint = endpoint[:-1]
return endpoint | Sanitize an endpoint to a singular or plural form.
Used mostly for convenience in the `_parse` method to grab the raw
data from queried datasets.
XXX: this is el cheapo (not all that good) |
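Assuming the helper above is in scope, its expected behaviour on a few made-up endpoint names:
assert sanitize_ep('company', plural=True) == 'companies'
assert sanitize_ep('user', plural=True) == 'users'
assert sanitize_ep('companies') == 'company'
assert sanitize_ep('users') == 'user'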
def manager(self, value):
"Set the manager object in the global _managers dict."
pid = current_process().ident
if _managers is None:
raise RuntimeError("Can not set the manager following a system exit.")
if pid not in _managers:
_managers[pid] = value
else:
raise Exception("Manager already set for pid %s" % pid) | Set the manager object in the global _managers dict. |
def muscle(sequences=None, alignment_file=None, fasta=None,
fmt='fasta', as_file=False, maxiters=None, diags=False,
gap_open=None, gap_extend=None, muscle_bin=None):
'''
Performs multiple sequence alignment with MUSCLE.
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
alternative to ``sequences`` when supplying a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads (CPU cores) for MUSCLE to use. Default is ``-1``, which
results in MUSCLE using all available cores.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag.
diags (int): Passed directly to MUSCLE using the ``-diags`` flag.
gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored
if ``gap_extend`` is not also provided.
gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored
if ``gap_open`` is not also provided.
muscle_bin (str): Path to MUSCLE executable. ``abutils`` includes built-in MUSCLE binaries
for MacOS and Linux; however, a different MUSCLE binary can be provided. Default is
``None``, which results in using the appropriate built-in MUSCLE binary.
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned.
'''
if sequences:
fasta_string = _get_fasta_string(sequences)
elif fasta:
fasta_string = open(fasta, 'r').read()
if muscle_bin is None:
# mod_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
muscle_bin = os.path.join(BINARY_DIR, 'muscle_{}'.format(platform.system().lower()))
aln_format = ''
if fmt == 'clustal':
aln_format = ' -clwstrict'
muscle_cline = '{}{} '.format(muscle_bin, aln_format)
if maxiters is not None:
muscle_cline += ' -maxiters {}'.format(maxiters)
if diags:
muscle_cline += ' -diags'
if all([gap_open is not None, gap_extend is not None]):
muscle_cline += ' -gapopen {} -gapextend {}'.format(gap_open, gap_extend)
muscle = sp.Popen(str(muscle_cline),
stdin=sp.PIPE,
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True,
shell=True)
if sys.version_info[0] > 2:
alignment = muscle.communicate(input=fasta_string)[0]
else:
alignment = unicode(muscle.communicate(input=fasta_string)[0], 'utf-8')
aln = AlignIO.read(StringIO(alignment), fmt)
if as_file:
if not alignment_file:
alignment_file = tempfile.NamedTemporaryFile().name
AlignIO.write(aln, alignment_file, fmt)
return alignment_file
return aln | Performs multiple sequence alignment with MUSCLE.
Args:
sequences (list): Sequences to be aligned. ``sequences`` can be one of four things:
1. a FASTA-formatted string
2. a list of BioPython ``SeqRecord`` objects
3. a list of AbTools ``Sequence`` objects
4. a list of lists/tuples, of the format ``[sequence_id, sequence]``
alignment_file (str): Path for the output alignment file. If not supplied,
a name will be generated using ``tempfile.NamedTemporaryFile()``.
fasta (str): Path to a FASTA-formatted file of sequences. Used as an
alternative to ``sequences`` when supplying a FASTA file.
fmt (str): Format of the alignment. Options are 'fasta' and 'clustal'. Default
is 'fasta'.
threads (int): Number of threads (CPU cores) for MUSCLE to use. Default is ``-1``, which
results in MUSCLE using all available cores.
as_file (bool): If ``True``, returns a path to the alignment file. If ``False``,
returns a BioPython ``MultipleSeqAlignment`` object (obtained by calling
``Bio.AlignIO.read()`` on the alignment file).
maxiters (int): Passed directly to MUSCLE using the ``-maxiters`` flag.
diags (int): Passed directly to MUSCLE using the ``-diags`` flag.
gap_open (float): Passed directly to MUSCLE using the ``-gapopen`` flag. Ignored
if ``gap_extend`` is not also provided.
gap_extend (float): Passed directly to MUSCLE using the ``-gapextend`` flag. Ignored
if ``gap_open`` is not also provided.
muscle_bin (str): Path to MUSCLE executable. ``abutils`` includes built-in MUSCLE binaries
for MacOS and Linux; however, a different MUSCLE binary can be provided. Default is
``None``, which results in using the appropriate built-in MUSCLE binary.
Returns:
Returns a BioPython ``MultipleSeqAlignment`` object, unless ``as_file`` is ``True``,
in which case the path to the alignment file is returned. |
def make_bindings_type(filenames,color_input,colorkey,file_dictionary,sidebar,bounds):
# instantiating string the main string block for the javascript block of html code
string = ''
'''
# logic for instantiating variable colorkey input
if not colorkeyfields == False:
colorkey = 'selectedText'
'''
# iterating through each geojson filename
count = 0
for row in filenames:
color_input = ''
colorkeyfields = False
count += 1
filename = row
zoomrange = ['','']
# reading in geojson file into memory
with open(filename) as data_file:
data = json.load(data_file)
#pprint(data)
# getting the featuretype which will later dictate what javascript splices are needed
data = data['features']
data = data[0]
featuretype = data['geometry']
featuretype = featuretype['type']
data = data['properties']
# logic for overwriting colorkey fields if it exists for the filename
# in the file dictionary
try:
colorkeyfields = file_dictionary[filename][str('colorkeyfields')]
except KeyError:
colorkeyfields = False
except TypeError:
colorkeyfields = False
if not colorkeyfields == False:
if len(colorkeyfields) == 1:
colorkey = colorkeyfields[0]
colorkeyfields = False
try:
zoomrange = file_dictionary[filename][str('zooms')]
except KeyError:
zoomrange = ['','']
except TypeError:
zoomrange = ['','']
# code for if the file_dictionary input isn't false
#(i.e. getting the color inputs out of dictionary variable)
if file_dictionary==False and colorkey == False:
# logic for getting the colorline for different feature types
# the point feature requires a different line of code
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# setting minzoom and maxzoom to be sent into js parsing
minzoom,maxzoom = zoomrange
# getting filter file dictionary if filter_dictonary exists
if not file_dictionary == False:
filter_file_dictionary = file_dictionary[filename]
else:
filter_file_dictionary = False
# checking to see if a chart_dictionary exists
try:
chart_dictionary = filter_file_dictionary['chart_dictionary']
except KeyError:
chart_dictionary = False
except TypeError:
chart_dictionary = False
# sending min and max zoom into the function that makes the zoom block
zoomblock = make_zoom_block(minzoom,maxzoom,count,colorkeyfields,bounds,filter_file_dictionary)
# logic for if a color key is given
# HINT look here for rgb raw color integration in a color line
if not colorkey == '':
if row == filenames[0]:
if colorkey == 'selectedText':
colorkey = """feature.properties[%s]""" % colorkey
else:
colorkey = """feature.properties['%s']""" % colorkey
if featuretype == 'Point':
colorline = get_colorline_marker(str(colorkey))
else:
colorline = get_colorline_marker2(str(colorkey))
# this may be able to be deleted
# test later
# it is unclear why this branch is needed
if file_dictionary == False and colorkey == '':
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
if colorkey == '' and colorkeyfields == False:
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# iterating through each header
headers = []
for row in data:
headers.append(str(row))
# logic for getting sidebar string that will be added in make_blockstr()
if sidebar == True:
sidebarstring = make_sidebar_string(headers,chart_dictionary)
else:
sidebarstring = ''
# section of javascript code dedicated to the adding the data layer
if count == 1:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tvar map = L.mapbox.map('map', 'mapbox.streets',{
\t\t\tzoom: 5
\t\t\t}).fitBounds(dataLayer.getBounds());
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
else:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
# making the string section that locally links the geojson file to the html document
'''
if not time == '':
preloc='\tfunction add%s() {\n' % (str(count))
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
loc = preloc + loc
else:
'''
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
# creating block to be added to the total or constituent string block
if featuretype == 'Point':
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
else:
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
# adding the stringblock (one geojson file javascript block) to the total string block
string += stringblock
# adding async function to end of string block
string = string + async_function_call(count)
return string | # logic for instantiating variable colorkey input
if not colorkeyfields == False:
colorkey = 'selectedText' |
def proximal_quadratic_perturbation(prox_factory, a, u=None):
r"""Calculate the proximal of function F(x) + a * \|x\|^2 + <u,x>.
Parameters
----------
prox_factory : callable
A factory function that, when called with a step size, returns the
proximal operator of ``F``
a : non-negative float
Scaling of the quadratic term
u : Element in domain of F, optional
Defines the linear functional. For ``None``, the zero element
is taken.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
Given a functional :math:`F`, this is calculated according to the rule
.. math::
\mathrm{prox}_{\sigma \left(F( \cdot ) + a \| \cdot \|^2 +
<u, \cdot >\right)}(x) =
c \; \mathrm{prox}_{\sigma F( \cdot \, c)}((x - \sigma u) c)
where :math:`c` is the constant
.. math::
c = \frac{1}{\sqrt{2 \sigma a + 1}},
:math:`a` is the scaling parameter belonging to the quadratic term,
:math:`u` is the space element defining the linear functional, and
:math:`\sigma` is the step size.
For reference on the identity used, see [CP2011c]. Note that this identity
is not the exact one given in the reference, but was recalculated for
arbitrary step lengths.
References
----------
[CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
methods in signal processing.* In: Bauschke, H H, Burachik, R S,
Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
algorithms for inverse problems in science and engineering, Springer,
2011.
"""
a = float(a)
if a < 0:
raise ValueError('scaling parameter must be non-negative, got {}'
''.format(a))
if u is not None and not isinstance(u, LinearSpaceElement):
raise TypeError('`u` must be `None` or a `LinearSpaceElement` '
'instance, got {!r}.'.format(u))
def quadratic_perturbation_prox_factory(sigma):
r"""Create proximal for the quadratic perturbation with a given sigma.
Parameters
----------
sigma : positive float
Step size parameter
Returns
-------
proximal : `Operator`
The proximal operator of ``sigma * (F(x) + a * \|x\|^2 + <u,x>)``,
where ``sigma`` is the step size
"""
if np.isscalar(sigma):
sigma = float(sigma)
else:
sigma = np.asarray(sigma)
const = 1.0 / np.sqrt(sigma * 2.0 * a + 1)
prox = proximal_arg_scaling(prox_factory, const)(sigma)
if u is not None:
return (MultiplyOperator(const, domain=u.space, range=u.space) *
prox *
(MultiplyOperator(const, domain=u.space, range=u.space) -
sigma * const * u))
else:
space = prox.domain
return (MultiplyOperator(const, domain=space, range=space) *
prox * MultiplyOperator(const, domain=space, range=space))
return quadratic_perturbation_prox_factory | r"""Calculate the proximal of function F(x) + a * \|x\|^2 + <u,x>.
Parameters
----------
prox_factory : callable
A factory function that, when called with a step size, returns the
proximal operator of ``F``
a : non-negative float
Scaling of the quadratic term
u : Element in domain of F, optional
Defines the linear functional. For ``None``, the zero element
is taken.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized
Notes
-----
Given a functional :math:`F`, this is calculated according to the rule
.. math::
\mathrm{prox}_{\sigma \left(F( \cdot ) + a \| \cdot \|^2 +
<u, \cdot >\right)}(x) =
c \; \mathrm{prox}_{\sigma F( \cdot \, c)}((x - \sigma u) c)
where :math:`c` is the constant
.. math::
c = \frac{1}{\sqrt{2 \sigma a + 1}},
:math:`a` is the scaling parameter belonging to the quadratic term,
:math:`u` is the space element defining the linear functional, and
:math:`\sigma` is the step size.
For reference on the identity used, see [CP2011c]. Note that this identity
is not the exact one given in the reference, but was recalculated for
arbitrary step lengths.
References
----------
[CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
methods in signal processing.* In: Bauschke, H H, Burachik, R S,
Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
algorithms for inverse problems in science and engineering, Springer,
2011. |
def delete_field_value(self, name):
"""
Mark this field to be deleted
"""
name = self.get_real_name(name)
if name and self._can_write_field(name):
if name in self.__modified_data__:
self.__modified_data__.pop(name)
if name in self.__original_data__ and name not in self.__deleted_fields__:
self.__deleted_fields__.append(name) | Mark this field to be deleted |
def values(self):
"""Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`.
"""
dtypes = [col.dtype for col in self.columns]
if len(set(dtypes)) > 1:
dtype = object
else:
dtype = None
return np.array(self.columns, dtype=dtype).T | Return data in `self` as a numpy array.
If all columns are the same dtype, the resulting array
will have this dtype. If there are >1 dtypes in columns,
then the resulting array will have dtype `object`. |
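The dtype rule above, sketched with plain NumPy arrays standing in for columns; the data are made up.
import numpy as np
same = [np.array([1, 2]), np.array([3, 4])]
mixed = [np.array([1, 2]), np.array(['a', 'b'])]
print(np.array(same).T.dtype)                  # a single integer dtype
print(np.array(mixed, dtype=object).T.dtype)   # object, because dtypes differ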
def plot(*args, ax=None, **kwargs):
"""
Plots but automatically resizes x axis.
.. versionadded:: 1.4
Parameters
----------
args
Passed on to :meth:`matplotlib.axis.Axis.plot`.
ax : :class:`matplotlib.axis.Axis`, optional
The axis to plot to.
kwargs
Passed on to :meth:`matplotlib.axis.Axis.plot`.
"""
if ax is None:
fig, ax = _setup_axes()
pl = ax.plot(*args, **kwargs)
if _np.shape(args)[0] > 1:
if type(args[1]) is not str:
min_x = min(args[0])
max_x = max(args[0])
ax.set_xlim((min_x, max_x))
return pl | Plots but automatically resizes x axis.
.. versionadded:: 1.4
Parameters
----------
args
Passed on to :meth:`matplotlib.axis.Axis.plot`.
ax : :class:`matplotlib.axis.Axis`, optional
The axis to plot to.
kwargs
Passed on to :meth:`matplotlib.axis.Axis.plot`. |
def _interactive_input_fn(hparams, decode_hp):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
"""
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length,
"source_string" if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features | Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid. |
def _validate_oneof(self, definitions, field, value):
""" {'type': 'list', 'logical': 'oneof'} """
valids, _errors = \
self.__validate_logical('oneof', definitions, field, value)
if valids != 1:
self._error(field, errors.ONEOF, _errors,
valids, len(definitions)) | {'type': 'list', 'logical': 'oneof'} |
def set_change(name, change):
'''
Sets the time at which the password expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
A value of ``0`` sets the password to never expire.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_change username 1419980400
'''
pre_info = info(name)
if change == pre_info['change']:
return True
if __grains__['kernel'] == 'FreeBSD':
cmd = ['pw', 'user', 'mod', name, '-f', change]
else:
cmd = ['usermod', '-f', change, name]
__salt__['cmd.run'](cmd, python_shell=False)
post_info = info(name)
if post_info['change'] != pre_info['change']:
return post_info['change'] == change | Sets the time at which the password expires (in seconds since the UNIX
epoch). See ``man 8 usermod`` on NetBSD and OpenBSD or ``man 8 pw`` on
FreeBSD.
A value of ``0`` sets the password to never expire.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_change username 1419980400 |
def communicate(self, job_ids = None):
"""Communicates with the SGE grid (using qstat) to see if jobs are still running."""
self.lock()
# iterate over all jobs
jobs = self.get_jobs(job_ids)
for job in jobs:
job.refresh()
if job.status in ('queued', 'executing', 'waiting') and job.queue_name != 'local':
status = qstat(job.id, context=self.context)
if len(status) == 0:
job.status = 'failure'
job.result = 70 # ASCII: 'F'
logger.warn("The job '%s' was not executed successfully (maybe a time-out happened). Please check the log files." % job)
for array_job in job.array:
if array_job.status in ('queued', 'executing'):
array_job.status = 'failure'
array_job.result = 70 # ASCII: 'F'
self.session.commit()
self.unlock() | Communicates with the SGE grid (using qstat) to see if jobs are still running. |
def LinShuReductionFactor(axiPot,R,sigmar,nonaxiPot=None,
k=None,m=None,OmegaP=None):
"""
NAME:
LinShuReductionFactor
PURPOSE:
Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation
INPUT:
axiPot - The background, axisymmetric potential
R - Cylindrical radius (can be Quantity)
sigmar - radial velocity dispersion of the population (can be Quantity)
Then either provide:
1) m= m in the perturbation's m x phi (number of arms for a spiral)
k= wavenumber (see Binney & Tremaine 2008)
OmegaP= pattern speed (can be Quantity)
2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber
OUTPUT:
reduction factor
HISTORY:
2014-08-23 - Written - Bovy (IAS)
"""
axiPot= flatten(axiPot)
from galpy.potential import omegac, epifreq
if nonaxiPot is None and (OmegaP is None or k is None or m is None):
raise IOError("Need to specify either nonaxiPot= or m=, k=, OmegaP= for LinShuReductionFactor")
elif not nonaxiPot is None:
OmegaP= nonaxiPot.OmegaP()
k= nonaxiPot.wavenumber(R)
m= nonaxiPot.m()
tepif= epifreq(axiPot,R)
s= m*(OmegaP-omegac(axiPot,R))/tepif
chi= sigmar**2.*k**2./tepif**2.
return (1.-s**2.)/nu.sin(nu.pi*s)\
*integrate.quad(lambda t: nu.exp(-chi*(1.+nu.cos(t)))\
*nu.sin(s*t)*nu.sin(t),
0.,nu.pi)[0] | NAME:
LinShuReductionFactor
PURPOSE:
Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation
INPUT:
axiPot - The background, axisymmetric potential
R - Cylindrical radius (can be Quantity)
sigmar - radial velocity dispersion of the population (can be Quantity)
Then either provide:
1) m= m in the perturbation's m x phi (number of arms for a spiral)
k= wavenumber (see Binney & Tremaine 2008)
OmegaP= pattern speed (can be Quantity)
2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber
OUTPUT:
reduction factor
HISTORY:
2014-08-23 - Written - Bovy (IAS) |
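The same reduction-factor integral can be evaluated standalone for illustrative values of s and chi, without the galpy potential machinery:
import numpy as np
from scipy import integrate
s, chi = 0.5, 1.0   # toy values of m*(OmegaP-Omega)/kappa and sigmar^2 k^2 / kappa^2
F = (1. - s**2) / np.sin(np.pi * s) * integrate.quad(
    lambda t: np.exp(-chi * (1. + np.cos(t))) * np.sin(s * t) * np.sin(t),
    0., np.pi)[0]
print(F)            # reduced response for these toy values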
def register_command(self, namespace, command, method):
"""
Registers the given command to the shell.
The namespace can be None, empty or "default"
:param namespace: The command name space.
:param command: The shell name of the command
:param method: The method to call
:return: True if the method has been registered, False if it was
already known or invalid
"""
if method is None:
self._logger.error("No method given for %s.%s", namespace, command)
return False
# Store everything in lower case
namespace = (namespace or "").strip().lower()
command = (command or "").strip().lower()
if not namespace:
namespace = DEFAULT_NAMESPACE
if not command:
self._logger.error("No command name given")
return False
if namespace not in self._commands:
space = self._commands[namespace] = {}
else:
space = self._commands[namespace]
if command in space:
self._logger.error(
"Command already registered: %s.%s", namespace, command
)
return False
space[command] = method
return True | Registers the given command to the shell.
The namespace can be None, empty or "default"
:param namespace: The command name space.
:param command: The shell name of the command
:param method: The method to call
:return: True if the method has been registered, False if it was
already known or invalid |
def verify(self):
"""
## FOR DEBUGGING ONLY ##
Checks the table to ensure that the invariants are held.
"""
if self.all_intervals:
## top_node.all_children() == self.all_intervals
try:
assert self.top_node.all_children() == self.all_intervals
except AssertionError as e:
print(
'Error: the tree and the membership set are out of sync!'
)
tivs = set(self.top_node.all_children())
print('top_node.all_children() - all_intervals:')
try:
pprint
except NameError:
from pprint import pprint
pprint(tivs - self.all_intervals)
print('all_intervals - top_node.all_children():')
pprint(self.all_intervals - tivs)
raise e
## All members are Intervals
for iv in self:
assert isinstance(iv, Interval), (
"Error: Only Interval objects allowed in IntervalTree:"
" {0}".format(iv)
)
## No null intervals
for iv in self:
assert not iv.is_null(), (
"Error: Null Interval objects not allowed in IntervalTree:"
" {0}".format(iv)
)
## Reconstruct boundary_table
bound_check = {}
for iv in self:
if iv.begin in bound_check:
bound_check[iv.begin] += 1
else:
bound_check[iv.begin] = 1
if iv.end in bound_check:
bound_check[iv.end] += 1
else:
bound_check[iv.end] = 1
## Reconstructed boundary table (bound_check) ==? boundary_table
assert set(self.boundary_table.keys()) == set(bound_check.keys()),\
'Error: boundary_table is out of sync with ' \
'the intervals in the tree!'
# For efficiency reasons this should be iteritems in Py2, but we
# don't care much for efficiency in debug methods anyway.
for key, val in self.boundary_table.items():
assert bound_check[key] == val, \
'Error: boundary_table[{0}] should be {1},' \
' but is {2}!'.format(
key, bound_check[key], val)
## Internal tree structure
self.top_node.verify(set())
else:
## Verify empty tree
assert not self.boundary_table, \
"Error: boundary table should be empty!"
assert self.top_node is None, \
"Error: top_node isn't None!" | ## FOR DEBUGGING ONLY ##
Checks the table to ensure that the invariants are held. |
def to_table(result):
''' normalize raw result to table '''
max_count = 20
table, count = [], 0
for role, envs_topos in result.items():
for env, topos in envs_topos.items():
for topo in topos:
count += 1
if count > max_count:
continue
else:
table.append([role, env, topo])
header = ['role', 'env', 'topology']
rest_count = 0 if count <= max_count else count - max_count
return table, header, rest_count | normalize raw result to table |
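Assuming the normaliser above is in scope, an illustrative input and its flattened output (names are made up):
result = {'admin': {'prod': ['topo-a', 'topo-b']},
          'dev': {'staging': ['topo-c']}}
table, header, rest = to_table(result)
print(header)  # ['role', 'env', 'topology']
print(table)   # [['admin', 'prod', 'topo-a'], ['admin', 'prod', 'topo-b'], ['dev', 'staging', 'topo-c']]
print(rest)    # 0 (fewer than 20 rows, so nothing is truncated)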
def get_object_metadata(self, container, obj, prefix=None):
"""
Returns the metadata for the specified object as a dict.
"""
return self._manager.get_object_metadata(container, obj, prefix=prefix) | Returns the metadata for the specified object as a dict. |
def get(self, endpoint, params=None):
"""
Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified upper
:rtype: dict
"""
response = self.get_response(method='GET', endpoint=endpoint, params=params)
resp = self.decode(response=response)
if '_status' not in resp: # pragma: no cover - need specific backend tests
resp['_status'] = u'OK' # TODO: Sure??
return resp | Get items or item in alignak backend
If an error occurs, a BackendException is raised.
This method builds a response as a dictionary that always contains: _items and _status::
{
u'_items': [
...
],
u'_status': u'OK'
}
:param endpoint: endpoint (API URL) relative from root endpoint
:type endpoint: str
:param params: parameters for the backend API
:type params: dict
:return: dictionary as specified upper
:rtype: dict |
def generate(self):
"""
:return: A new token
:rtype: str
"""
random_data = os.urandom(100)
hash_gen = hashlib.new("sha512")
hash_gen.update(random_data)
return hash_gen.hexdigest()[:self.token_length] | :return: A new token
:rtype: str |
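The same recipe inlined with the standard library; a token_length of 32 is assumed for illustration.
import hashlib
import os
token_length = 32
digest = hashlib.new("sha512")
digest.update(os.urandom(100))
token = digest.hexdigest()[:token_length]
print(len(token))  # 32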
def get_transfers(self, start=0, stop=None, inclusion_states=False):
# type: (int, Optional[int], bool) -> dict
"""
Returns all transfers associated with the seed.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:return:
Dict with the following structure::
{
'bundles': List[Bundle],
Matching bundles, sorted by tail transaction
timestamp.
This value is always a list, even if only one
bundle was found.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#gettransfers
"""
return extended.GetTransfersCommand(self.adapter)(
seed=self.seed,
start=start,
stop=stop,
inclusionStates=inclusion_states,
) | Returns all transfers associated with the seed.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:return:
Dict with the following structure::
{
'bundles': List[Bundle],
Matching bundles, sorted by tail transaction
timestamp.
This value is always a list, even if only one
bundle was found.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#gettransfers |
def _expand_list(names):
""" Do a wildchar name expansion of object names in a list and return expanded list.
The objects are expected to exist as this is used for copy sources or delete targets.
Currently we support wildchars in the key name only.
"""
if names is None:
names = []
elif isinstance(names, basestring):
names = [names]
results = [] # The expanded list.
objects = {} # Cached contents of buckets; used for matching.
for name in names:
bucket, key = google.datalab.storage._bucket.parse_name(name)
results_len = len(results) # If we fail to add any we add name and let caller deal with it.
if bucket:
if not key:
# Just a bucket; add it.
results.append('gs://%s' % bucket)
elif google.datalab.storage.Object(bucket, key).exists():
results.append('gs://%s/%s' % (bucket, key))
else:
# Expand possible key values.
if bucket not in objects and key[:1] == '*':
# We need the full list; cache a copy for efficiency.
objects[bucket] = [obj.metadata.name
for obj in list(google.datalab.storage.Bucket(bucket).objects())]
# If we have a cached copy use it
if bucket in objects:
candidates = objects[bucket]
# else we have no cached copy but can use prefix matching which is more efficient than
# getting the full contents.
else:
# Get the non-wildchar prefix.
match = re.search('\?|\*|\[', key)
prefix = key
if match:
prefix = key[0:match.start()]
candidates = [obj.metadata.name
for obj in google.datalab.storage.Bucket(bucket).objects(prefix=prefix)]
for obj in candidates:
if fnmatch.fnmatch(obj, key):
results.append('gs://%s/%s' % (bucket, obj))
# If we added no matches, add the original name and let caller deal with it.
if len(results) == results_len:
results.append(name)
return results | Do a wildchar name expansion of object names in a list and return expanded list.
The objects are expected to exist as this is used for copy sources or delete targets.
Currently we support wildchars in the key name only. |
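A sketch of the two matching steps used above (prefix extraction and wildchar matching), on made-up object names:
import fnmatch
import re
key = 'logs/2019-*.csv'
match = re.search(r'\?|\*|\[', key)
prefix = key[:match.start()] if match else key
print(prefix)                                    # logs/2019-
print(fnmatch.fnmatch('logs/2019-01.csv', key))  # True
print(fnmatch.fnmatch('logs/2020-01.csv', key))  # False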