text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def _load_meta(path):
"""
Load metadata about this package from the file pypi.json.
:param path: The path to pypi.json
:return: Dictionary of key value pairs.
"""
with open(path) as f:
meta = load(f, encoding='utf-8')
meta = {k: v.decode('utf-8') if isinstance(v, bytes) else v
for k, v in meta.items()}
src_dir = abspath(dirname(path))
if 'requirements' in meta and \
str(meta['requirements']).startswith('file://'):
req_path = str(meta['requirements'])[7:]
req_path = join(src_dir, req_path)
if exists(req_path):
reqs = open(req_path, 'r').read().strip().split('\n')
reqs = [req.strip() for req in reqs if 'git+' not in req]
meta['requirements'] = reqs
else:
meta['requirements'] = ''
if 'long_description' in meta and \
str(meta['long_description']).startswith('file://'):
readme_path = str(meta['long_description'])[7:]
readme_path = join(src_dir, readme_path)
if exists(readme_path):
readme = open(readme_path, 'r').read().strip()
meta['long_description'] = readme
else:
meta['long_description'] = ''
return meta | 0.000745 |
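As a rough illustration of the 'file://' indirection handled above (hypothetical file names and directory; `load` is assumed to be `json.load`), the resolution amounts to:
# Hypothetical sketch: resolve a 'file://' value relative to the directory of pypi.json.
import json
from os.path import join

meta = json.loads('{"requirements": "file://requirements.txt"}')
src_dir = "/path/to/package"  # assumed directory containing pypi.json
req_path = join(src_dir, str(meta['requirements'])[7:])  # strip the 'file://' prefix
print(req_path)  # /path/to/package/requirements.txt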
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
if model['type'] != 'notebook':
return super(TextFileContentsManager, self).save(model, path)
nbk = model['content']
try:
metadata = nbk.get('metadata')
rearrange_jupytext_metadata(metadata)
jupytext_metadata = metadata.setdefault('jupytext', {})
jupytext_formats = jupytext_metadata.get('formats') or self.default_formats(path)
if not jupytext_formats:
text_representation = jupytext_metadata.get('text_representation', {})
ext = os.path.splitext(path)[1]
fmt = {'extension': ext}
if ext == text_representation.get('extension') and text_representation.get('format_name'):
fmt['format_name'] = text_representation.get('format_name')
jupytext_formats = [fmt]
jupytext_formats = long_form_multiple_formats(jupytext_formats, metadata)
# Set preferred formats if no format name is given yet
jupytext_formats = [preferred_format(fmt, self.preferred_jupytext_formats_save) for fmt in jupytext_formats]
base, fmt = find_base_path_and_format(path, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
self.set_default_format_options(jupytext_metadata)
if not jupytext_metadata:
metadata.pop('jupytext')
# Save as ipynb first
return_value = None
value = None
for fmt in jupytext_formats[::-1]:
if fmt['extension'] != '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
self.log.info("Saving %s", os.path.basename(alt_path))
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# And then to the other formats, in reverse order so that
# the first format is the most recent
for fmt in jupytext_formats[::-1]:
if fmt['extension'] == '.ipynb':
continue
alt_path = full_path(base, fmt)
self.create_prefix_dir(alt_path, fmt)
if 'format_name' in fmt and fmt['extension'] not in ['.Rmd', '.md']:
self.log.info("Saving %s in format %s:%s",
os.path.basename(alt_path), fmt['extension'][1:], fmt['format_name'])
else:
self.log.info("Saving %s", os.path.basename(alt_path))
with mock.patch('nbformat.writes', _jupytext_writes(fmt)):
value = super(TextFileContentsManager, self).save(model, alt_path)
if alt_path == path:
return_value = value
# Update modified timestamp to match that of the pair #207
return_value['last_modified'] = value['last_modified']
return return_value
except Exception as err:
raise HTTPError(400, str(err)) | 0.003362 |
def GetFormatSpecification(self):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification or None if the format cannot
be defined by a specification object.
"""
format_specification = specification.FormatSpecification(
self.type_indicator)
# TODO: add support for signature chains so that we add the 'BZ' at
# offset 0.
# BZIP2 compressed stream signature.
format_specification.AddNewSignature(b'\x31\x41\x59\x26\x53\x59', offset=4)
return format_specification | 0.001795 |
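The six bytes registered above are the bzip2 block-header magic (0x314159265359); a quick standard-library check against a real bzip2 stream (not part of the original module) confirms the offsets:
# Standalone verification sketch using only the standard library.
import bz2

data = bz2.compress(b'example')
print(data[:3])                                   # b'BZh' -- the 'BZ' magic (plus 'h') at offset 0
print(data[4:10] == b'\x31\x41\x59\x26\x53\x59')  # True  -- the block signature at offset 4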
def finish_displayhook(self):
"""Finish up all displayhook activities."""
io.stdout.write(self.shell.separate_out2)
io.stdout.flush() | 0.012739 |
def to_mask(self, method='exact', subpixels=5):
"""
Return a list of `~photutils.ApertureMask` objects, one for each
aperture position.
Parameters
----------
method : {'exact', 'center', 'subpixel'}, optional
The method used to determine the overlap of the aperture on
the pixel grid. Not all options are available for all
aperture types. Note that the more precise methods are
generally slower. The following methods are available:
* ``'exact'`` (default):
The exact fractional overlap of the aperture and
each pixel is calculated. The returned mask will
contain values between 0 and 1.
* ``'center'``:
A pixel is considered to be entirely in or out of the
aperture depending on whether its center is in or out
of the aperture. The returned mask will contain
values only of 0 (out) and 1 (in).
* ``'subpixel'``:
A pixel is divided into subpixels (see the
``subpixels`` keyword), each of which are considered
to be entirely in or out of the aperture depending on
whether its center is in or out of the aperture. If
``subpixels=1``, this method is equivalent to
``'center'``. The returned mask will contain values
between 0 and 1.
subpixels : int, optional
For the ``'subpixel'`` method, resample pixels by this factor
in each dimension. That is, each pixel is divided into
``subpixels ** 2`` subpixels.
Returns
-------
mask : list of `~photutils.ApertureMask`
A list of aperture mask objects.
"""
use_exact, subpixels = self._translate_mask_mode(method, subpixels)
if hasattr(self, 'a'):
a = self.a
b = self.b
elif hasattr(self, 'a_in'): # annulus
a = self.a_out
b = self.b_out
b_in = self.a_in * self.b_out / self.a_out
else:
raise ValueError('Cannot determine the aperture shape.')
masks = []
for bbox, edges in zip(self.bounding_boxes, self._centered_edges):
ny, nx = bbox.shape
mask = elliptical_overlap_grid(edges[0], edges[1], edges[2],
edges[3], nx, ny, a, b, self.theta,
use_exact, subpixels)
# subtract the inner ellipse for an annulus
if hasattr(self, 'a_in'):
mask -= elliptical_overlap_grid(edges[0], edges[1], edges[2],
edges[3], nx, ny, self.a_in,
b_in, self.theta, use_exact,
subpixels)
masks.append(ApertureMask(mask, bbox))
return masks | 0.000646 |
def revoke_sudo_privileges(request):
"""
Revoke sudo privileges from a request explicitly
"""
request._sudo = False
if COOKIE_NAME in request.session:
del request.session[COOKIE_NAME] | 0.004739 |
def byte_adaptor(fbuffer):
""" provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
"""
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer | 0.002469 |
def unique_justseen(iterable, key=None):
"""Yields elements in order, ignoring serial duplicates
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
return map(next, map(operator.itemgetter(1), groupby(iterable, key))) | 0.002732 |
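A self-contained version of the recipe above, together with the imports it assumes (`operator` and `itertools.groupby`):
import operator
from itertools import groupby

def unique_justseen(iterable, key=None):
    # keep only the first element of each run of equal (keyed) elements
    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))

print(list(unique_justseen('AAAABBBCCDAABBB')))     # ['A', 'B', 'C', 'D', 'A', 'B']
print(list(unique_justseen('ABBCcAD', str.lower)))  # ['A', 'B', 'C', 'A', 'D']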
def remove_task(cls, task):
"""
:param Task|callable task: Remove 'task' from the list of tasks to run periodically
"""
with cls._lock:
if not isinstance(task, Task):
task = cls.resolved_task(task)
if task:
cls.tasks.remove(task)
cls.tasks.sort() | 0.008621 |
def computePerturbedFreeEnergies(self, u_ln, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10):
"""Compute the free energies for a new set of states.
Here, we desire the free energy differences among a set of new states, as well as the uncertainty estimates in these differences.
Parameters
----------
u_ln : np.ndarray, float, shape=(L, Nmax)
u_ln[l,n] is the reduced potential energy of uncorrelated
configuration n evaluated at new state l. Can be completely independent of the original number of states.
compute_uncertainty : bool, optional, default=True
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
Returns
-------
result_vals : dictionary
Possible keys in the result_vals dictionary:
'Delta_f' : np.ndarray, float, shape=(L, L)
result_vals['Delta_f'] = f_j - f_i, the dimensionless free energy difference between new states i and j
'dDelta_f' : np.ndarray, float, shape=(L, L)
result_vals['dDelta_f'] is the estimated statistical uncertainty in result_vals['Delta_f']
or not included if `compute_uncertainty` is False
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> results = mbar.computePerturbedFreeEnergies(u_kn)
"""
# Convert to np matrix.
u_ln = np.array(u_ln, dtype=np.float64)
# Get the dimensions of the matrix of reduced potential energies, and convert if necessary
if len(np.shape(u_ln)) == 3:
u_ln = kln_to_kn(u_ln, N_k=self.N_k)
[L, N] = u_ln.shape
# Check dimensions.
if (N < self.N):
raise DataError("There seems to be too few samples in u_kn. You must evaluate at the new potential with all of the samples used originally.")
state_list = np.arange(L) # need to get it into the correct shape
A_in = np.array([0])
inner_results = self.computeExpectationsInner(A_in, u_ln, state_list,
return_theta=compute_uncertainty,
uncertainty_method=uncertainty_method,
warning_cutoff=warning_cutoff)
Deltaf_ij, dDeltaf_ij = None, None
f_k = np.matrix(inner_results['f'])
result_vals = dict()
result_vals['Delta_f'] = np.array(f_k - f_k.transpose())
if (compute_uncertainty):
result_vals['dDelta_f'] = self._ErrorOfDifferences(inner_results['Theta'],warning_cutoff=warning_cutoff)
# Return matrix of free energy differences and uncertainties.
return result_vals | 0.005405 |
def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'):
"""
Method that fits a model with a particular estimation routine.
Parameters
----------
ini_betas : array
k*1, initial coefficient values, including constant.
Default is None, which calculates initial values during
estimation.
tol : float
Tolerance for estimation convergence.
max_iter : integer
Maximum number of iterations if convergence not
achieved.
solve : string
Technique to solve MLE equations.
'iwls' = iteratively (re)weighted least squares (default)
"""
self.fit_params['ini_betas'] = ini_betas
self.fit_params['tol'] = tol
self.fit_params['max_iter'] = max_iter
self.fit_params['solve'] = solve
if solve.lower() == 'iwls':
params, predy, w, n_iter = iwls(
self.y, self.X, self.family, self.offset, self.y_fix, ini_betas, tol, max_iter)
self.fit_params['n_iter'] = n_iter
return GLMResults(self, params.flatten(), predy, w) | 0.003113 |
def list_of_all_href(self,html):
'''
Return all hyperlinks found on the mr-jatt page, for download
'''
soup=BeautifulSoup(html)
links=[]
a_list=soup.findAll('a','touch')
for x in xrange(len(a_list)-1):
link = a_list[x].get('href')
name = a_list[x]
name = str(name)
name=re.sub(r'<a.*/>|<span.*">|</span>|</a>|<a.*html">|<font.*">|</font>','',name)
name=re.sub(r'^[0-9]+\.','',name)
links.append([link,name])
#quit()
return links | 0.067511 |
def _set_all_tables(self, schema, **kwargs):
"""
Setting a table can fail when it has a foreign key to a table that doesn't
exist, so this method goes through all foreign-key references and makes sure
the referenced tables exist first
"""
with self.transaction(**kwargs) as connection:
kwargs['connection'] = connection
# go through and make sure all foreign key referenced tables exist
for field_name, field_val in schema.fields.items():
s = field_val.schema
if s:
self._set_all_tables(s, **kwargs)
# now that we know all fk tables exist, create this table
self.set_table(schema, **kwargs)
return True | 0.007692 |
def getparser(use_datetime=0):
"""getparser() -> parser, unmarshaller
Create an instance of the fastest available parser, and attach it
to an unmarshalling object. Return both objects.
"""
if use_datetime and not datetime:
raise ValueError("the datetime module is not available")
if FastParser and FastUnmarshaller:
if use_datetime:
mkdatetime = _datetime_type
else:
mkdatetime = _datetime
target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
parser = FastParser(target)
else:
target = Unmarshaller(use_datetime=use_datetime)
if FastParser:
parser = FastParser(target)
elif SgmlopParser:
parser = SgmlopParser(target)
elif ExpatParser:
parser = ExpatParser(target)
else:
parser = SlowParser(target)
return parser, target | 0.001087 |
def get_gradebooks(self):
"""Pass through to provider GradebookLookupSession.get_gradebooks"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('gradebook_lookup_session').get_gradebooks()
cat_list = []
for cat in catalogs:
cat_list.append(Gradebook(self._provider_manager, cat, self._runtime, self._proxy))
return GradebookList(cat_list) | 0.008197 |
def index():
""" Display productpage with normal user and test user buttons"""
global productpage
table = json2html.convert(json = json.dumps(productpage),
table_attributes="class=\"table table-condensed table-bordered table-hover\"")
return render_template('index.html', serviceTable=table) | 0.011799 |
def _get_values(self, rdn):
"""
Returns a dict of prepped values contained in an RDN
:param rdn:
A RelativeDistinguishedName object
:return:
A dict object with unicode strings of NameTypeAndValue value field
values that have been prepped for comparison
"""
output = {ntv['type'].native: ntv.prepped_value for ntv in rdn}
return output | 0.006536 |
def get_trending_daily_not_starred(self):
"""Gets trending repositories NOT starred by user
:return: List of daily-trending repositories which are not starred
"""
trending_daily = self.get_trending_daily() # repos trending daily
starred_repos = self.get_starred_repos() # repos starred by user
repos_list = []
for repo in trending_daily:
if repo not in starred_repos:
repos_list.append(repo)
return repos_list | 0.00396 |
def _get_bounds(mapper, values):
"""
Extract first and second value from tuples of mapped bins.
"""
array = np.array([mapper.get(x) for x in values])
return array[:, 0], array[:, 1] | 0.009009 |
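A tiny standalone illustration of the same extraction, using a hypothetical mapper of bin labels to (low, high) tuples:
import numpy as np

mapper = {'a': (0, 1), 'b': (1, 2)}  # hypothetical bin edges per label
values = ['a', 'b', 'a']
array = np.array([mapper.get(x) for x in values])
lows, highs = array[:, 0], array[:, 1]
print(lows)   # [0 1 0]
print(highs)  # [1 2 1]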
def _microtime():
'''
Return a Unix timestamp as a string of digits
:return:
'''
val1, val2 = math.modf(time.time())
val2 = int(val2)
return '{0:f}{1}'.format(val1, val2) | 0.005051 |
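Run standalone, the same body yields the fractional part first with the integer seconds appended:
# Standalone rerun of the two lines above (stdlib only).
import math
import time

val1, val2 = math.modf(time.time())
print('{0:f}{1}'.format(val1, int(val2)))  # e.g. '0.5324171700000000'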
def uri(ctx, uri, touch, force):
"""
Add a new credential from URI.
Use a URI to add a new credential to your YubiKey.
"""
if not uri:
while True:
uri = click.prompt('Enter an OATH URI', err=True)
try:
uri = CredentialData.from_uri(uri)
break
except Exception as e:
click.echo(e)
ensure_validated(ctx)
data = uri
# Steam is a special case where we allow the otpauth
# URI to contain a 'digits' value of '5'.
if data.digits == 5 and data.issuer == 'Steam':
data.digits = 6
data.touch = touch
_add_cred(ctx, data, force=force) | 0.001475 |
def find_item_by_id(self, object_id):
"""Get item based on its id or uuid
:param object_id:
:type object_id: int | str
:return:
:rtype: alignak.objects.item.Item | None
"""
# Item id may be an item
if isinstance(object_id, Item):
return object_id
# Item id should be a uuid string
if not isinstance(object_id, string_types):
logger.debug("Find an item by id, object_id is not int nor string: %s", object_id)
return object_id
for items in [self.hosts, self.services, self.actions, self.checks, self.hostgroups,
self.servicegroups, self.contacts, self.contactgroups]:
if object_id in items:
return items[object_id]
# raise AttributeError("Item with id %s not found" % object_id) # pragma: no cover,
logger.error("Item with id %s not found", str(object_id)) # pragma: no cover,
return None | 0.006061 |
def selected_indexes(self, ):
"""Return the current index
:returns: the current index in a list
:rtype: list of QtCore.QModelIndex
:raises: None
"""
i = self.model().index(self.currentIndex(), 0, self.rootModelIndex())
return [i] | 0.006993 |
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None | 0.007874 |
def start_slaves(slave_dir,exe_rel_path,pst_rel_path,num_slaves=None,slave_root="..",
port=4004,rel_path=None,local=True,cleanup=True,master_dir=None,
verbose=False,silent_master=False):
""" start a group of pest(++) slaves on the local machine
Parameters
----------
slave_dir : str
the path to a complete set of input files
exe_rel_path : str
the relative path to the pest(++) executable from within the slave_dir
pst_rel_path : str
the relative path to the pst file from within the slave_dir
num_slaves : int
number of slaves to start. defaults to number of cores
slave_root : str
the root to make the new slave directories in
rel_path: str
the relative path to where pest(++) should be run from within the
slave_dir, defaults to the uppermost level of the slave dir
local: bool
flag for using "localhost" instead of hostname on slave command line
cleanup: bool
flag to remove slave directories once processes exit
master_dir: str
name of directory for master instance. If master_dir
exists, then it will be removed. If master_dir is None,
no master instance will be started
verbose : bool
flag to echo useful information to stdout
Note
----
if all slaves (and optionally master) exit gracefully, then the slave
dirs will be removed unless cleanup is false
Example
-------
``>>>import pyemu``
start 10 slaves using the directory "template" as the base case and
also start a master instance in a directory "master".
``>>>pyemu.helpers.start_slaves("template","pestpp","pest.pst",10,master_dir="master")``
"""
warnings.warn("start_slaves has moved to pyemu.os_utils",PyemuWarning)
pyemu.os_utils.start_slaves(slave_dir=slave_dir,exe_rel_path=exe_rel_path,pst_rel_path=pst_rel_path
,num_slaves=num_slaves,slave_root=slave_root,port=port,rel_path=rel_path,
local=local,cleanup=cleanup,master_dir=master_dir,verbose=verbose,
silent_master=silent_master) | 0.013376 |
def run(self):
'''
Execute the salt command line
'''
import salt.client
self.parse_args()
if self.config['log_level'] not in ('quiet', ):
# Setup file logging!
self.setup_logfile_logger()
verify_log(self.config)
try:
# We don't need to bail on config file permission errors
# if the CLI process is run with the -a flag
skip_perm_errors = self.options.eauth != ''
self.local_client = salt.client.get_local_client(
self.get_config_file_path(),
skip_perm_errors=skip_perm_errors,
auto_reconnect=True)
except SaltClientError as exc:
self.exit(2, '{0}\n'.format(exc))
return
if self.options.batch or self.options.static:
# _run_batch() will handle all output and
# exit with the appropriate error condition
# Execution will not continue past this point
# in batch mode.
self._run_batch()
return
if self.options.preview_target:
minion_list = self._preview_target()
self._output_ret(minion_list, self.config.get('output', 'nested'))
return
if self.options.timeout <= 0:
self.options.timeout = self.local_client.opts['timeout']
kwargs = {
'tgt': self.config['tgt'],
'fun': self.config['fun'],
'arg': self.config['arg'],
'timeout': self.options.timeout,
'show_timeout': self.options.show_timeout,
'show_jid': self.options.show_jid}
if 'token' in self.config:
import salt.utils.files
try:
with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
kwargs['key'] = fp_.readline()
except IOError:
kwargs['token'] = self.config['token']
kwargs['delimiter'] = self.options.delimiter
if self.selected_target_option:
kwargs['tgt_type'] = self.selected_target_option
else:
kwargs['tgt_type'] = 'glob'
# If batch_safe_limit is set, check minions matching target and
# potentially switch to batch execution
if self.options.batch_safe_limit > 1:
if len(self._preview_target()) >= self.options.batch_safe_limit:
salt.utils.stringutils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.')
self.options.batch = self.options.batch_safe_size
self._run_batch()
return
if getattr(self.options, 'return'):
kwargs['ret'] = getattr(self.options, 'return')
if getattr(self.options, 'return_config'):
kwargs['ret_config'] = getattr(self.options, 'return_config')
if getattr(self.options, 'return_kwargs'):
kwargs['ret_kwargs'] = yamlify_arg(
getattr(self.options, 'return_kwargs'))
if getattr(self.options, 'module_executors'):
kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors'))
if getattr(self.options, 'executor_opts'):
kwargs['executor_opts'] = yamlify_arg(getattr(self.options, 'executor_opts'))
if getattr(self.options, 'metadata'):
kwargs['metadata'] = yamlify_arg(
getattr(self.options, 'metadata'))
# If using eauth and a token hasn't already been loaded into
# kwargs, prompt the user to enter auth credentials
if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
# This is expensive. Don't do it unless we need to.
import salt.auth
resolver = salt.auth.Resolver(self.config)
res = resolver.cli(self.options.eauth)
if self.options.mktoken and res:
tok = resolver.token_cli(
self.options.eauth,
res
)
if tok:
kwargs['token'] = tok.get('token', '')
if not res:
sys.stderr.write('ERROR: Authentication failed\n')
sys.exit(2)
kwargs.update(res)
kwargs['eauth'] = self.options.eauth
if self.config['async']:
jid = self.local_client.cmd_async(**kwargs)
salt.utils.stringutils.print_cli('Executed command with job ID: {0}'.format(jid))
return
# local will be None when there was an error
if not self.local_client:
return
retcodes = []
errors = []
try:
if self.options.subset:
cmd_func = self.local_client.cmd_subset
kwargs['sub'] = self.options.subset
kwargs['cli'] = True
else:
cmd_func = self.local_client.cmd_cli
if self.options.progress:
kwargs['progress'] = True
self.config['progress'] = True
ret = {}
for progress in cmd_func(**kwargs):
out = 'progress'
try:
self._progress_ret(progress, out)
except LoaderError as exc:
raise SaltSystemExit(exc)
if 'return_count' not in progress:
ret.update(progress)
self._progress_end(out)
self._print_returns_summary(ret)
elif self.config['fun'] == 'sys.doc':
ret = {}
out = ''
for full_ret in self.local_client.cmd_cli(**kwargs):
ret_, out, retcode = self._format_ret(full_ret)
ret.update(ret_)
self._output_ret(ret, out, retcode=retcode)
else:
if self.options.verbose:
kwargs['verbose'] = True
ret = {}
for full_ret in cmd_func(**kwargs):
try:
ret_, out, retcode = self._format_ret(full_ret)
retcodes.append(retcode)
self._output_ret(ret_, out, retcode=retcode)
ret.update(full_ret)
except KeyError:
errors.append(full_ret)
# Returns summary
if self.config['cli_summary'] is True:
if self.config['fun'] != 'sys.doc':
if self.options.output is None:
self._print_returns_summary(ret)
self._print_errors_summary(errors)
# NOTE: Return code is set here based on if all minions
# returned 'ok' with a retcode of 0.
# This is the final point before the 'salt' cmd returns,
# which is why we set the retcode here.
if not all(exit_code == salt.defaults.exitcodes.EX_OK for exit_code in retcodes):
sys.stderr.write('ERROR: Minions returned with non-zero exit code\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except (AuthenticationError,
AuthorizationError,
SaltInvocationError,
EauthAuthenticationError,
SaltClientError) as exc:
ret = six.text_type(exc)
self._output_ret(ret, '', retcode=1) | 0.001322 |
def _execute_with_retries(conn, function, **kwargs):
'''
Retry if we're rate limited by AWS or blocked by another call.
Give up and return error message if resource not found or argument is invalid.
conn
The connection established by the calling method via _get_conn()
function
The function to call on conn. i.e. create_stream
**kwargs
Any kwargs required by the above function, with their keywords
i.e. StreamName=stream_name
Returns:
The result dict with the HTTP response and JSON data if applicable
as 'result', or an error as 'error'
CLI example::
salt myminion boto_kinesis._execute_with_retries existing_conn function_name function_kwargs
'''
r = {}
max_attempts = 18
max_retry_delay = 10
for attempt in range(max_attempts):
log.info("attempt: %s function: %s", attempt, function)
try:
fn = getattr(conn, function)
r['result'] = fn(**kwargs)
return r
except botocore.exceptions.ClientError as e:
error_code = e.response['Error']['Code']
if "LimitExceededException" in error_code or "ResourceInUseException" in error_code:
# could be rate limited by AWS or another command is blocking,
# retry with exponential backoff
log.debug("Retrying due to AWS exception", exc_info=True)
time.sleep(_jittered_backoff(attempt, max_retry_delay))
else:
# ResourceNotFoundException or InvalidArgumentException
r['error'] = e.response['Error']
log.error(r['error'])
r['result'] = None
return r
r['error'] = "Tried to execute function {0} {1} times, but was unable".format(function, max_attempts)
log.error(r['error'])
return r | 0.002654 |
def exit(self, code=0, message=None):
"""
Exit the console program sanely.
"""
## If we have a parser, use it to exit
if self._parser:
if code > 0:
self.parser.error(message)
else:
self.parser.exit(code, message)
## Else we are exiting before parser creation
else:
if message is not None:
if code > 0:
sys.stderr.write(message)
else:
sys.stdout.write(message)
sys.exit(code)
## If we're here we didn't exit for some reason?
raise Exception("Unable to exit the %s" % self.__class__.__name__) | 0.006974 |
def spline_base1d(length, nr_knots = 20, spline_order = 5, marginal = None):
"""Computes a 1D spline basis
Input:
length: int
length of each basis
nr_knots: int
Number of knots, i.e. number of basis functions.
spline_order: int
Order of the splines.
marginal: array, optional
Estimate of the marginal distribution of the input to be fitted.
If given, it is used to determine the positioning of knots, each
knot will cover the same amount of probability mass. If not given,
knots are equally spaced.
"""
if marginal is None:
knots = augknt(np.linspace(0,length+1, nr_knots), spline_order)
else:
knots = knots_from_marginal(marginal, nr_knots, spline_order)
x_eval = np.arange(1,length+1).astype(float)
Bsplines = spcol(x_eval,knots,spline_order)
return Bsplines, knots | 0.016824 |
def quickRPCServer(provider, prefix, target,
maxsize=20,
workers=1,
useenv=True, conf=None, isolate=False):
"""Run an RPC server in the current thread
Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
If workers>1 then calls are handled concurrently by a pool of worker threads.
Requires NTURI style argument encoding.
:param str provider: A provider name. Must be unique in this process.
:param str prefix: PV name prefix. Along with method names, must be globally unique.
:param target: The object which is exporting methods. (use the :func:`rpc` decorator)
:param int maxsize: Number of pending RPC calls to be queued.
:param int workers: Number of worker threads (default 1)
:param useenv: Passed to :class:`~p4p.server.Server`
:param conf: Passed to :class:`~p4p.server.Server`
:param isolate: Passed to :class:`~p4p.server.Server`
"""
from p4p.server import Server
import time
queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
threads = []
server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
with server, queue:
while True:
time.sleep(10.0) | 0.005109 |
def recomb_probability(cM, method="kosambi"):
"""
<http://statgen.ncsu.edu/qtlcart/manual/node46.html>
>>> recomb_probability(1)
0.009998666879965463
>>> recomb_probability(100)
0.48201379003790845
>>> recomb_probability(10000)
0.5
"""
assert method in ("kosambi", "haldane")
d = cM / 100.
if method == "kosambi":
e4d = exp(4 * d)
return (e4d - 1) / (e4d + 1) / 2
elif method == "haldane":
return (1 - exp(-2 * d)) / 2 | 0.00202 |
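For reference, with d = cM/100 the two map functions implemented above are:
r_{\text{Kosambi}}(d) = \frac{e^{4d} - 1}{2\,(e^{4d} + 1)}, \qquad r_{\text{Haldane}}(d) = \frac{1 - e^{-2d}}{2}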
def _get_data(self, p_p_resource_id, start_date=None, end_date=None):
"""Get data."""
data = {
'_' + REQ_PART + '_dateDebut': start_date,
'_' + REQ_PART + '_dateFin': end_date
}
params = {
'p_p_id': REQ_PART,
'p_p_lifecycle': 2,
'p_p_state': 'normal',
'p_p_mode': 'view',
'p_p_resource_id': p_p_resource_id,
'p_p_cacheability': 'cacheLevelPage',
'p_p_col_id': 'column-1',
'p_p_col_pos': 1,
'p_p_col_count': 3
}
try:
raw_res = self._session.post(DATA_URL,
data=data,
params=params,
allow_redirects=False,
timeout=self._timeout)
if 300 <= raw_res.status_code < 400:
raw_res = self._session.post(DATA_URL,
data=data,
params=params,
allow_redirects=False,
timeout=self._timeout)
except OSError as e:
raise PyLinkyError("Could not access enedis.fr: " + str(e))
if raw_res.text == "":
raise PyLinkyError("No data")
if 302 == raw_res.status_code and "/messages/maintenance.html" in raw_res.text:
raise PyLinkyError("Site in maintenance")
try:
json_output = raw_res.json()
except (OSError, json.decoder.JSONDecodeError, simplejson.errors.JSONDecodeError) as e:
raise PyLinkyError("Impossible to decode response: " + str(e) + "\nResponse was: " + str(raw_res.text))
if json_output.get('etat').get('valeur') == 'erreur':
raise PyLinkyError("Enedis.fr answered with an error: " + str(json_output))
return json_output.get('graphe') | 0.00297 |
def autoscan():
"""autoscan will check all of the serial ports to see if they have
a matching VID:PID for a MicroPython board.
"""
for port in serial.tools.list_ports.comports():
if is_micropython_usb_device(port):
connect_serial(port[0]) | 0.00361 |
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
"""Gets the part of speech name for *code*.
:param str code: The part of speech code to lookup, e.g. ``'nsf'``.
:param str name: Which part of speech name to include in the output. Must
be one of ``'parent'``, ``'child'``, or ``'all'``. Defaults to
``'parent'``. ``'parent'`` indicates that only the most generic name
should be used, e.g. ``'noun'`` for ``'nsf'``. ``'child'`` indicates
that the most specific name should be used, e.g.
``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all
names should be used, e.g. ``('noun', 'toponym',
'transcribed toponym')`` for ``'nsf'``.
:param bool english: Whether to return an English or Chinese name.
:param dict pos_tags: Custom part of speech tags to use.
:returns: ``str`` (``unicode`` for Python 2) if *name* is ``'parent'`` or
``'child'``. ``tuple`` if *name* is ``'all'``.
"""
return _get_pos_name(code, name, english, pos_tags) | 0.00094 |
def add_func(self, func, *arg, **kwargs):
"""QADATASTRUCT的指标/函数apply入口
Arguments:
func {[type]} -- [description]
Returns:
[type] -- [description]
"""
return self.groupby(level=1, sort=False).apply(func, *arg, **kwargs) | 0.007018 |
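A minimal pandas sketch of the same groupby-apply pattern over index level 1 (hypothetical MultiIndex data, not QUANTAXIS itself):
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('2020-01-01', 'AAPL'), ('2020-01-02', 'AAPL'), ('2020-01-01', 'MSFT')],
    names=['date', 'code'])
df = pd.DataFrame({'close': [300.0, 302.5, 160.0]}, index=idx)

def last_close(group):
    # hypothetical indicator: last close per code
    return group['close'].iloc[-1]

print(df.groupby(level=1, sort=False).apply(last_close))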
def url_quote(string, charset='utf-8', errors='strict', safe='/:'):
"""URL encode a single string with a given encoding.
:param string: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
"""
if not isinstance(string, (text_type, bytes, bytearray)):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
safe = frozenset(bytearray(safe) + _always_safe)
rv = bytearray()
for char in bytearray(string):
if char in safe:
rv.append(char)
else:
rv.extend(('%%%02X' % char).encode('ascii'))
return to_native(bytes(rv)) | 0.001253 |
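For comparison only (this is not the library's own helper), the standard library's `urllib.parse.quote` performs the same kind of percent-encoding with a configurable `safe` set:
from urllib.parse import quote

print(quote('foo bar/baz:qux', safe='/:'))  # foo%20bar/baz:qux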
def _narrow_unichr(code_point):
"""Retrieves the unicode character representing any given code point, in a way that won't break on narrow builds.
This is necessary because the built-in unichr function will fail for ordinals above 0xFFFF on narrow builds (UCS2);
ordinals above 0xFFFF would require recalculating and combining surrogate pairs. This avoids that by retrieving the
unicode character that was initially read.
Args:
code_point (int|CodePoint): An int or a subclass of int that contains the unicode character representing its
code point in an attribute named 'char'.
"""
try:
if len(code_point.char) > 1:
return code_point.char
except AttributeError:
pass
return six.unichr(code_point) | 0.006402 |
def items(self):
"""Return a list of the (name, value) pairs of the enum.
These are returned in the order they were defined in the .proto file.
"""
return [(value_descriptor.name, value_descriptor.number)
for value_descriptor in self._enum_type.values] | 0.003559 |
def Text(name, encoding=None):
"""
Match a route parameter.
`Any` is a synonym for `Text`.
:type name: `bytes`
:param name: Route parameter name.
:type encoding: `bytes`
:param encoding: Default encoding to assume if the ``Content-Type``
header is lacking one.
:return: ``callable`` suitable for use with `route` or `subroute`.
"""
def _match(request, value):
return name, query.Text(
value,
encoding=contentEncoding(request.requestHeaders, encoding))
return _match | 0.001802 |
def remove_hook(self, repo_id, name):
"""Remove repository hook."""
ghrepo = self.api.repository_with_id(repo_id)
if ghrepo:
hooks = (h for h in ghrepo.hooks()
if h.config.get('url', '') == self.webhook_url)
hook = next(hooks, None)
if not hook or hook.delete():
Repository.disable(user_id=self.user_id,
github_id=repo_id,
name=name)
return True
return False | 0.003643 |
def plot_data(self, proj, ax):
"""
Creates and plots a scatter plot of the original data.
"""
x, y = proj
ax.scatter(self.ig.independent_data[x],
self.ig.dependent_data[y], c='b') | 0.008403 |
def derive_child_context(self, whence):
"""Derives a scalar context as a child of the current context."""
return _HandlerContext(
container=self.container,
queue=self.queue,
field_name=None,
annotations=None,
depth=self.depth,
whence=whence,
value=bytearray(), # children start without a value
ion_type=None,
pending_symbol=None
) | 0.00432 |
def describe_version(self):
"""
Query the Cassandra server for the version.
:returns: string -- the version tag
"""
def _vers(client):
return client.describe_version()
d = self._connection()
d.addCallback(_vers)
return d | 0.006711 |
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Get original mean and standard deviations
mean, stddevs = super().get_mean_and_stddevs(
sctx, rctx, dctx, imt, stddev_types)
cff = SInterCan15Mid.SITE_COEFFS[imt]
mean += np.log(cff['mf'])
return mean, stddevs | 0.003968 |
def _read_page(file_obj, page_header, column_metadata):
"""Read the data page from the given file-object and convert it to raw, uncompressed bytes (if necessary)."""
bytes_from_file = file_obj.read(page_header.compressed_page_size)
codec = column_metadata.codec
if codec is not None and codec != parquet_thrift.CompressionCodec.UNCOMPRESSED:
if column_metadata.codec == parquet_thrift.CompressionCodec.SNAPPY:
raw_bytes = snappy.decompress(bytes_from_file)
elif column_metadata.codec == parquet_thrift.CompressionCodec.GZIP:
io_obj = io.BytesIO(bytes_from_file)
with gzip.GzipFile(fileobj=io_obj, mode='rb') as file_data:
raw_bytes = file_data.read()
else:
raise ParquetFormatException(
"Unsupported Codec: {0}".format(codec))
else:
raw_bytes = bytes_from_file
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Read page with compression type %s. Bytes %d -> %d",
_get_name(parquet_thrift.CompressionCodec, codec),
page_header.compressed_page_size,
page_header.uncompressed_page_size)
assert len(raw_bytes) == page_header.uncompressed_page_size, \
"found {0} raw bytes (expected {1})".format(
len(raw_bytes),
page_header.uncompressed_page_size)
return raw_bytes | 0.002146 |
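A standalone sketch of the gzip branch above, using only the standard library and an in-memory stream:
import gzip
import io

compressed = gzip.compress(b'raw page bytes')  # stand-in for the bytes read from file
io_obj = io.BytesIO(compressed)
with gzip.GzipFile(fileobj=io_obj, mode='rb') as file_data:
    raw_bytes = file_data.read()
print(raw_bytes)  # b'raw page bytes'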
def serviceResponse(self, response):
'''
Checks the response received from the I{ViewServer}.
@return: C{True} if the response received matches L{PARCEL_TRUE}, C{False} otherwise
'''
PARCEL_TRUE = "Result: Parcel(00000000 00000001 '........')\r\n"
''' The TRUE response parcel '''
if DEBUG:
print >>sys.stderr, "serviceResponse: comparing '%s' vs Parcel(%s)" % (response, PARCEL_TRUE)
return response == PARCEL_TRUE | 0.008081 |
def dict_to_hdf5(dic, endpoint):
"""Dump a dict to an HDF5 file.
"""
filename = gen_filename(endpoint)
with h5py.File(filename, 'w') as handler:
walk_dict_to_hdf5(dic, handler)
print('dumped to', filename) | 0.004292 |
def create_background(bg_type, fafile, outfile, genome="hg18", width=200, nr_times=10, custom_background=None):
"""Create background of a specific type.
Parameters
----------
bg_type : str
Name of background type.
fafile : str
Name of input FASTA file.
outfile : str
Name of output FASTA file.
genome : str, optional
Genome name.
width : int, optional
Size of regions.
nr_times : int, optional
Generate this times as many background sequences as compared to
input file.
Returns
-------
nr_seqs : int
Number of sequences created.
"""
width = int(width)
config = MotifConfig()
fg = Fasta(fafile)
if bg_type in ["genomic", "gc"]:
if not genome:
logger.error("Need a genome to create background")
sys.exit(1)
if bg_type == "random":
f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
logger.debug("Random background: %s", outfile)
elif bg_type == "genomic":
logger.debug("Creating genomic background")
f = RandomGenomicFasta(genome, width, nr_times * len(fg))
elif bg_type == "gc":
logger.debug("Creating GC matched background")
f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
logger.debug("GC matched background: %s", outfile)
elif bg_type == "promoter":
fname = Genome(genome).filename
gene_file = fname.replace(".fa", ".annotation.bed.gz")
if not gene_file:
gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
if not os.path.exists(gene_file):
print("Could not find a gene file for genome {}")
print("Did you use the --annotation flag for genomepy?")
print("Alternatively make sure there is a file called {}.bed in {}".format(genome, config.get_gene_dir()))
raise ValueError()
logger.info(
"Creating random promoter background (%s, using genes in %s)",
genome, gene_file)
f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
logger.debug("Random promoter background: %s", outfile)
elif bg_type == "custom":
bg_file = custom_background
if not bg_file:
raise IOError(
"Background file not specified!")
if not os.path.exists(bg_file):
raise IOError(
"Custom background file %s does not exist!",
bg_file)
else:
logger.info("Copying custom background file %s to %s.",
bg_file, outfile)
f = Fasta(bg_file)
l = np.median([len(seq) for seq in f.seqs])
if l < (width * 0.95) or l > (width * 1.05):
logger.warn(
"The custom background file %s contains sequences with a "
"median length of %s, while GimmeMotifs predicts motifs in sequences "
"of length %s. This will influence the statistics! It is recommended "
"to use background sequences of the same length.",
bg_file, l, width)
f.writefasta(outfile)
return len(f) | 0.004292 |
def _callback(self, wType, uFmt, hConv, hsz1, hsz2, hDdeData, dwData1, dwData2):
"""DdeCallback callback function for processing Dynamic Data Exchange (DDE)
transactions sent by DDEML in response to DDE events
Parameters
----------
wType : transaction type (UINT)
uFmt : clipboard data format (UINT)
hConv : handle to conversation (HCONV)
hsz1 : handle to string (HSZ)
hsz2 : handle to string (HSZ)
hDDedata : handle to global memory object (HDDEDATA)
dwData1 : transaction-specific data (DWORD)
dwData2 : transaction-specific data (DWORD)
Returns
-------
ret : specific to the type of transaction (HDDEDATA)
"""
if wType == XTYP_ADVDATA: # value of the data item has changed [hsz1 = topic; hsz2 = item; hDdeData = data]
dwSize = DWORD(0)
pData = DDE.AccessData(hDdeData, byref(dwSize))
if pData:
item = create_string_buffer('\000' * 128)
DDE.QueryString(self._idInst, hsz2, item, 128, CP_WINANSI)
self.callback(pData, item.value)
DDE.UnaccessData(hDdeData)
return DDE_FACK
else:
print("Error: AccessData returned NULL! (err = %s)"% (hex(DDE.GetLastError(self._idInst))))
if wType == XTYP_DISCONNECT:
print("Disconnect notification received from server")
return 0 | 0.004667 |
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not "
"installed by default when you install TensorFlow Probability. This "
"is so that users can decide whether to install the GPU-enabled "
"TensorFlow package. To use TensorFlow Probability, please install "
"the most recent version of TensorFlow, by following instructions at "
"https://tensorflow.org/install.\n\n")
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = "1.13"
if (distutils.version.LooseVersion(tf.__version__) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
"This version of TensorFlow Probability requires TensorFlow "
"version >= {required}; Detected an installation of version {present}. "
"Please upgrade TensorFlow to proceed.".format(
required=required_tensorflow_version,
present=tf.__version__)) | 0.009908 |
def on_receive(self, broker):
"""
Drain the pipe and fire callbacks. Since :attr:`_deferred` is
synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
ensure only one byte needs to be pending regardless of queue length.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
self._lock.acquire()
try:
self.receive_side.read(1)
deferred = self._deferred
self._deferred = []
finally:
self._lock.release()
for func, args, kwargs in deferred:
try:
func(*args, **kwargs)
except Exception:
LOG.exception('defer() crashed: %r(*%r, **%r)',
func, args, kwargs)
self._broker.shutdown() | 0.002463 |
def colorize(self, colormap, band_name=None, vmin=None, vmax=None):
"""Apply a colormap on a selected band.
colormap list: https://matplotlib.org/examples/color/colormaps_reference.html
Parameters
----------
colormap : str
Colormap name from this list https://matplotlib.org/examples/color/colormaps_reference.html
band_name : str, optional
Name of band to colorize, if none the first band will be used
vmin, vmax : int, optional
minimum and maximum range for normalizing array values, if None actual raster values will be used
Returns
-------
GeoRaster2
"""
vmin = vmin if vmin is not None else min(self.min())
vmax = vmax if vmax is not None else max(self.max())
cmap = matplotlib.cm.get_cmap(colormap) # type: matplotlib.colors.Colormap
band_index = 0
if band_name is None:
if self.num_bands > 1:
warnings.warn("Using the first band to colorize the raster", GeoRaster2Warning)
else:
band_index = self.band_names.index(band_name)
normalized = (self.image[band_index, :, :] - vmin) / (vmax - vmin)
# Colormap instances are used to convert data values (floats)
# to RGBA color that the respective Colormap
#
# https://matplotlib.org/_modules/matplotlib/colors.html#Colormap
image_data = cmap(normalized)
image_data = image_data[:, :, 0:3]
# convert floats [0,1] to uint8 [0,255]
image_data = image_data * 255
image_data = image_data.astype(np.uint8)
image_data = np.rollaxis(image_data, 2)
# force nodata where it was in original raster:
mask = _join_masks_from_masked_array(self.image)
mask = np.stack([mask[0, :, :]] * 3)
array = np.ma.array(image_data.data, mask=mask).filled(0) # type: np.ndarray
array = np.ma.array(array, mask=mask)
return self.copy_with(image=array, band_names=['red', 'green', 'blue']) | 0.003882 |
async def from_client(cls, client, *, shard_id=None, session=None, sequence=None, resume=False):
"""Creates a main websocket for Discord from a :class:`Client`.
This is for internal use only.
"""
gateway = await client.http.get_gateway()
ws = await websockets.connect(gateway, loop=client.loop, klass=cls, compression=None)
# dynamically add attributes needed
ws.token = client.http.token
ws._connection = client._connection
ws._dispatch = client.dispatch
ws.gateway = gateway
ws.shard_id = shard_id
ws.shard_count = client._connection.shard_count
ws.session_id = session
ws.sequence = sequence
ws._max_heartbeat_timeout = client._connection.heartbeat_timeout
client._connection._update_references(ws)
log.info('Created websocket connected to %s', gateway)
# poll event for OP Hello
await ws.poll_event()
if not resume:
await ws.identify()
return ws
await ws.resume()
try:
await ws.ensure_open()
except websockets.exceptions.ConnectionClosed:
# ws got closed so let's just do a regular IDENTIFY connect.
log.info('RESUME failed (the websocket decided to close) for Shard ID %s. Retrying.', shard_id)
return await cls.from_client(client, shard_id=shard_id)
else:
return ws | 0.003439 |
def add_vrf(self, auth, attr):
""" Add a new VRF.
* `auth` [BaseAuth]
AAA options.
* `attr` [vrf_attr]
The new VRF's attributes.
Add a VRF based on the values stored in the `attr` dict.
Returns a dict describing the VRF which was added.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding.
"""
self._logger.debug("add_vrf called; attr: %s" % unicode(attr))
# sanity check - do we have all attributes?
req_attr = [ 'rt', 'name' ]
self._check_attr(attr, req_attr, _vrf_attrs)
insert, params = self._sql_expand_insert(attr)
sql = "INSERT INTO ip_net_vrf " + insert
self._execute(sql, params)
vrf_id = self._lastrowid()
vrf = self.list_vrf(auth, { 'id': vrf_id })[0]
# write to audit table
audit_params = {
'vrf_id': vrf['id'],
'vrf_rt': vrf['rt'],
'vrf_name': vrf['name'],
'username': auth.username,
'authenticated_as': auth.authenticated_as,
'full_name': auth.full_name,
'authoritative_source': auth.authoritative_source,
'description': 'Added VRF %s with attr: %s' % (vrf['rt'], unicode(vrf))
}
sql, params = self._sql_expand_insert(audit_params)
self._execute('INSERT INTO ip_net_log %s' % sql, params)
return vrf | 0.004318 |
def _facet_counts(items):
"""Returns facet counts as dict.
Given the `items()` on the raw dictionary from Elasticsearch this processes
it and returns the counts keyed on the facet name provided in the original
query.
"""
facets = {}
for name, data in items:
facets[name] = FacetResult(name, data)
return facets | 0.002841 |
def check_docstring(cls):
"""
Asserts that the class has a docstring, returning it if successful.
"""
docstring = inspect.getdoc(cls)
if not docstring:
breadcrumbs = " -> ".join(t.__name__ for t in inspect.getmro(cls)[:-1][::-1])
msg = "docstring required for plugin '%s' (%s, defined in %s)"
args = (cls.__name__, breadcrumbs, cls.__module__)
raise InternalCashewException(msg % args)
max_line_length = cls._class_settings.get('max-docstring-length')
if max_line_length:
for i, line in enumerate(docstring.splitlines()):
if len(line) > max_line_length:
msg = "docstring line %s of %s is %s chars too long"
args = (i, cls.__name__, len(line) - max_line_length)
raise Exception(msg % args)
return docstring | 0.004405 |
def parse_file_entities(filename, entities=None, config=None,
include_unmatched=False):
""" Parse the passed filename for entity/value pairs.
Args:
filename (str): The filename to parse for entity values
entities (list): An optional list of Entity instances to use in
extraction. If passed, the config argument is ignored.
config (str, Config, list): One or more Config objects or names of
configurations to use in matching. Each element must be a Config
object, or a valid Config name (e.g., 'bids' or 'derivatives').
If None, all available configs are used.
include_unmatched (bool): If True, unmatched entities are included
in the returned dict, with values set to None. If False
(default), unmatched entities are ignored.
Returns: A dict, where keys are Entity names and values are the
values extracted from the filename.
"""
# Load Configs if needed
if entities is None:
if config is None:
config = ['bids', 'derivatives']
config = [Config.load(c) if not isinstance(c, Config) else c
for c in listify(config)]
# Consolidate entities from all Configs into a single dict
entities = {}
for c in config:
entities.update(c.entities)
entities = entities.values()
# Extract matches
bf = BIDSFile(filename)
ent_vals = {}
for ent in entities:
match = ent.match_file(bf)
if match is not None or include_unmatched:
ent_vals[ent.name] = match
return ent_vals | 0.000604 |
def _sort_labels(label_data):
"""Returns the labels in `label_data` sorted in descending order
according to the 'size' (total token count) of their referent
corpora.
:param label_data: labels (with their token counts) to sort
:type: `dict`
:rtype: `list`
"""
labels = list(label_data)
labels.sort(key=label_data.get, reverse=True)
return labels | 0.004695 |
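A tiny standalone illustration with hypothetical token counts:
label_data = {'corpus_a': 120, 'corpus_b': 450, 'corpus_c': 80}
labels = list(label_data)
labels.sort(key=label_data.get, reverse=True)
print(labels)  # ['corpus_b', 'corpus_a', 'corpus_c']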
def format_rpc(data):
"""Format an RPC call and response.
Args:
data (tuple): A tuple containing the address, rpc_id, argument and
response payloads and any error code.
Returns:
str: The formatted RPC string.
"""
address, rpc_id, args, resp, _status = data
name = rpc_name(rpc_id)
if isinstance(args, (bytes, bytearray)):
arg_str = hexlify(args)
else:
arg_str = repr(args)
if isinstance(resp, (bytes, bytearray)):
resp_str = hexlify(resp)
else:
resp_str = repr(resp)
#FIXME: Check and print status as well
return "%s called on address %d, payload=%s, response=%s" % (name, address, arg_str, resp_str) | 0.004202 |
def _get_hanging_wall_coeffs_rx(self, C, rup, r_x):
"""
Returns the hanging wall Rx scaling term defined in equations 7 to 12
"""
# Define coefficients R1 and R2
r_1 = rup.width * cos(radians(rup.dip))
r_2 = 62.0 * rup.mag - 350.0
fhngrx = np.zeros(len(r_x))
# Case when 0 <= Rx <= R1
idx = np.logical_and(r_x >= 0., r_x < r_1)
fhngrx[idx] = self._get_f1rx(C, r_x[idx], r_1)
# Case when Rx > R1
idx = r_x >= r_1
f2rx = self._get_f2rx(C, r_x[idx], r_1, r_2)
f2rx[f2rx < 0.0] = 0.0
fhngrx[idx] = f2rx
return fhngrx | 0.00313 |
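As read directly from the code above, the two geometric terms are
R_1 = W \cos(\delta), \qquad R_2 = 62\,M_w - 350
where W is the rupture width, \delta the dip and M_w the magnitude.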
def get(self, key, default_val=None, require_value=False):
""" Returns a dictionary value
"""
val = dict.get(self, key, default_val)
if val is None and require_value:
raise KeyError('key "%s" not found' % key)
if isinstance(val, dict):
return AttributeDict(val)
return val | 0.005814 |
def set_element_attributes(elem_to_parse, **attrib_kwargs):
"""
Adds the specified key/value pairs to the element's attributes, and
returns the updated set of attributes.
If the element already contains any of the attributes specified in
attrib_kwargs, they are updated accordingly.
"""
element = get_element(elem_to_parse)
if element is None:
return element
if len(attrib_kwargs):
element.attrib.update(attrib_kwargs)
return element.attrib | 0.001996 |
def _get_coarse_dataset(self, key, info):
"""Get the coarse dataset refered to by `key` from the XML data."""
angles = self.root.find('.//Tile_Angles')
if key in ['solar_zenith_angle', 'solar_azimuth_angle']:
elts = angles.findall(info['xml_tag'] + '/Values_List/VALUES')
return np.array([[val for val in elt.text.split()] for elt in elts],
dtype=np.float)
elif key in ['satellite_zenith_angle', 'satellite_azimuth_angle']:
arrays = []
elts = angles.findall(info['xml_tag'] + '[@bandId="1"]')
for elt in elts:
items = elt.findall(info['xml_item'] + '/Values_List/VALUES')
arrays.append(np.array([[val for val in item.text.split()] for item in items],
dtype=np.float))
return np.nanmean(np.dstack(arrays), -1)
else:
return | 0.004233 |
def delete(self):
"""
Deletes record, and removes it from database.
"""
# workflow
# --------
# (methods belonging to create/update/delete framework:
# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)
# 1. unregister: links, hooks and external files
# 2. remove from table without unregistering
# unregister links
self._unregister_links()
# unregister hooks
self._unregister_hooks()
# unregister external files
self._unregister_external_files()
# tell table to remove without unregistering
self.get_table()._dev_remove_record_without_unregistering(self)
# make stale
self._table = None
self._data = None | 0.003654 |
def _render_list(data):
""" Helper to render a list of objects as an HTML list object. """
return IPython.core.display.HTML(google.datalab.utils.commands.HtmlBuilder.render_list(data)) | 0.021277 |
def query_ball_point(self, x, r, p=2., eps=0):
"""
Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may
save substantial amounts of time by putting them in a
PeriodicCKDTree and using query_ball_tree.
"""
x = np.asarray(x).astype(np.float)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a " \
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=np.object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p, eps)
return result | 0.001649 |
def trun_setup(conf):
"""
Setup the testrunner data-structure, embedding the parsed environment
variables and command-line arguments and continues with setup for testplans,
testsuites, and testcases
"""
declr = None
try:
with open(conf["TESTPLAN_FPATH"]) as declr_fd:
declr = yaml.safe_load(declr_fd)
except AttributeError as exc:
cij.err("rnr: %r" % exc)
if not declr:
return None
trun = copy.deepcopy(TRUN)
trun["ver"] = cij.VERSION
trun["conf"] = copy.deepcopy(conf)
trun["res_root"] = conf["OUTPUT"]
trun["aux_root"] = os.sep.join([trun["res_root"], "_aux"])
trun["evars"].update(copy.deepcopy(declr.get("evars", {})))
os.makedirs(trun["aux_root"])
hook_names = declr.get("hooks", [])
if "lock" not in hook_names:
hook_names = ["lock"] + hook_names
if hook_names[0] != "lock":
return None
# Setup top-level hooks
trun["hooks"] = hooks_setup(trun, trun, hook_names)
for enum, declr in enumerate(declr["testsuites"]): # Setup testsuites
tsuite = tsuite_setup(trun, declr, enum)
if tsuite is None:
cij.err("main::FAILED: setting up tsuite: %r" % tsuite)
return 1
trun["testsuites"].append(tsuite)
trun["progress"]["UNKN"] += len(tsuite["testcases"])
return trun | 0.001456 |
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
x= r/self.a
Rpa= numpy.sqrt(R**2.+self.a**2.)
Rma= numpy.sqrt(R**2.-self.a**2.+0j)
if Rma == 0:
za= z/self.a
return self.a**2./2.*((2.-2.*numpy.sqrt(za**2.+1)
+numpy.sqrt(2.)*za\
*numpy.arctan(za/numpy.sqrt(2.)))/z
+numpy.sqrt(2*za**2.+2.)\
*numpy.arctanh(za/numpy.sqrt(2.*(za**2.+1)))
/numpy.sqrt(self.a**2.+z**2.))
else:
return self.a**2.*(numpy.arctan(z/x/Rma)/Rma
+numpy.arctanh(z/x/Rpa)/Rpa
-numpy.arctan(z/Rma)/Rma
+numpy.arctan(z/Rpa)/Rpa).real | 0.018341 |
def step_I_create_logrecord_with_table(context):
"""
Create a log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()`
"""
assert context.table, "REQUIRE: context.table"
assert len(context.table.rows) == 1, "REQUIRE: table.row.size == 1"
step_I_create_logrecords_with_table(context) | 0.002755 |
def view_portfolio_loss(token, dstore):
"""
The mean and stddev loss for the full portfolio for each loss type,
extracted from the event loss table, averaged over the realizations
"""
data = portfolio_loss(dstore) # shape (R, L)
loss_types = list(dstore['oqparam'].loss_dt().names)
header = ['portfolio_loss'] + loss_types
mean = ['mean'] + [row.mean() for row in data.T]
stddev = ['stddev'] + [row.std(ddof=1) for row in data.T]
return rst_table([mean, stddev], header) | 0.001957 |
def getlist(self, section, option):
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values | 0.003521 |
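The same comma- and newline-splitting can be exercised directly with the standard-library configparser; the section and option names below are made up for the demo.

import configparser

cfg_text = """
[report]
omit =
    */tests/*, */migrations/*
    build/*
"""

parser = configparser.ConfigParser()
parser.read_string(cfg_text)

raw = parser.get("report", "omit")
values = [v.strip()
          for line in raw.split("\n")
          for v in line.split(",")
          if v.strip()]
print(values)   # ['*/tests/*', '*/migrations/*', 'build/*']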
def build(site, tagdata):
""" Returns the tag cloud for a list of tags.
"""
tagdata.sort()
# we get the most popular tag to calculate the tags' weigth
tagmax = 0
for tagname, tagcount in tagdata:
if tagcount > tagmax:
tagmax = tagcount
steps = getsteps(site.tagcloud_levels, tagmax)
tags = []
for tagname, tagcount in tagdata:
weight = [twt[0] \
for twt in steps if twt[1] >= tagcount and twt[1] > 0][0]+1
tags.append({'tagname':tagname, 'count':tagcount, 'weight':weight})
return tags | 0.041176 |
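getsteps() is not shown in the snippet, so the sketch below substitutes a hypothetical evenly spaced thresholding just to show how tag counts map to weight levels; the rest mirrors the loop above.

def getsteps_linear(levels, tagmax):
    # hypothetical stand-in for getsteps(): (level, count threshold) pairs
    return [(lvl, tagmax * (lvl + 1) / levels) for lvl in range(levels)]

def tagcloud(tagdata, levels=4):
    tagdata = sorted(tagdata)
    tagmax = max((count for _, count in tagdata), default=0)
    steps = getsteps_linear(levels, tagmax)
    tags = []
    for tagname, tagcount in tagdata:
        weight = [lvl for lvl, threshold in steps
                  if threshold >= tagcount and threshold > 0][0] + 1
        tags.append({"tagname": tagname, "count": tagcount, "weight": weight})
    return tags

print(tagcloud([("python", 12), ("yaml", 3), ("css", 7)]))
# css -> weight 3, python -> weight 4, yaml -> weight 1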
def to_api_repr(self):
"""Generate a resource for :meth:`_begin`."""
configuration = self._configuration.to_api_repr()
resource = {
"jobReference": self._properties["jobReference"],
"configuration": configuration,
}
configuration["query"]["query"] = self.query
return resource | 0.005714 |
def uniqify(func):
"""Make sure that a method returns a unique name."""
@six.wraps(func)
def unique(self, *args, **kwargs):
return self.unique(func(self, *args, **kwargs))
return unique | 0.025381 |
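The decorator assumes the host class provides a unique() method for de-duplicating names. A self-contained sketch with an invented NameRegistry (and functools.wraps in place of six.wraps):

import functools

def uniqify(func):
    """Make sure that a name-generating method returns a unique name."""
    @functools.wraps(func)
    def unique(self, *args, **kwargs):
        return self.unique(func(self, *args, **kwargs))
    return unique

class NameRegistry:
    def __init__(self):
        self._seen = set()

    def unique(self, name):
        # append a numeric suffix until the name has not been handed out yet
        candidate, n = name, 1
        while candidate in self._seen:
            n += 1
            candidate = "%s_%d" % (name, n)
        self._seen.add(candidate)
        return candidate

    @uniqify
    def temp_name(self, base):
        return "tmp_%s" % base

reg = NameRegistry()
print(reg.temp_name("x"))   # tmp_x
print(reg.temp_name("x"))   # tmp_x_2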
def instantiate(config):
"""
instantiate all registered vodka applications
Args:
config (dict or MungeConfig): configuration object
"""
for handle, cfg in list(config["apps"].items()):
if not cfg.get("enabled", True):
continue
app = get_application(handle)
instances[app.handle] = app(cfg) | 0.002809 |
def queries2alignments(cfg):
"""
    All the steps of the alignannoted detection pipeline are run from here.
:param cfg: Configuration settings provided in .yml file
"""
from rohan.dandage.align import get_genomes
get_genomes(cfg)
cfg['datad']=cfg['prjd']
cfg['plotd']=cfg['datad']
dalignannotedp=f"{cfg['datad']}/dalignannoted.tsv"
# stepn='04_alignannoteds'
# logging.info(stepn)
cfg['datatmpd']=f"{cfg['datad']}/tmp"
for dp in [cfg['datatmpd']]:
if not exists(dp):
makedirs(dp)
step2doutp={
1:'01_queries_queryl*.fa',
2:'02_dalignbed.tsv',
3:'03_annotations.bed',
4:'04_dalignbedqueries.tsv',
5:'05_dalignedfasta.tsv',
6:'06_dalignbedqueriesseq.tsv',
7:'07_dalignbedstats.tsv',
8:'08_dannotsagg.tsv',
9:'09_dalignbedannot.tsv',
10:'10_daggbyquery.tsv',
}
cfg['dqueriesp']=cfg['dinp']
cfg['alignmentbedp']=f"{cfg['datatmpd']}/02_alignment.bed"
cfg['dalignbedp']=f"{cfg['datatmpd']}/02_dalignbed.tsv"
cfg['dalignbedqueriesp']=f"{cfg['datatmpd']}/04_dalignbedqueries.tsv"
cfg['dalignedfastap']=f"{cfg['datatmpd']}/05_dalignedfasta.tsv"
cfg['dalignbedqueriesseqp']=f"{cfg['datatmpd']}/06_dalignbedqueriesseq.tsv"
cfg['dalignbedstatsp']=f"{cfg['datatmpd']}/07_dalignbedstats.tsv"
cfg['dannotsaggp']=f"{cfg['datatmpd']}/08_dannotsagg.tsv"
cfg['dalignbedannotp']=f"{cfg['datatmpd']}/09_dalignbedannot.tsv"
cfg['daggbyqueryp']=f"{cfg['datatmpd']}/10_daggbyquery.tsv"
annotationsbedp=f"{cfg['datatmpd']}/03_annotations.bed"
cfg['annotationsbedp']=annotationsbedp
dqueries=read_table(cfg['dqueriesp'])
print(dqueries.head())
#check which step to process
for step in range(2,10+1,1):
if not exists(f"{cfg['datatmpd']}/{step2doutp[step]}"):
if step==2:
step=-1
break
logging.info(f'process from step:{step}')
cfg['dalignannotedp']='{}/dalignannoted.tsv'.format(cfg['datad'])
if not exists(cfg['dalignannotedp']) or cfg['force']:
if step<=1:
cfg=dqueries2queriessam(cfg,dqueries)
if step<=2:
cfg=queriessam2dalignbed(cfg)
if step<=3:
cfg=dalignbed2annotationsbed(cfg)
if step<=4:
cfg=dalignbed2dalignbedqueries(cfg)
if step<=5:
cfg=alignmentbed2dalignedfasta(cfg)
if step<=6:
cfg=dalignbed2dalignbedqueriesseq(cfg)
if step<=7:
cfg=dalignbedqueriesseq2dalignbedstats(cfg)
if step<=8:
cfg=dannots2dalignbed2dannotsagg(cfg)
if step<=9:
cfg=dannotsagg2dannots2dalignbedannot(cfg)
if step<=10:
cfg=dalignbedannot2daggbyquery(cfg)
if cfg is None:
logging.warning(f"no alignment found")
cfg['step']=4
return saveemptytable(cfg,cfg['dalignannotedp'])
import gc
gc.collect() | 0.025444 |
def filter_tag(arg):
""" Parses a --filter-tag argument """
try:
strip_len = len('Key=')
key, value = arg[strip_len:].split(',Value=', 1)
return key, value
except:
msg = 'Invalid --filter-tag argument: {}'
raise argparse.ArgumentTypeError(msg.format(arg)) | 0.006515 |
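Wired into argparse (assuming filter_tag above is in scope), the parser turns each --filter-tag occurrence into a (key, value) tuple:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--filter-tag", type=filter_tag, action="append",
                    dest="filter_tags", default=[],
                    help="e.g. --filter-tag Key=env,Value=prod")

args = parser.parse_args(["--filter-tag", "Key=env,Value=prod"])
print(args.filter_tags)   # [('env', 'prod')]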
def _get_options_for_model(self, model, opts_class=None, **options):
"""
Returns an instance of translation options with translated fields
defined for the ``model`` and inherited from superclasses.
"""
if model not in self._registry:
# Create a new type for backwards compatibility.
opts = type("%sTranslationOptions" % model.__name__,
(opts_class or TranslationOptions,), options)(model)
# Fields for translation may be inherited from abstract
# superclasses, so we need to look at all parents.
for base in model.__bases__:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
opts.update(self._get_options_for_model(base))
# Cache options for all models -- we may want to compute options
# of registered subclasses of unregistered models.
self._registry[model] = opts
return self._registry[model] | 0.001765 |
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated] | 0.00158 |
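A quick usage example against the public pandas API, with unrelated sample data:

import pandas as pd

df = pd.DataFrame({"user": ["a", "a", "b", "b"],
                   "event": ["login", "login", "login", "logout"]})

print(df.drop_duplicates())                              # drops the repeated ('a', 'login') row
print(df.drop_duplicates(subset=["user"], keep="last"))  # one row per user, keeping the last one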
def _http_headers(self):
"""Return dictionary of http headers necessary for making an http
connection to the endpoint of this Connection.
:return: Dictionary of headers
"""
if not self.usertag:
return {}
creds = u'{}:{}'.format(
self.usertag,
self.password or ''
)
token = base64.b64encode(creds.encode())
return {
'Authorization': 'Basic {}'.format(token.decode())
} | 0.004016 |
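The header construction itself is plain standard library and can be checked in isolation; this sketch reproduces just that part outside of any connection class.

import base64

def basic_auth_header(usertag, password=None):
    # Return {} when there is no user tag, otherwise a Basic auth header.
    if not usertag:
        return {}
    creds = "{}:{}".format(usertag, password or "")
    token = base64.b64encode(creds.encode())
    return {"Authorization": "Basic {}".format(token.decode())}

print(basic_auth_header("alice", "s3cret"))
# {'Authorization': 'Basic YWxpY2U6czNjcmV0'}
print(basic_auth_header(None))
# {}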
def append_to_arg_count(self, data):
"""
Add digit to the input argument.
:param data: the typed digit as string
"""
assert data in '-0123456789'
current = self._arg
if data == '-':
assert current is None or current == '-'
result = data
elif current is None:
result = data
else:
result = "%s%s" % (current, data)
self.input_processor.arg = result | 0.004193 |
def p_expression(self, p):
"""expression : jsonpath
| jsonpath FILTER_OP ID
| jsonpath FILTER_OP FLOAT
| jsonpath FILTER_OP NUMBER
| jsonpath FILTER_OP BOOL
"""
if len(p) == 2:
left, op, right = p[1], None, None
else:
__, left, op, right = p
p[0] = _filter.Expression(left, op, right) | 0.004587 |
def _quilc_compile(self, quil_program, isa, specs):
"""
Sends a quilc job to Forest.
Users should use :py:func:`LocalCompiler.quil_to_native_quil` instead of calling this
directly.
"""
payload = quilc_compile_payload(quil_program, isa, specs)
response = post_json(self.session, self.sync_endpoint + "/", payload)
unpacked_response = response.json()
return unpacked_response | 0.006726 |
def import_or_die(module_name, entrypoint_names):
'''
Import user code; return reference to usercode function.
(str) -> function reference
'''
log_debug("Importing {}".format(module_name))
module_name = os.path.abspath(module_name)
if module_name.endswith('.py'):
module_name,ext = os.path.splitext(module_name)
modname = os.path.basename(module_name)
dirname = os.path.dirname(module_name)
if dirname and dirname not in sys.path:
sys.path.append(dirname)
# first, try to reload code
if modname in sys.modules:
user_module = sys.modules.get(modname)
user_module = importlib.reload(user_module)
# if it isn't in sys.modules, load it for the first time, or
# try to.
else:
try:
mypaths = [ x for x in sys.path if ("Cellar" not in x and "packages" not in x)]
# print("Loading {} from {} ({})".format(modname, dirname, mypaths))
# user_module = importlib.import_module(modname)
user_module = importlib.__import__(modname)
except ImportError as e:
log_failure("Fatal error: couldn't import module (error: {}) while executing {}".format(str(e), modname))
raise ImportError(e)
# if there aren't any functions to call into, then the caller
# just wanted the module/code to be imported, and that's it.
if not entrypoint_names:
return
existing_names = dir(user_module)
for method in entrypoint_names:
if method in existing_names:
return getattr(user_module, method)
if len(entrypoint_names) > 1:
entrypoints = "one of {}".format(', '.join(entrypoint_names))
else:
entrypoints = entrypoint_names[0]
raise ImportError("Required entrypoint function or symbol ({}) not found in your code".format(entrypoints)) | 0.003772 |
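A throwaway end-to-end check of the same idea (write a module, put its directory on sys.path, import it, fetch an entrypoint), leaving out the reload and logging details:

import importlib
import os
import sys
import tempfile

workdir = tempfile.mkdtemp()
with open(os.path.join(workdir, "usercode.py"), "w") as fh:
    fh.write("def main():\n    return 'hello from usercode'\n")

if workdir not in sys.path:
    sys.path.append(workdir)

user_module = importlib.import_module("usercode")
entry = getattr(user_module, "main", None)
if entry is None:
    raise ImportError("entrypoint 'main' not found in usercode")
print(entry())   # hello from usercode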
def set_updated(self):
"""
Mark the module as updated.
We check if the actual content has changed and if so we trigger an
update in py3status.
"""
# get latest output
output = []
for method in self.methods.values():
data = method["last_output"]
if isinstance(data, list):
if self.testing and data:
data[0]["cached_until"] = method.get("cached_until")
output.extend(data)
else:
# if the output is not 'valid' then don't add it.
if data.get("full_text") or "separator" in data:
if self.testing:
data["cached_until"] = method.get("cached_until")
output.append(data)
# if changed store and force display update.
if output != self.last_output:
# has the modules output become urgent?
            # we only care about the update on which this happens,
            # not any updates after that.
urgent = True in [x.get("urgent") for x in output]
if urgent != self.urgent:
self.urgent = urgent
else:
urgent = False
self.last_output = output
self._py3_wrapper.notify_update(self.module_full_name, urgent) | 0.001488 |
def isFits(input):
"""
Returns
--------
isFits: tuple
An ``(isfits, fitstype)`` tuple. The values of ``isfits`` and
``fitstype`` are specified as:
- ``isfits``: True|False
- ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
Notes
-----
Input images which do not have a valid FITS filename will automatically
result in a return of (False, None).
In the case that the input has a valid FITS filename but runs into some
error upon opening, this routine will raise that exception for the calling
routine/user to handle.
"""
isfits = False
fitstype = None
names = ['fits', 'fit', 'FITS', 'FIT']
    # determine if input is a fits file based on extension
# Only check type of FITS file if filename ends in valid FITS string
f = None
fileclose = False
if isinstance(input, fits.HDUList):
isfits = True
f = input
else:
isfits = True in [input.endswith(l) for l in names]
# if input is a fits file determine what kind of fits it is
#waiver fits len(shape) == 3
if isfits:
if f is None:
try:
f = fits.open(input, mode='readonly')
fileclose = True
except Exception:
if f is not None:
f.close()
raise
data0 = f[0].data
if data0 is not None:
try:
if isinstance(f[1], fits.TableHDU):
fitstype = 'waiver'
except IndexError:
fitstype = 'simple'
else:
fitstype = 'mef'
if fileclose:
f.close()
return isfits, fitstype | 0.002892 |
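Assuming astropy is installed and isFits above is in scope, an in-memory HDUList exercises the HDUList and extension-check branches without touching disk:

from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU()])   # primary HDU with no data
print(isFits(hdul))                        # (True, 'mef')

print(isFits("not_a_fits_file.txt"))       # (False, None): filename check fails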
def handle_error(self, message: str, e: mastodon.MastodonError) -> OutputRecord:
"""Handle error while trying to do something."""
self.lerror(f"Got an error! {e}")
# Handle errors if we know how.
try:
code = e[0]["code"]
if code in self.handled_errors:
self.handled_errors[code]
else:
pass
except Exception:
pass
return TootRecord(error=e) | 0.006383 |
def split_every(n, iterable):
"""Returns a generator that spits an iteratable into n-sized chunks. The last chunk may have
less than n elements.
See http://stackoverflow.com/a/22919323/503377."""
items = iter(iterable)
return itertools.takewhile(bool, (list(itertools.islice(items, n)) for _ in itertools.count())) | 0.008955 |
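With split_every above in scope, a couple of quick checks; note the chunks are produced lazily, so infinite iterables are fine as long as you stop consuming.

import itertools

print(list(split_every(3, range(8))))
# [[0, 1, 2], [3, 4, 5], [6, 7]]

lazy = split_every(2, itertools.count())   # never materialised in full
print(next(iter(lazy)))                    # [0, 1]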
def parse(self, text, fn=None):
"""
Parse the Mapfile
"""
if PY2 and not isinstance(text, unicode):
# specify Unicode for Python 2.7
text = unicode(text, 'utf-8')
if self.expand_includes:
text = self.load_includes(text, fn=fn)
try:
self._comments[:] = [] # clear any comments from a previous parse
tree = self.lalr.parse(text)
if self.include_comments:
self.assign_comments(tree, self._comments)
return tree
except (ParseError, UnexpectedInput) as ex:
if fn:
log.error("Parsing of {} unsuccessful".format(fn))
else:
log.error("Parsing of Mapfile unsuccessful")
log.info(ex)
raise | 0.002442 |
def set_common_attributes(span):
"""Set the common attributes."""
common = {
attributes_helper.COMMON_ATTRIBUTES.get('AGENT'): AGENT,
}
common_attrs = Attributes(common)\
.format_attributes_json()\
.get('attributeMap')
_update_attr_map(span, common_attrs) | 0.003333 |
def chdir(method):
"""Decorator executing method in directory 'dir'.
"""
def wrapper(self, dir, *args, **kw):
dirstack = ChdirStack()
dirstack.push(dir)
try:
return method(self, dir, *args, **kw)
finally:
dirstack.pop()
return functools.wraps(method)(wrapper) | 0.003012 |
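ChdirStack is not shown, so this sketch substitutes a small contextmanager with the same push/pop behaviour; the decorator's shape is otherwise unchanged.

import contextlib
import functools
import os
import tempfile

@contextlib.contextmanager
def pushd(path):
    """Hypothetical stand-in for ChdirStack: temporarily change directory."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

def chdir(method):
    """Decorator executing method in directory 'dir'."""
    @functools.wraps(method)
    def wrapper(self, dir, *args, **kw):
        with pushd(dir):
            return method(self, dir, *args, **kw)
    return wrapper

class Build:
    @chdir
    def where(self, dir):
        return os.getcwd()

print(Build().where(tempfile.gettempdir()))   # prints the temporary directory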
def add_parameters(self, **params):
"""
Add URL parameters
Also ensure that only valid format/content combinations are requested
"""
self.url_params = None
# we want JSON by default
if not params.get("format"):
params["format"] = "json"
# non-standard content must be retrieved as Atom
if params.get("content"):
params["format"] = "atom"
# TODO: rewrite format=atom, content=json request
if "limit" not in params or params.get("limit") == 0:
params["limit"] = 100
# Need ability to request arbitrary number of results for version
# response
# -1 value is hack that works with current version
elif params["limit"] == -1 or params["limit"] is None:
del params["limit"]
# bib format can't have a limit
if params.get("format") == "bib":
del params["limit"]
self.url_params = urlencode(params, doseq=True) | 0.001992 |
def make_count_grid(data):
"""
Takes a 2 or 3d grid of strings representing binary numbers.
Returns a grid of the same dimensions in which each binary number has been
replaced by an integer indicating the number of ones that were in that
number.
"""
data = deepcopy(data)
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(len(data[i][j])):
if type(data[i][j][k]) is list:
for l in range(len(data[i][j][k])):
try:
data[i][j][k] = data[i][j][k][l].count("1")
except:
data[i][j][k] = len(data[i][j][k][l])
else:
try:
data[i][j][k] = data[i][j][k].count("1")
except:
data[i][j][k] = len(data[i][j][k])
return data | 0.004283 |
def diff_with_models(self):
"""
Return a dict stating the differences between current state of models
and the configuration itself.
TODO: Detect fields that are in conf, but not in models
"""
missing_from_conf = defaultdict(set)
for model in get_models():
db_tables_and_columns = get_db_tables_and_columns_of_model(model)
for (table_name, columns) in db_tables_and_columns.items():
model_strategy = self.strategy.get(table_name)
for column in columns:
if not model_strategy or column not in model_strategy:
missing_from_conf[table_name].add(column)
return missing_from_conf | 0.00271 |
def delete(self):
""" Deletes this instance """
self.__dmlquery__(self.__class__, self,
batch=self._batch,
timestamp=self._timestamp,
consistency=self.__consistency__,
timeout=self._timeout).delete() | 0.00627 |
def prt_data(self, name, vals, prt=sys.stdout):
"""Print stats data in markdown style."""
fld2val = self.get_fld2val(name, vals)
prt.write(self.fmt.format(**fld2val))
return fld2val | 0.00939 |
def chown(self, path, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of a file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param str path: path of the file to change the owner and group of
:param int uid: new owner's uid
:param int gid: new group id
"""
path = self._adjust_cwd(path)
self._log(DEBUG, "chown({!r}, {!r}, {!r})".format(path, uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self._request(CMD_SETSTAT, path, attr) | 0.002869 |
async def _get_full_user(self) -> Dict:
"""
Sometimes Telegram does not provide all the user info with the message.
In order to get the full profile (aka the language code) you need to
call this method which will make sure that the full User object is
loaded.
The result is cached for the lifetime of the object, so if the function
is called multiple times it will only fetch the user once. There is
a locking mechanism around the cache to allow concurrent calls.
"""
if 'language_code' in self._user:
return self._user
async with self._lock:
if self._full_user is None:
cm = await self._telegram.call(
'getChatMember',
user_id=self._user['id'],
chat_id=self._chat['id'],
)
self._full_user = cm['result']['user']
return self._full_user | 0.00206 |
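The fetch-once-under-a-lock pattern can be reproduced with a toy cache and asyncio.gather; the Telegram call is replaced here by an invented _fetch_remote coroutine.

import asyncio
from typing import Dict, Optional

class UserCache:
    def __init__(self):
        self._lock = asyncio.Lock()
        self._full_user: Optional[Dict] = None
        self.calls = 0

    async def _fetch_remote(self) -> Dict:
        # stand-in for the real getChatMember call
        self.calls += 1
        await asyncio.sleep(0.01)
        return {"id": 42, "language_code": "en"}

    async def get_full_user(self) -> Dict:
        async with self._lock:
            if self._full_user is None:
                self._full_user = await self._fetch_remote()
            return self._full_user

async def main():
    cache = UserCache()
    users = await asyncio.gather(*[cache.get_full_user() for _ in range(5)])
    print(users[0], "fetched", cache.calls, "time(s)")   # fetched 1 time(s)

asyncio.run(main())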
def _metadata_unit(unit):
"""Given the name of a unit (e.g. apache2/0), get the unit charm's
metadata.yaml. Very similar to metadata() but allows us to inspect
other units. Unit needs to be co-located, such as a subordinate or
principal/primary.
:returns: metadata.yaml as a python object.
"""
basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
if not os.path.exists(joineddir):
return None
with open(joineddir) as md:
return yaml.safe_load(md) | 0.001582 |
def save(self, commit=True):
""" Save model to database """
db.session.add(self)
if commit:
db.session.commit()
return self | 0.011905 |