text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def removeAllEntitlements(self, appId):
"""
This operation removes all entitlements from the portal for ArcGIS
Pro or additional products such as Navigator for ArcGIS and revokes
all entitlements assigned to users for the specified product. The
portal is no longer a licensing portal for that product.
License assignments are retained on disk. Therefore, if you decide
to configure this portal as a licensing portal for the product
again in the future, all licensing assignments will be available in
the website.
Parameters:
appId - The identifier for the application for which the
entitlements are being removed.
"""
params = {
"f" : "json",
"appId" : appId
}
url = self._url + "/licenses/removeAllEntitlements"
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | 0.003774 |
def encode(self, text):
r"""Perform encoding of run-length-encoding (RLE).
Parameters
----------
text : str
A text string to encode
Returns
-------
str
Word encoded by RLE
Examples
--------
>>> rle = RLE()
>>> bwt = BWT()
>>> rle.encode(bwt.encode('align'))
'n\x00ilag'
>>> rle.encode('align')
'align'
>>> rle.encode(bwt.encode('banana'))
'annb\x00aa'
>>> rle.encode('banana')
'banana'
>>> rle.encode(bwt.encode('aaabaabababa'))
'ab\x00abbab5a'
>>> rle.encode('aaabaabababa')
'3abaabababa'
"""
if text:
text = ((len(list(g)), k) for k, g in groupby(text))
text = (
(str(n) + k if n > 2 else (k if n == 1 else 2 * k))
for n, k in text
)
return ''.join(text) | 0.002079 |
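The docstring above shows the encode side only; a minimal decode counterpart could look like the sketch below. It assumes the input contains no literal digits, since this scheme writes run counts of three or more as a decimal prefix while runs of one or two characters are written out verbatim.
import re

def rle_decode(text):
    # Counts >= 3 were encoded as "<count><char>"; shorter runs appear literally.
    out = []
    for match in re.finditer(r'(\d+)(.)|(.)', text, flags=re.DOTALL):
        count, char, literal = match.groups()
        out.append(char * int(count) if count is not None else literal)
    return ''.join(out)

# rle_decode('3abaabababa') == 'aaabaabababa'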
def print_datetime_object(dt):
"""prints a date-object"""
print(dt)
print('ctime :', dt.ctime())
print('tuple :', dt.timetuple())
print('ordinal:', dt.toordinal())
print('Year :', dt.year)
print('Mon :', dt.month)
print('Day :', dt.day) | 0.003597 |
def setup_handler(setup_fixtures_fn, setup_fn):
"""Returns a function that adds fixtures handling to the setup method.
Makes sure that fixtures are setup before calling the given setup method.
"""
def handler(obj):
setup_fixtures_fn(obj)
setup_fn(obj)
return handler | 0.009063 |
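A hypothetical illustration of the wrapper (every name below is made up for the example): the returned handler runs the fixture setup first, then the original setUp.
import unittest

def setup_fixtures(test_case):
    test_case.fixtures = {'users': []}      # hypothetical fixture loading

def original_setup(test_case):
    test_case.client = object()             # hypothetical per-test setup

class SomeTestCase(unittest.TestCase):
    # fixtures are guaranteed to be in place before the original setup runs
    setUp = setup_handler(setup_fixtures, original_setup)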
def connect(self, config):
"""Connect to database with given configuration, which may be a dict or
a path to a pymatgen-db configuration.
"""
if isinstance(config, str):
conn = dbutil.get_database(config_file=config)
elif isinstance(config, dict):
conn = dbutil.get_database(settings=config)
else:
raise ValueError("Configuration, '{}', must be a path to "
"a configuration file or dict".format(config))
return conn | 0.003724 |
def libvlc_media_set_meta(p_md, e_meta, psz_value):
'''Set the meta of the media (this function will not save the meta, call
L{libvlc_media_save_meta} in order to save the meta).
@param p_md: the media descriptor.
@param e_meta: the meta to write.
@param psz_value: the media's meta.
'''
f = _Cfunctions.get('libvlc_media_set_meta', None) or \
_Cfunction('libvlc_media_set_meta', ((1,), (1,), (1,),), None,
None, Media, Meta, ctypes.c_char_p)
return f(p_md, e_meta, psz_value) | 0.003731 |
def portal(self, portalID=None):
"""returns a specific reference to a portal"""
if portalID is None:
portalID = self.portalSelf.id
url = "%s/%s" % (self.root, portalID)
return Portal(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=True) | 0.014354 |
def get_all_roles(self, view = None):
"""
Get all roles in the service.
@param view: View to materialize ('full' or 'summary')
@return: A list of ApiRole objects.
"""
return roles.get_all_roles(self._get_resource_root(), self.name,
self._get_cluster_name(), view) | 0.013514 |
def bait(self, maskmiddle='f', k='19'):
"""
Use bbduk to perform baiting
:param maskmiddle: 't'/'f' flag passed to bbduk; treat the middle base of a kmer as a wildcard to increase
sensitivity in the presence of errors.
:param k: keyword argument for length of kmers to use in the analyses
"""
logging.info('Performing kmer baiting of fastq files with {at} targets'.format(at=self.analysistype))
# There seems to be some sort of issue with java incorrectly calculating the total system memory on certain
# computers. For now, calculate the memory, and feed it into the bbduk call
if self.kmer_size is None:
kmer = k
else:
kmer = self.kmer_size
with progressbar(self.runmetadata) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
# Create the folder (if necessary)
make_path(sample[self.analysistype].outputdir)
# Make the system call
if len(sample.general.fastqfiles) == 2:
# Create the command to run the baiting - paired inputs and a single, zipped output
sample[self.analysistype].bbdukcmd = \
'bbduk.sh -Xmx{mem} ref={ref} in1={in1} in2={in2} k={kmer} maskmiddle={mm} ' \
'threads={c} outm={om}' \
.format(mem=self.mem,
ref=sample[self.analysistype].baitfile,
in1=sample.general.trimmedcorrectedfastqfiles[0],
in2=sample.general.trimmedcorrectedfastqfiles[1],
kmer=kmer,
mm=maskmiddle,
c=str(self.cpus),
om=sample[self.analysistype].baitedfastq)
else:
sample[self.analysistype].bbdukcmd = \
'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} maskmiddle={mm} ' \
'threads={cpus} outm={outm}' \
.format(mem=self.mem,
ref=sample[self.analysistype].baitfile,
in1=sample.general.trimmedcorrectedfastqfiles[0],
kmer=kmer,
mm=maskmiddle,
cpus=str(self.cpus),
outm=sample[self.analysistype].baitedfastq)
# Run the system call (if necessary)
if not os.path.isfile(sample[self.analysistype].baitedfastq):
out, err = run_subprocess(sample[self.analysistype].bbdukcmd)
write_to_logfile(sample[self.analysistype].bbdukcmd,
sample[self.analysistype].bbdukcmd,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr) | 0.005291 |
def getModel(self, modelIdentifier):
"""
Return the requested model.
:param modelIdentifier: <str> model identifier
:return: <object> model instance
"""
if modelIdentifier in self._models:
return self._models[modelIdentifier]
else:
message = "Application - getModel() - " \
"Model with identifier {} does not exist." \
.format(modelIdentifier)
raise Exception(message) | 0.00396 |
def changes(self):
"""
Return a tuple with the added and removed facts since the last run, and reset both lists.
"""
try:
return self.added, self.removed
finally:
self.added = list()
self.removed = list() | 0.007874 |
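The try/finally means reading the changes also resets the accumulators, so a second immediate call returns empty lists. A standalone sketch of that behaviour:
class FactTracker:
    def __init__(self):
        self.added, self.removed = [], []

    def changes(self):
        try:
            return self.added, self.removed
        finally:
            self.added = list()
            self.removed = list()

tracker = FactTracker()
tracker.added.append('fact-1')
print(tracker.changes())   # (['fact-1'], [])
print(tracker.changes())   # ([], [])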
def load_shellcode(shellcode, arch, start_offset=0, load_address=0):
"""
Load a new project based on a string of raw bytecode.
:param shellcode: The data to load
:param arch: The name of the arch to use, or an archinfo class
:param start_offset: The offset into the data to start analysis (default 0)
:param load_address: The address to place the data in memory (default 0)
"""
return Project(
BytesIO(shellcode),
main_opts={
'backend': 'blob',
'arch': arch,
'entry_point': start_offset,
'base_addr': load_address,
}
) | 0.002933 |
def primitive(self):
"""
Returns a primitive object representation for this container (which is a dict).
WARNING: The returned container does not contain any markup or formatting metadata.
"""
raw_container = raw.to_raw(self._navigable)
# Collapse the anonymous table onto the top-level container if one is present
if '' in raw_container:
raw_container.update(raw_container[''])
del raw_container['']
return raw_container | 0.009862 |
def matchSubset(self, **kwargs):
"""extract matches from player's entire match history given matching criteria kwargs"""
ret = []
for m in self.matches:
allMatched = True
for k,v in iteritems(kwargs):
mVal = getattr(m, k)
try:
if v == mVal or v in mVal: continue # this check passed
except Exception: pass # if attempting to check __contains__ and exception is raised, it's assumed to be false
allMatched = False
break
if allMatched: ret.append(m)
return ret | 0.016129 |
def validate(self, schema=None):
"""
Validate that we have a valid object.
On error, this will raise a `ScrapeValueError`
This also expects that the schemas assume that omitting required
in the schema asserts the field is optional, not required. This is
due to upstream schemas being in JSON Schema v3, and not validictory's
modified syntax.
^ TODO: FIXME
"""
if schema is None:
schema = self._schema
type_checker = Draft3Validator.TYPE_CHECKER.redefine(
"datetime", lambda c, d: isinstance(d, (datetime.date, datetime.datetime))
)
ValidatorCls = jsonschema.validators.extend(Draft3Validator, type_checker=type_checker)
validator = ValidatorCls(schema, format_checker=FormatChecker())
errors = [str(error) for error in validator.iter_errors(self.as_dict())]
if errors:
raise ScrapeValueError('validation of {} {} failed: {}'.format(
self.__class__.__name__, self._id, '\n\t'+'\n\t'.join(errors)
)) | 0.004575 |
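A minimal standalone example of the same jsonschema pattern used above (redefining a type and extending Draft3Validator), assuming jsonschema >= 3.0:
import datetime
import jsonschema
from jsonschema import Draft3Validator, FormatChecker

type_checker = Draft3Validator.TYPE_CHECKER.redefine(
    "datetime",
    lambda checker, value: isinstance(value, (datetime.date, datetime.datetime)),
)
CustomValidator = jsonschema.validators.extend(Draft3Validator, type_checker=type_checker)

schema = {"type": "object", "properties": {"start": {"type": "datetime"}}}
validator = CustomValidator(schema, format_checker=FormatChecker())
print(validator.is_valid({"start": datetime.date(2020, 1, 1)}))   # True
print(validator.is_valid({"start": "2020-01-01"}))                # False: plain string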
def get_peak_pos(im, wrap=False):
"""Get the peak position with subpixel precision
Parameters
----------
im: 2d array
The image containing a peak
wrap: boolean, defaults False
True if the image represents a toric (wrap-around) world
Returns
-------
[y,x]: 2 numbers
The position of the highest peak with subpixel precision
Notes
-----
This is a bit hacky and could be improved
"""
im = np.asarray(im)
# remove invalid values (assuming im>0)
im[np.logical_not(np.isfinite(im))] = 0
# remove mean
im = im - im.mean()
# get maximum value
argmax = im.argmax()
dsize = im.size
# get cut value (30% biggest peak)
# TODO: choose less random value
cut = .3 * im[argmax]
# isolate peak
peak = im > cut
peak, __ = label(peak)
# wrap border
if wrap and peak[0] != 0 and peak[-1] != 0 and peak[0] != peak[-1]:
peak[peak == peak[-1]] = peak[0]
# extract peak
peak = peak == peak[argmax]
# get values along X and Y
X = np.arange(dsize)[peak]
Y = im[peak]
# wrap border
if wrap:
# wrap X values d
X[X > dsize // 2] -= dsize
# remove argmax as in X**4 X should be small
offset = X[Y == Y.max()][0]
X -= offset
# We want to fit in a radius of 3 around the center
Y = Y[abs(X) < 3]
X = X[abs(X) < 3]
# if>2, use fit_log
if peak.sum() > 2:
ret, __ = gauss_fit_log(X, Y)
# if fails, use center_of_mass
if ret is np.nan:
ret = center_of_mass(X, Y)
elif peak.sum() > 1:
# If only 2 pixel, gauss fitting is imposible, use center_of_mass
ret = center_of_mass(X, Y)
else:
# 1 px peak is easy
ret = X[0]
"""
import matplotlib.pyplot as plt
plt.figure()
plt.plot(X,Y,'x',label='im')
plt.plot([ret,ret],[1,Y.max()],label='logfit')
plt.plot([X.min(),X.max()],[cut,cut])
plt.plot([X.min(),X.max()],[im.std(),im.std()])
#"""
return ret + offset | 0.00049 |
def run_external_commands(self, cmds):
"""Run external commands Arbiter/Receiver sent
:param cmds: commands to run
:type cmds: list
:return: None
"""
if not self.external_commands_manager:
return
try:
_t0 = time.time()
logger.debug("Scheduler '%s' got %d commands", self.name, len(cmds))
for command in cmds:
self.external_commands_manager.resolve_command(ExternalCommand(command))
statsmgr.counter('external-commands.got.count', len(cmds))
statsmgr.timer('external-commands.got.time', time.time() - _t0)
except Exception as exp: # pylint: disable=broad-except
logger.warning("External command parsing error: %s", exp)
logger.warning("Exception: %s / %s", str(exp), traceback.print_exc())
for command in cmds:
try:
command = command.decode('utf8', 'ignore')
except UnicodeEncodeError:
pass
except AttributeError:
pass
logger.warning("Command: %s", command) | 0.004266 |
def get_resources(self, collections):
""" Get resources that correspond to values from :collections:.
:param collections: Collection names for which resources should be
gathered
:type collections: list of str
:return: Gathered resources
:rtype: set of Resource instances
"""
res_map = self.request.registry._model_collections
resources = [res for res in res_map.values()
if res.collection_name in collections]
resources = [res for res in resources if res]
return set(resources) | 0.003378 |
def cancel_instruction(bet_id, size_reduction=None):
"""
Instruction to fully or partially cancel an order (only applies to LIMIT orders)
:param str bet_id: identifier of the bet to cancel.
:param float size_reduction: If supplied then this is a partial cancel.
:returns: cancellation report detailing status, cancellation requested and actual cancellation details.
:rtype: dict
"""
args = locals()
return {
to_camel_case(k): v for k, v in args.items() if v is not None
} | 0.005769 |
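Because the body filters locals() and camel-cases the keys, a call would produce output along these lines (assuming to_camel_case does the usual snake_case to camelCase conversion; the bet id is an illustrative placeholder):
cancel_instruction("228302652", size_reduction=2.0)
# -> {'betId': '228302652', 'sizeReduction': 2.0}

cancel_instruction("228302652")
# -> {'betId': '228302652'}        # None values are dropped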
def standard_output(self, ds, limit, check_name, groups):
"""
Generates the Terminal Output for Standard cases
Returns the dataset needed for the verbose output, as well as the failure flags.
"""
score_list, points, out_of = self.get_points(groups, limit)
issue_count = out_of - points
# Let's add the version number to the check name if it's missing
check_name = self._get_check_versioned_name(check_name)
check_url = self._get_check_url(check_name)
width = 2 * self.col_width
print('\n')
print("-" * width)
print('{:^{width}}'.format("IOOS Compliance Checker Report", width=width))
print('{:^{width}}'.format(check_name, width=width))
print('{:^{width}}'.format(check_url, width=width))
print("-" * width)
if issue_count > 0:
print('{:^{width}}'.format("Corrective Actions", width=width))
plural = '' if issue_count == 1 else 's'
print("{} has {} potential issue{}".format(os.path.basename(ds), issue_count, plural))
return [groups, points, out_of] | 0.004409 |
def _list(env, key, more, loader, _all=False, output=None):
"""Lists all user defined config values
and if `--all` is passed it also shows dynaconf internal variables.
"""
if env:
env = env.strip()
if key:
key = key.strip()
if loader:
loader = loader.strip()
if env:
settings.setenv(env)
cur_env = settings.current_env.lower()
click.echo(
click.style(
"Working in %s environment " % cur_env,
bold=True,
bg="blue",
fg="bright_black",
)
)
if not loader:
data = settings.as_dict(env=env, internal=_all)
else:
identifier = "{}_{}".format(loader, cur_env)
data = settings._loaded_by_loaders.get(identifier, {})
data = data or settings._loaded_by_loaders.get(loader, {})
# remove to avoid displaying twice
data.pop("SETTINGS_MODULE", None)
def color(_k):
if _k in dir(default_settings):
return "blue"
return "green"
if not key:
datalines = "\n".join(
"%s: %s"
% (click.style(k, bg=color(k), fg="white"), pprint.pformat(v))
for k, v in data.items()
)
(click.echo_via_pager if more else click.echo)(datalines)
if output:
with open(output, "w") as output_file:
json.dump({cur_env: data}, output_file)
else:
key = key.upper()
value = data.get(key)
if not value:
click.echo(click.style("Key not found", bg="red", fg="white"))
return
click.echo(
"%s: %s"
% (
click.style(key.upper(), bg=color(key), fg="white"),
pprint.pformat(value),
)
)
if output:
with open(output, "w") as output_file:
json.dump({cur_env: {key.upper(): value}}, output_file)
if env:
settings.setenv() | 0.000509 |
def check_error(self):
"""Check if the async response is an error.
Take care to call `is_done` before calling `error`. Note that the error
messages are always encoded as strings.
:raises CloudUnhandledError: When not checking `is_done` first
:return: status_code, error_msg, payload
:rtype: tuple
"""
if not self.is_done:
raise CloudUnhandledError("Need to check if request is done, before checking for error")
response = self.db[self.async_id]
error_msg = response["error"]
status_code = int(response["status_code"])
payload = response["payload"]
return status_code, error_msg, payload | 0.004261 |
def safe_unicode_stdin(string):
"""
Safely convert the given string to a Unicode string,
decoding using ``sys.stdin.encoding`` if needed.
If running from a frozen binary, ``utf-8`` encoding is assumed.
:param variant string: the byte string or Unicode string to convert
:rtype: string
"""
if string is None:
return None
if is_bytes(string):
if FROZEN:
return string.decode("utf-8")
try:
return string.decode(sys.stdin.encoding)
except UnicodeDecodeError:
return string.decode(sys.stdin.encoding, "replace")
except:
return string.decode("utf-8")
return string | 0.002907 |
async def filter_by(cls, db, offset=None, limit=None, **kwargs):
"""Query by attributes iteratively. Ordering is not supported
Example:
User.get_by(db, age=[32, 54])
User.get_by(db, age=23, name="guido")
"""
if limit and type(limit) is not int:
raise InvalidQuery('If limit is supplied it must be an int')
if offset and type(offset) is not int:
raise InvalidQuery('If offset is supplied it must be an int')
ids_to_iterate = await cls._get_ids_filter_by(db, **kwargs)
if offset:
# Using offset without order_by is pretty strange, but allowed
if limit:
ids_to_iterate = ids_to_iterate[offset:offset+limit]
else:
ids_to_iterate = ids_to_iterate[offset:]
elif limit:
ids_to_iterate = ids_to_iterate[:limit]
for key in ids_to_iterate:
yield await cls.load(db, key) | 0.002053 |
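Since filter_by is an async generator, callers consume it with async for; a hedged usage sketch (User, db and the attribute names are placeholders):
async def first_ten_guidos(db):
    names = []
    async for user in User.filter_by(db, name="guido", limit=10):
        names.append(user.name)
    return names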
def ismethod(func):
'''this function should return the information gathered on a function
@param func: this is the function we want to get info on
@return a tuple where:
0 = indicates whether the parameter passed is a method or not
1 = a list of classes 'Info', with the info gathered from the function
this is a list because when we have methods from java with the same name and different signatures,
we actually have many methods, each with its own set of arguments
'''
try:
if isinstance(func, core.PyFunction):
#ok, this is from python, created by jython
#print_ ' PyFunction'
def getargs(func_code):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
nargs = func_code.co_argcount
names = func_code.co_varnames
args = list(names[:nargs])
step = 0
if not hasattr(func_code, 'CO_VARARGS'):
from org.python.core import CodeFlag # @UnresolvedImport
co_varargs_flag = CodeFlag.CO_VARARGS.flag
co_varkeywords_flag = CodeFlag.CO_VARKEYWORDS.flag
else:
co_varargs_flag = func_code.CO_VARARGS
co_varkeywords_flag = func_code.CO_VARKEYWORDS
varargs = None
if func_code.co_flags & co_varargs_flag:
varargs = func_code.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if func_code.co_flags & co_varkeywords_flag:
varkw = func_code.co_varnames[nargs]
return args, varargs, varkw
args = getargs(func.func_code)
return 1, [Info(func.func_name, args=args[0], varargs=args[1], kwargs=args[2], doc=func.func_doc)]
if isinstance(func, core.PyMethod):
#this is something from java itself, and jython just wrapped it...
#things to play in func:
#['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',
#'im_func', 'im_self', 'toString']
#print_ ' PyMethod'
#that's the PyReflectedFunction... keep going to get it
func = func.im_func
if isinstance(func, PyReflectedFunction):
#this is something from java itself, and jython just wrapped it...
#print_ ' PyReflectedFunction'
infos = []
for i in xrange(len(func.argslist)):
#things to play in func.argslist[i]:
#'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'
#'flags', 'isStatic', 'matches', 'precedence']
#print_ ' ', func.argslist[i].data.__class__
#func.argslist[i].data.__class__ == java.lang.reflect.Method
if func.argslist[i]:
met = func.argslist[i].data
name = met.getName()
try:
ret = met.getReturnType()
except AttributeError:
ret = ''
parameterTypes = met.getParameterTypes()
args = []
for j in xrange(len(parameterTypes)):
paramTypesClass = parameterTypes[j]
try:
try:
paramClassName = paramTypesClass.getName()
except:
paramClassName = paramTypesClass.getName(paramTypesClass)
except AttributeError:
try:
paramClassName = repr(paramTypesClass) #should be something like <type 'object'>
paramClassName = paramClassName.split('\'')[1]
except:
paramClassName = repr(paramTypesClass) #just in case something else happens... it will at least be visible
#if the parameter equals [C, it means it is a char array, so, let's change it
a = format_param_class_name(paramClassName)
#a = a.replace('[]','Array')
#a = a.replace('Object', 'obj')
#a = a.replace('String', 's')
#a = a.replace('Integer', 'i')
#a = a.replace('Char', 'c')
#a = a.replace('Double', 'd')
args.append(a) #so we don't leave invalid code
info = Info(name, args=args, ret=ret)
#print_ info.basic_as_str()
infos.append(info)
return 1, infos
except Exception:
s = StringIO.StringIO()
traceback.print_exc(file=s)
return 1, [Info(str('ERROR'), doc=s.getvalue())]
return 0, None | 0.008333 |
def run(self, N=100):
"""
Parameters
----------
N: int
number of particles
Returns
-------
wgts: Weights object
The importance weights (with attributes lw, W, and ESS)
X: ThetaParticles object
The N particles (with attributes theta, logpost)
norm_cst: float
Estimate of the normalising constant of the target
"""
th = self.proposal.rvs(size=N)
self.X = ThetaParticles(theta=th, lpost=None)
self.X.lpost = self.model.logpost(th)
lw = self.X.lpost - self.proposal.logpdf(th)
self.wgts = rs.Weights(lw=lw)
self.norm_cst = rs.log_mean_exp(lw) | 0.004155 |
def initialize(self, training_info, model, environment, device):
""" Initialize policy gradient from reinforcer settings """
self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device)
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval() | 0.008929 |
def _iexplode_path(path):
"""Iterate over all the parts of a path.
Splits path recursively with os.path.split().
"""
(head, tail) = os.path.split(path)
if not head or (not tail and head == path):
if head:
yield head
if tail or not head:
yield tail
return
for p in _iexplode_path(head):
yield p
yield tail | 0.002571 |
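For a typical path the generator yields the root (if any) first and then each component in order, for example:
>>> list(_iexplode_path('/a/b/c'))
['/', 'a', 'b', 'c']
>>> list(_iexplode_path('a/b'))
['a', 'b']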
def add_item(self, item, index=True):
# pylint: disable=too-many-branches, too-many-locals, too-many-nested-blocks
"""
Add an item into our containers, and index it depending on the `index` flag.
:param item: object to add
:type item: alignak.objects.item.Item
:param index: Flag indicating if the item should be indexed
:type index: bool
:return: the new items created
:rtype list
"""
name_property = getattr(self.__class__, "name_property", None)
# Check if some hosts are to be self-generated...
generated_hosts = []
if name_property:
name = getattr(item, name_property, None)
if name and '[' in name and ']' in name:
# We can create several objects from the same configuration!
pattern = name[name.find("[")+1:name.find("]")]
if '-' in pattern:
logger.debug("Found an host with a patterned name: %s", pattern)
# pattern is format-min-max
# format is optional
limits = pattern.split('-')
fmt = "%d"
min_v = 1
max_v = 1
if len(limits) == 3:
fmt = limits[2]
new_name = name.replace('[%s-%s-%s]' % (limits[0], limits[1], fmt), '***')
else:
new_name = name.replace('[%s-%s]' % (limits[0], limits[1]), '***')
try:
min_v = int(limits[0])
except ValueError:
pass
try:
max_v = int(limits[1])
except ValueError:
pass
for idx in range(min_v, max_v + 1):
logger.debug("- cloning host: %s", new_name.replace('***', fmt % idx))
new_host = deepcopy(item)
new_host.uuid = get_a_new_object_id()
new_host.host_name = new_name.replace('***', fmt % idx)
# Update some fields with the newly generated host name
for prop in ['display_name', 'alias', 'notes', 'notes_url', 'action_url']:
if getattr(new_host, prop, None) is None:
continue
value = getattr(new_host, prop)
if '$HOSTNAME$' in value:
setattr(new_host, prop, value.replace('$HOSTNAME$',
new_host.host_name))
generated_hosts.append(new_host)
if generated_hosts:
for new_host in generated_hosts:
if index is True:
new_host = self.index_item(new_host)
self.items[new_host.uuid] = new_host
logger.info(" cloned %d hosts from %s", len(generated_hosts), item.get_name())
else:
if index is True and name_property:
item = self.index_item(item)
self.items[item.uuid] = item
return generated_hosts | 0.003942 |
def is_verified(self):
"""
Verifies an SES bounce message.
"""
if self._verified is None:
signature = self._data.get('Signature')
if not signature:
self._verified = False
return self._verified
# Decode the signature from base64
signature = bytes(base64.b64decode(signature))
# Get the message to sign
sign_bytes = self._get_bytes_to_sign()
if not sign_bytes:
self._verified = False
return self._verified
if not self.certificate:
self._verified = False
return self._verified
# Extract the public key
pkey = self.certificate.get_pubkey()
# Use the public key to verify the signature.
pkey.verify_init()
pkey.verify_update(sign_bytes)
verify_result = pkey.verify_final(signature)
self._verified = verify_result == 1
return self._verified | 0.00189 |
def create(self, bucket, descriptor, force=False):
"""https://github.com/frictionlessdata/tableschema-pandas-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
# Check buckets for existence
for bucket in buckets:
if bucket in self.buckets:
if not force:
message = 'Bucket "%s" already exists' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define dataframes
for bucket, descriptor in zip(buckets, descriptors):
tableschema.validate(descriptor)
self.__descriptors[bucket] = descriptor
self.__dataframes[bucket] = pd.DataFrame() | 0.002139 |
def _set_status_data(self, userdata):
"""Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Trigger Group Bit Mask
"""
self._on_mask = userdata['d3']
self._off_mask = userdata['d4']
self._x10_house_code = userdata['d5']
self._x10_unit = userdata['d6']
self._ramp_rate = userdata['d7']
self._on_level = userdata['d8']
self._led_brightness = userdata['d9']
self._non_toggle_mask = userdata['d10']
self._led_bit_mask = userdata['d11']
self._x10_all_bit_mask = userdata['d12']
self._on_off_bit_mask = userdata['d13']
self._trigger_group_bit_mask = userdata['d14'] | 0.002014 |
def p_string_list(self, p):
'''string_list : string_list COMMA IDENT
| IDENT
| empty'''
if p[1] is None:
p[0] = []
elif len(p) == 4:
p[1].append(p[3])
p[0] = p[1]
elif len(p) == 2:
p[0] = [p[1]] | 0.00625 |
def _parse_list(element, definition):
"""Parse xml element by definition given by list.
Find all elements matched by the string given as the first value
in the list (as XPath or @attribute).
If there is a second argument it is used as the definition
for the matched elements; otherwise their text is used.
:param element: ElementTree element
:param definition: definition schema
:type definition: list
:return: parsed xml
:rtype: list
"""
if len(definition) == 0:
raise XmlToJsonException('List definition needs some definition')
tag = definition[0]
tag_def = definition[1] if len(definition) > 1 else None
sub_list = []
for el in element.findall(tag):
sub_list.append(xml_to_json(el, tag_def))
return sub_list | 0.001252 |
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
"""As above, but for when the squared differences matching method is used
"""
template_norm_squared = np.sum(template**2)
image_norms_squared = {(x,y):np.sum(image_tile_dict[(x,y)]**2) for (x,y) in image_tile_dict.keys()}
match_points = list(image_tile_dict.keys())
# for correlation, we then need to transform back to get the correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]:-2*transformed_array[match_points[i][0], match_points[i][1]] + image_norms_squared[match_points[i]] + template_norm_squared for i in range(len(match_points))}
#print image_matches_normalised
cutoff = h*w*255**2*sq_diff_tolerance
normalised_matches = {key:value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) <= cutoff}
return normalised_matches.keys() | 0.013757 |
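The normalisation comes from expanding the sum of squared differences between an image tile I and the template T, which is why a precomputed correlation can be reused:
\sum (I - T)^2 = \sum I^2 - 2 \sum I\,T + \sum T^2
so each entry of image_matches_normalised is the tile norm minus twice the correlation plus the template norm, compared against the cutoff h*w*255^2*sq_diff_tolerance.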
def show_correlation_matrix(sync_output_dynamic, iteration = None):
"""!
@brief Shows correlation matrix between oscillators at the specified iteration.
@param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.
@param[in] iteration (uint): Iteration number of the simulation for which the correlation matrix should be allocated.
If the iteration number is not specified, the last step of the simulation is used for the matrix allocation.
"""
_ = plt.figure();
correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(iteration);
plt.imshow(correlation_matrix, cmap = plt.get_cmap('cool'), interpolation='kaiser', vmin = 0.0, vmax = 1.0);
plt.show(); | 0.029904 |
def plot_fermi_surface(data, structure, cbm, energy_levels=[],
multiple_figure=True,
mlab_figure=None, kpoints_dict={}, color=(0, 0, 1),
transparency_factor=[], labels_scale_factor=0.05,
points_scale_factor=0.02, interative=True):
"""
Plot the Fermi surface at specific energy value.
Args:
data: energy values in a 3D grid from a CUBE file
via read_cube_file function, or from a
BoltztrapAnalyzer.fermi_surface_data
structure: structure object of the material
energy_levels: list of energy value of the fermi surface.
By default 0 eV correspond to the VBM, as in
the plot of band structure along symmetry line.
Default: max energy value + 0.01 eV
cbm: Boolean value to specify if the considered band is
a conduction band or not
multiple_figure: if True a figure for each energy level will be shown.
If False all the surfaces will be shown in the same figure.
In this last case, tune the transparency factor.
mlab_figure: provide a previous figure to plot a new surface on it.
kpoints_dict: dictionary of kpoints to show in the plot.
example: {"K":[0.5,0.0,0.5]},
where the coords are fractional.
color: tuple (r,g,b) of integers to define the color of the surface.
transparency_factor: list of values in the range [0,1] to tune
the opacity of the surfaces.
labels_scale_factor: factor to tune the size of the kpoint labels
points_scale_factor: factor to tune the size of the kpoint points
interative: if True an interactive figure will be shown.
If False a non interactive figure will be shown, but
it is possible to plot other surfaces on the same figure.
To make it interactive, run mlab.show().
Returns:
a Mayavi figure and a mlab module to control the plot.
Note: Experimental.
Please, double check the surface shown by using some
other software and report issues.
"""
try:
from mayavi import mlab
except ImportError:
raise BoltztrapError(
"Mayavi package should be installed to use this function")
bz = structure.lattice.reciprocal_lattice.get_wigner_seitz_cell()
cell = structure.lattice.reciprocal_lattice.matrix
fact = 1 if cbm == False else -1
en_min = np.min(fact * data.ravel())
en_max = np.max(fact * data.ravel())
if energy_levels == []:
energy_levels = [en_min + 0.01] if cbm == True else \
[en_max - 0.01]
print("Energy level set to: " + str(energy_levels[0]) + " eV")
else:
for e in energy_levels:
if e > en_max or e < en_min:
raise BoltztrapError("energy level " + str(e) +
" not in the range of possible energies: [" +
str(en_min) + ", " + str(en_max) + "]")
if transparency_factor == []:
transparency_factor = [1] * len(energy_levels)
if mlab_figure:
fig = mlab_figure
if mlab_figure == None and not multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label, scale=labels_scale_factor,
color=(0, 0, 0), figure=fig)
for isolevel, alpha in zip(energy_levels, transparency_factor):
if multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label,
scale=labels_scale_factor, color=(0, 0, 0),
figure=fig)
cp = mlab.contour3d(fact * data, contours=[isolevel], transparent=True,
colormap='hot', color=color, opacity=alpha,
figure=fig)
polydata = cp.actor.actors[0].mapper.input
pts = np.array(polydata.points) # - 1
polydata.points = np.dot(pts,
cell / np.array(data.shape)[:, np.newaxis])
cx, cy, cz = [np.mean(np.array(polydata.points)[:, i])
for i in range(3)]
polydata.points = (np.array(polydata.points) - [cx, cy, cz]) * 2
#mlab.view(distance='auto')
fig.scene.isometric_view()
if interative == True:
mlab.show()
return fig, mlab | 0.002978 |
def do_allowrep(self, line):
"""allowrep Allow new objects to be replicated."""
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().set_replication_allowed(
True
)
self._print_info_if_verbose("Set replication policy to allow replication") | 0.012121 |
def __MaxSizeToInt(self, max_size):
"""Convert max_size to an int."""
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
shift = unit_dict.get(unit.upper())
if shift is None:
raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift) | 0.003436 |
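Worked through the regex and the shift table, a few representative conversions (a sketch of expected values, not taken from the library's tests):
assert int('1024') * (1 << 0) == 1024          # no unit: shift of 0
assert 5 * (1 << 20) == 5242880                # '5MB' -> five mebibytes
assert 2 * (1 << 30) == 2147483648             # '2GB' -> two gibibytes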
def hash_and_stat_file(self, path, saltenv='base'):
'''
Return the hash of a file, to get the hash of a file in the pillar_roots
prepend the path with salt://<file on server> otherwise, prepend the
file with / for a local file.
Additionally, return the stat result of the file, or None if no stat
results were found.
'''
ret = {}
fnd = self.__get_file_path(path, saltenv)
if fnd is None:
return ret, None
try:
# Remote file path (self._find_file() invoked)
fnd_path = fnd['path']
fnd_stat = fnd.get('stat')
except TypeError:
# Local file path
fnd_path = fnd
try:
fnd_stat = list(os.stat(fnd_path))
except Exception:
fnd_stat = None
hash_type = self.opts.get('hash_type', 'md5')
ret['hsum'] = salt.utils.hashutils.get_hash(fnd_path, form=hash_type)
ret['hash_type'] = hash_type
return ret, fnd_stat | 0.002841 |
def derivable(self):
"""
Whether the spec (only valid for derived specs) can be derived
given the inputs and switches provided to the study
"""
try:
# Just need to iterate all study inputs and catch relevant
# exceptions
list(self.pipeline.study_inputs)
except (ArcanaOutputNotProducedException,
ArcanaMissingDataException):
return False
return True | 0.004264 |
def all(self, *, collection, attribute, word, func=None, operation=None):
""" Performs a filter with the OData 'all' keyword on the collection
For example:
q.all(collection='email_addresses', attribute='address',
operation='eq', word='[email protected]')
will transform to a filter such as:
emailAddresses/all(a:a/address eq '[email protected]')
:param str collection: the collection to apply the any keyword on
:param str attribute: the attribute of the collection to check
:param str word: the word to check
:param str func: the logical function to apply to the attribute
inside the collection
:param str operation: the logical operation to apply to the
attribute inside the collection
:rtype: Query
"""
return self.iterable('all', collection=collection, attribute=attribute,
word=word, func=func, operation=operation) | 0.002041 |
def checker(location, receiver):
"""Construct a function that checks a directory for process configuration
The function checks for additions or removals
of JSON process configuration files and calls the appropriate receiver
methods.
:param location: string, the directory to monitor
:param receiver: IEventReceiver
:returns: a function with no parameters
"""
path = filepath.FilePath(location)
files = set()
filesContents = {}
def _check(path):
currentFiles = set(fname for fname in os.listdir(location)
if not fname.endswith('.new'))
removed = files - currentFiles
added = currentFiles - files
for fname in added:
contents = path.child(fname).getContent()
filesContents[fname] = contents
receiver.add(fname, contents)
for fname in removed:
receiver.remove(fname)
same = currentFiles & files
for fname in same:
newContents = path.child(fname).getContent()
oldContents = filesContents[fname]
if newContents == oldContents:
continue
receiver.remove(fname)
filesContents[fname] = newContents
receiver.add(fname, newContents)
files.clear()
files.update(currentFiles)
return functools.partial(_check, path) | 0.000719 |
def add(self,dimlist,dimvalues):
'''
add dimensions
:parameter dimlist: list of dimensions
:parameter dimvalues: list of values for dimlist
'''
for i,d in enumerate(dimlist):
self[d] = dimvalues[i]
self.set_ndims() | 0.022581 |
def transitionStates(self,state):
"""
Return the indices of new states and their rates.
"""
newstates,rates = self.transition(state)
newindices = self.getStateIndex(newstates)
return newindices,rates | 0.031802 |
def multipart_uploadpart(self, multipart):
"""Upload a part.
:param multipart: A :class:`invenio_files_rest.models.MultipartObject`
instance.
:returns: A Flask response.
"""
content_length, part_number, stream, content_type, content_md5, tags =\
current_files_rest.multipart_partfactory()
if content_length:
ck = multipart.last_part_size if \
part_number == multipart.last_part_number \
else multipart.chunk_size
if ck != content_length:
raise MultipartInvalidChunkSize()
# Create part
try:
p = Part.get_or_create(multipart, part_number)
p.set_contents(stream)
db.session.commit()
except Exception:
# We remove the Part since incomplete data may have been written to
# disk (e.g. client closed connection etc.) so it must be
# reuploaded.
db.session.rollback()
Part.delete(multipart, part_number)
raise
return self.make_response(
data=p,
context={
'class': Part,
},
etag=p.checksum
) | 0.001609 |
def add_virtual_columns_equatorial_to_galactic_cartesian(self, alpha, delta, distance, xname, yname, zname, radians=True, alpha_gp=np.radians(192.85948), delta_gp=np.radians(27.12825), l_omega=np.radians(32.93192)):
"""From http://arxiv.org/pdf/1306.2945v2.pdf"""
if not radians:
alpha = "pi/180.*%s" % alpha
delta = "pi/180.*%s" % delta
self.virtual_columns[zname] = "{distance} * (cos({delta}) * cos({delta_gp}) * cos({alpha} - {alpha_gp}) + sin({delta}) * sin({delta_gp}))".format(**locals())
self.virtual_columns[xname] = "{distance} * (cos({delta}) * sin({alpha} - {alpha_gp}))".format(**locals())
self.virtual_columns[yname] = "{distance} * (sin({delta}) * cos({delta_gp}) - cos({delta}) * sin({delta_gp}) * cos({alpha} - {alpha_gp}))".format(**locals()) | 0.006281 |
def get_content_id(self, content_metadata_item):
"""
Return the id for the given content_metadata_item, `uuid` for programs or `key` for other content
"""
content_id = content_metadata_item.get('key', '')
if content_metadata_item['content_type'] == 'program':
content_id = content_metadata_item.get('uuid', '')
return content_id | 0.007732 |
def _sigmainf(N, h, m, dW, Km0, Pm0):
"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3) | 0.006173 |
def verify_classification(self, classification):
"""
Mark the given ClassifiedFailure as verified.
Handles the classification not currently being related to this
TextLogError and no Metadata existing.
"""
if classification not in self.classified_failures.all():
self.create_match("ManualDetector", classification)
# create a TextLogErrorMetadata instance for this TextLogError if it
# doesn't exist. We can't use update_or_create here since OneToOne
# relations don't use an object manager so a missing relation is simply
# None as opposed to RelatedManager.
if self.metadata is None:
TextLogErrorMetadata.objects.create(text_log_error=self,
best_classification=classification,
best_is_verified=True)
else:
self.metadata.best_classification = classification
self.metadata.best_is_verified = True
self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
self.metadata.failure_line.elastic_search_insert()
# Send event to NewRelic when verifying an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
if not match:
return
newrelic.agent.record_custom_event('user_verified_classification', {
'matcher': match.matcher_name,
'job_id': self.id,
}) | 0.002584 |
def _update_header_size(self):
"""Update the column width of the header."""
column_count = self.table_header.model().columnCount()
for index in range(0, column_count):
if index < column_count:
column_width = self.dataTable.columnWidth(index)
self.table_header.setColumnWidth(index, column_width)
else:
break | 0.004866 |
def _check_for_uploads_from_md5(self):
# type: (Uploader) -> None
"""Check queue for a file to upload
:param Uploader self: this
"""
cv = self._md5_offload.done_cv
while not self.termination_check_md5:
result = None
cv.acquire()
while True:
result = self._md5_offload.pop_done_queue()
if result is None:
# use cv timeout due to possible non-wake while running
cv.wait(1)
# check for terminating conditions
if self.termination_check_md5:
break
else:
break
cv.release()
if result is not None:
self._post_md5_skip_on_check(result[0], result[3]) | 0.003584 |
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a column on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break | 0.013736 |
def median(self):
"""Computes the median of a log-normal distribution built with the stats data."""
mu = self.mean()
ret_val = math.exp(mu)
if math.isnan(ret_val):
ret_val = float("inf")
return ret_val | 0.011858 |
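This relies on the standard log-normal facts: if the underlying normal has parameters \mu and \sigma, the median depends only on \mu while the mean also involves \sigma:
\operatorname{median} = e^{\mu}, \qquad \operatorname{mean} = e^{\mu + \sigma^{2}/2}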
def pdfa_status(self):
"""Returns the PDF/A conformance level claimed by this PDF, or False
A PDF may claim to PDF/A compliant without this being true. Use an
independent verifier such as veraPDF to test if a PDF is truly
conformant.
Returns:
str: The conformance level of the PDF/A, or an empty string if the
PDF does not claim PDF/A conformance. Possible valid values
are: 1A, 1B, 2A, 2B, 2U, 3A, 3B, 3U.
"""
key_part = QName(XMP_NS_PDFA_ID, 'part')
key_conformance = QName(XMP_NS_PDFA_ID, 'conformance')
try:
return self[key_part] + self[key_conformance]
except KeyError:
return '' | 0.002755 |
def bring_gpio_interrupt_into_userspace(): # activate gpio interrupt
"""Bring the interrupt pin on the GPIO into Linux userspace."""
try:
# is it already there?
with open(GPIO_INTERRUPT_DEVICE_VALUE):
return
except IOError:
# no, bring it into userspace
with open(GPIO_EXPORT_FILE, 'w') as export_file:
export_file.write(str(GPIO_INTERRUPT_PIN))
wait_until_file_exists(GPIO_INTERRUPT_DEVICE_VALUE) | 0.002105 |
def read(self, frames=-1, dtype='float64', always_2d=False,
fill_value=None, out=None):
"""Read from the file and return data as NumPy array.
Reads the given number of frames in the given data format
starting at the current read/write position. This advances the
read/write position by the same number of frames.
By default, all frames from the current read/write position to
the end of the file are returned.
Use :meth:`.seek` to move the current read/write position.
Parameters
----------
frames : int, optional
The number of frames to read. If ``frames < 0``, the whole
rest of the file is read.
dtype : {'float64', 'float32', 'int32', 'int16'}, optional
Data type of the returned array, by default ``'float64'``.
Floating point audio data is typically in the range from
``-1.0`` to ``1.0``. Integer data is in the range from
``-2**15`` to ``2**15-1`` for ``'int16'`` and from
``-2**31`` to ``2**31-1`` for ``'int32'``.
.. note:: Reading int values from a float file will *not*
scale the data to [-1.0, 1.0). If the file contains
``np.array([42.6], dtype='float32')``, you will read
``np.array([43], dtype='int32')`` for
``dtype='int32'``.
Returns
-------
audiodata : numpy.ndarray or type(out)
A two-dimensional NumPy (frames x channels) array is
returned. If the sound file has only one channel, a
one-dimensional array is returned. Use ``always_2d=True``
to return a two-dimensional array anyway.
If `out` was specified, it is returned. If `out` has more
frames than available in the file (or if `frames` is
smaller than the length of `out`) and no `fill_value` is
given, then only a part of `out` is overwritten and a view
containing all valid frames is returned.
Other Parameters
----------------
always_2d : bool, optional
By default, reading a mono sound file will return a
one-dimensional array. With ``always_2d=True``, audio data
is always returned as a two-dimensional array, even if the
audio file has only one channel.
fill_value : float, optional
If more frames are requested than available in the file,
the rest of the output is be filled with `fill_value`. If
`fill_value` is not specified, a smaller array is
returned.
out : numpy.ndarray or subclass, optional
If `out` is specified, the data is written into the given
array instead of creating a new array. In this case, the
arguments `dtype` and `always_2d` are silently ignored! If
`frames` is not given, it is obtained from the length of
`out`.
Examples
--------
>>> from soundfile import SoundFile
>>> myfile = SoundFile('stereo_file.wav')
Reading 3 frames from a stereo file:
>>> myfile.read(3)
array([[ 0.71329652, 0.06294799],
[-0.26450912, -0.38874483],
[ 0.67398441, -0.11516333]])
>>> myfile.close()
See Also
--------
buffer_read, .write
"""
if out is None:
frames = self._check_frames(frames, fill_value)
out = self._create_empty_array(frames, always_2d, dtype)
else:
if frames < 0 or frames > len(out):
frames = len(out)
frames = self._array_io('read', out, frames)
if len(out) > frames:
if fill_value is None:
out = out[:frames]
else:
out[frames:] = fill_value
return out | 0.000756 |
def __setAttributeDefaults(self):
"""Looks for default values for unset attributes. If
class variable representing attribute is None, then
it must be defined as an instance variable.
"""
for k,v in self.__class__.attributes.items():
if v is not None and self.attributes.has_key(k) is False:
if isinstance(v, types.FunctionType):
self.attributes[k] = v(self)
else:
self.attributes[k] = v | 0.007737 |
def ae_latent_softmax(latents_pred, latents_discrete, hparams):
"""Latent prediction and loss."""
vocab_size = 2 ** hparams.z_size
if hparams.num_decode_blocks < 2:
latents_logits = tf.layers.dense(latents_pred, vocab_size,
name="extra_logits")
if hparams.logit_normalization:
latents_logits *= tf.rsqrt(1e-8 +
tf.reduce_mean(tf.square(latents_logits)))
loss = None
if latents_discrete is not None:
if hparams.soft_em:
# latents_discrete is actually one-hot of multinomial samples
assert hparams.num_decode_blocks == 1
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=latents_discrete, logits=latents_logits)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=latents_discrete, logits=latents_logits)
sample = multinomial_sample(
latents_logits, vocab_size, hparams.sampling_temp)
return sample, loss
# Multi-block case.
vocab_bits = int(math.log(vocab_size, 2))
assert vocab_size == 2**vocab_bits
assert vocab_bits % hparams.num_decode_blocks == 0
block_vocab_size = 2**(vocab_bits // hparams.num_decode_blocks)
latents_logits = [
tf.layers.dense(
latents_pred, block_vocab_size, name="extra_logits_%d" % i)
for i in range(hparams.num_decode_blocks)
]
loss = None
if latents_discrete is not None:
losses = []
for i in range(hparams.num_decode_blocks):
d = tf.floormod(tf.floordiv(latents_discrete,
block_vocab_size**i), block_vocab_size)
losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=d, logits=latents_logits[i]))
loss = sum(losses)
samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp)
for l in latents_logits]
sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)])
return sample, loss | 0.01059 |
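A quick arithmetic check of the multi-block decomposition (values chosen only for illustration): with 12 latent bits and 3 decode blocks, block_vocab_size is 2**4 = 16, so each latent id is split into three base-16 digits and reassembled as sum(s_i * 16**i).
vocab_bits = 12
num_decode_blocks = 3
block_vocab_size = 2 ** (vocab_bits // num_decode_blocks)    # 16

latent = 2748                                                # 0xABC
digits = [(latent // block_vocab_size ** i) % block_vocab_size
          for i in range(num_decode_blocks)]
assert digits == [12, 11, 10]                                # 0xC, 0xB, 0xA
assert sum(d * block_vocab_size ** i for i, d in enumerate(digits)) == latent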
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement) | 0.004264 |
def scroll(self, clicks):
"""Zoom using a mouse scroll wheel motion.
Parameters
----------
clicks : int
The number of clicks. Positive numbers indicate forward wheel
movement.
"""
target = self._target
ratio = 0.90
mult = 1.0
if clicks > 0:
mult = ratio**clicks
elif clicks < 0:
mult = (1.0 / ratio)**abs(clicks)
z_axis = self._n_pose[:3, 2].flatten()
eye = self._n_pose[:3, 3].flatten()
radius = np.linalg.norm(eye - target)
translation = (mult * radius - radius) * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._n_pose)
z_axis = self._pose[:3, 2].flatten()
eye = self._pose[:3, 3].flatten()
radius = np.linalg.norm(eye - target)
translation = (mult * radius - radius) * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._pose = t_tf.dot(self._pose) | 0.001938 |
def default_blocks(self):
"""
Return a list of default block tuples (appname.ModelName, verbose name).
Next to the dropdown list of block types, a small number of common blocks which are
frequently used can be added immediately to a column with one click. This method defines
the list of default blocks.
"""
# Use the block list provided by settings if it's defined
block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None)
if block_list is not None:
return block_list
# Try and auto fill in default blocks if the apps are installed
block_list = []
for block in GLITTER_FALLBACK_BLOCKS:
app_name, model_name = block.split('.')
try:
model_class = apps.get_model(app_name, model_name)
verbose_name = capfirst(model_class._meta.verbose_name)
block_list.append((block, verbose_name))
except LookupError:
# Block isn't installed - don't add it as a quick add default
pass
return block_list | 0.004437 |
def open(self):
"""This is the only way to open a file resource."""
self.__sf = _sftp_open(self.__sftp_session_int,
self.__filepath,
self.access_type_int,
self.__create_mode)
if self.access_type_is_append is True:
self.seek(self.filesize)
return SftpFileObject(self) | 0.012225 |
def apply_ctx(fn, ctx):
"""Return fn with ctx partially applied, if requested.
If the `fn` callable accepts an argument named "ctx", returns a
functools.partial object with ctx=ctx applied, else returns `fn` unchanged.
For this to work, the 'ctx' argument must come after any arguments that are
passed as positional arguments. For example, 'ctx' must be the 2nd argument
for request handlers, serializers and deserializers, that are always called
with one positional argument (the request, object to serialize, and input
filehandle, respectively).
"""
if 'ctx' in get_args(fn):
return functools.partial(fn, ctx=ctx)
else:
return fn | 0.001443 |
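get_args is not shown in this snippet; a hedged stand-in using inspect makes the dispatch concrete (handler and the ctx contents are made-up names):
import functools
import inspect

def get_args(fn):
    # stand-in for the helper assumed above: the parameter names of fn
    return list(inspect.signature(fn).parameters)

def handler(request, ctx):
    return ctx['db'].get(request)

# same dispatch as apply_ctx: bind ctx only when the callable asks for it
fn = functools.partial(handler, ctx={'db': {}}) if 'ctx' in get_args(handler) else handler
print(fn('some-key'))   # None: looked up in the bound ctx['db']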
def invoke_controller(self, controller, args, kwargs, state):
'''
The main request handler for Pecan applications.
'''
cfg = _cfg(controller)
content_types = cfg.get('content_types', {})
req = state.request
resp = state.response
pecan_state = req.pecan
# If a keyword is supplied via HTTP GET or POST arguments, but the
# function signature does not allow it, just drop it (rather than
# generating a TypeError).
argspec = getargspec(controller)
keys = kwargs.keys()
for key in keys:
if key not in argspec.args and not argspec.keywords:
kwargs.pop(key)
# get the result from the controller
result = controller(*args, **kwargs)
# a controller can return the response object which means they've taken
# care of filling it out
if result is response:
return
elif isinstance(result, WebObResponse):
state.response = result
return
raw_namespace = result
# pull the template out based upon content type and handle overrides
template = content_types.get(pecan_state['content_type'])
# check for a controller override of the template
template = pecan_state.get('override_template', template)
if template is None and cfg['explicit_content_type'] is False:
if self.default_renderer == 'json':
template = 'json'
pecan_state['content_type'] = pecan_state.get(
'override_content_type',
pecan_state['content_type']
)
# if there is a template, render it
if template:
if template == 'json':
pecan_state['content_type'] = 'application/json'
result = self.render(template, result)
# If we are in a test request put the namespace where it can be
# accessed directly
if req.environ.get('paste.testing'):
testing_variables = req.environ['paste.testing_variables']
testing_variables['namespace'] = raw_namespace
testing_variables['template_name'] = template
testing_variables['controller_output'] = result
# set the body content
if result and isinstance(result, six.text_type):
resp.text = result
elif result:
resp.body = result
if pecan_state['content_type']:
# set the content type
resp.content_type = pecan_state['content_type'] | 0.00078 |
def download_file_content(self, file_id, etag=None):
'''Download file content.
Args:
file_id (str): The UUID of the file whose content is requested
etag (str): If the content is not changed since the provided ETag,
the content won't be downloaded. If the content is changed, it
will be downloaded and returned with its new ETag.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
A tuple of ETag and content (etag, content) if the content was
retrieved. If an etag was provided, and content didn't change
returns (None, None)::
('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!')
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(file_id):
raise StorageArgumentException(
'Invalid UUID for file_id: {0}'.format(file_id))
headers = {'Accept': '*/*'}
if etag:
headers['If-None-Match'] = etag
resp = self._authenticated_request \
.to_endpoint('file/{}/content/'.format(file_id)) \
.with_headers(headers) \
.get()
if resp.status_code == 304:
return (None, None)
if 'ETag' not in resp.headers:
raise StorageException('No ETag received from the service with the download')
return (resp.headers['ETag'], resp.content) | 0.001722 |
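A runnable sketch of the conditional-ETag behaviour described above, using an in-memory stand-in for the storage service rather than the real client class.
class FakeStore:
    def __init__(self, etag, content):
        self._etag, self._content = etag, content

    def download(self, etag=None):
        if etag == self._etag:          # nothing changed -> like an HTTP 304
            return (None, None)
        return (self._etag, self._content)

store = FakeStore('"71e1ed9ee52e565a56aec66bc648a32c"', b'Hello world!')
etag, content = store.download()                    # first fetch: real content
assert store.download(etag=etag) == (None, None)    # cached copy still valid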
def a2b_hashed_base58(s):
"""
If the passed string is hashed_base58, return the binary data.
Otherwise raises an EncodingError.
"""
data = a2b_base58(s)
data, the_hash = data[:-4], data[-4:]
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError("hashed base58 has bad checksum %s" % s) | 0.002915 |
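A sketch of the checksum rule this function enforces, shown on raw bytes; the Base58 decoding itself is left to the library's a2b_base58 and is not reproduced here.
import hashlib

def double_sha256(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

payload = b'\x00' + b'\x12' * 20                  # example version byte + body
encoded = payload + double_sha256(payload)[:4]    # append the 4-byte checksum
body, checksum = encoded[:-4], encoded[-4:]
assert double_sha256(body)[:4] == checksum        # what a2b_hashed_base58 verifies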
def _execute_request(self, request):
"""Helper method to execute a request, since a lock should be used
to not fire up multiple requests at the same time.
:return: Result of `request.execute`
"""
with GoogleCloudProvider.__gce_lock:
return request.execute(http=self._auth_http) | 0.006061 |
def infer_complexes(stmts):
"""Return inferred Complex from Statements implying physical interaction.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer Complexes from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
interact_stmts = _get_statements_by_type(stmts, Modification)
linked_stmts = []
for mstmt in interact_stmts:
if mstmt.enz is None:
continue
st = Complex([mstmt.enz, mstmt.sub], evidence=mstmt.evidence)
linked_stmts.append(st)
return linked_stmts | 0.003901 |
def create_bucket(self, bucket):
"""
Create a new bucket.
"""
details = self._details(
method=b"PUT",
url_context=self._url_context(bucket=bucket),
)
query = self._query_factory(details)
return self._submit(query) | 0.006826 |
def _create_dataset(self, *data):
"""Converts input data to the appropriate Dataset"""
# Make sure data is a tuple of dense tensors
data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]
return TensorDataset(*data) | 0.007782 |
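A quick usage check of the TensorDataset wrapping shown above, with toy tensors rather than the class's real callers.
import torch
from torch.utils.data import TensorDataset

X = torch.rand(4, 2)
y = torch.rand(4, 1)
ds = TensorDataset(X, y)
assert len(ds) == 4          # one entry per row
assert len(ds[0]) == 2       # each entry is an (x, y) pair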
def get_video_url_from_video_id(video_id):
"""Splicing URLs according to video ID to get video details"""
# from js
data = [""] * 256
for index, _ in enumerate(data):
t = index
for i in range(8):
t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)
data[index] = t
def tmp():
rand_num = random.random()
path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(video_id=video_id,
random_num=str(rand_num)[2:])
e = o = r = -1
i, a = 0, len(path)
while i < a:
e = ord(path[i])
i += 1
if e < 128:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]
else:
if e < 2048:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
else:
if 55296 <= e < 57344:
e = (1023 & e) + 64
i += 1
                        o = 1023 & ord(path[i])
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]
else:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
return "https://ib.365yg.com{path}&s={param}".format(path=path, param=unsigned_right_shitf(r ^ -1, 0))
while 1:
url = tmp()
        if url.split("=")[-1][0] != "-":  # the "s" parameter must not be negative
return url | 0.006533 |
def boxplot(self, **vargs):
"""Plots a boxplot for the table.
Every column must be numerical.
Kwargs:
vargs: Additional arguments that get passed into `plt.boxplot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.boxplot
for additional arguments that can be passed into vargs. These include
`vert` and `showmeans`.
Returns:
None
Raises:
ValueError: The Table contains columns with non-numerical values.
>>> table = Table().with_columns(
... 'test1', make_array(92.5, 88, 72, 71, 99, 100, 95, 83, 94, 93),
... 'test2', make_array(89, 84, 74, 66, 92, 99, 88, 81, 95, 94))
>>> table
test1 | test2
92.5 | 89
88 | 84
72 | 74
71 | 66
99 | 92
100 | 99
95 | 88
83 | 81
94 | 95
93 | 94
>>> table.boxplot() # doctest: +SKIP
<boxplot of test1 and boxplot of test2 side-by-side on the same figure>
"""
# Check for non-numerical values and raise a ValueError if any found
for col in self:
if any(isinstance(cell, np.flexible) for cell in self[col]):
raise ValueError("The column '{0}' contains non-numerical "
"values. A histogram cannot be drawn for this table."
.format(col))
columns = self._columns.copy()
vargs['labels'] = columns.keys()
values = list(columns.values())
plt.boxplot(values, **vargs) | 0.003679 |
def post(fqdn, package, result, entry, bound, ekey, *argl, **argd):
"""Adds logging for the post-call result of calling the method externally.
Args:
fqdn (str): fully-qualified domain name of the function being logged.
package (str): name of the package we are logging for. Usually the first
element of `fqdn.split('.')`.
result: returned from calling the method we are logging.
entry (dict): one of the values returned by :func:`pre`.
bound (bool): true if the method is bound.
ekey (str): key under which to store the entry in the database.
"""
global _atdepth_call, _cstack_call
_cstack_call.pop()
if len(_cstack_call) == 0:
_atdepth_call = False
r = _post_call(_atdepth_call, package, fqdn, result,
entry, bound, ekey, argl, argd)
return r | 0.002323 |
def run_collection(self, conf, rm_conf, branch_info):
'''
Run specs and collect all the data
'''
if rm_conf is None:
rm_conf = {}
logger.debug('Beginning to run collection spec...')
exclude = None
if rm_conf:
try:
exclude = rm_conf['patterns']
logger.warn("WARNING: Skipping patterns found in remove.conf")
except LookupError:
logger.debug('Patterns section of remove.conf is empty.')
for c in conf['commands']:
# remember hostname archive path
if c.get('symbolic_name') == 'hostname':
self.hostname_path = os.path.join(
'insights_commands', mangle.mangle_command(c['command']))
rm_commands = rm_conf.get('commands', [])
if c['command'] in rm_commands or c.get('symbolic_name') in rm_commands:
logger.warn("WARNING: Skipping command %s", c['command'])
elif self.mountpoint == "/" or c.get("image"):
cmd_specs = self._parse_command_spec(c, conf['pre_commands'])
for s in cmd_specs:
cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint)
self.archive.add_to_archive(cmd_spec)
for f in conf['files']:
rm_files = rm_conf.get('files', [])
if f['file'] in rm_files or f.get('symbolic_name') in rm_files:
logger.warn("WARNING: Skipping file %s", f['file'])
else:
file_specs = self._parse_file_spec(f)
for s in file_specs:
# filter files post-wildcard parsing
if s['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", s['file'])
else:
file_spec = InsightsFile(s, exclude, self.mountpoint)
self.archive.add_to_archive(file_spec)
if 'globs' in conf:
for g in conf['globs']:
glob_specs = self._parse_glob_spec(g)
for g in glob_specs:
if g['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", g)
else:
glob_spec = InsightsFile(g, exclude, self.mountpoint)
self.archive.add_to_archive(glob_spec)
logger.debug('Spec collection finished.')
# collect metadata
logger.debug('Collecting metadata...')
self._write_branch_info(branch_info)
logger.debug('Metadata collection finished.') | 0.001486 |
def length(self):
"""Gets the length of this Vector"""
return math.sqrt((self.X * self.X) + (self.Y * self.Y)) | 0.015873 |
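A quick numeric check of the formula above: a (3, 4) vector has length sqrt(9 + 16) = 5.
import math

assert math.sqrt(3 * 3 + 4 * 4) == 5.0
assert math.hypot(3, 4) == 5.0   # same result, better behaved for extreme values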
def remove(self, element):
"""
Remove an element from the bag.
>>> s = pbag([1, 1, 2])
>>> s2 = s.remove(1)
>>> s3 = s.remove(2)
>>> s2
pbag([1, 2])
>>> s3
pbag([1, 1])
"""
if element not in self._counts:
raise KeyError(element)
elif self._counts[element] == 1:
newc = self._counts.remove(element)
else:
newc = self._counts.set(element, self._counts[element] - 1)
return PBag(newc) | 0.003774 |
def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all', label='gp confidence',
predict_kw=None,
**kwargs):
"""
Plot the confidence interval between the percentiles lower and upper.
E.g. the 95% confidence interval is $2.5, 97.5$.
Note: Only implemented for one dimension!
You can deactivate the legend for this one plot by supplying None to label.
Give the Y_metadata in the predict_kw if you need it.
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
:type fixed_inputs: a list of tuples
:param int resolution: The resolution of the prediction [default:200]
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of the output y (!) to plot (array-like or list of ints)
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl().new_canvas(**kwargs)
ycols = get_which_data_ycols(self, which_data_ycols)
X = get_x_y_var(self)[0]
helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw, apply_link,
(lower, upper),
ycols, predict_kw)
plots = _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs)
return pl().add_to_canvas(canvas, plots, legend=label is not None) | 0.007853 |
def anglesep_meeus(lon0: float, lat0: float,
lon1: float, lat1: float, deg: bool = True) -> float:
"""
Parameters
----------
lon0 : float or numpy.ndarray of float
longitude of first point
lat0 : float or numpy.ndarray of float
latitude of first point
lon1 : float or numpy.ndarray of float
longitude of second point
lat1 : float or numpy.ndarray of float
latitude of second point
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
sep_rad : float or numpy.ndarray of float
angular separation
Meeus p. 109
from "Astronomical Algorithms" by Jean Meeus Ch. 16 p. 111 (16.5)
gives angular distance in degrees between two rightAscension,Declination
points in the sky. Neglecting atmospheric effects, of course.
Meeus haversine method is stable all the way to exactly 0 deg.
either the arrays must be the same size, or one of them must be a scalar
"""
if deg:
lon0 = radians(lon0)
lat0 = radians(lat0)
lon1 = radians(lon1)
lat1 = radians(lat1)
sep_rad = 2 * arcsin(sqrt(haversine(lat0 - lat1) +
cos(lat0) * cos(lat1) * haversine(lon0 - lon1)))
if deg:
return degrees(sep_rad)
else:
return sep_rad | 0.00073 |
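A standalone scalar check of the haversine-based separation used above; plain math functions stand in for the numpy helpers the snippet imports.
import math

def haversine(theta):
    return math.sin(theta / 2) ** 2

def ang_sep_deg(lon0, lat0, lon1, lat1):
    lon0, lat0, lon1, lat1 = map(math.radians, (lon0, lat0, lon1, lat1))
    h = haversine(lat0 - lat1) + math.cos(lat0) * math.cos(lat1) * haversine(lon0 - lon1)
    return math.degrees(2 * math.asin(math.sqrt(h)))

# Two points on the equator, 90 degrees apart in longitude:
assert abs(ang_sep_deg(0, 0, 90, 0) - 90.0) < 1e-9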
def CMOVNO(cpu, dest, src):
"""
Conditional move - Not overflow.
Tests the status flags in the EFLAGS register and moves the source operand
(second operand) to the destination operand (first operand) if the given
test condition is true.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.OF == False, src.read(), dest.read())) | 0.012245 |
def add_response(self, req, resp):
"""Adds the response from sending to `req` to this instance's cache.
Args:
req (`ServicecontrolServicesAllocateQuotaRequest`): the request
resp (AllocateQuotaResponse): the response from sending the request
"""
if self._cache is None:
return
signature = sign(req.allocateQuotaRequest)
with self._cache as c:
now = self._timer()
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
req, resp, self.service_name, now)
else:
# Update the cached item to reflect that it is updated
item.last_check_time = now
item.response = resp
item.is_in_flight = False
c[signature] = item | 0.002312 |
def _isNewTxn(self, identifier, reply, txnId) -> bool:
"""
If client is not in `processedRequests` or requestId is not there in
processed requests and txnId is present then its a new reply
"""
return (identifier not in self.processedRequests or
reply.reqId not in self.processedRequests[identifier]) and \
txnId is not None | 0.005115 |
def padto8(data):
"""Pads data to the multiplies of 8 bytes.
This makes x86_64 faster and prevents
undefined behavior on other platforms"""
length = len(data)
return data + b'\xdb' * (roundto8(length) - length) | 0.004219 |
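A self-contained sketch of the padding rule, assuming roundto8 is the usual round-up-to-a-multiple-of-8 helper (that definition is not part of the snippet).
def roundto8(n):
    return (n + 7) // 8 * 8

def padto8(data):
    return data + b'\xdb' * (roundto8(len(data)) - len(data))

assert len(padto8(b'abc')) == 8
assert len(padto8(b'12345678')) == 8          # already aligned: unchanged
assert padto8(b'abc').endswith(b'\xdb' * 5)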
def get_repository_lookup_session(self, proxy, *args, **kwargs):
"""Gets the repository lookup session.
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.RepositoryLookupSession) - a
RepositoryLookupSession
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_repository_lookup() is false
compliance: optional - This method must be implemented if
supports_repository_lookup() is true.
"""
if not self.supports_repository_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.RepositoryLookupSession(proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise # OperationFailed()
return session | 0.003077 |
def dump_tables_to_tskit(pop):
"""
Converts fwdpy11.TableCollection to an
tskit.TreeSequence
"""
node_view = np.array(pop.tables.nodes, copy=True)
node_view['time'] -= node_view['time'].max()
node_view['time'][np.where(node_view['time'] != 0.0)[0]] *= -1.0
edge_view = np.array(pop.tables.edges, copy=False)
mut_view = np.array(pop.tables.mutations, copy=False)
tc = tskit.TableCollection(pop.tables.genome_length)
# We must initialize population and individual
# tables before we can do anything else.
# Attempting to set population to anything
# other than -1 in an tskit.NodeTable will
# raise an exception if the PopulationTable
# isn't set up.
_initializePopulationTable(node_view, tc)
node_to_individual = _initializeIndividualTable(pop, tc)
individual = [-1 for i in range(len(node_view))]
for k, v in node_to_individual.items():
individual[k] = v
flags = [1]*2*pop.N + [0]*(len(node_view) - 2*pop.N)
# Bug fixed in 0.3.1: add preserved nodes to samples list
for i in pop.tables.preserved_nodes:
flags[i] = 1
tc.nodes.set_columns(flags=flags, time=node_view['time'],
population=node_view['population'],
individual=individual)
tc.edges.set_columns(left=edge_view['left'],
right=edge_view['right'],
parent=edge_view['parent'],
child=edge_view['child'])
mpos = np.array([pop.mutations[i].pos for i in mut_view['key']])
ancestral_state = np.zeros(len(mut_view), dtype=np.int8)+ord('0')
ancestral_state_offset = np.arange(len(mut_view)+1, dtype=np.uint32)
tc.sites.set_columns(position=mpos,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset)
derived_state = np.zeros(len(mut_view), dtype=np.int8)+ord('1')
md, mdo = _generate_mutation_metadata(pop)
tc.mutations.set_columns(site=np.arange(len(mpos), dtype=np.int32),
node=mut_view['node'],
derived_state=derived_state,
derived_state_offset=ancestral_state_offset,
metadata=md,
metadata_offset=mdo)
return tc.tree_sequence() | 0.000421 |
def get_prob(self, src, tgt, mask, pre_compute, return_logits=False):
'''
        :param src: [src_sequence_length, batch_size, src_dim]
        :param tgt: [batch_size, tgt_dim] or [tgt_sequence_length, batch_size, tgt_dim]
:param mask: [src_sequence_length, batch_size]\
or [tgt_sequence_length, src_sequence_length, batch_sizse]
:param pre_compute: [src_sequence_length, batch_size, hidden_dim]
:return: [src_sequence_length, batch_size]\
or [tgt_sequence_length, src_sequence_length, batch_size]
'''
s_shape = src.get_shape().as_list()
h_shape = tgt.get_shape().as_list()
src_dim = s_shape[-1]
tgt_dim = h_shape[-1]
assert src_dim is not None, 'src dimension must be defined'
assert tgt_dim is not None, 'tgt dimension must be defined'
self._define_params(src_dim, tgt_dim)
if len(h_shape) == 2:
tgt = tf.expand_dims(tgt, 0)
if pre_compute is None:
pre_compute = self.get_pre_compute(src)
buf0 = pre_compute
buf1 = tf.tensordot(tgt, self.var['U'], axes=[[2], [0]])
buf2 = tf.tanh(tf.expand_dims(buf0, 0) + tf.expand_dims(buf1, 1))
if not self.is_vanilla:
xh1 = tgt
xh2 = tgt
s1 = src
if self.need_padding:
xh1 = tf.tensordot(xh1, self.var['V_t'], 1)
xh2 = tf.tensordot(xh2, self.var['S_t'], 1)
s1 = tf.tensordot(s1, self.var['V_s'], 1)
if not self.is_identity_transform:
xh1 = tf.tensordot(xh1, self.var['T'], 1)
xh2 = tf.tensordot(xh2, self.var['T'], 1)
buf3 = tf.expand_dims(s1, 0) * tf.expand_dims(xh1, 1)
buf3 = tf.tanh(tf.tensordot(buf3, self.var['V'], axes=[[3], [0]]))
buf = tf.reshape(tf.tanh(buf2 + buf3), shape=tf.shape(buf3))
else:
buf = buf2
v = self.var['v']
e = tf.tensordot(buf, v, [[3], [0]])
e = tf.squeeze(e, axis=[3])
tmp = tf.reshape(e + (mask - 1) * 10000.0, shape=tf.shape(e))
prob = tf.nn.softmax(tmp, 1)
if len(h_shape) == 2:
prob = tf.squeeze(prob, axis=[0])
tmp = tf.squeeze(tmp, axis=[0])
if return_logits:
return prob, tmp
return prob | 0.001273 |
def dynamize_request_items(self, batch_list):
"""
Convert a request_items parameter into the data structure
required for Layer1.
"""
d = None
if batch_list:
d = {}
for batch in batch_list:
batch_dict = {}
key_list = []
for key in batch.keys:
if isinstance(key, tuple):
hash_key, range_key = key
else:
hash_key = key
range_key = None
k = self.build_key_from_values(batch.table.schema,
hash_key, range_key)
key_list.append(k)
batch_dict['Keys'] = key_list
if batch.attributes_to_get:
batch_dict['AttributesToGet'] = batch.attributes_to_get
d[batch.table.name] = batch_dict
return d | 0.002053 |
def interpolate_linear(self, lons, lats, data):
"""
Interpolate using linear approximation
Returns the same as interpolate(lons,lats,data,order=1)
"""
return self.interpolate(lons, lats, data, order=1) | 0.008299 |
def parse_color(self, color):
'''
color : string, eg: '#rrggbb' or 'none'
(where rr, gg, bb are hex digits from 00 to ff)
returns a triple of unsigned bytes, eg: (0, 128, 255)
'''
if color == 'none':
return None
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)) | 0.005168 |
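A free-function version of the parsing rule above, for a quick check outside the original class.
def parse_color(color):
    # '#rrggbb' -> (r, g, b) as unsigned bytes; 'none' -> None
    if color == 'none':
        return None
    return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16))

assert parse_color('#0080ff') == (0, 128, 255)
assert parse_color('none') is None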
def forward(self,
input_ids: torch.LongTensor,
offsets: torch.LongTensor = None,
token_type_ids: torch.LongTensor = None) -> torch.Tensor:
"""
Parameters
----------
input_ids : ``torch.LongTensor``
The (batch_size, ..., max_sequence_length) tensor of wordpiece ids.
offsets : ``torch.LongTensor``, optional
The BERT embeddings are one per wordpiece. However it's possible/likely
you might want one per original token. In that case, ``offsets``
represents the indices of the desired wordpiece for each original token.
Depending on how your token indexer is configured, this could be the
position of the last wordpiece for each token, or it could be the position
of the first wordpiece for each token.
For example, if you had the sentence "Definitely not", and if the corresponding
wordpieces were ["Def", "##in", "##ite", "##ly", "not"], then the input_ids
would be 5 wordpiece ids, and the "last wordpiece" offsets would be [3, 4].
If offsets are provided, the returned tensor will contain only the wordpiece
embeddings at those positions, and (in particular) will contain one embedding
per token. If offsets are not provided, the entire tensor of wordpiece embeddings
will be returned.
token_type_ids : ``torch.LongTensor``, optional
If an input consists of two sentences (as in the BERT paper),
tokens from the first sentence should have type 0 and tokens from
the second sentence should have type 1. If you don't provide this
(the default BertIndexer doesn't) then it's assumed to be all 0s.
"""
# pylint: disable=arguments-differ
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
input_mask = (input_ids != 0).long()
# input_ids may have extra dimensions, so we reshape down to 2-d
# before calling the BERT model and then reshape back at the end.
all_encoder_layers, _ = self.bert_model(input_ids=util.combine_initial_dims(input_ids),
token_type_ids=util.combine_initial_dims(token_type_ids),
attention_mask=util.combine_initial_dims(input_mask))
if self._scalar_mix is not None:
mix = self._scalar_mix(all_encoder_layers, input_mask)
else:
mix = all_encoder_layers[-1]
# At this point, mix is (batch_size * d1 * ... * dn, sequence_length, embedding_dim)
if offsets is None:
# Resize to (batch_size, d1, ..., dn, sequence_length, embedding_dim)
return util.uncombine_initial_dims(mix, input_ids.size())
else:
# offsets is (batch_size, d1, ..., dn, orig_sequence_length)
offsets2d = util.combine_initial_dims(offsets)
# now offsets is (batch_size * d1 * ... * dn, orig_sequence_length)
range_vector = util.get_range_vector(offsets2d.size(0),
device=util.get_device_of(mix)).unsqueeze(1)
# selected embeddings is also (batch_size * d1 * ... * dn, orig_sequence_length)
selected_embeddings = mix[range_vector, offsets2d]
return util.uncombine_initial_dims(selected_embeddings, offsets.size()) | 0.006534 |
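A tiny illustration of the offsets-based gather performed at the end of forward(): advanced indexing with a per-row range vector picks one wordpiece embedding per original token (toy shapes, no BERT involved).
import torch

batch, seq_len, dim = 2, 5, 3
mix = torch.arange(batch * seq_len * dim, dtype=torch.float).view(batch, seq_len, dim)
offsets = torch.tensor([[3, 4], [0, 2]])          # chosen wordpiece index per token
range_vector = torch.arange(batch).unsqueeze(1)   # shape (batch, 1), broadcasts over tokens
selected = mix[range_vector, offsets]             # shape (batch, n_tokens, dim)
assert selected.shape == (2, 2, 3)
assert torch.equal(selected[0, 0], mix[0, 3])
assert torch.equal(selected[1, 1], mix[1, 2])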
def remove(name, conf_file=default_conf):
'''
Remove log pattern from logadm
CLI Example:
.. code-block:: bash
salt '*' logadm.remove myapplog
'''
command = "logadm -f {0} -r {1}".format(conf_file, name)
result = __salt__['cmd.run_all'](command, python_shell=False)
if result['retcode'] != 0:
return dict(
Error='Failure in removing log. Possibly already removed?',
Output=result['stderr']
)
return dict(Result='Success') | 0.001976 |
def concat_t_vars_np(self, vars_idx=None):
"""
Concatenate `self.np_t` with `self.np_vars` and return a single matrix.
The first column corresponds to time, and the rest of the matrix is the variables.
Returns
-------
np.array : concatenated matrix
"""
selected_np_vars = self.np_vars
if vars_idx is not None:
selected_np_vars = self.np_vars[:, vars_idx]
return np.concatenate([self.np_t[:self.np_nrows].reshape((-1, 1)),
selected_np_vars[:self.np_nrows, :]], axis=1) | 0.005059 |
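A toy shape check of the time/variable concatenation above, outside the class.
import numpy as np

np_t = np.array([0.0, 0.1, 0.2])
np_vars = np.array([[1.0, 2.0], [1.1, 2.1], [1.2, 2.2]])
out = np.concatenate([np_t.reshape((-1, 1)), np_vars], axis=1)
assert out.shape == (3, 3)
assert (out[:, 0] == np_t).all()   # first column is time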
def close(self):
"""
OPTIONAL COMMIT-AND-CLOSE
IF THIS IS NOT DONE, THEN THE THREAD THAT SPAWNED THIS INSTANCE
:return:
"""
self.closed = True
signal = _allocate_lock()
signal.acquire()
self.queue.add(CommandItem(COMMIT, None, signal, None, None))
signal.acquire()
self.worker.please_stop.go()
return | 0.005051 |
def db_alter(name, user=None, host=None, port=None, maintenance_db=None,
password=None, tablespace=None, owner=None, owner_recurse=False,
runas=None):
'''
Change tablespace or/and owner of database.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_alter dbname owner=otheruser
'''
if not any((tablespace, owner)):
        return True # Nothing to do?
if owner and owner_recurse:
ret = owner_to(name, owner,
user=user,
host=host,
port=port,
password=password,
runas=runas)
else:
queries = []
if owner:
queries.append('ALTER DATABASE "{0}" OWNER TO "{1}"'.format(
name, owner
))
if tablespace:
queries.append('ALTER DATABASE "{0}" SET TABLESPACE "{1}"'.format(
name, tablespace
))
for query in queries:
ret = _psql_prepare_and_run(['-c', query],
user=user, host=host, port=port,
maintenance_db=maintenance_db,
password=password, runas=runas)
if ret['retcode'] != 0:
return False
return True | 0.000741 |
def find_package_docs(package_dir, skippedNames=None):
"""Find documentation directories in a package using ``manifest.yaml``.
Parameters
----------
package_dir : `str`
Directory of an EUPS package.
skippedNames : `list` of `str`, optional
List of package or module names to skip when creating links.
Returns
-------
doc_dirs : namedtuple
Attributes of the namedtuple are:
- ``package_dirs`` (`dict`). Keys are package names (for example,
``'afw'``). Values are absolute directory paths to the package's
documentation directory inside the package's ``doc`` directory. If
there is no package-level documentation the dictionary will be empty.
- ``modules_dirs`` (`dict`). Keys are module names (for example,
``'lsst.afw.table'``). Values are absolute directory paths to the
module's directory inside the package's ``doc`` directory. If a
package has no modules the returned dictionary will be empty.
- ``static_doc_dirs`` (`dict`). Keys are directory names relative to
the ``_static`` directory. Values are absolute directory paths to
the static documentation directory in the package. If there
isn't a declared ``_static`` directory, this dictionary is empty.
Raises
------
NoPackageDocs
Raised when the ``manifest.yaml`` file cannot be found in a package.
Notes
-----
Stack packages have documentation in subdirectories of their `doc`
directory. The ``manifest.yaml`` file declares what these directories are
so that they can be symlinked into the root project.
There are three types of documentation directories:
1. Package doc directories contain documentation for the EUPS package
aspect. This is optional.
2. Module doc directories contain documentation for a Python package
aspect. These are optional.
3. Static doc directories are root directories inside the package's
``doc/_static/`` directory. These are optional.
These are declared in a package's ``doc/manifest.yaml`` file. For example:
.. code-block:: yaml
package: "afw"
modules:
- "lsst.afw.image"
- "lsst.afw.geom"
statics:
- "_static/afw"
This YAML declares *module* documentation directories:
- ``afw/doc/lsst.afw.image/``
- ``afw/doc/lsst.afw.geom/``
It also declares a *package* documentation directory:
- ``afw/doc/afw``
    And a static documentation directory:
- ``afw/doc/_static/afw``
"""
logger = logging.getLogger(__name__)
if skippedNames is None:
skippedNames = []
doc_dir = os.path.join(package_dir, 'doc')
modules_yaml_path = os.path.join(doc_dir, 'manifest.yaml')
if not os.path.exists(modules_yaml_path):
raise NoPackageDocs(
'Manifest YAML not found: {0}'.format(modules_yaml_path))
with open(modules_yaml_path) as f:
manifest_data = yaml.safe_load(f)
module_dirs = {}
package_dirs = {}
static_dirs = {}
if 'modules' in manifest_data:
for module_name in manifest_data['modules']:
if module_name in skippedNames:
logger.debug('Skipping module {0}'.format(module_name))
continue
module_dir = os.path.join(doc_dir, module_name)
# validate that the module's documentation directory does exist
if not os.path.isdir(module_dir):
message = 'module doc dir not found: {0}'.format(module_dir)
logger.warning(message)
continue
module_dirs[module_name] = module_dir
logger.debug('Found module doc dir {0}'.format(module_dir))
if 'package' in manifest_data:
package_name = manifest_data['package']
full_package_dir = os.path.join(doc_dir, package_name)
# validate the directory exists
if os.path.isdir(full_package_dir) \
and package_name not in skippedNames:
package_dirs[package_name] = full_package_dir
logger.debug('Found package doc dir {0}'.format(full_package_dir))
else:
logger.warning('package doc dir excluded or not found: {0}'.format(
full_package_dir))
if 'statics' in manifest_data:
for static_dirname in manifest_data['statics']:
full_static_dir = os.path.join(doc_dir, static_dirname)
# validate the directory exists
if not os.path.isdir(full_static_dir):
message = '_static doc dir not found: {0}'.format(
full_static_dir)
logger.warning(message)
continue
# Make a relative path to `_static` that's used as the
# link source in the root docproject's _static/ directory
relative_static_dir = os.path.relpath(
full_static_dir,
os.path.join(doc_dir, '_static'))
static_dirs[relative_static_dir] = full_static_dir
logger.debug('Found _static doc dir: {0}'.format(full_static_dir))
Dirs = namedtuple('Dirs', ['module_dirs', 'package_dirs', 'static_dirs'])
return Dirs(module_dirs=module_dirs,
package_dirs=package_dirs,
static_dirs=static_dirs) | 0.000186 |
def from_vocabfile(filename):
""" Construct a CountedVocabulary out of a vocabulary file.
Note:
            File has the following format: word1 count1
word2 count2
"""
word_count = [x.strip().split() for x in _open(filename, 'r').read().splitlines()]
word_count = {w:int(c) for w,c in word_count}
return CountedVocabulary(word_count=word_count) | 0.010025 |
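The expected file layout is one "word count" pair per line; a quick in-memory version of the same parse:
lines = ["the 120", "cat 7"]
word_count = {w: int(c) for w, c in (ln.strip().split() for ln in lines)}
assert word_count == {"the": 120, "cat": 7}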
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
"""
Download specified files from a Physiobank database.
Parameters
----------
db : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
files : list
A list of strings specifying the file names to download relative to the
database base directory.
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files as they
are organized in Physiobank (True), or to download all files into the
same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False, existing
files with the same name and relative subdirectory will be checked.
If the local file is the same size as the online file, the download is
skipped. If the local file is larger, it will be deleted and the file
will be redownloaded. If the local file is smaller, the file will be
assumed to be partially downloaded and the remaining bytes will be
downloaded and appended.
Examples
--------
>>> wfdb.dl_files('ahadb', os.getcwd(),
['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
'data/001a.dat'])
"""
# Full url physiobank database
db_url = posixpath.join(config.db_index_url, db)
# Check if the database is valid
response = requests.get(db_url)
response.raise_for_status()
# Construct the urls to download
dl_inputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files]
# Make any required local directories
make_local_dirs(dl_dir, dl_inputs, keep_subdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(dl_pb_file, dl_inputs)
print('Finished downloading files')
return | 0.0009 |
def setExpandedIcon( self, column, icon ):
"""
Sets the icon to be used when the item is expanded.
:param column | <int>
icon | <QtGui.QIcon> || None
"""
self._expandedIcon[column] = QtGui.QIcon(icon) | 0.017668 |
def decode_bytes(f):
"""Decode a buffer length from a 2-byte unsigned int then read the
subsequent bytes.
Parameters
----------
f: file
File-like object with read method.
Raises
------
UnderflowDecodeError
When the end of stream is encountered before the end of the
encoded bytes.
Returns
-------
int
Number of bytes read from `f`.
bytes
Value bytes decoded from `f`.
"""
buf = f.read(FIELD_U16.size)
if len(buf) < FIELD_U16.size:
raise UnderflowDecodeError()
(num_bytes,) = FIELD_U16.unpack_from(buf)
num_bytes_consumed = FIELD_U16.size + num_bytes
buf = f.read(num_bytes)
if len(buf) < num_bytes:
raise UnderflowDecodeError()
return num_bytes_consumed, buf | 0.001248 |
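A round-trip sketch of the length-prefixed layout this decoder expects, assuming FIELD_U16 is a big-endian unsigned-short struct (that definition is not part of the snippet).
import io
import struct

FIELD_U16 = struct.Struct('>H')

payload = b'hello'
buf = io.BytesIO(FIELD_U16.pack(len(payload)) + payload)

(num_bytes,) = FIELD_U16.unpack(buf.read(FIELD_U16.size))
assert buf.read(num_bytes) == payload
assert FIELD_U16.size + num_bytes == 7   # what decode_bytes reports as consumed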
def reflex_correct(coords, galactocentric_frame=None):
"""Correct the input Astropy coordinate object for solar reflex motion.
    The input coordinate instance must have distance and radial velocity information. If the radial velocity is not known, fill the radial velocity values with zeros to correct only the proper motions.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The Astropy coordinate object with position and velocity information.
galactocentric_frame : `~astropy.coordinates.Galactocentric` (optional)
To change properties of the Galactocentric frame, like the height of the
sun above the midplane, or the velocity of the sun in a Galactocentric
intertial frame, set arguments of the
`~astropy.coordinates.Galactocentric` object and pass in to this
function with your coordinates.
Returns
-------
coords : `~astropy.coordinates.SkyCoord`
The coordinates in the same frame as input, but with solar motion
removed.
"""
c = coord.SkyCoord(coords)
# If not specified, use the Astropy default Galactocentric frame
if galactocentric_frame is None:
galactocentric_frame = coord.Galactocentric()
v_sun = galactocentric_frame.galcen_v_sun
observed = c.transform_to(galactocentric_frame)
rep = observed.cartesian.without_differentials()
rep = rep.with_differentials(observed.cartesian.differentials['s'] + v_sun)
fr = galactocentric_frame.realize_frame(rep).transform_to(c.frame)
return coord.SkyCoord(fr) | 0.001989 |