text | score
---|---|
def use_plenary_composition_view(self):
"""Pass through to provider CompositionLookupSession.use_plenary_composition_view"""
self._object_views['composition'] = PLENARY
# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_composition_view()
except AttributeError:
pass | 0.008493 |
def get_file_contents(source_path: str) -> str:
"""
Loads the contents of the source into a string for execution using multiple
loading methods to handle cross-platform encoding edge cases. If none of
the load methods work, a string is returned that contains an error function
    response that will be displayed when the step is run to alert the user to the
error.
:param source_path:
Path of the step file to load.
"""
open_funcs = [
functools.partial(codecs.open, source_path, encoding='utf-8'),
functools.partial(open, source_path, 'r')
]
for open_func in open_funcs:
try:
with open_func() as f:
return f.read()
except Exception:
pass
return (
'raise IOError("Unable to load step file at: {}")'
.format(source_path)
) | 0.001155 |
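The snippet above is self-contained apart from its imports; below is a minimal sketch of the same fallback-open pattern using only the standard library (the file name and contents are invented for illustration):

import codecs
import functools
import tempfile

def read_with_fallbacks(path):
    # Try UTF-8 first, then fall back to the platform default encoding.
    open_funcs = [
        functools.partial(codecs.open, path, encoding='utf-8'),
        functools.partial(open, path, 'r'),
    ]
    for open_func in open_funcs:
        try:
            with open_func() as f:
                return f.read()
        except Exception:
            pass
    return 'raise IOError("Unable to load step file at: {}")'.format(path)

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
    tmp.write("print('hello')\n")
print(read_with_fallbacks(tmp.name))   # -> print('hello')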
def fetch(yts, needed_range, fh):
"""
Download desired range of data and put it in `yts` object (e.g. ``YTStor``).
Parameters
----------
yts : YTStor
Stor-like object to which we will write.
needed_range : tuple
Two element tuple that represents a data range - compliant with ``range_t`` subrange definition.
fh : int
Descriptor used by a process for filesystem operations.
Returns
-------
None
Method does not return; data is written directly to `yts` object.
"""
if yts.preferences['audio'] and yts.preferences['video'] and isinstance(yts.url, tuple) and not yts.preferences['stream']:
#condition for merging.
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as d, tempfile.NamedTemporaryFile(prefix='a') as a, tempfile.NamedTemporaryFile(prefix='v') as v:
            # after the with statement, the temporary files (except d) will be removed
v.write(yts.r_session.get(yts.url[0]).content)
a.write(yts.r_session.get(yts.url[1]).content)
PP = youtube_dl.postprocessor.FFmpegMergerPP(yts.ytdl)
PP.run({'filepath': d.name, '__files_to_merge': (v.name, a.name)}) # merge
_d = d.name
with open(_d, mode="rb") as d:
yts.data.write( d.read() )
yts.data.flush()
yts.filesize = os.path.getsize(_d)
yts.avail += (0, yts.filesize)
os.remove(_d)
else: # no merging
if yts.preferences['stream'] is False: # preload
yts.data.write(yts.r_session.get(yts.url).content)
yts.data.flush()
yts.avail += (0, yts.filesize)
else: # stream
hr = (needed_range[0], needed_range[1] - 1)
get = yts.r_session.get(yts.url, headers={'Range': 'bytes=' + '-'.join(str(i) for i in hr)})
yts.data.seek(hr[0])
yts.data.write(get.content)
yts.data.flush()
ret = list( int(s) for s in get.headers.get('content-range').split(' ')[1].split('/')[0].split('-') )
ret[1] += 1
yts.avail += tuple(ret)
yts.processing_range -= needed_range | 0.00796 |
def __callback (self, event):
'''
Callback function to receive and save Bumper Scans.
@param event: ROS BumperScan received
@type event: BumperScan
'''
bump = bumperEvent2BumperData(event)
if bump.state == 1:
self.lock.acquire()
self.time = current_milli_time()
self.data = bump
self.lock.release() | 0.012048 |
def _applyTriggerValue(self, triggerName, outval):
""" Here we look through the entire .cfgspc to see if any parameters
are affected by this trigger. For those that are, we apply the action
to the GUI widget. The action is specified by depType. """
# First find which items are dependent upon this trigger (cached)
# e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... }
depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName)
if not depParsDict: return
if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n")
# Get model data, the list of pars
theParamList = self._taskParsObj.getParList()
# Then go through the dependent pars and apply the trigger to them
settingMsg = ''
for absName in depParsDict:
used = False
# For each dep par, loop to find the widget for that scope.name
for i in range(self.numParams):
scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!!
if absName == scopedName: # a match was found
depType = depParsDict[absName]
if depType == 'active_if':
self.entryNo[i].setActiveState(outval)
elif depType == 'inactive_if':
self.entryNo[i].setActiveState(not outval)
elif depType == 'is_set_by':
self.entryNo[i].forceValue(outval, noteEdited=True)
# WARNING! noteEdited=True may start recursion!
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
outval+'"'
elif depType in ('set_yes_if', 'set_no_if'):
if bool(outval):
newval = 'yes'
if depType == 'set_no_if': newval = 'no'
self.entryNo[i].forceValue(newval, noteEdited=True)
# WARNING! noteEdited=True may start recursion!
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
newval+'"'
else:
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+\
'" (no change)'
elif depType == 'is_disabled_by':
# this one is only used with boolean types
on = self.entryNo[i].convertToNative(outval)
if on:
# do not activate whole section or change
# any values, only activate this one
self.entryNo[i].setActiveState(True)
else:
# for off, set the bool par AND grey WHOLE section
self.entryNo[i].forceValue(outval, noteEdited=True)
self.entryNo[i].setActiveState(False)
# we'd need this if the par had no _section_switch_
# self._toggleSectionActiveState(
# theParamList[i].scope, False, None)
if len(settingMsg) > 0: settingMsg += ", "
settingMsg += '"'+theParamList[i].name+'" to "'+\
outval+'"'
else:
raise RuntimeError('Unknown dependency: "'+depType+ \
'" for par: "'+scopedName+'"')
used = True
break
# Or maybe it is a whole section
if absName.endswith('._section_'):
scope = absName[:-10]
depType = depParsDict[absName]
if depType == 'active_if':
self._toggleSectionActiveState(scope, outval, None)
elif depType == 'inactive_if':
self._toggleSectionActiveState(scope, not outval, None)
used = True
# Help to debug the .cfgspc rules
if not used:
raise RuntimeError('UNUSED "'+triggerName+'" dependency: '+ \
str({absName:depParsDict[absName]}))
if len(settingMsg) > 0:
# why ?! self.freshenFocus()
self.showStatus('Automatically set '+settingMsg, keep=1) | 0.004654 |
def _reorient_3d(image):
"""
Reorganize the data for a 3d nifti
"""
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
# Fill the new image with the values of the input image but with matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i).original_data))
return new_image | 0.008468 |
def segment_intersection1(start0, end0, start1, end1, s):
"""Image for :func:`.segment_intersection` docstring."""
if NO_IMAGES:
return
line0 = bezier.Curve.from_nodes(stack1d(start0, end0))
line1 = bezier.Curve.from_nodes(stack1d(start1, end1))
ax = line0.plot(2)
line1.plot(256, ax=ax)
(x_val,), (y_val,) = line0.evaluate(s)
ax.plot([x_val], [y_val], color="black", marker="o")
ax.axis("scaled")
save_image(ax.figure, "segment_intersection1.png") | 0.002012 |
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
"""
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
"""
results = []
if isinstance(cmd_param.type, Choice):
# Choices don't support descriptions.
results = [(c, None)
for c in cmd_param.type.choices if str(c).startswith(incomplete)]
elif cmd_param.autocompletion is not None:
dynamic_completions = cmd_param.autocompletion(ctx=ctx,
args=args,
incomplete=incomplete)
results = [c if isinstance(c, tuple) else (c, None)
for c in dynamic_completions]
return results | 0.002112 |
def _wrap_response(self, status=None, **kwargs):
"""Convenience method to wrap a status with any key word args.
Args:
status (enum): enum response status, defaults to OK
Returns:
            dict: includes a 'status' attribute and any keyword arguments
"""
kwargs['status'] = status if status is not None else self._status.OK
return kwargs | 0.004975 |
def _move_path(self):
"""
Move the downloaded file to the authentic path (identified by
effective URL)
"""
if is_temp_path(self._path) and self._pycurl is not None:
eurl = self._pycurl.getinfo(pycurl.EFFECTIVE_URL)
er = get_resource_name(eurl)
r = get_resource_name(self.url)
if er != r and os.path.exists(self.path):
new_path = self._get_path(self._path, eurl)
shutil.move(self.path, new_path)
self.path = new_path | 0.00363 |
def add_package(self, name):
"""
Registers a single package
:param name: (str) The effect package to add
"""
name, cls_name = parse_package_string(name)
if name in self.package_map:
return
package = EffectPackage(name)
package.load()
self.packages.append(package)
self.package_map[package.name] = package
# Load effect package dependencies
self.polulate(package.effect_packages) | 0.004073 |
def add(self, registry):
""" Add works like replace, but only previously pushed metrics with the
same name (and the same job and instance) will be replaced.
(It uses HTTP method 'POST' to push to the Pushgateway.)
"""
# POST
payload = self.formatter.marshall(registry)
r = requests.post(self.path, data=payload, headers=self.headers) | 0.005038 |
def login_required(function=None, message=None, login_url=None):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
message=message,
login_url=login_url
)
if function:
return actual_decorator(function)
return actual_decorator | 0.002375 |
def uvindex_forecast_around_coords(self, lat, lon):
"""
Queries the OWM Weather API for forecast Ultra Violet values in the next 8
days in the surroundings of the provided geocoordinates.
:param lat: the location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: the location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:return: a list of *UVIndex* instances or empty list if data is not available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* for wrong input values
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat}
json_data = self._uvapi.get_uvi_forecast(params)
uvindex_list = self._parsers['uvindex_list'].parse_JSON(json_data)
return uvindex_list | 0.00498 |
def _dimension(rank0, rankt, dim, singular_values):
""" output dimension """
if dim is None or (isinstance(dim, float) and dim == 1.0):
return min(rank0, rankt)
if isinstance(dim, float):
return np.searchsorted(VAMPModel._cumvar(singular_values), dim) + 1
else:
return np.min([rank0, rankt, dim]) | 0.005495 |
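The float branch selects the smallest number of components whose cumulative variance reaches the requested fraction. A standalone sketch of that idea, under the assumption that VAMPModel._cumvar is the normalized cumulative sum of squared singular values (an assumption about internals not shown here):

import numpy as np

singular_values = np.array([0.95, 0.80, 0.40, 0.10])   # invented values
cumvar = np.cumsum(singular_values ** 2)
cumvar /= cumvar[-1]                                    # normalized cumulative variance
dim = 0.9                                               # keep 90% of the variance
print(np.searchsorted(cumvar, dim) + 1)                 # -> 2 components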
def merge_consecutive_filter_clauses(ir_blocks):
"""Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block."""
if not ir_blocks:
return ir_blocks
new_ir_blocks = [ir_blocks[0]]
for block in ir_blocks[1:]:
last_block = new_ir_blocks[-1]
if isinstance(last_block, Filter) and isinstance(block, Filter):
new_ir_blocks[-1] = Filter(
BinaryComposition(u'&&', last_block.predicate, block.predicate))
else:
new_ir_blocks.append(block)
return new_ir_blocks | 0.005357 |
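A self-contained sketch of the same merge pass, with throwaway namedtuple stand-ins for the compiler's Filter, BinaryComposition and Traverse IR classes (the real classes are assumptions here):

from collections import namedtuple

Filter = namedtuple('Filter', ['predicate'])
BinaryComposition = namedtuple('BinaryComposition', ['operator', 'left', 'right'])
Traverse = namedtuple('Traverse', ['edge'])

def merge_filters(ir_blocks):
    # Fold runs of consecutive Filter blocks into one Filter joined with '&&'.
    if not ir_blocks:
        return ir_blocks
    merged = [ir_blocks[0]]
    for block in ir_blocks[1:]:
        if isinstance(merged[-1], Filter) and isinstance(block, Filter):
            merged[-1] = Filter(BinaryComposition(u'&&', merged[-1].predicate, block.predicate))
        else:
            merged.append(block)
    return merged

blocks = [Filter('age > 10'), Filter('name != "Bob"'), Traverse('out_knows')]
print(merge_filters(blocks))   # the two filters collapse into a single '&&' filter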
def observe_all(self, callback: Callable[[str, Any, Any], None]):
"""Subscribes to all keys changes"""
self._all_callbacks.append(callback) | 0.012903 |
def __init_keystone_session(self):
"""Create and return a Keystone session object."""
api = self._identity_api_version # for readability
tried = []
if api in ['3', None]:
sess = self.__init_keystone_session_v3(check=(api is None))
tried.append('v3')
if sess:
return sess
if api in ['2', None]:
sess = self.__init_keystone_session_v2(check=(api is None))
tried.append('v2')
if sess:
return sess
raise RuntimeError(
"Cannot establish Keystone session (tried: {0})."
.format(', '.join(tried))) | 0.002999 |
def _validate_config(self):
""" Handle and check configuration.
"""
groups = dict(
job=defaultdict(Bunch),
httpd=defaultdict(Bunch),
)
for key, val in config.torque.items():
# Auto-convert numbers and bools
if val.isdigit():
config.torque[key] = val = int(val)
elif val.lower() in (matching.TRUE | matching.FALSE):
val = matching.truth(str(val), key)
# Assemble grouped parameters
stem = key.split('.', 1)[0]
if key == "httpd.active":
groups[stem]["active"] = val
elif stem in groups:
try:
stem, name, param = key.split('.', 2)
except (TypeError, ValueError):
self.fatal("Bad %s configuration key %r (expecting %s.NAME.PARAM)" % (stem, key, stem))
else:
groups[stem][name][param] = val
for key, val in groups.iteritems():
setattr(self, key.replace("job", "jobs"), Bunch(val))
# Validate httpd config
if self.httpd.active:
if self.httpd.waitress.url_scheme not in ("http", "https"):
self.fatal("HTTP URL scheme must be either 'http' or 'https'")
if not isinstance(self.httpd.waitress.port, int) or not(1024 <= self.httpd.waitress.port < 65536):
self.fatal("HTTP port must be a 16 bit number >= 1024")
# Validate jobs
for name, params in self.jobs.items():
for key in ("handler", "schedule"):
if key not in params:
self.fatal("Job '%s' is missing the required 'job.%s.%s' parameter" % (name, name, key))
bool_param = lambda k, default, p=params: matching.truth(p.get(k, default), "job.%s.%s" % (name, k))
params.job_name = name
params.dry_run = bool_param("dry_run", False) or self.options.dry_run
params.active = bool_param("active", True)
params.schedule = self._parse_schedule(params.schedule)
if params.active:
try:
params.handler = pymagic.import_name(params.handler)
except ImportError as exc:
self.fatal("Bad handler name '%s' for job '%s':\n %s" % (params.handler, name, exc)) | 0.003755 |
def create_temporary_workspace(version=None, mode=0700):
# type: (str, int) -> str
"""
    Create a temporary directory, optionally creating a subdirectory named after ``version``.
:rtype: str
:return: Directory path
"""
workspace = mkdtemp('hemp_')
if version is not None:
workspace = join(workspace, version)
mkdir(workspace, mode)
chmod(workspace, mode)
return workspace | 0.002433 |
def handle_offchain_secretreveal(
mediator_state: MediatorTransferState,
mediator_state_change: ReceiveSecretReveal,
channelidentifiers_to_channels: ChannelMap,
pseudo_random_generator: random.Random,
block_number: BlockNumber,
block_hash: BlockHash,
) -> TransitionResult[MediatorTransferState]:
""" Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. """
is_valid_reveal = is_valid_secret_reveal(
state_change=mediator_state_change,
transfer_secrethash=mediator_state.secrethash,
secret=mediator_state_change.secret,
)
is_secret_unknown = mediator_state.secret is None
# a SecretReveal should be rejected if the payer transfer
# has expired. To check for this, we use the last
# transfer pair.
transfer_pair = mediator_state.transfers_pair[-1]
payer_transfer = transfer_pair.payer_transfer
channel_identifier = payer_transfer.balance_proof.channel_identifier
payer_channel = channelidentifiers_to_channels.get(channel_identifier)
if not payer_channel:
return TransitionResult(mediator_state, list())
has_payer_transfer_expired = channel.is_transfer_expired(
transfer=transfer_pair.payer_transfer,
affected_channel=payer_channel,
block_number=block_number,
)
if is_secret_unknown and is_valid_reveal and not has_payer_transfer_expired:
iteration = secret_learned(
state=mediator_state,
channelidentifiers_to_channels=channelidentifiers_to_channels,
pseudo_random_generator=pseudo_random_generator,
block_number=block_number,
block_hash=block_hash,
secret=mediator_state_change.secret,
secrethash=mediator_state_change.secrethash,
payee_address=mediator_state_change.sender,
)
else:
iteration = TransitionResult(mediator_state, list())
return iteration | 0.001517 |
def _add_sub(self, other, op):
"""Implements both addition and subtraction."""
if not isinstance(other, Number):
return NotImplemented
# If either side is unitless, inherit the other side's units. Skip all
# the rest of the conversion math, too.
if self.is_unitless or other.is_unitless:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer or other.unit_numer,
unit_denom=self.unit_denom or other.unit_denom,
)
# Likewise, if either side is zero, it can auto-cast to any units
if self.value == 0:
return Number(
op(self.value, other.value),
unit_numer=other.unit_numer,
unit_denom=other.unit_denom,
)
elif other.value == 0:
return Number(
op(self.value, other.value),
unit_numer=self.unit_numer,
unit_denom=self.unit_denom,
)
# Reduce both operands to the same units
left = self.to_base_units()
right = other.to_base_units()
if left.unit_numer != right.unit_numer or left.unit_denom != right.unit_denom:
raise ValueError("Can't reconcile units: %r and %r" % (self, other))
new_amount = op(left.value, right.value)
# Convert back to the left side's units
if left.value != 0:
new_amount = new_amount * self.value / left.value
return Number(new_amount, unit_numer=self.unit_numer, unit_denom=self.unit_denom) | 0.003102 |
def delimiter_groups(line, begin_delim=begin_delim,
end_delim=end_delim):
"""Split a line into alternating groups.
The first group cannot have a line feed inserted,
the next one can, etc.
"""
text = []
line = iter(line)
while True:
# First build and yield an unsplittable group
for item in line:
text.append(item)
if item in begin_delim:
break
if not text:
break
yield text
# Now build and yield a splittable group
level = 0
text = []
for item in line:
if item in begin_delim:
level += 1
elif item in end_delim:
level -= 1
if level < 0:
yield text
text = [item]
break
text.append(item)
else:
assert not text, text
break | 0.001034 |
def update_favorite_song(self, song_id, op):
"""
:param str op: `add` or `del`
"""
op = 'un' if op == 'del' else ''
action = 'mtop.alimusic.fav.songfavoriteservice.{}favoritesong'.format(op)
payload = {
'songId': song_id
}
code, msg, rv = self.request(action, payload)
return rv['data']['data']['status'] == 'true' | 0.007538 |
def dump_dict_to_file(dictionary, filepath):
"""Dump @dictionary as a line to @filepath."""
create_dirs(
os.path.dirname(filepath)
)
with open(filepath, 'a') as outfile:
json.dump(dictionary, outfile)
outfile.write('\n') | 0.003831 |
def __set_transaction_detail(self, *args, **kwargs):
"""
Checks kwargs for 'customer_transaction_id' and sets it if present.
"""
customer_transaction_id = kwargs.get('customer_transaction_id', None)
if customer_transaction_id:
transaction_detail = self.client.factory.create('TransactionDetail')
transaction_detail.CustomerTransactionId = customer_transaction_id
self.logger.debug(transaction_detail)
self.TransactionDetail = transaction_detail | 0.005629 |
def parse_bind(bind):
"""Parses a connection string and creates SQL trace metadata"""
if isinstance(bind, Connection):
engine = bind.engine
else:
engine = bind
m = re.match(r"Engine\((.*?)\)", str(engine))
if m is not None:
u = urlparse(m.group(1))
# Add Scheme to uses_netloc or // will be missing from url.
uses_netloc.append(u.scheme)
safe_url = ""
if u.password is None:
safe_url = u.geturl()
else:
# Strip password from URL
host_info = u.netloc.rpartition('@')[-1]
parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
safe_url = parts.geturl()
sql = {}
sql['database_type'] = u.scheme
sql['url'] = safe_url
if u.username is not None:
sql['user'] = "{}".format(u.username)
return sql | 0.001122 |
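A standalone sketch of the password-stripping step on an invented engine repr (the connection string, user and host below are purely illustrative):

import re
from urllib.parse import urlparse, uses_netloc

engine_repr = "Engine(postgresql://alice:s3cret@db.example.com:5432/mydb)"   # hypothetical
u = urlparse(re.match(r"Engine\((.*?)\)", engine_repr).group(1))
uses_netloc.append(u.scheme)                 # mirrors the original's precaution for geturl()
host_info = u.netloc.rpartition('@')[-1]     # everything after 'user:password@'
safe_url = u._replace(netloc='{}@{}'.format(u.username, host_info)).geturl()
print(safe_url)                              # postgresql://alice@db.example.com:5432/mydb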
def get(self, aspect):
"""Get a network, system or configure or contextualize with the same id as aspect passed."""
classification = [(network, self.networks), (system, self.systems),
(configure, self.configures)]
aspect_list = [l for t, l in classification if isinstance(aspect, t)]
assert len(aspect_list) == 1, "Unexpected aspect for RADL."
aspect_list = aspect_list[0]
old_aspect = [a for a in aspect_list if a.getId() == aspect.getId()]
return old_aspect[0] if old_aspect else None | 0.005263 |
def apply(query, replacements=None, vars=None, allow_io=False,
libs=("stdcore", "stdmath")):
"""Run 'query' on 'vars' and return the result(s).
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or
as an array (for positional interpolation).
vars: The variables to be supplied to the query solver.
allow_io: (Default: False) Include 'stdio' and allow IO functions.
libs: Iterable of library modules to include, given as strings.
Default: ('stdcore', 'stdmath')
For full list of bundled libraries, see efilter.stdlib.
Note: 'stdcore' must always be included.
WARNING: Including 'stdio' must be done in conjunction with
'allow_io'. This is to make enabling IO explicit. 'allow_io'
implies that 'stdio' should be included and so adding it to
libs is actually not required.
Notes on IO: If allow_io is set to True then 'stdio' will be included and
the EFILTER query will be allowed to read files from disk. Use this with
caution.
If the query returns a lazily-evaluated result that depends on reading
from a file (for example, filtering a CSV file) then the file
descriptor will remain open until the returned result is deallocated.
The caller is responsible for releasing the result when it's no longer
needed.
Returns:
The result of evaluating the query. The type of the output will depend
on the query, and can be predicted using 'infer' (provided reflection
callbacks are implemented). In the common case of a SELECT query the
return value will be an iterable of filtered data (actually an object
implementing IRepeated, as well as __iter__.)
A word on cardinality of the return value:
Types in EFILTER always refer to a scalar. If apply returns more than
one value, the type returned by 'infer' will refer to the type of
the value inside the returned container.
If you're unsure whether your query returns one or more values (rows),
use the 'getvalues' function.
Raises:
efilter.errors.EfilterError if there are issues with the query.
Examples:
apply("5 + 5") # -> 10
apply("SELECT * FROM people WHERE age > 10",
vars={"people":({"age": 10, "name": "Bob"},
{"age": 20, "name": "Alice"},
{"age": 30, "name": "Eve"}))
# This will replace the question mark (?) with the string "Bob" in a
# safe manner, preventing SQL injection.
apply("SELECT * FROM people WHERE name = ?", replacements=["Bob"], ...)
"""
if vars is None:
vars = {}
if allow_io:
libs = list(libs)
libs.append("stdio")
query = q.Query(query, params=replacements)
stdcore_included = False
for lib in libs:
if lib == "stdcore":
stdcore_included = True
# 'solve' always includes this automatically - we don't have a say
# in the matter.
continue
if lib == "stdio" and not allow_io:
raise ValueError("Attempting to include 'stdio' but IO not "
"enabled. Pass allow_io=True.")
module = std_core.LibraryModule.ALL_MODULES.get(lib)
        if not module:
raise ValueError("There is no standard library module %r." % lib)
vars = scope.ScopeStack(module, vars)
if not stdcore_included:
raise ValueError("EFILTER cannot work without standard lib 'stdcore'.")
results = solve.solve(query, vars).value
return results | 0.000264 |
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
try:
result.index = self.res_columns
except ValueError:
pass
try:
result.columns = self.res_index
except ValueError:
pass
return result | 0.004367 |
def LSL(value, amount, width):
"""
The ARM LSL (logical left shift) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype int or BitVec
"""
if amount == 0:
return value
result, _ = LSL_C(value, amount, width)
return result | 0.002404 |
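LSL_C is defined elsewhere; for plain integers, the shift-and-truncate it performs might look like the sketch below (the carry-out that LSL_C also returns is omitted, and BitVec operands are not handled):

def lsl(value, amount, width):
    # Logical shift left of `value` by `amount` bits, truncated to `width` bits.
    if amount == 0:
        return value
    return (value << amount) & ((1 << width) - 1)

print(bin(lsl(0b1011, 2, 4)))   # 0b1100 -- bits shifted past the width are discarded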
def handle_abort(self, obj):
"""Handle an incoming ``Data`` abort processing request.
.. IMPORTANT::
This only makes manager's state consistent and doesn't
affect Data object in any way. Any changes to the Data
must be applied over ``handle_update`` method.
:param obj: The Channels message object. Command object format:
.. code-block:: none
{
'command': 'abort',
'data_id': [id of the :class:`~resolwe.flow.models.Data` object
this command was triggered by],
}
"""
async_to_sync(consumer.send_event)({
WorkerProtocol.COMMAND: WorkerProtocol.ABORT,
WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID],
WorkerProtocol.FINISH_COMMUNICATE_EXTRA: {
'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'),
},
}) | 0.003933 |
def list(self, order=values.unset, from_=values.unset, bounds=values.unset,
limit=None, page_size=None):
"""
Lists SyncListItemInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param SyncListItemInstance.QueryResultOrder order: The order
:param unicode from_: The from
:param SyncListItemInstance.QueryFromBoundType bounds: The bounds
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemInstance]
"""
return list(self.stream(order=order, from_=from_, bounds=bounds, limit=limit, page_size=page_size, )) | 0.008333 |
def update_from_pypi(self):
"""Call get_latest_version and then save the object."""
package = pypi.Package(self.package_name)
self.licence = package.licence()
if self.is_parseable:
self.latest_version = package.latest_version()
self.next_version = package.next_version(self.current_version)
self.diff_status = pypi.version_diff(self.current_version, self.latest_version)
self.python_support = package.python_support()
self.django_support = package.django_support()
self.supports_py3 = package.supports_py3()
self.checked_pypi_at = tz_now()
self.save()
return self | 0.004342 |
def add_output_path(path: str = None) -> str:
"""
Adds the specified path to the output logging paths if it is not
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead.
"""
cleaned = paths.clean(path or os.getcwd())
if cleaned not in _logging_paths:
_logging_paths.append(cleaned)
return cleaned | 0.002053 |
def no_auth(self):
"""Unset authentication temporarily as a context manager."""
old_basic_auth, self.auth = self.auth, None
old_token_auth = self.headers.pop('Authorization', None)
yield
self.auth = old_basic_auth
if old_token_auth:
self.headers['Authorization'] = old_token_auth | 0.005865 |
def returnAllChips(self,extname=None,exclude=None):
""" Returns a list containing all the chips which match the
extname given minus those specified for exclusion (if any).
"""
extensions = self._findExtnames(extname=extname,exclude=exclude)
chiplist = []
for i in range(1,self._nextend+1,1):
if 'extver' in self._image[i].header:
extver = self._image[i].header['extver']
else:
extver = 1
if hasattr(self._image[i],'_extension') and \
"IMAGE" in self._image[i]._extension:
if (self._image[i].extname in extensions) and self._image[self.scienceExt,extver].group_member:
chiplist.append(self._image[i])
return chiplist | 0.013836 |
def _add_iam_policy_binding(service_account, roles):
"""Add new IAM roles for the service account."""
project_id = service_account["projectId"]
email = service_account["email"]
member_id = "serviceAccount:" + email
policy = crm.projects().getIamPolicy(resource=project_id).execute()
already_configured = True
for role in roles:
role_exists = False
for binding in policy["bindings"]:
if binding["role"] == role:
if member_id not in binding["members"]:
binding["members"].append(member_id)
already_configured = False
role_exists = True
if not role_exists:
already_configured = False
policy["bindings"].append({
"members": [member_id],
"role": role,
})
if already_configured:
# In some managed environments, an admin needs to grant the
# roles, so only call setIamPolicy if needed.
return
result = crm.projects().setIamPolicy(
resource=project_id, body={
"policy": policy,
}).execute()
return result | 0.000854 |
def unpack_from(self, buff, offset=0):
"""Unpack the next bytes from a file object."""
return self._create(super(DictStruct, self).unpack_from(buff, offset)) | 0.011561 |
def welch_anova(dv=None, between=None, data=None, export_filename=None):
"""One-way Welch ANOVA.
Parameters
----------
dv : string
        Name of column containing the dependent variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame. Note that this function can also directly be used as a
Pandas method, in which case this argument is no longer needed.
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
aov : DataFrame
ANOVA summary ::
'Source' : Factor names
'SS' : Sums of squares
'DF' : Degrees of freedom
'MS' : Mean squares
'F' : F-values
'p-unc' : uncorrected p-values
'np2' : Partial eta-square effect sizes
See Also
--------
anova : One-way ANOVA
rm_anova : One-way and two-way repeated measures ANOVA
mixed_anova : Two way mixed ANOVA
kruskal : Non-parametric one-way ANOVA
Notes
-----
The classic ANOVA is very powerful when the groups are normally distributed
and have equal variances. However, when the groups have unequal variances,
it is best to use the Welch ANOVA that better controls for
type I error (Liu 2015). The homogeneity of variances can be measured with
the `homoscedasticity` function. The two other assumptions of
    normality and independence remain.
The main idea of Welch ANOVA is to use a weight :math:`w_i` to reduce
the effect of unequal variances. This weight is calculated using the sample
size :math:`n_i` and variance :math:`s_i^2` of each group
:math:`i=1,...,r`:
.. math:: w_i = \\frac{n_i}{s_i^2}
Using these weights, the adjusted grand mean of the data is:
.. math::
\\overline{Y}_{welch} = \\frac{\\sum_{i=1}^r w_i\\overline{Y}_i}
{\\sum w}
where :math:`\\overline{Y}_i` is the mean of the :math:`i` group.
The treatment sums of squares is defined as:
.. math::
SS_{treatment} = \\sum_{i=1}^r w_i
(\\overline{Y}_i - \\overline{Y}_{welch})^2
We then need to calculate a term lambda:
.. math::
\\Lambda = \\frac{3\\sum_{i=1}^r(\\frac{1}{n_i-1})
(1 - \\frac{w_i}{\\sum w})^2}{r^2 - 1}
from which the F-value can be calculated:
.. math::
F_{welch} = \\frac{SS_{treatment} / (r-1)}
{1 + \\frac{2\\Lambda(r-2)}{3}}
and the p-value approximated using a F-distribution with
:math:`(r-1, 1 / \\Lambda)` degrees of freedom.
When the groups are balanced and have equal variances, the optimal post-hoc
test is the Tukey-HSD test (`pairwise_tukey`). If the groups have unequal
variances, the Games-Howell test is more adequate.
Results have been tested against R.
References
----------
.. [1] Liu, Hangcheng. "Comparing Welch's ANOVA, a Kruskal-Wallis test and
traditional ANOVA in case of Heterogeneity of Variance." (2015).
.. [2] Welch, Bernard Lewis. "On the comparison of several mean values:
an alternative approach." Biometrika 38.3/4 (1951): 330-336.
Examples
--------
1. One-way Welch ANOVA on the pain threshold dataset.
>>> from pingouin import welch_anova, read_dataset
>>> df = read_dataset('anova')
>>> aov = welch_anova(dv='Pain threshold', between='Hair color',
... data=df, export_filename='pain_anova.csv')
>>> aov
Source ddof1 ddof2 F p-unc
0 Hair color 3 8.33 5.89 0.018813
"""
# Check data
_check_dataframe(dv=dv, between=between, data=data, effects='between')
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Number of groups
r = data[between].nunique()
ddof1 = r - 1
    # Compute weights and adjusted means
grp = data.groupby(between)[dv]
weights = grp.count() / grp.var()
adj_grandmean = (weights * grp.mean()).sum() / weights.sum()
# Treatment sum of squares
ss_tr = np.sum(weights * np.square(grp.mean() - adj_grandmean))
ms_tr = ss_tr / ddof1
# Calculate lambda, F-value and p-value
lamb = (3 * np.sum((1 / (grp.count() - 1)) *
(1 - (weights / weights.sum()))**2)) / (r**2 - 1)
fval = ms_tr / (1 + (2 * lamb * (r - 2)) / 3)
pval = f.sf(fval, ddof1, 1 / lamb)
# Create output dataframe
aov = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'ddof2': 1 / lamb,
'F': fval,
'p-unc': pval,
}, index=[0])
col_order = ['Source', 'ddof1', 'ddof2', 'F', 'p-unc']
aov = aov.reindex(columns=col_order)
aov[['F', 'ddof2']] = aov[['F', 'ddof2']].round(3)
# Export to .csv
if export_filename is not None:
_export_table(aov, export_filename)
return aov | 0.000194 |
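A purely numeric walk-through of the formulas from the notes above, on three invented samples, using only numpy and scipy (independent of the pingouin helpers the function relies on):

import numpy as np
from scipy.stats import f

groups = [np.array([9.5, 10.1, 10.4, 9.8]),           # invented data
          np.array([12.0, 14.5, 13.1, 15.2, 12.9]),
          np.array([8.0, 8.4, 7.9])]
r = len(groups)
n = np.array([len(g) for g in groups], dtype=float)
means = np.array([g.mean() for g in groups])
variances = np.array([g.var(ddof=1) for g in groups])

w = n / variances                                     # weights w_i = n_i / s_i^2
grand = (w * means).sum() / w.sum()                   # adjusted grand mean
ms_tr = (w * (means - grand) ** 2).sum() / (r - 1)    # SS_treatment / ddof1
lamb = 3 * ((1 / (n - 1)) * (1 - w / w.sum()) ** 2).sum() / (r ** 2 - 1)
fval = ms_tr / (1 + 2 * lamb * (r - 2) / 3)
pval = f.sf(fval, r - 1, 1 / lamb)
print(round(fval, 3), round(float(pval), 4))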
def grid(self, z_x_y):
""" Return the UTFGrid content """
# sources.py -> MapnikRenderer -> grid
(z, x, y) = z_x_y
content = self.reader.grid(z, x, y, self.grid_fields, self.grid_layer)
return content | 0.008333 |
def set_theme(theme=None, for_code=None):
""" set md and code theme """
try:
if theme == 'default':
return
theme = theme or os.environ.get('AXC_THEME', 'random')
# all the themes from here:
themes = read_themes()
if theme == 'random':
rand = randint(0, len(themes)-1)
theme = themes.keys()[rand]
t = themes.get(theme)
if not t or len(t.get('ct')) != 5:
# leave defaults:
return
_for = ''
if for_code:
_for = ' (code)'
if is_app:
print >> sys.stderr, low('theme%s: %s (%s)' % (_for, theme,
t.get('name')))
t = t['ct']
cols = (t[0], t[1], t[2], t[3], t[4])
if for_code:
global CH1, CH2, CH3, CH4, CH5
CH1, CH2, CH3, CH4, CH5 = cols
else:
global H1, H2, H3, H4, H5
# set the colors now from the ansi codes in the theme:
H1, H2, H3, H4, H5 = cols
finally:
if for_code:
build_hl_by_token() | 0.001775 |
def replace_uri(self, src, dest):
"""Replace a uri reference everywhere it appears in the graph with
another one. It could appear as the subject, predicate, or object of
a statement, so for each position loop through each statement that
uses the reference in that position, remove the old statement, and
add the replacement. """
# NB: The hypothetical statement <src> <src> <src> will be removed
# and re-added several times. The subject block will remove it and
# add <dest> <src> <src>. The predicate block will remove that and
# add <dest> <dest> <src>. The object block will then remove that
# and add <dest> <dest> <dest>.
# NB2: The list() call here is necessary. .triples() is a generator:
# It calculates its matches as it progressively iterates through the
# graph. Actively changing the graph inside the for loop while the
# generator is in the middle of examining it risks invalidating the
# generator and could conceivably make it Just Break, depending on
# the implementation of .triples(). Wrapping .triples() in a list()
# forces it to exhaust the generator, running through the entire
# graph to calculate the list of matches before continuing to the
# for loop.
subject_triples = list(self.content.triples((src, None, None)))
for s, p, o in subject_triples:
self.content.remove((src, p, o))
self.content.add((dest, p, o))
predicate_triples = list(self.content.triples((None, src, None)))
for s, p, o in predicate_triples:
self.content.remove((s, src, o))
self.content.add((s, dest, o))
object_triples = list(self.content.triples((None, None, src)))
for s, p, o in object_triples:
self.content.remove((s, p, src))
self.content.add((s, p, dest)) | 0.001033 |
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.') | 0.0033 |
def histogram_equalization(
data,
mask_to_equalize,
number_of_bins=1000,
std_mult_cutoff=4.0,
do_zerotoone_normalization=True,
valid_data_mask=None,
# these are theoretically hooked up, but not useful with only one
# equalization
clip_limit=None,
slope_limit=None,
# these parameters don't do anything, they're just here to mirror those
# in the other call
do_log_scale=False,
log_offset=None,
local_radius_px=None,
out=None):
"""
Perform a histogram equalization on the data selected by mask_to_equalize.
The data will be separated into number_of_bins levels for equalization and
outliers beyond +/- std_mult_cutoff*std will be ignored.
If do_zerotoone_normalization is True the data selected by mask_to_equalize
will be returned in the 0 to 1 range. Otherwise the data selected by
mask_to_equalize will be returned in the 0 to number_of_bins range.
Note: the data will be changed in place.
"""
out = out if out is not None else data.copy()
mask_to_use = mask_to_equalize if valid_data_mask is None else valid_data_mask
LOG.debug("determining DNB data range for histogram equalization")
avg = np.mean(data[mask_to_use])
std = np.std(data[mask_to_use])
# limit our range to +/- std_mult_cutoff*std; e.g. the default
# std_mult_cutoff is 4.0 so about 99.8% of the data
concervative_mask = (data < (avg + std * std_mult_cutoff)) & (
data > (avg - std * std_mult_cutoff)) & mask_to_use
LOG.debug("running histogram equalization")
cumulative_dist_function, temp_bins = _histogram_equalization_helper(
data[concervative_mask],
number_of_bins,
clip_limit=clip_limit,
slope_limit=slope_limit)
# linearly interpolate using the distribution function to get the new
# values
out[mask_to_equalize] = np.interp(data[mask_to_equalize], temp_bins[:-1],
cumulative_dist_function)
# if we were asked to, normalize our data to be between zero and one,
# rather than zero and number_of_bins
if do_zerotoone_normalization:
_linear_normalization_from_0to1(out, mask_to_equalize, number_of_bins)
return out | 0.000865 |
def apply_to(self, x, columns=False):
"""Apply this rotation to the given object
The argument can be several sorts of objects:
* ``np.array`` with shape (3, )
* ``np.array`` with shape (N, 3)
* ``np.array`` with shape (3, N), use ``columns=True``
* ``Translation``
* ``Rotation``
* ``Complete``
* ``UnitCell``
In case of arrays, the 3D vectors are rotated. In case of trans-
formations, a transformation is returned that consists of this
rotation applied AFTER the given translation. In case of a unit cell,
a unit cell with rotated cell vectors is returned.
This method is equivalent to ``self*x``.
"""
if isinstance(x, np.ndarray) and len(x.shape) == 2 and x.shape[0] == 3 and columns:
return np.dot(self.r, x)
if isinstance(x, np.ndarray) and (x.shape == (3, ) or (len(x.shape) == 2 and x.shape[1] == 3)) and not columns:
return np.dot(x, self.r.transpose())
elif isinstance(x, Complete):
return Complete(np.dot(self.r, x.r), np.dot(self.r, x.t))
elif isinstance(x, Translation):
return Complete(self.r, np.dot(self.r, x.t))
elif isinstance(x, Rotation):
return Rotation(np.dot(self.r, x.r))
elif isinstance(x, UnitCell):
return UnitCell(np.dot(self.r, x.matrix), x.active)
else:
raise ValueError("Can not apply this rotation to %s" % x) | 0.00324 |
def from_csv(cls, name=None, col_names=None, sep=None, **kwargs):
"""Create instrument metadata object from csv.
Parameters
----------
name : string
absolute filename for csv file or name of file
stored in pandas instruments location
col_names : list-like collection of strings
column names in csv and resultant meta object
sep : string
            column separator for supplied csv filename
Note
----
column names must include at least ['name', 'long_name', 'units'],
assumed if col_names is None.
"""
import pysat
req_names = ['name','long_name','units']
if col_names is None:
col_names = req_names
elif not all([i in col_names for i in req_names]):
raise ValueError('col_names must include name, long_name, units.')
if sep is None:
sep = ','
if name is None:
raise ValueError('Must supply an instrument name or file path.')
elif not isinstance(name, str):
raise ValueError('keyword name must be related to a string')
elif not os.path.isfile(name):
# Not a real file, assume input is a pysat instrument name
# and look in the standard pysat location.
test = os.path.join(pysat.__path__[0],'instruments',name)
if os.path.isfile(test):
name = test
else:
#trying to form an absolute path for success
test = os.path.abspath(name)
if not os.path.isfile(test):
raise ValueError("Unable to create valid file path.")
else:
#success
name = test
mdata = pds.read_csv(name, names=col_names, sep=sep, **kwargs)
if not mdata.empty:
# make sure the data name is the index
mdata.index = mdata['name']
del mdata['name']
return cls(metadata=mdata)
else:
raise ValueError('Unable to retrieve information from ' + name) | 0.006378 |
def deserialize(self, obj=None, ignore_non_existing=False):
"""
:type obj dict|None
:type ignore_non_existing bool
"""
if not isinstance(obj, dict):
if ignore_non_existing:
return
raise TypeError("Wrong data '{}' passed for '{}' deserialization".format(obj, self.__class__.__name__))
definitions = {k: v for k, v in self.__class__.__dict__.items() if k[:1] != "_"}
def_property_keys = set(definitions.keys())
property_keys = set(obj.keys())
existing_keys = def_property_keys & property_keys
non_defined_keys = property_keys - def_property_keys
non_existing_keys = def_property_keys - property_keys
if not ignore_non_existing and non_defined_keys:
raise TypeError(self.__class__.__name__ + " doesn't contain properties: {}".format(", ".join(non_defined_keys)))
for k in existing_keys:
v = obj[k]
attr_type = definitions[k]
try:
if isinstance(attr_type, list) and self._isclass(attr_type[0], BaseView):
if isinstance(v, list):
obj_list = [attr_type[0](serialized_obj=v_item, ignore_non_existing=ignore_non_existing) for v_item in v]
else:
obj_list = [attr_type[0](serialized_obj=v, ignore_non_existing=ignore_non_existing)]
self.__setattr__(k, obj_list)
elif self._isclass(attr_type, BaseView):
self.__setattr__(k, attr_type(v))
else:
self.__setattr__(k, v)
except IndexError:
self.__setattr__(k, v) # check test_empty_view_deserialization test suite for test-case
for k in non_existing_keys:
attr_type = definitions[k]
if attr_type is None:
self.__setattr__(k, None)
elif isinstance(attr_type, (list, set, tuple, dict)) and len(attr_type) == 0:
self.__setattr__(k, attr_type.__class__())
elif isinstance(attr_type, (list, set, tuple)) and self._isclass(attr_type[0], BaseView):
self.__setattr__(k, attr_type.__class__())
else:
self.__setattr__(k, attr_type.__class__(attr_type))
return self | 0.012993 |
def params(self, hidden=True):
"""
Gets all instance parameters, and their *cast* values.
:return: dict of the form: ``{<name>: <value>, ... }``
:rtype: :class:`dict`
"""
param_names = self.class_param_names(hidden=hidden)
return dict(
(name, getattr(self, name))
for name in param_names
) | 0.005291 |
def coupl_model5(self):
""" Toggle switch.
"""
self.Coupl = -0.2*self.Adj
self.Coupl[2,0] *= -1
self.Coupl[3,0] *= -1
self.Coupl[4,1] *= -1
self.Coupl[5,1] *= -1 | 0.02765 |
def force_seek(fd, offset, chunk=CHUNK):
""" Force adjustment of read cursort to specified offset
This function takes a file descriptor ``fd`` and tries to seek to position
specified by ``offset`` argument. If the descriptor does not support the
``seek()`` method, it will fall back to ``emulate_seek()``.
The optional ``chunk`` argument can be used to adjust the chunk size for
``emulate_seek()``.
"""
try:
fd.seek(offset)
except (AttributeError, io.UnsupportedOperation):
# This file handle probably has no seek()
emulate_seek(fd, offset, chunk) | 0.001642 |
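emulate_seek lives elsewhere in this module; a minimal sketch of the fallback it describes (advancing a non-seekable stream by reading and discarding bytes in chunks, forward only) could look like:

import io

CHUNK = 1024

def emulate_seek(fd, offset, chunk=CHUNK):
    # Read and throw away `offset` bytes so the next read starts at `offset`.
    remaining = offset
    while remaining > 0:
        data = fd.read(min(chunk, remaining))
        if not data:
            break
        remaining -= len(data)

buf = io.BytesIO(b"abcdefghij")
emulate_seek(buf, 4)
print(buf.read())   # b'efghij'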
def get_quoted_columns(self, platform):
"""
Returns the quoted representation of the column names
the constraint is associated with.
But only if they were defined with one or a column name
is a keyword reserved by the platform.
Otherwise the plain unquoted value as inserted is returned.
:param platform: The platform to use for quotation.
:type platform: Platform
:rtype: list
"""
columns = []
for column in self._columns.values():
columns.append(column.get_quoted_name(platform))
return columns | 0.003241 |
def _item_check(self, dim_vals, data):
"""
Applies optional checks to individual data elements before
        they are inserted, ensuring that they are of a certain
        type. Subclasses may implement further element restrictions.
"""
if not self._check_items:
return
elif self.data_type is not None and not isinstance(data, self.data_type):
if isinstance(self.data_type, tuple):
data_type = tuple(dt.__name__ for dt in self.data_type)
else:
data_type = self.data_type.__name__
raise TypeError('{slf} does not accept {data} type, data elements have '
'to be a {restr}.'.format(slf=type(self).__name__,
data=type(data).__name__,
restr=data_type))
elif not len(dim_vals) == self.ndims:
raise KeyError('The data contains keys of length %d, but the kdims '
'only declare %d dimensions. Ensure that the number '
'of kdims match the length of the keys in your data.'
% (len(dim_vals), self.ndims)) | 0.0056 |
def get(self, key, no_cache=False):
"""Return the value of a single preference using a dotted path key
:arg no_cache: if true, the cache is bypassed
"""
section, name = self.parse_lookup(key)
preference = self.registry.get(
section=section, name=name, fallback=False)
if no_cache or not preferences_settings.ENABLE_CACHE:
return self.get_db_pref(section=section, name=name).value
try:
return self.from_cache(section, name)
except CachedValueNotFound:
pass
db_pref = self.get_db_pref(section=section, name=name)
self.to_cache(db_pref)
return db_pref.value | 0.00289 |
def notify(self, data):
"""Notify subscribers that data was received"""
triggered_channels = []
for channel_name, items in data.items():
for item in items or []:
LOG.debug('notify received: %s', item)
try:
# some channels return strings rather than objects (e.g. de-registrations),
# normalize them here
item = {'value': item} if isinstance(item, six.string_types) else dict(item)
# inject the channel name to the data (so channels can filter on it)
item['channel'] = channel_name
triggered_channels.extend(list(self._notify_single_item(item)))
except Exception: # noqa
LOG.exception('Subscription notification failed')
return triggered_channels | 0.006834 |
def get_slope(self):
"""Return the slope m of this line segment."""
# y1 = m*x1 + t
# y2 = m*x2 + t => y1-y2 = m*(x1-x2) <=> m = (y1-y2)/(x1-x2)
return ((self.p1.y-self.p2.y) / (self.p1.x-self.p2.x)) | 0.008658 |
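A quick worked check of the slope formula with a throwaway point type (the real segment presumably stores p1/p2 with x and y attributes):

from collections import namedtuple

Point = namedtuple('Point', 'x y')          # stand-in for the segment's point class
p1, p2 = Point(0.0, 1.0), Point(2.0, 5.0)
print((p1.y - p2.y) / (p1.x - p2.x))        # -> 2.0: y rises by 2 for every 1 in x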
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate sigmoid cross entropy for one-hot lanels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross_entropy (scalar), weights
"""
with tf.variable_scope("sigmoid_cross_entropy_one_hot",
values=[logits, labels]):
del weights_fn
cross_entropy = tf.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits)
return cross_entropy, tf.constant(1.0) | 0.004464 |
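A numpy sketch of the per-element arithmetic that the TensorFlow loss reduces to a scalar, written in the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)); this is not the TensorFlow API, just the underlying math:

import numpy as np

def sigmoid_xent(logits, labels):
    # Stable elementwise sigmoid cross entropy between logits x and targets z.
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

logits = np.array([[2.0, -1.0, 0.5]])
labels = np.array([[1.0, 0.0, 0.0]])
print(sigmoid_xent(logits, labels).mean())   # mean over batch and classes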
def doc(model):
""" Get documentation object for an SqlAlchemy model
:param model: Model
:type model: sqlalchemy.ext.declarative.DeclarativeBase
:rtype: SaModelDoc
"""
ins = inspect(model)
return SaModelDoc(
name=model.__name__,
table=[t.name for t in ins.tables],
doc=getdoc(ins.class_),
columns=_model_columns(ins),
primary=_model_primary(ins),
foreign=_model_foreign(ins),
unique=_model_unique(ins),
relations=_model_relations(ins)
) | 0.001873 |
def from_buffer(string, serverEndpoint=ServerEndpoint):
'''
Parse from buffered content
:param string: buffered content
:param serverEndpoint: Tika server URL (Optional)
:return: parsed content
'''
status, response = callServer('put', serverEndpoint, '/unpack/all', string,
{'Accept': 'application/x-tar'}, False,
rawResponse=True)
return _parse((status, response)) | 0.002141 |
def clear(cls, persistent=False):
""" If persistent is True, delete the temporary file
Parameters:
----------------------------------------------------------------
persistent: if True, custom configuration file is deleted
"""
if persistent:
try:
os.unlink(cls.getPath())
except OSError, e:
if e.errno != errno.ENOENT:
_getLogger().exception("Error %s while trying to remove dynamic " \
"configuration file: %s", e.errno,
cls.getPath())
raise
cls._path = None | 0.009983 |
def bloch_vector_of(self, qubit: ops.Qid) -> np.ndarray:
"""Returns the bloch vector of a qubit in the state.
Calculates the bloch vector of the given qubit
in the state given by self.state_vector(), given that
self.state_vector() follows the standard Kronecker convention of
numpy.kron.
Args:
            qubit: qubit whose Bloch vector we want to find.
Returns:
A length 3 numpy array representing the qubit's bloch vector.
Raises:
ValueError: if the size of the state represents more than 25 qubits.
IndexError: if index is out of range for the number of qubits
corresponding to the state.
"""
return bloch_vector_from_state_vector(self.state_vector(),
self.qubit_map[qubit]) | 0.003488 |
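For a single qubit the Bloch vector is just the vector of Pauli expectation values; a numpy sketch for the |+> state, bypassing the simulator class and its qubit_map entirely:

import numpy as np

state = np.array([1.0, 1.0]) / np.sqrt(2)             # |+> = (|0> + |1>)/sqrt(2)
pauli_x = np.array([[0, 1], [1, 0]], dtype=complex)
pauli_y = np.array([[0, -1j], [1j, 0]])
pauli_z = np.array([[1, 0], [0, -1]], dtype=complex)
bloch = [float(np.real(state.conj() @ p @ state)) for p in (pauli_x, pauli_y, pauli_z)]
print(np.round(bloch, 6))                             # [1. 0. 0.]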
def _case_insensitive_rpartition(input_string: str, separator: str) -> typing.Tuple[str, str, str]:
"""Same as str.rpartition(), except that the partitioning is done case insensitive."""
lowered_input_string = input_string.lower()
lowered_separator = separator.lower()
try:
split_index = lowered_input_string.rindex(lowered_separator)
except ValueError:
# Did not find the separator in the input_string.
# Follow https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str
# str.rpartition documentation and return the tuple ("", "", unmodified_input) in this case
return "", "", input_string
else:
split_index_2 = split_index+len(separator)
return input_string[:split_index], input_string[split_index: split_index_2], input_string[split_index_2:] | 0.007865 |
def launch_new_checks(self):
"""Launch checks that are in status
REF: doc/alignak-action-queues.png (4)
:return: None
"""
# queue
for chk in self.checks:
if chk.status not in [ACT_STATUS_QUEUED]:
continue
logger.debug("Launch check: %s", chk.uuid)
self._idletime = 0
self.actions_launched += 1
process = chk.execute()
# Maybe we got a true big problem in the action launching
if process == 'toomanyopenfiles':
# We should die as soon as we return all checks
logger.error("I am dying because of too many open files: %s", chk)
self.i_am_dying = True
else:
if not isinstance(process, string_types):
logger.debug("Launched check: %s, pid=%d", chk.uuid, process.pid) | 0.004415 |
def _expand_data(self, old_data, new_data, group):
""" data expansion - uvision needs filename and path separately. """
for file in old_data:
if file:
extension = file.split(".")[-1].lower()
if extension in self.file_types.keys():
new_data['groups'][group].append(self._expand_one_file(normpath(file),
new_data, extension))
else:
logger.debug("Filetype for file %s not recognized" % file)
if hasattr(self, '_expand_sort_key'):
new_data['groups'][group] = sorted(new_data['groups'][group],
key=self._expand_sort_key) | 0.005175 |
def p_UnionMemberType_anyType(p):
"""UnionMemberType : any "[" "]" TypeSuffix"""
p[0] = helper.unwrapTypeSuffix(model.Array(t=model.SimpleType(
type=model.SimpleType.ANY)), p[4]) | 0.016129 |
def pauli_group(number_of_qubits, case='weight'):
"""Return the Pauli group with 4^n elements.
The phases have been removed.
case 'weight' is ordered by Pauli weights and
case 'tensor' is ordered by I,X,Y,Z counting lowest qubit fastest.
Args:
number_of_qubits (int): number of qubits
case (str): determines ordering of group elements ('weight' or 'tensor')
Returns:
list: list of Pauli objects
Raises:
QiskitError: case is not 'weight' or 'tensor'
QiskitError: number_of_qubits is larger than 4
"""
if number_of_qubits < 5:
temp_set = []
if case == 'weight':
tmp = pauli_group(number_of_qubits, case='tensor')
# sort on the weight of the Pauli operator
return sorted(tmp, key=lambda x: -np.count_nonzero(
np.array(x.to_label(), 'c') == b'I'))
elif case == 'tensor':
# the Pauli set is in tensor order II IX IY IZ XI ...
for k in range(4 ** number_of_qubits):
z = np.zeros(number_of_qubits, dtype=np.bool)
x = np.zeros(number_of_qubits, dtype=np.bool)
# looping over all the qubits
for j in range(number_of_qubits):
# making the Pauli for each j fill it in from the
# end first
element = (k // (4 ** j)) % 4
if element == 1:
x[j] = True
elif element == 2:
z[j] = True
x[j] = True
elif element == 3:
z[j] = True
temp_set.append(Pauli(z, x))
return temp_set
else:
raise QiskitError("Only support 'weight' or 'tensor' cases "
"but you have {}.".format(case))
raise QiskitError("Only support number of qubits is less than 5") | 0.001018 |
def get_global_sources(self):
"""
Gets streams that live outside of the plates
:return: Global streams
"""
sources = []
if self.sources:
for source in self.sources:
if None in source.streams:
sources.append(source.streams[None])
return sources | 0.005747 |
def __gen_hierarchy_file(self, layer):
"""
Hierarchical structures (<structList> elements) are used to create
hierarchically nested annotation graphs (e.g. to express consists-of
relationships or dominance-edges in syntax trees, RST).
A <struct> element will be created for each hierarchical node
(e.g. an NP) with edges (<rel> elements) to each dominated element
(e.g. tokens, other <struct> elements).
NOTE: The types/labels of these newly create hierarchical nodes and
edges aren't stored in this file, but in feat/multiFeat files
referencing this one! See: __gen_struct_anno_files() and
__gen_rel_anno_file()).
There will be one hierarchy file for each top level layer.
TODO: check, if we can omit hierarchy files for layers that don't
contain dominance edges
"""
paula_id = '{0}.{1}.{2}_{3}'.format(layer, self.corpus_name, self.name,
layer)
self.paulamap['hierarchy'][layer] = paula_id
E, tree = gen_paula_etree(paula_id)
dominance_edges = select_edges_by(
self.dg, layer=layer, edge_type=EdgeTypes.dominance_relation,
data=True)
span_edges = select_edges_by(
self.dg, layer=layer, edge_type=EdgeTypes.spanning_relation,
data=True)
dominance_dict = defaultdict(lambda: defaultdict(str))
for source_id, target_id, edge_attrs in dominance_edges:
if source_id != layer+':root_node':
dominance_dict[source_id][target_id] = edge_attrs
# in PAULA XML, token spans are also part of the hierarchy
for source_id, target_id, edge_attrs in span_edges:
if istoken(self.dg, target_id):
dominance_dict[source_id][target_id] = edge_attrs
# NOTE: we don't add a base file here, because the nodes could be
# tokens or structural nodes
slist = E('structList', {'type': layer})
for source_id in dominance_dict:
struct = E('struct',
{'id': str(source_id)})
if self.human_readable:
struct.append(Comment(self.dg.node[source_id].get('label')))
for target_id in dominance_dict[source_id]:
if istoken(self.dg, target_id):
href = '{0}.xml#{1}'.format(self.paulamap['tokenization'],
target_id)
else:
href = '#{0}'.format(target_id)
rel = E(
'rel',
{'id': 'rel_{0}_{1}'.format(source_id, target_id),
'type': dominance_dict[source_id][target_id]['edge_type'],
XLINKHREF: href})
struct.append(rel)
if self.human_readable:
struct.append(
Comment(self.dg.node[target_id].get('label')))
slist.append(struct)
tree.append(slist)
self.files[paula_id] = tree
self.file2dtd[paula_id] = PaulaDTDs.struct
return paula_id | 0.001258 |
def build(self, obj=None, queryset=None, push=True):
"""Trigger building of the indexes.
Support passing ``obj`` parameter to the indexes, so we can
trigger build only for one object.
"""
for index in self.indexes:
index.build(obj, queryset, push) | 0.006667 |
def calc_x_from_L(L, y):
""" Calculate the industry output x from L and a y vector
Parameters
----------
L : pandas.DataFrame or numpy.array
Symmetric input output Leontief table
y : pandas.DataFrame or numpy.array
a column vector of the total final demand
Returns
-------
pandas.DataFrame or numpy.array
Industry output x as column vector
        The type is determined by the type of L; if L is a DataFrame, the result keeps the same index as L.
"""
x = L.dot(y)
if type(x) is pd.Series:
x = pd.DataFrame(x)
if type(x) is pd.DataFrame:
x.columns = ['indout']
return x | 0.00159 |
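For illustration, a minimal usage sketch assuming calc_x_from_L above is in scope; the sector names and the numbers in L and y are made up.

import pandas as pd

# Hypothetical 2-sector Leontief inverse L and final-demand column vector y.
L = pd.DataFrame([[1.2, 0.3], [0.1, 1.5]],
                 index=['agri', 'manu'], columns=['agri', 'manu'])
y = pd.DataFrame([[100.0], [200.0]], index=['agri', 'manu'], columns=['final_demand'])

x = calc_x_from_L(L, y)   # computes L.dot(y) and renames the result column to 'indout'
print(x)                  # agri 180.0, manu 310.0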
def create_table(
self, table_name, obj=None, schema=None, database=None, max_rows=None
):
"""
Create a new table in MapD using an Ibis table expression.
Parameters
----------
table_name : string
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with expr, creates an empty table with a
particular schema
database : string, default None (optional)
max_rows : int, Default None
Set the maximum number of rows allowed in a table to create a capped
collection. When this limit is reached, the oldest fragment is
removed. Default = 2^62.
Examples
--------
>>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
"""
_database = self.db_name
self.set_database(database)
if obj is not None:
if isinstance(obj, pd.DataFrame):
raise NotImplementedError(
'Pandas Data Frame input not implemented.'
)
else:
to_insert = obj
ast = self._build_ast(to_insert, MapDDialect.make_context())
select = ast.queries[0]
statement = ddl.CTAS(table_name, select, database=database)
elif schema is not None:
statement = ddl.CreateTableWithSchema(
table_name, schema, database=database, max_rows=max_rows
)
else:
raise com.IbisError('Must pass expr or schema')
result = self._execute(statement, False)
self.set_database(_database)
return result | 0.001706 |
def Jpjmcoeff(ls, m, shift=False) -> sympy.Expr:
r'''Eigenvalue of the $\Op{J}_{+}$ (:class:`Jplus`) operator
.. math::
        \Op{J}_{+} \ket{s, m} = \sqrt{s (s+1) - m (m+1)} \ket{s, m+1}
where the multiplicity $s$ is implied by the size of the Hilbert space
`ls`: there are $2s+1$ eigenstates with $m = -s, -s+1, \dots, s$.
Args:
ls (LocalSpace): The Hilbert space in which the $\Op{J}_{+}$ operator
acts.
m (str or int): If str, the label of the basis state of `hs` to which
the operator is applied. If integer together with ``shift=True``,
the zero-based index of the basis state. Otherwise, directly the
quantum number $m$.
shift (bool): If True for a integer value of `m`, treat `m` as the
zero-based index of the basis state (i.e., shift `m` down by $s$ to
obtain the quantum number $m$)
'''
assert isinstance(ls, SpinSpace)
n = ls.dimension
s = sympify(n - 1) / 2
assert n == int(2 * s + 1)
if isinstance(m, str):
m = ls.basis_labels.index(m) - s # m is now Sympy expression
elif isinstance(m, int):
if shift:
assert 0 <= m < n
m = m - s
return sqrt(s * (s + 1) - m * (m + 1)) | 0.000784 |
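A standalone sanity check of the same coefficient formula, without the SpinSpace machinery; the helper name jplus_coeff is hypothetical.

from sympy import Rational, sqrt

def jplus_coeff(s, m):
    # the same expression Jpjmcoeff evaluates: sqrt(s*(s+1) - m*(m+1))
    return sqrt(s * (s + 1) - m * (m + 1))

print(jplus_coeff(Rational(1, 2), Rational(-1, 2)))  # 1: J+ raises |1/2,-1/2> to |1/2,+1/2>
print(jplus_coeff(1, 1))                             # 0: the top of the ladder is annihilated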
def find_harpoon_options(self, configuration, args_dict):
"""Return us all the harpoon options"""
d = lambda r: {} if r in (None, "", NotSpecified) else r
return MergedOptions.using(
dict(d(configuration.get('harpoon')).items())
, dict(d(args_dict.get("harpoon")).items())
).as_dict() | 0.014451 |
def _pruaf(self):
"""
Return percentage runoff urban adjustment factor.
Methodology source: eqn. 6, Kjeldsen 2010
"""
return 1 + 0.47 * self.catchment.descriptors.urbext(self.year) \
* self.catchment.descriptors.bfihost / (1 - self.catchment.descriptors.bfihost) | 0.012422 |
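A self-contained sketch of the same formula, handy for checking values without a catchment object; the descriptor values are made up.

def pruaf(urbext, bfihost):
    # eqn. 6, Kjeldsen 2010: 1 + 0.47 * URBEXT * BFIHOST / (1 - BFIHOST)
    return 1 + 0.47 * urbext * bfihost / (1 - bfihost)

print(pruaf(urbext=0.30, bfihost=0.50))  # 1.141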
def _filter_schemas(schemas, schema_tables, exclude_table_columns):
"""Wrapper method for _filter_schema to filter multiple schemas."""
return [_filter_schema(s, schema_tables, exclude_table_columns)
for s in schemas] | 0.004219 |
def _get_data_attr(data, attr):
"""Get data object field."""
if isinstance(data, dict):
# `Data` object's id is hydrated as `__id` in expression engine
data = data['__id']
data_obj = Data.objects.get(id=data)
return getattr(data_obj, attr) | 0.003663 |
def get_nested_blocks_spec(self):
"""
Converts allowed_nested_blocks items to NestedXBlockSpec to provide common interface
"""
return [
block_spec if isinstance(block_spec, NestedXBlockSpec) else NestedXBlockSpec(block_spec)
for block_spec in self.allowed_nested_blocks
] | 0.01194 |
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.CompressedHistograms(tag) | 0.001802 |
def from_string(cls, rawstr):
"""
Creates an ApcorData record from the raw string format.
Expected string format:
ap_in ap_out ap_cor apcor_err
"""
try:
args = map(float, rawstr.split())
except Exception as ex:
import sys
logger.error("Failed to convert aperture correction: {}".format(ex))
raise ex
return cls(*args) | 0.006928 |
def DeleteJob(self, job_id, token=None):
"""Deletes cron job with the given URN."""
job_urn = self.CRON_JOBS_PATH.Add(job_id)
aff4.FACTORY.Delete(job_urn, token=token) | 0.005587 |
def _append(self, sh):
'''
Internal. Chains a command after this.
:param sh: Next command.
'''
sh._input = self
self._output = sh
if self._env:
sh._env = dict(self._env)
if self._cwd:
sh._cwd = self._cwd | 0.006873 |
def threshold_monitor_hidden_threshold_monitor_sfp_apply(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
sfp = ET.SubElement(threshold_monitor, "sfp")
apply = ET.SubElement(sfp, "apply")
apply.text = kwargs.pop('apply')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.006515 |
def onesided_cl_to_dlnl(cl):
"""Compute the delta-loglikehood values that corresponds to an
upper limit of the given confidence level.
Parameters
----------
cl : float
Confidence level.
Returns
-------
dlnl : float
Delta-loglikelihood value with respect to the maximum of the
likelihood function.
"""
alpha = 1.0 - cl
return 0.5 * np.power(np.sqrt(2.) * special.erfinv(1 - 2 * alpha), 2.) | 0.002183 |
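A quick numerical check, computing the same expression directly; the printed values are the familiar one-sided delta-loglikelihood thresholds.

import numpy as np
from scipy import special

for cl in (0.95, 0.90):
    alpha = 1.0 - cl
    dlnl = 0.5 * (np.sqrt(2.0) * special.erfinv(1 - 2 * alpha)) ** 2
    print(cl, round(float(dlnl), 3))   # 0.95 -> 1.353, 0.90 -> 0.821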
def get(self, now):
"""
Get a bucket key to compact. If none are available, returns
None. This uses a Lua script to ensure that the bucket key is
popped off the sorted set in an atomic fashion.
:param now: The current time, as a float. Used to ensure the
bucket key has been aged sufficiently to be
quiescent.
:returns: A bucket key ready for compaction, or None if no
bucket keys are available or none have aged
sufficiently.
"""
items = self.script(keys=[self.key], args=[now - self.min_age])
return items[0] if items else None | 0.002928 |
def _parse_memory_embedded_health(self, data):
"""Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: memory size in MB.
:raises IloError, if unable to get the memory details.
"""
memory_mb = 0
memory = self._get_memory_details_value_based_on_model(data)
if memory is None:
msg = "Unable to get memory data. Error: Data missing"
raise exception.IloError(msg)
total_memory_size = 0
for memory_item in memory:
memsize = memory_item[self.MEMORY_SIZE_TAG]["VALUE"]
if memsize != self.MEMORY_SIZE_NOT_PRESENT_TAG:
memory_bytes = (
strutils.string_to_bytes(
memsize.replace(' ', ''), return_int=True))
memory_mb = int(memory_bytes / (1024 * 1024))
total_memory_size = total_memory_size + memory_mb
return total_memory_size | 0.001957 |
def setup_logging(
config='logging.yaml',
default_level=logging.INFO,
env_key='LOG_CFG'
):
"""Setup logging configuration
"""
path = config
value = os.getenv(env_key, None)
if value:
path = value
    if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
else:
print('cannot read: ' + str(path))
logging.basicConfig(level=default_level) | 0.002066 |
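A minimal sketch of a config this function can consume, written from Python so the snippet stays in one language; the formatter and handler names are arbitrary, and the call assumes setup_logging above is in scope.

import logging
import yaml

cfg = {
    'version': 1,
    'formatters': {'simple': {'format': '%(asctime)s %(levelname)s %(message)s'}},
    'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter': 'simple'}},
    'root': {'level': 'INFO', 'handlers': ['console']},
}
with open('logging.yaml', 'w') as f:
    yaml.safe_dump(cfg, f)          # write a dictConfig-style file for setup_logging to read

setup_logging(config='logging.yaml')
logging.getLogger(__name__).info('configured from logging.yaml')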
def _set_lsp_frr_revertive(self, v, load=False):
"""
Setter method for lsp_frr_revertive, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr/lsp_frr_revertive (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_frr_revertive is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_frr_revertive() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_frr_revertive.lsp_frr_revertive, is_container='container', presence=False, yang_name="lsp-frr-revertive", rest_name="revertive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRR revertiveness for the LSP', u'alt-name': u'revertive', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_frr_revertive must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_frr_revertive.lsp_frr_revertive, is_container='container', presence=False, yang_name="lsp-frr-revertive", rest_name="revertive", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRR revertiveness for the LSP', u'alt-name': u'revertive', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_frr_revertive = t
if hasattr(self, '_set'):
self._set() | 0.005147 |
def _canFormAraPhrase( araVerb, otherVerb ):
    ''' Determines whether the given 'ära' (negation) verb (araVerb) agrees with the other verb;
        Takes the following agreement patterns into account:
           singular 2nd person:  ära_neg.o   + V_o
           singular 3rd person:  ära_neg.gu  + V_gu
           plural 1st person:    ära_neg.me  + V_me
                                 ära_neg.me  + V_o
                                 ära_neg.gem + V_gem
           plural 2nd person:    ära_neg.ge  + V_ge
           plural 3rd person:    ära_neg.gu  + V_gu
           passive:              ära_neg.gu  + V_tagu
        If the verbs agree, returns a list of lists, otherwise returns an empty list.
        The first member of the returned list is the list of analysis indexes of the 'ära'
        verb (i.e. which analyses correspond to the 'ära' verb); the second member is the
        list of analysis indexes of the agreeing verb (i.e. which analyses correspond to
        the agreeing verb);
        The index lists are as returned by the method
        wordtemplate.matchingAnalyseIndexes(token);
    '''
global _verbAraAgreements
for i in range(0, len(_verbAraAgreements), 2):
araVerbTemplate = _verbAraAgreements[i]
otherVerbTemplate = _verbAraAgreements[i+1]
matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)
if matchingAraAnalyses:
matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(otherVerb)
if matchingVerbAnalyses:
return [matchingAraAnalyses, matchingVerbAnalyses]
return [] | 0.010133 |
def validate_slashes(param, value, minimum=2, maximum=None, form=None):
"""Ensure that parameter has slashes and minimum parts."""
try:
value = value.split("/")
except ValueError:
value = None
if value:
if len(value) < minimum:
value = None
elif maximum and len(value) > maximum:
value = None
if not value:
form = form or "/".join("VALUE" for _ in range(minimum))
raise click.BadParameter(
"Must be in the form of %(form)s" % {"form": form}, param=param
)
value = [v.strip() for v in value]
if not all(value):
raise click.BadParameter("Individual values cannot be blank", param=param)
return value | 0.002729 |
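A usage sketch assuming the function above is importable; the argument strings are arbitrary, and passing param=None works here because param is only used when raising the error.

import click

print(validate_slashes(param=None, value="owner/repo"))    # ['owner', 'repo']

try:
    validate_slashes(param=None, value="missing-slash")
except click.BadParameter as exc:
    print(exc.message)                                     # Must be in the form of VALUE/VALUE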
def median2D(const, bin1, label1, bin2, label2, data_label,
returnData=False):
"""Return a 2D average of data_label over a season and label1, label2.
Parameters
----------
const: Constellation or Instrument
bin#: [min, max, number of bins]
label#: string
identifies data product for bin#
data_label: list-like
contains strings identifying data product(s) to be averaged
Returns
-------
median : dictionary
2D median accessed by data_label as a function of label1 and label2
over the season delineated by bounds of passed instrument objects.
Also includes 'count' and 'avg_abs_dev' as well as the values of
the bin edges in 'bin_x' and 'bin_y'.
"""
# const is either an Instrument or a Constellation, and we want to
# iterate over it.
# If it's a Constellation, then we can do that as is, but if it's
# an Instrument, we just have to put that Instrument into something
    # that will yield that Instrument, like a list.
if isinstance(const, pysat.Instrument):
const = [const]
elif not isinstance(const, pysat.Constellation):
raise ValueError("Parameter must be an Instrument or a Constellation.")
# create bins
#// seems to create the boundaries used for sorting into bins
binx = np.linspace(bin1[0], bin1[1], bin1[2]+1)
biny = np.linspace(bin2[0], bin2[1], bin2[2]+1)
#// how many bins are used
numx = len(binx)-1
numy = len(biny)-1
#// how many different data products
numz = len(data_label)
# create array to store all values before taking median
#// the indices of the bins/data products? used for looping.
yarr = np.arange(numy)
xarr = np.arange(numx)
zarr = np.arange(numz)
#// 3d array: stores the data that is sorted into each bin? - in a deque
ans = [ [ [collections.deque() for i in xarr] for j in yarr] for k in zarr]
for inst in const:
# do loop to iterate over instrument season
#// probably iterates by date but that all depends on the
#// configuration of that particular instrument.
#// either way, it iterates over the instrument, loading successive
#// data between start and end bounds
for inst in inst:
# collect data in bins for averaging
if len(inst.data) != 0:
#// sort the data into bins (x) based on label 1
#// (stores bin indexes in xind)
xind = np.digitize(inst.data[label1], binx)-1
#// for each possible x index
for xi in xarr:
#// get the indicies of those pieces of data in that bin
xindex, = np.where(xind==xi)
if len(xindex) > 0:
#// look up the data along y (label2) at that set of indicies (a given x)
yData = inst.data.iloc[xindex]
#// digitize that, to sort data into bins along y (label2) (get bin indexes)
yind = np.digitize(yData[label2], biny)-1
#// for each possible y index
for yj in yarr:
#// select data with this y index (and we already filtered for this x index)
yindex, = np.where(yind==yj)
if len(yindex) > 0:
#// for each data product label zk
for zk in zarr:
#// take the data (already filtered by x); filter it by y and
#// select the data product, put it in a list, and extend the deque
ans[zk][yj][xi].extend( yData.ix[yindex,data_label[zk]].tolist() )
return _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz, returnData) | 0.010793 |
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = tf.ones([], dtype=x.dtype)
return tf.math.digamma(x + one) - tf.math.digamma(one) | 0.008016 |
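A quick cross-check of the identity using SciPy's digamma, so the verification does not need TensorFlow; H(4) should equal 1 + 1/2 + 1/3 + 1/4.

from scipy.special import digamma

n = 4
h_analytic = digamma(n + 1) - digamma(1)          # analytic continuation, same identity as above
h_direct = sum(1.0 / k for k in range(1, n + 1))  # 2.0833...
print(abs(h_analytic - h_direct) < 1e-12)         # True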
def stats(self, input_filepath):
'''Display time domain statistical information about the audio
channels. Audio is passed unmodified through the SoX processing chain.
Statistics are calculated and displayed for each audio channel
Unlike other Transformer methods, this does not modify the transformer
effects chain. Instead it computes statistics on the output file that
would be created if the build command were invoked.
Note: The file is downmixed to mono prior to computation.
Parameters
----------
input_filepath : str
Path to input file to compute stats on.
Returns
-------
stats_dict : dict
            Dictionary of time-domain statistics, keyed by statistic name; values are returned as strings.
See Also
--------
stat, sox.file_info
'''
effect_args = ['channels', '1', 'stats']
_, _, stats_output = self.build(
input_filepath, None, extra_args=effect_args, return_output=True
)
stats_dict = {}
lines = stats_output.split('\n')
for line in lines:
split_line = line.split()
if len(split_line) == 0:
continue
value = split_line[-1]
key = ' '.join(split_line[:-1])
stats_dict[key] = value
return stats_dict | 0.001473 |
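A minimal usage sketch, assuming this is the pysox Transformer; 'input.wav' is a placeholder path, and the 'RMS lev dB' key follows SoX's stats output, so treat both as assumptions.

import sox

tfm = sox.Transformer()
stats = tfm.stats('input.wav')      # runs the chain unmodified and parses the stats output
print(stats.get('RMS lev dB'))      # values come back as strings, e.g. '-20.23'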
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
is_raster = is_raster_layer(self.parent.layer)
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
has_unit = subcategory.get('units') or subcategory.get(
'continuous_hazard_units')
selected_layer_mode = self.selected_layermode()
# continuous
if selected_layer_mode == layer_mode_continuous and has_unit:
new_step = self.parent.step_kw_unit
# no unit and vector
elif not is_raster:
new_step = self.parent.step_kw_field
# no unit and raster
elif is_raster:
new_step = self.parent.step_kw_multi_classifications
else:
raise InvalidWizardStep
return new_step | 0.002169 |
def faulty():
'''
Display list of faulty resources
CLI Example:
.. code-block:: bash
salt '*' fmadm.faulty
'''
fmadm = _check_fmadm()
cmd = '{cmd} faulty'.format(
cmd=fmadm,
)
res = __salt__['cmd.run_all'](cmd)
result = {}
if res['stdout'] == '':
result = False
else:
result = _parse_fmadm_faulty(res['stdout'])
return result | 0.002421 |
def autocompleter():
"""Return autocompleter results"""
# set blocked engines
disabled_engines = request.preferences.engines.get_disabled()
# parse query
if PY3:
raw_text_query = RawTextQuery(request.form.get('q', b''), disabled_engines)
else:
raw_text_query = RawTextQuery(request.form.get('q', u'').encode('utf-8'), disabled_engines)
raw_text_query.parse_query()
# check if search query is set
if not raw_text_query.getSearchQuery():
return '', 400
# run autocompleter
completer = autocomplete_backends.get(request.preferences.get_value('autocomplete'))
# parse searx specific autocompleter results like !bang
raw_results = searx_bang(raw_text_query)
# normal autocompletion results only appear if max 3 inner results returned
if len(raw_results) <= 3 and completer:
# get language from cookie
language = request.preferences.get_value('language')
if not language or language == 'all':
language = 'en'
else:
language = language.split('-')[0]
# run autocompletion
raw_results.extend(completer(raw_text_query.getSearchQuery(), language))
# parse results (write :language and !engine back to result string)
results = []
for result in raw_results:
raw_text_query.changeSearchQuery(result)
# add parsed result
results.append(raw_text_query.getFullQuery())
# return autocompleter results
if request.form.get('format') == 'x-suggestions':
return Response(json.dumps([raw_text_query.query, results]),
mimetype='application/json')
return Response(json.dumps(results),
mimetype='application/json') | 0.002851 |
def export_as_file(self, filepath, hyperparameters):
"""Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
"""
if not filepath.endswith('.py'):
filepath += '.py'
file_contents = ''
file_contents += self.source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters)
file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator)
with open(filepath, 'wb') as f:
f.write(file_contents.encode('utf8')) | 0.008097 |
def col_widths(self):
# type: () -> defaultdict
"""Get MAX possible width of each column in the table.
:return: defaultdict
"""
_widths = defaultdict(int)
all_rows = [self.headers]
all_rows.extend(self._rows)
for row in all_rows:
for idx, col in enumerate(row):
_col_l = len(col)
if _col_l > _widths[idx]:
_widths[idx] = _col_l
return _widths | 0.006224 |
def recipe_zheng17(adata, n_top_genes=1000, log=True, plot=False, copy=False):
"""Normalization and filtering as of [Zheng17]_.
Reproduces the preprocessing of [Zheng17]_ - the Cell Ranger R Kit of 10x
Genomics.
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
The recipe runs the following steps
.. code:: python
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
sc.pp.scale(adata) # scale to unit variance and shift to zero mean
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_top_genes : `int`, optional (default: 1000)
Number of genes to keep.
log : `bool`, optional (default: `True`)
Take logarithm.
plot : `bool`, optional (default: `True`)
Show a plot of the gene dispersion vs. mean relation.
copy : `bool`, optional (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`.
"""
logg.info('running recipe zheng17', reset=True)
if copy: adata = adata.copy()
pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count
pp.normalize_per_cell(adata, # normalize with total UMI count per cell
key_n_counts='n_counts_all')
filter_result = filter_genes_dispersion(
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
if plot:
from ..plotting import _preprocessing as ppp # should not import at the top of the file
ppp.filter_genes_dispersion(filter_result, log=True)
# actually filter the genes, the following is the inplace version of
# adata = adata[:, filter_result.gene_subset]
adata._inplace_subset_var(filter_result.gene_subset) # filter genes
pp.normalize_per_cell(adata) # renormalize after filtering
if log: pp.log1p(adata) # log transform: X = log(X + 1)
pp.scale(adata)
logg.info(' finished', time=True)
return adata if copy else None | 0.004101 |
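A usage sketch, assuming this is scanpy's pp.recipe_zheng17; the bundled pbmc3k dataset is just one convenient example input.

import scanpy as sc

adata = sc.datasets.pbmc3k()                   # small public 10x dataset (downloaded on first use)
sc.pp.recipe_zheng17(adata, n_top_genes=1000)  # filter, normalize, select HVGs, log1p, scale
print(adata.shape)                             # (n_cells, 1000)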
def value_compare(left, right, ordering=1):
"""
SORT VALUES, NULL IS THE LEAST VALUE
:param left: LHS
:param right: RHS
:param ordering: (-1, 0, 1) TO AFFECT SORT ORDER
:return: The return value is negative if x < y, zero if x == y and strictly positive if x > y.
"""
try:
ltype = left.__class__
rtype = right.__class__
if ltype in list_types or rtype in list_types:
if left == None:
return ordering
elif right == None:
return - ordering
left = listwrap(left)
right = listwrap(right)
for a, b in zip(left, right):
c = value_compare(a, b) * ordering
if c != 0:
return c
if len(left) < len(right):
return - ordering
elif len(left) > len(right):
return ordering
else:
return 0
if ltype is float and isnan(left):
left = None
ltype = none_type
if rtype is float and isnan(right):
right = None
rtype = none_type
null_order = ordering*10
ltype_num = TYPE_ORDER.get(ltype, null_order)
rtype_num = TYPE_ORDER.get(rtype, null_order)
type_diff = ltype_num - rtype_num
if type_diff != 0:
return ordering if type_diff > 0 else -ordering
if ltype_num == null_order:
return 0
elif ltype is builtin_tuple:
for a, b in zip(left, right):
c = value_compare(a, b)
if c != 0:
return c * ordering
return 0
elif ltype in data_types:
for k in sorted(set(left.keys()) | set(right.keys())):
c = value_compare(left.get(k), right.get(k)) * ordering
if c != 0:
return c
return 0
elif left > right:
return ordering
elif left < right:
return -ordering
else:
return 0
except Exception as e:
Log.error("Can not compare values {{left}} to {{right}}", left=left, right=right, cause=e) | 0.002255 |
def nanmean(values, axis=None, skipna=True, mask=None):
"""
Compute the mean of the element along an axis ignoring NaNs
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanmean(s)
1.5
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, 0, mask=mask)
dtype_sum = dtype_max
dtype_count = np.float64
if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or
is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, 'ndim', False):
with np.errstate(all="ignore"):
# suppress division by zero warnings
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype) | 0.000661 |