def nlsq_fit(x, y, dy, func, params_init, verbose=False, **kwargs):
"""Perform a non-linear least squares fit
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. In the array
case, if any of its elements is NaN, the whole array is treated as
NaN (= no weighting)
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
`verbose`: if various messages useful for debugging should be printed on
stdout.
other optional keyword arguments will be passed to leastsq().
Outputs: p, dp, statdict where
p: list of fitted values of par1, par2 etc.
dp: list of estimated errors
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'R2': Coefficient of determination
'num_func_eval': number of function evaluations during fit.
        'func_value': the function evaluated at the best-fit parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, scipy.optimize.leastsq() is used.
"""
if verbose:
t0 = time.monotonic()
print("nlsq_fit starting.")
else:
t0 = 0
func_orig = func
params_init_orig = params_init
func, params_init = hide_fixedparams(func_orig, params_init_orig)
    if (dy is None) or np.isnan(dy).sum() > 0 or (dy <= 0).sum() > 0:
if verbose:
print("nlsq_fit: no weighting")
dy = None
def objectivefunc(params, x, y, dy):
"""The target function for leastsq()."""
if dy is None:
return (func(x, *(params.tolist())) - y)
else:
return (func(x, *(params.tolist())) - y) / dy
# do the fitting
if verbose:
print("nlsq_fit: now doing the fitting...")
t1 = time.monotonic()
else:
t1 = 0
par, cov, infodict, mesg, ier = leastsq(objectivefunc,
np.array(params_init),
(x, y, dy), full_output=True,
**kwargs)
if verbose:
print("nlsq_fit: fitting done in %.2f seconds." % (time.monotonic() - t1))
print("nlsq_fit: status from scipy.optimize.leastsq(): %d (%s)" % (ier, mesg))
print("nlsq_fit: extracting statistics.")
# test if the covariance was singular (cov is None)
if cov is None:
cov = np.ones((len(par), len(par))) * np.nan # set it to a NaN matrix
# calculate the Pearson's R^2 parameter (coefficient of determination)
if dy is None:
sserr = np.sum(((func(x, *(par.tolist())) - y)) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2)
else:
sserr = np.sum(((func(x, *(par.tolist())) - y) / dy) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2 / dy ** 2)
r2 = 1 - sserr / sstot
# assemble the statistics dictionary
statdict = {'DoF' : len(x) - len(par), # degrees of freedom
'Chi2' : (infodict['fvec'] ** 2).sum(),
'R2' : r2,
'num_func_eval' : infodict['nfev'],
'func_value' : func(x, *(par.tolist())),
'message' : mesg,
'error_flag' : ier,
}
statdict['Chi2_reduced'] = statdict['Chi2'] / statdict['DoF']
statdict['Covariance'] = cov * statdict['Chi2_reduced']
par, statdict['Covariance'] = resubstitute_fixedparams(par, params_init_orig, statdict['Covariance'])
# calculate the estimated errors of the fit parameters
dpar = np.sqrt(statdict['Covariance'].diagonal())
# Pearson's correlation coefficients (usually 'r') in a matrix.
statdict['Correlation_coeffs'] = statdict['Covariance'] / np.outer(dpar,
dpar)
if verbose:
print("nlsq_fit: returning with results.")
print("nlsq_fit: total time: %.2f sec." % (time.monotonic() - t0))
    return par, dpar, statdict
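# --- Illustrative usage sketch (not part of the original source) ---
# A minimal example of calling nlsq_fit() above, assuming it is importable
# together with its helpers and numpy; the model function and data are made up.
def _example_nlsq_fit():
    import numpy as np

    def line(x, a, b):
        # simple two-parameter model: y = a*x + b
        return a * x + b

    x = np.linspace(0.0, 10.0, 50)
    y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.shape)
    # dy=None -> unweighted fit; initial guess [a0, b0] = [1.0, 0.0]
    p, dp, stats = nlsq_fit(x, y, None, line, [1.0, 0.0])
    # p should come out close to [2.0, 1.0]; dp holds the estimated parameter
    # errors; stats['R2'] and stats['Chi2_reduced'] summarize the fit quality.
    return p, dp, stats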
def sofia_process_text():
"""Process text with Sofia and return INDRA Statements."""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
auth = body.get('auth')
sp = sofia.process_text(text, auth=auth)
    return _stmts_from_proc(sp)
def prepare_inventory(self):
"""
Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
"""
if self.inventory is None:
            self.inventory = os.path.join(self.private_data_dir, "inventory")
def _querystring(self):
"""Additional keyword arguments"""
kw = {"studyoid": self.studyoid}
if self.location_oid is not None:
kw["locationoid"] = self.location_oid
        return kw
def setBatchSize(self, val):
"""
Sets the value of :py:attr:`batchSize`.
"""
self._paramMap[self.batchSize] = val
pythonBigDL_method_name = "setBatchSize" + self.__class__.__name__
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
        return self
def _get_cron_cmdstr(path, user=None):
'''
Returns a format string, to be used to build a crontab command.
'''
if user:
cmd = 'crontab -u {0}'.format(user)
else:
cmd = 'crontab'
    return '{0} {1}'.format(cmd, path)
def metapolicy(self, permitted):
"""
Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``.
"""
if permitted not in VALID_SITE_CONTROL:
raise TypeError(SITE_CONTROL_ERROR.format(permitted))
if permitted == SITE_CONTROL_NONE:
# Metapolicy 'none' means no access is permitted.
self.domains = {}
self.header_domains = {}
self.identities = []
        self.site_control = permitted
def create_game(
self,
map_name,
bot_difficulty=sc_pb.VeryEasy,
bot_race=sc_common.Random,
bot_first=False):
"""Create a game, one remote agent vs the specified bot.
Args:
map_name: The map to use.
bot_difficulty: The difficulty of the bot to play against.
bot_race: The race for the bot.
bot_first: Whether the bot should be player 1 (else is player 2).
"""
self._controller.ping()
# Form the create game message.
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if map_name not in self._saved_maps:
self._controller.save_map(map_inst.path, map_data)
self._saved_maps.add(map_name)
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
disable_fog=False)
# Set up for one bot, one agent.
if not bot_first:
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(
type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)
if bot_first:
create.player_setup.add(type=sc_pb.Participant)
# Create the game.
    self._controller.create_game(create)
def _get_workflow_with_uuid_or_name(uuid_or_name, user_uuid):
"""Get Workflow from database with uuid or name.
:param uuid_or_name: String representing a valid UUIDv4 or valid
Workflow name. Valid name contains only ASCII alphanumerics.
Name might be in format 'reana.workflow.123' with arbitrary
number of dot-delimited substrings, where last substring specifies
the run number of the workflow this workflow name refers to.
If name does not contain a valid run number, but it is a valid name,
workflow with latest run number of all the workflows with this name
is returned.
:type uuid_or_name: String
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
# Check existence
if not uuid_or_name:
raise ValueError('No Workflow was specified.')
# Check validity
try:
uuid_or_name.encode('ascii')
except UnicodeEncodeError:
# `workflow_name` contains something else than just ASCII.
raise ValueError('Workflow name {} is not valid.'.format(uuid_or_name))
# Check if UUIDv4
try:
# is_uuid = UUID(uuid_or_name, version=4)
is_uuid = UUID('{' + uuid_or_name + '}', version=4)
except (TypeError, ValueError):
is_uuid = None
if is_uuid:
# `uuid_or_name` is an UUIDv4.
# Search with it since it is expected to be unique.
return _get_workflow_by_uuid(uuid_or_name)
else:
        # `uuid_or_name` is not a UUIDv4. Expect it is a name.
# Expect name might be in format 'reana.workflow.123' with arbitrary
# number of dot-delimited substring, where last substring specifies
# the run_number of the workflow this workflow name refers to.
# Possible candidates for names are e.g. :
# 'workflow_name' -> ValueError
# 'workflow.name' -> True, True
# 'workflow.name.123' -> True, True
# '123.' -> True, False
# '' -> ValueError
# '.123' -> False, True
# '..' -> False, False
# '123.12' -> True, True
# '123.12.' -> True, False
# Try to split the dot-separated string.
try:
workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
except ValueError:
# Couldn't split. Probably not a dot-separated string.
# -> Search with `uuid_or_name`
return _get_workflow_by_name(uuid_or_name, user_uuid)
# Check if `run_number` was specified
if not run_number:
# No `run_number` specified.
# -> Search by `workflow_name`
return _get_workflow_by_name(workflow_name, user_uuid)
# `run_number` was specified.
# Check `run_number` is valid.
if not run_number.isdigit():
# `uuid_or_name` was split, so it is a dot-separated string
# but it didn't contain a valid `run_number`.
# Assume that this dot-separated string is the name of
# the workflow and search with it.
return _get_workflow_by_name(uuid_or_name, user_uuid)
# `run_number` is valid.
# Search by `run_number` since it is a primary key.
workflow = Workflow.query.filter(
Workflow.name == workflow_name,
Workflow.run_number == run_number,
Workflow.owner_id == user_uuid).\
one_or_none()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_name, run_number))
    return workflow
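# --- Illustrative sketch (not from the original source) ---
# The name/run-number splitting used above, isolated as a tiny helper so the
# behaviour for the candidate strings listed in the comments is easy to check.
def _split_workflow_name(uuid_or_name):
    """Return (workflow_name, run_number or None) following the logic above."""
    try:
        workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
    except ValueError:
        # No dot at all: the whole string is the name.
        return uuid_or_name, None
    if not run_number:
        # Trailing dot, e.g. '123.': fall back to the part before the dot.
        return workflow_name, None
    if run_number.isdigit():
        return workflow_name, int(run_number)
    # Dot present but the last part is not a number ('workflow.name'):
    # treat the full string as the name.
    return uuid_or_name, None

# _split_workflow_name('workflow.name.123') -> ('workflow.name', 123)
# _split_workflow_name('workflow.name')     -> ('workflow.name', None)
# _split_workflow_name('workflow_name')     -> ('workflow_name', None)
# _split_workflow_name('123.')              -> ('123', None)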
def rotate(self, angle, center=None):
"""Rotate the shape, in-place.
Parameters
----------
angle : float
Angle to rotate, in radians counter-clockwise.
center : array-like, optional
Point about which to rotate.
If not passed, the center of the shape will be used.
"""
args = [angle]
if center is not None:
args.extend(center)
self.poly.rotate(*args)
        return self
def compute(self, gsim, num_events, seed=None):
"""
:param gsim: a GSIM instance
:param num_events: the number of seismic events
:param seed: a random seed or None
:returns:
a 32 bit array of shape (num_imts, num_sites, num_events) and
two arrays with shape (num_imts, num_events): sig for stddev_inter
and eps for the random part
"""
try: # read the seed from self.rupture.serial
seed = seed or self.rupture.serial
except AttributeError:
pass
if seed is not None:
numpy.random.seed(seed)
result = numpy.zeros((len(self.imts), len(self.sids), num_events), F32)
sig = numpy.zeros((len(self.imts), num_events), F32)
eps = numpy.zeros((len(self.imts), num_events), F32)
for imti, imt in enumerate(self.imts):
if isinstance(gsim, MultiGMPE):
gs = gsim[str(imt)] # MultiGMPE
else:
gs = gsim # regular GMPE
try:
result[imti], sig[imti], eps[imti] = self._compute(
None, gs, num_events, imt)
except Exception as exc:
raise exc.__class__(
'%s for %s, %s, srcidx=%s' % (exc, gs, imt, self.srcidx)
).with_traceback(exc.__traceback__)
        return result, sig, eps
def get_modules(self, type_name):
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
'''
modules = []
for module in self:
if module.__class__.__name__ == type_name:
modules.append(module)
        return modules
def fetch(self):
"""
Fetch a SyncListItemInstance
:returns: Fetched SyncListItemInstance
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
        )
def add_head(self, *args, **kwargs):
"""
Shortcut for add_route with method HEAD
"""
        return self.add_route(hdrs.METH_HEAD, *args, **kwargs)
def get_families_by_ids(self, *args, **kwargs):
"""Pass through to provider FamilyLookupSession.get_families_by_ids"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids
catalogs = self._get_provider_session('family_lookup_session').get_families_by_ids(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Family(self._provider_manager, cat, self._runtime, self._proxy))
        return FamilyList(cat_list)
def new_multiifo_output_list_opt(self, opt, ifos, analysis_time, extension,
tags=None, store_file=None,
use_tmp_subdirs=False):
""" Add an option that determines a list of outputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
.....
File names are created internally from the provided extension and
analysis time.
"""
if tags is None:
tags = []
all_tags = copy.deepcopy(self.executable.tags)
for tag in tags:
if tag not in all_tags:
all_tags.append(tag)
output_files = FileList([])
store_file = store_file if store_file is not None \
else self.executable.retain_files
for ifo in ifos:
curr_file = File(ifo, self.executable.name, analysis_time,
extension=extension, store_file=store_file,
directory=self.executable.out_dir, tags=all_tags,
use_tmp_subdirs=use_tmp_subdirs)
output_files.append(curr_file)
        self.add_multiifo_output_list_opt(opt, output_files)
def encode_varint_1(num):
""" Encode an integer to a varint presentation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
bytearray: Encoded presentation of integer with length from 1 to 10
bytes
"""
# Shift sign to the end of number
num = (num << 1) ^ (num >> 63)
# Max 10 bytes. We assert those are allocated
buf = bytearray(10)
for i in range(10):
# 7 lowest bits from the number and set 8th if we still have pending
# bits left to encode
buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
num = num >> 7
if num == 0:
break
else:
        # Max size of an encoded 64-bit integer is 10 bytes for unsigned values
raise ValueError("Out of double range")
    return buf[:i + 1]
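# --- Illustrative values (not part of the original source) ---
# The zigzag step maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... so that
# small negative numbers stay small before the 7-bit varint packing:
#
#   encode_varint_1(0)   == bytearray(b'\x00')
#   encode_varint_1(-1)  == bytearray(b'\x01')
#   encode_varint_1(1)   == bytearray(b'\x02')
#   encode_varint_1(150) == bytearray(b'\xac\x02')   # zigzag(150) = 300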
def add_job(session, command_line, name = 'job', dependencies = [], array = None, exec_dir=None, log_dir = None, stop_on_failure = False, **kwargs):
"""Helper function to create a job, add the dependencies and the array jobs."""
job = Job(command_line=command_line, name=name, exec_dir=exec_dir, log_dir=log_dir, array_string=array, stop_on_failure=stop_on_failure, kwargs=kwargs)
session.add(job)
session.flush()
session.refresh(job)
# by default id and unique id are identical, but the id might be overwritten later on
job.id = job.unique
for d in dependencies:
if d == job.unique:
logger.warn("Adding self-dependency of job %d is not allowed" % d)
continue
depending = list(session.query(Job).filter(Job.unique == d))
if len(depending):
session.add(JobDependence(job.unique, depending[0].unique))
else:
logger.warn("Could not find dependent job with id %d in database" % d)
if array:
(start, stop, step) = array
# add array jobs
for i in range(start, stop+1, step):
session.add(ArrayJob(i, job.unique))
session.commit()
  return job
def add_cut(problem, indicators, bound, Constraint):
"""
Add an integer cut to the problem.
Ensure that the same solution involving these indicator variables cannot be
found by enforcing their sum to be less than before.
Parameters
----------
problem : optlang.Model
Specific optlang interface Model instance.
indicators : iterable
Binary indicator `optlang.Variable`s.
bound : int
Should be one less than the sum of indicators. Corresponds to P - 1 in
equation (14) in [1]_.
Constraint : optlang.Constraint
Constraint class for a specific optlang interface.
References
----------
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiometric Inconsistencies in Biomolecular
Models."
Bioinformatics 24, no. 19 (2008): 2245.
"""
cut = Constraint(sympy.Add(*indicators), ub=bound)
problem.add(cut)
    return cut
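# --- Illustrative usage sketch (not from the original source) ---
# Assumes the optlang package with any installed solver; the variable names
# are made up. After a solution in which all three binary indicators were
# active, the added cut y1 + y2 + y3 <= 2 excludes that exact combination.
def _example_add_cut():
    from optlang import Constraint, Model, Variable

    indicators = [Variable('y%d' % i, type='binary') for i in (1, 2, 3)]
    problem = Model()
    problem.add(indicators)
    # bound = (number of active indicators) - 1, i.e. P - 1 in equation (14)
    return add_cut(problem, indicators, len(indicators) - 1, Constraint)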
def datatype(self, value):
"""
Args:
value (string): 'uint8', 'uint16', 'uint64'
Raises:
ValueError
"""
self._datatype = self.validate_datatype(value)
        self._cutout_ready = True
def start(self):
"""
Starts this VMware VM.
"""
if self.status == "started":
return
if (yield from self.is_running()):
raise VMwareError("The VM is already running in VMware")
ubridge_path = self.ubridge_path
if not ubridge_path or not os.path.isfile(ubridge_path):
raise VMwareError("ubridge is necessary to start a VMware VM")
yield from self._start_ubridge()
self._read_vmx_file()
# check if there is enough RAM to run
if "memsize" in self._vmx_pairs:
self.check_available_ram(int(self._vmx_pairs["memsize"]))
self._set_network_options()
self._set_serial_console()
self._write_vmx_file()
if self._headless:
yield from self._control_vm("start", "nogui")
else:
yield from self._control_vm("start")
try:
if self._ubridge_hypervisor:
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._add_ubridge_connection(nio, adapter_number)
yield from self._start_console()
except VMwareError:
yield from self.stop()
raise
if self._get_vmx_setting("vhv.enable", "TRUE"):
self._hw_virtualization = True
self._started = True
self.status = "started"
        log.info("VMware VM '{name}' [{id}] started".format(name=self.name, id=self.id))
def CheckForeignKeysWrapper(conn):
"""Migration wrapper that checks foreign keys.
Note that this may raise different exceptions depending on the
underlying database API.
"""
yield
cur = conn.cursor()
cur.execute('PRAGMA foreign_key_check')
errors = cur.fetchall()
if errors:
        raise ForeignKeyError(errors)
def get_unicode_str(obj):
"""Makes sure obj is a unicode string."""
if isinstance(obj, six.text_type):
return obj
if isinstance(obj, six.binary_type):
return obj.decode("utf-8", errors="ignore")
    return six.text_type(obj)
def __get_segment_types(self, element):
"""
given a <segment> or <group> element, returns its segment type and the
segment type of its parent (i.e. its dominating node)
Parameters
----------
element : ??? etree Element
Returns
-------
segment_type : str
'nucleus', 'satellite' or 'isolated' (unconnected segment, e.g. a
news headline) or 'span' (iff the segment type is currently
unknown -- i.e. ``relname`` is ``span``)
parent_segment_type : str or None
'nucleus', 'satellite' or None (e.g. for the root group node)
"""
        if 'parent' not in element.attrib:
if element.tag == 'segment':
segment_type = 'isolated'
parent_segment_type = None
else: # element.tag == 'group'
segment_type = 'span'
parent_segment_type = None
return segment_type, parent_segment_type
# ``relname`` either contains the name of an RST relation or
# the string ``span`` (iff the segment is dominated by a span
# node -- a horizontal line spanning one or more segments/groups
# in an RST diagram). ``relname`` is '', if the segment is
# unconnected.
relname = element.attrib.get('relname', '')
# we look up, if ``relname`` represents a regular, binary RST
# relation or a multinucular relation. ``reltype`` is '',
# if ``relname`` is ``span`` (i.e. a span isn't an RST relation).
reltype = self.relations.get(relname, '')
if reltype == 'rst':
segment_type = 'satellite'
parent_segment_type = 'nucleus'
elif reltype == 'multinuc':
segment_type = 'nucleus'
            parent_segment_type = None  # we don't know its type, yet
else: # reltype == ''
# the segment is of unknown type, it is dominated by
# a span group node
segment_type = 'span'
parent_segment_type = 'span'
        return segment_type, parent_segment_type
async def metrics(self, offs, size=None):
'''
Yield metrics rows starting at offset.
Args:
offs (int): The index offset.
size (int): The maximum number of records to yield.
Yields:
((int, dict)): An index offset, info tuple for metrics.
'''
for i, (indx, item) in enumerate(self._metrics.iter(offs)):
if size is not None and i >= size:
return
            yield indx, item
def get_avatar(self):
"""Gets the asset.
return: (osid.repository.Asset) - the asset
raise: IllegalState - ``has_avatar()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['avatarId']):
raise errors.IllegalState('this Resource has no avatar')
mgr = self._get_provider_manager('REPOSITORY')
if not mgr.supports_asset_lookup():
raise errors.OperationFailed('Repository does not support Asset lookup')
lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_repository_view()
osid_object = lookup_session.get_asset(self.get_avatar_id())
        return osid_object
def quiet_reset_backend(self, reset_interps=True):
"""
Doesn't update plots or logger or any visable data but resets all
measurement data, hierarchy data, and optionally resets
intepretations.
Parameters
----------
reset_interps : bool to tell the function to reset fits or
not.
"""
new_Data_info = self.get_data_info()
new_Data, new_Data_hierarchy = self.get_data()
if not new_Data:
print("Data read in failed when reseting, aborting reset")
return
else:
self.Data, self.Data_hierarchy, self.Data_info = new_Data, new_Data_hierarchy, new_Data_info
if reset_interps:
self.pmag_results_data = {}
for level in ['specimens', 'samples', 'sites', 'locations', 'study']:
self.pmag_results_data[level] = {}
self.high_level_means = {}
high_level_means = {}
for high_level in ['samples', 'sites', 'locations', 'study']:
if high_level not in list(self.high_level_means.keys()):
self.high_level_means[high_level] = {}
# get list of sites
self.locations = list(self.Data_hierarchy['locations'].keys())
        self.locations.sort()  # sort list of locations
# get list of sites
self.sites = list(self.Data_hierarchy['sites'].keys())
self.sites.sort(key=spec_key_func) # get list of sites
self.samples = [] # sort the samples within each site
for site in self.sites:
self.samples.extend(
sorted(self.Data_hierarchy['sites'][site]['samples'], key=spec_key_func))
self.specimens = [] # sort the specimens within each sample
for samp in self.samples:
self.specimens.extend(
sorted(self.Data_hierarchy['samples'][samp]['specimens'], key=spec_key_func))
# --------------------------------------------------------------------
# initialize first specimen in list as current specimen
# --------------------------------------------------------------------
if self.s in self.specimens:
pass
elif len(self.specimens) > 0:
self.select_specimen(str(self.specimens[0]))
else:
self.select_specimen("")
try:
self.sample = self.Data_hierarchy['sample_of_specimen'][self.s]
except KeyError:
self.sample = ""
try:
self.site = self.Data_hierarchy['site_of_specimen'][self.s]
except KeyError:
self.site = ""
if self.Data and reset_interps:
self.update_pmag_tables()
if self.ie_open:
            self.ie.specimens_list = self.specimens
def arc(pRA, pDecl, sRA, sDecl, mcRA, lat):
""" Returns the arc of direction between a Promissor
and Significator. It uses the generic proportional
semi-arc method.
"""
pDArc, pNArc = utils.dnarcs(pDecl, lat)
sDArc, sNArc = utils.dnarcs(sDecl, lat)
# Select meridian and arcs to be used
# Default is MC and Diurnal arcs
mdRA = mcRA
sArc = sDArc
pArc = pDArc
if not utils.isAboveHorizon(sRA, sDecl, mcRA, lat):
# Use IC and Nocturnal arcs
mdRA = angle.norm(mcRA + 180)
sArc = sNArc
pArc = pNArc
# Promissor and Significator distance to meridian
pDist = angle.closestdistance(mdRA, pRA)
sDist = angle.closestdistance(mdRA, sRA)
# Promissor should be after significator (in degrees)
if pDist < sDist:
pDist += 360
# Meridian distances proportional to respective semi-arcs
sPropDist = sDist / (sArc / 2.0)
pPropDist = pDist / (pArc / 2.0)
# The arc is how much of the promissor's semi-arc is
# needed to reach the significator
    return (pPropDist - sPropDist) * (pArc / 2.0)
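# --- Worked example (not part of the original source) ---
# With made-up values: promissor semi-arc pArc/2 = 60 deg, significator
# semi-arc sArc/2 = 90 deg, meridian distances pDist = 45 deg, sDist = 30 deg:
#   sPropDist = 30 / 90 = 0.333...
#   pPropDist = 45 / 60 = 0.75
#   arc = (0.75 - 0.333...) * 60 = 25 deg
# i.e. the promissor needs 25 degrees of its own semi-arc to reach the
# proportional position of the significator.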
def _cmp_key(self, obj=None):
"""Comparison key for sorting results from all linters.
The sort should group files and lines from different linters to make it
easier for refactoring.
"""
if not obj:
obj = self
line_nr = int(obj.line_nr) if obj.line_nr else 0
col = int(obj.col) if obj.col else 0
        return (obj.path, line_nr, col, obj.msg)
def activate(self, engine):
"""
Activates the Component.
:param engine: Engine to attach the Component to.
:type engine: QObject
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))
self.__engine = engine
self.__settings = self.__engine.settings
self.__settings_section = self.name
self.__preferences_manager = self.__engine.components_manager["factory.preferences_manager"]
self.__tcp_server = TCPServer(self.__address, self.__port, RequestsStackDataHandler)
self.activated = True
        return True
def _setup_sentry_client(context):
"""Produce and configure the sentry client."""
# get_secret will be deprecated soon
dsn = os.environ.get("SENTRY_DSN")
try:
client = raven.Client(dsn, sample_rate=SENTRY_SAMPLE_RATE)
client.user_context(_sentry_context_dict(context))
return client
    except Exception:
rlogger.error("Raven client error", exc_info=True)
        return None
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
command): # pylint: disable=W0613
'''
Read in the generated libvirt keys
'''
key_dir = os.path.join(
__opts__['pki_dir'],
'libvirt',
minion_id)
cacert = os.path.join(__opts__['pki_dir'],
'libvirt',
'cacert.pem')
if not os.path.isdir(key_dir):
# No keys have been generated
gen_hyper_keys(minion_id,
pillar.get('ext_pillar_virt.country', 'US'),
pillar.get('ext_pillar_virt.st', 'Utah'),
pillar.get('ext_pillar_virt.locality',
'Salt Lake City'),
pillar.get('ext_pillar_virt.organization', 'Salted'),
pillar.get('ext_pillar_virt.expiration_days', '365')
)
ret = {}
for key in os.listdir(key_dir):
if not key.endswith('.pem'):
continue
fn_ = os.path.join(key_dir, key)
with salt.utils.files.fopen(fn_, 'r') as fp_:
ret['libvirt.{0}'.format(key)] = \
salt.utils.stringutils.to_unicode(fp_.read())
with salt.utils.files.fopen(cacert, 'r') as fp_:
ret['libvirt.cacert.pem'] = \
salt.utils.stringutils.to_unicode(fp_.read())
    return ret
def end_step(self, lineno, timestamp=None, result_code=None):
"""Fill in the current step's summary and update the state to show the current step has ended."""
self.state = self.STATES['step_finished']
step_errors = self.sub_parser.get_artifact()
step_error_count = len(step_errors)
if step_error_count > settings.PARSER_MAX_STEP_ERROR_LINES:
step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES]
self.artifact["errors_truncated"] = True
self.current_step.update({
"finished": timestamp,
"finished_linenumber": lineno,
# Whilst the result code is present on both the start and end buildbot-style step
# markers, for Taskcluster logs the start marker line lies about the result, since
# the log output is unbuffered, so Taskcluster does not know the real result at
# that point. As such, we only set the result when ending a step.
"result": self.RESULT_DICT.get(result_code, "unknown"),
"errors": step_errors
})
# reset the sub_parser for the next step
        self.sub_parser.clear()
def make_g2p_id(self):
"""
Make an association id for phenotypic associations that is defined by:
source of association +
(Annot subject) +
relationship +
phenotype/disease +
environment +
start stage +
end stage
:return:
"""
attributes = [self.environment_id, self.start_stage_id, self.end_stage_id]
assoc_id = self.make_association_id(
self.definedby, self.entity_id, self.rel, self.phenotype_id, attributes)
        return assoc_id
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:
"""
Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which
means that the API call returns just a job id. The actually expected API response is postponed and a specific
asyncJobResults API has to be polled using the job id to get the final result once the API call has been
processed.
:param response: The response returned by the aiohttp call.
:type response: aiohttp.client_reqrep.ClientResponse
:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API
until the asynchronous API call has been processed
:type await_final_result: bool
:return: Dictionary containing the JSON response of the API call
:rtype: dict
"""
try:
data = await response.json()
except aiohttp.client_exceptions.ContentTypeError:
text = await response.text()
logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text))
raise CloudStackClientException(message="Could not decode content. Server did not return json content!")
else:
data = self._transform_data(data)
if response.status != 200:
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode", response.status),
error_text=data.get("errortext"),
response=data)
while await_final_result and ('jobid' in data):
await asyncio.sleep(self.async_poll_latency)
data = await self.queryAsyncJobResult(jobid=data['jobid'])
if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls
if not data['jobresultcode']: # exit code is zero
try:
return data['jobresult']
except KeyError:
pass
logging.debug("Async CloudStack call returned {}".format(str(data)))
raise CloudStackClientException(message="Async CloudStack call failed!",
error_code=data.get("errorcode"),
error_text=data.get("errortext"),
response=data)
            return data
def bytes(num, check_result=False):
"""
Returns num bytes of cryptographically strong pseudo-random
    bytes. If check_result is True, raises error if PRNG is not
seeded enough
"""
if num <= 0:
raise ValueError("'num' should be > 0")
buf = create_string_buffer(num)
result = libcrypto.RAND_bytes(buf, num)
if check_result and result == 0:
raise RandError("Random Number Generator not seeded sufficiently")
return buf.raw[:num] | 0.002079 |
def full_task(self, token_id, presented_pronunciation, pronunciation, pronunciation_probability,
warn=True, default=True):
"""Provide the prediction of the full task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param token_id: The token for which the prediction is provided
:param pronunciation: The pronunciation for which the prediction is being made (as a list of strings
or space separated string)
:param pronunciation_probability: The probability of the pronunciation for the given token
:param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
:param default: Set to False in order to avoid generating the default probabilities
"""
if pronunciation_probability is not None and not 0. < pronunciation_probability < 1. and warn:
logging.warning('Setting a probability of [{}] to pronunciation [{}] for token [{}].\n '
'Using probabilities of 0.0 or 1.0 '
'may lead to likelihoods of -Infinity'.format(pronunciation_probability,
pronunciation,
token_id))
key = pronunciation
if isinstance(key, list):
if not all([isinstance(phoneme, basestring) for phoneme in key]):
raise ValueError('The pronunciation must be of type string (a sequence of space separated phonemes) '
'or of type list (containing phonemes of type strings).'
'User supplied: {}'.format(key))
key = ' '.join(pronunciation)
default_preds = self._full_default(presented_pronunciation) if default else {}
self['tokens'].setdefault(token_id, {}) \
.setdefault('full', default_preds)
if key is not None:
if pronunciation_probability is not None:
self['tokens'][token_id]['full'][key] = pronunciation_probability
else:
if key in default_preds:
self['tokens'][token_id]['full'][key] = default_preds[key]
else:
                    self['tokens'][token_id]['full'].pop(key)
async def stations(self, city: str, state: str, country: str) -> list:
"""Return a list of supported stations in a city."""
data = await self._request(
'get',
'stations',
params={
'city': city,
'state': state,
'country': country
})
        return [station for station in data['data']]
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
        return self
def run(self):
"""
Run dbt for the query, based on the graph.
"""
self._runtime_initialize()
if len(self._flattened_nodes) == 0:
logger.warning("WARNING: Nothing to do. Try checking your model "
"configs and model specification args")
return []
else:
logger.info("")
selected_uids = frozenset(n.unique_id for n in self._flattened_nodes)
result = self.execute_with_hooks(selected_uids)
result.write(self.result_path())
self.task_end_messages(result.results)
        return result.results
def get_scan_parameters_table_from_meta_data(meta_data_array, scan_parameters=None):
    '''Takes the meta data array and returns the scan parameter values as a view of a numpy array containing only the parameter data.
Parameters
----------
meta_data_array : numpy.ndarray
The array with the scan parameters.
scan_parameters : list of strings
The name of the scan parameters to take. If none all are used.
Returns
-------
    numpy.ndarray
'''
if scan_parameters is None:
try:
last_not_parameter_column = meta_data_array.dtype.names.index('error_code') # for interpreted meta_data
except ValueError:
return
        if last_not_parameter_column == len(meta_data_array.dtype.names) - 1:  # no scan parameter columns present
return
# http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array
scan_par_data = {name: meta_data_array.dtype.fields[name] for name in meta_data_array.dtype.names[last_not_parameter_column + 1:]}
else:
scan_par_data = collections.OrderedDict()
for name in scan_parameters:
scan_par_data[name] = meta_data_array.dtype.fields[name]
    return np.ndarray(meta_data_array.shape, np.dtype(scan_par_data), meta_data_array, 0, meta_data_array.strides)
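# --- Illustrative usage sketch (not from the original source) ---
# The column names here ('index', 'error_code', 'PlsrDAC') are made up; the
# only behaviour relied on is the one in the code above, namely that every
# column after 'error_code' is treated as a scan parameter.
def _example_scan_parameters_table():
    meta = np.zeros(3, dtype=[('index', np.uint32),
                              ('error_code', np.uint16),
                              ('PlsrDAC', np.int32)])
    meta['PlsrDAC'] = [10, 20, 30]
    scan_pars = get_scan_parameters_table_from_meta_data(meta)
    # scan_pars.dtype.names == ('PlsrDAC',) and the data is a view, not a copy
    return scan_pars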
def create_firewall_rule(protocol, action, profile=None, **kwargs):
'''
Creates a new firewall rule
CLI Example:
.. code-block:: bash
salt '*' neutron.create_firewall_rule protocol action
tenant_id=TENANT_ID name=NAME description=DESCRIPTION ip_version=IP_VERSION
source_ip_address=SOURCE_IP_ADDRESS destination_ip_address=DESTINATION_IP_ADDRESS source_port=SOURCE_PORT
destination_port=DESTINATION_PORT shared=SHARED enabled=ENABLED
:param protocol: Protocol for the firewall rule, choose "tcp","udp","icmp" or "None".
:param action: Action for the firewall rule, choose "allow" or "deny".
:param tenant_id: The owner tenant ID. (Optional)
:param name: Name for the firewall rule. (Optional)
:param description: Description for the firewall rule. (Optional)
:param ip_version: IP protocol version, default: 4. (Optional)
:param source_ip_address: Source IP address or subnet. (Optional)
:param destination_ip_address: Destination IP address or subnet. (Optional)
:param source_port: Source port (integer in [1, 65535] or range in a:b). (Optional)
:param destination_port: Destination port (integer in [1, 65535] or range in a:b). (Optional)
:param shared: Set shared to True, default: False. (Optional)
:param enabled: To enable this rule, default: True. (Optional)
'''
conn = _auth(profile)
    return conn.create_firewall_rule(protocol, action, **kwargs)
def pdf(cls, uuid):
"""Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
with open('invoice.pdf', 'w') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
"""
url = urljoin(base_uri(), cls.member_path % (uuid,))
pdf_response = cls.http_request(url, headers={'Accept': 'application/pdf'})
        return pdf_response.read()
def publishing_prepare_published_copy(self, draft_obj):
""" Prepare published copy of draft prior to saving it """
# We call super here, somewhat perversely, to ensure this method will
# be called on publishable subclasses if implemented there.
mysuper = super(PublishingModel, self)
if hasattr(mysuper, 'publishing_prepare_published_copy'):
            mysuper.publishing_prepare_published_copy(draft_obj)
async def _get_twitter_configuration(self):
"""
create a ``twitter_configuration`` attribute with the response
of the endpoint
https://api.twitter.com/1.1/help/configuration.json
"""
api = self['api', general.twitter_api_version,
".json", general.twitter_base_api_url]
        return await api.help.configuration.get()
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
string.
"""
if self.ident in env.values:
# Substitute for a value.
return env.values[self.ident]
else:
# Keep original text.
            return self.original
def makeRequest(self, requestId, request, expectResponse=True):
"""
Send a request to our broker via our self.proto KafkaProtocol object.
Return a deferred which will fire when the reply matching the requestId
comes back from the server, or, if expectResponse is False, then
return None instead.
If we are not currently connected, then we buffer the request to send
when the connection comes back up.
"""
if requestId in self.requests:
# Id is duplicate to 'in-flight' request. Reject it, as we
# won't be able to properly deliver the response(s)
# Note that this won't protect against a client calling us
# twice with the same ID, but first with expectResponse=False
# But that's pathological, and the only defense is to track
# all requestIds sent regardless of whether we expect to see
# a response, which is effectively a memory leak...
raise DuplicateRequestError(
'Reuse of requestId:{}'.format(requestId))
# If we've been told to shutdown (close() called) then fail request
if self._dDown:
return fail(ClientError('makeRequest() called after close()'))
# Ok, we are going to save/send it, create a _Request object to track
canceller = partial(
self.cancelRequest, requestId,
CancelledError(message="Request correlationId={} was cancelled".format(requestId)))
tReq = _Request(requestId, request, expectResponse, canceller)
# add it to our requests dict
self.requests[requestId] = tReq
# Add an errback to the tReq.d to remove it from our requests dict
# if something goes wrong...
tReq.d.addErrback(self._handleRequestFailure, requestId)
# Do we have a connection over which to send the request?
if self.proto:
# Send the request
self._sendRequest(tReq)
# Have we not even started trying to connect yet? Do so now
elif not self.connector:
self._connect()
        return tReq.d
def _get_collection_for_user(self, collection_id, user):
"""Check that collection exists and user has `add` permission."""
collection_query = Collection.objects.filter(pk=collection_id)
if not collection_query.exists():
raise exceptions.ValidationError('Collection id does not exist')
collection = collection_query.first()
if not user.has_perm('add_collection', obj=collection):
if user.is_authenticated:
raise exceptions.PermissionDenied()
else:
raise exceptions.NotFound()
        return collection
def to(self, to):
"""
        [Edge-only] Specifies the destination ("to" vertex) of the edge
"""
if self._type.lower() != 'edge':
raise ValueError('Cannot set From/To to non-edge objects')
self._to = to
        return self
def get_system_uptime_output_show_system_uptime_hours(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
show_system_uptime = ET.SubElement(output, "show-system-uptime")
rbridge_id_key = ET.SubElement(show_system_uptime, "rbridge-id")
rbridge_id_key.text = kwargs.pop('rbridge_id')
hours = ET.SubElement(show_system_uptime, "hours")
hours.text = kwargs.pop('hours')
callback = kwargs.pop('callback', self._callback)
        return callback(config)
def initialize_connection(self): # noqa: E501 pylint:disable=too-many-statements, too-many-branches
"""Initialize a socket to a Chromecast, retrying as necessary."""
tries = self.tries
if self.socket is not None:
self.socket.close()
self.socket = None
# Make sure nobody is blocking.
for callback in self._request_callbacks.values():
callback['event'].set()
self.app_namespaces = []
self.destination_id = None
self.session_id = None
self._request_id = 0
self._request_callbacks = {}
self._open_channels = []
self.connecting = True
retry_log_fun = self.logger.error
# Dict keeping track of individual retry delay for each named service
retries = {}
def mdns_backoff(service, retry):
"""Exponentional backoff for service name mdns lookups."""
now = time.time()
retry['next_retry'] = now + retry['delay']
retry['delay'] = min(retry['delay']*2, 300)
retries[service] = retry
while not self.stop.is_set() and (tries is None or tries > 0): # noqa: E501 pylint:disable=too-many-nested-blocks
# Prune retries dict
retries = {key: retries[key] for key in self.services if (
key is not None and key in retries)}
for service in self.services.copy():
now = time.time()
retry = retries.get(
service, {'delay': self.retry_wait, 'next_retry': now})
# If we're connecting to a named service, check if it's time
if service and now < retry['next_retry']:
continue
try:
self.socket = new_socket()
self.socket.settimeout(self.timeout)
self._report_connection_status(
ConnectionStatus(CONNECTION_STATUS_CONNECTING,
NetworkAddress(self.host, self.port)))
# Resolve the service name. If service is None, we're
# connecting directly to a host name or IP-address
if service:
host = None
port = None
service_info = get_info_from_service(service,
self.zconf)
host, port = get_host_from_service_info(service_info)
if host and port:
try:
self.fn = service_info.properties[b'fn']\
.decode('utf-8')
except (AttributeError, KeyError, UnicodeError):
pass
self.logger.debug(
"[%s:%s] Resolved service %s to %s:%s",
self.fn or self.host, self.port, service, host,
port)
self.host = host
self.port = port
else:
self.logger.debug(
"[%s:%s] Failed to resolve service %s",
self.fn or self.host, self.port, service)
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_FAILED_RESOLVE,
NetworkAddress(service, None)))
mdns_backoff(service, retry)
# If zeroconf fails to receive the necessary data,
# try next service
continue
self.logger.debug("[%s:%s] Connecting to %s:%s",
self.fn or self.host, self.port,
self.host, self.port)
self.socket.connect((self.host, self.port))
self.socket = ssl.wrap_socket(self.socket)
self.connecting = False
self._force_recon = False
self._report_connection_status(
ConnectionStatus(CONNECTION_STATUS_CONNECTED,
NetworkAddress(self.host, self.port)))
self.receiver_controller.update_status()
self.heartbeat_controller.ping()
self.heartbeat_controller.reset()
self.logger.debug("[%s:%s] Connected!",
self.fn or self.host, self.port)
return
except OSError as err:
self.connecting = True
if self.stop.is_set():
self.logger.error(
"[%s:%s] Failed to connect: %s. "
"aborting due to stop signal.",
self.fn or self.host, self.port, err)
raise ChromecastConnectionError("Failed to connect")
self._report_connection_status(
ConnectionStatus(CONNECTION_STATUS_FAILED,
NetworkAddress(self.host, self.port)))
if service is not None:
retry_log_fun(
"[%s:%s] Failed to connect to service %s"
", retrying in %.1fs",
self.fn or self.host, self.port,
service, retry['delay'])
mdns_backoff(service, retry)
else:
retry_log_fun(
"[%s:%s] Failed to connect, retrying in %.1fs",
self.fn or self.host, self.port, self.retry_wait)
retry_log_fun = self.logger.debug
# Only sleep if we have another retry remaining
if tries is None or tries > 1:
self.logger.debug(
"[%s:%s] Not connected, sleeping for %.1fs. Services: %s",
self.fn or self.host, self.port,
self.retry_wait, self.services)
time.sleep(self.retry_wait)
if tries:
tries -= 1
self.stop.set()
self.logger.error("[%s:%s] Failed to connect. No retries.",
self.fn or self.host, self.port)
        raise ChromecastConnectionError("Failed to connect")
def is_overlapping_viewport(self, hotspot, xy):
"""
Checks to see if the hotspot at position ``(x, y)``
is (at least partially) visible according to the
position of the viewport.
"""
l1, t1, r1, b1 = calc_bounds(xy, hotspot)
l2, t2, r2, b2 = calc_bounds(self._position, self._device)
        return range_overlap(l1, r1, l2, r2) and range_overlap(t1, b1, t2, b2)
def collection_items(self, collection_name, **kwargs):
"""
implements Requirement 17 (/req/core/fc-op)
@type collection_name: string
@param collection_name: name of collection
@type bbox: list
@param bbox: list of minx,miny,maxx,maxy
@type time: string
@param time: time extent or time instant
@type limit: int
@param limit: limit number of features
@type startindex: int
@param startindex: start position of results
@returns: feature results
"""
if 'bbox' in kwargs:
kwargs['bbox'] = ','.join(kwargs['bbox'])
path = 'collections/{}/items'.format(collection_name)
url = self._build_url(path)
LOGGER.debug('Request: {}'.format(url))
response = requests.get(url, headers=REQUEST_HEADERS,
params=kwargs).json()
        return response
def check_compound_consistency(database, solver, exchange=set(),
zeromass=set()):
"""Yield each compound in the database with assigned mass
Each compound will be assigned a mass and the number of compounds having a
positive mass will be approximately maximized.
This is an implementation of the solution originally proposed by
[Gevorgyan08]_ but using the new method proposed by [Thiele14]_ to avoid
MILP constraints. This is similar to the way Fastcore avoids MILP
    constraints.
"""
# Create mass balance problem
prob = solver.create_problem()
compound_set = _non_localized_compounds(database)
mass_compounds = compound_set.difference(zeromass)
# Define mass variables
m = prob.namespace(mass_compounds, lower=0)
# Define z variables
z = prob.namespace(mass_compounds, lower=0, upper=1)
prob.set_objective(z.sum(mass_compounds))
prob.add_linear_constraints(m.set(mass_compounds) >= z.set(mass_compounds))
massbalance_lhs = {reaction_id: 0 for reaction_id in database.reactions}
for (compound, reaction_id), value in iteritems(database.matrix):
if compound not in zeromass:
mass_var = m(compound.in_compartment(None))
massbalance_lhs[reaction_id] += mass_var * value
for reaction_id, lhs in iteritems(massbalance_lhs):
if reaction_id not in exchange:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
prob.solve(lp.ObjectiveSense.Maximize)
except lp.SolverError as e:
raise_from(
MassConsistencyError('Failed to solve mass consistency: {}'.format(
e)), e)
for compound in mass_compounds:
        yield compound, m.value(compound)
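# --- Sketch of the underlying LP (restating the docstring and code above) ---
# With stoichiometric matrix S (compounds x reactions), the problem solved is
#   maximize    sum_i z_i
#   subject to  m_i >= z_i                   for every compound i with mass
#               sum_i S[i, j] * m_i == 0     for every non-exchange reaction j
#               0 <= z_i <= 1,  m_i >= 0
# A compound is reported as mass-consistent when its returned m_i is positive.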
def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_
:arg job_id: The ID of the job to fetch
:arg snapshot_id: The ID of the snapshot to update
:arg body: The model snapshot properties to update
"""
for param in (job_id, snapshot_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"POST",
_make_path(
"_ml",
"anomaly_detectors",
job_id,
"model_snapshots",
snapshot_id,
"_update",
),
params=params,
body=body,
        )
def _getTrafficClassAndFlowLabel(self):
"""Page 6, draft feb 2011 """
if self.tf == 0x0:
return (self.tc_ecn << 6) + self.tc_dscp, self.flowlabel
elif self.tf == 0x1:
return (self.tc_ecn << 6), self.flowlabel
elif self.tf == 0x2:
return (self.tc_ecn << 6) + self.tc_dscp, 0
else:
            return 0, 0
def get_lib_module_dict(self):
"""Load the 'lib' directory as a python module, so it can be used to provide functions
for rowpipe transforms. This only works filesystem packages"""
from importlib import import_module
if not self.ref:
return {}
u = parse_app_url(self.ref)
if u.scheme == 'file':
if not self.set_sys_path():
return {}
for module_name in self.lib_dir_names:
try:
m = import_module(module_name)
return {k: v for k, v in m.__dict__.items() if not k.startswith('__')}
except ModuleNotFoundError as e:
# We need to know if it is the datapackage's module that is missing
# or if it is a module that it imported
if not module_name in str(e):
raise # If not our module, it's a real error.
continue
else:
            return {}
def label_absent(name, node=None, apiserver_url=None):
'''
.. versionadded:: 2016.3.0
Delete label to the current node
CLI Example:
.. code-block:: bash
salt '*' k8s.label_absent hw/disktype
salt '*' k8s.label_absent hw/disktype kube-node.cluster.local http://kube-master.cluster.local
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# Get salt minion ID
node = _guess_node_id(node)
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# Get all labels
old_labels = _get_labels(node, apiserver_url)
# Prepare a temp labels dict
labels = dict([(key, value) for key, value in old_labels.items()
if key != name])
# Compare old labels and what we want
if labels == old_labels:
# Label already absent
ret['comment'] = "Label {0} already absent".format(name)
else:
# Label needs to be delete
res = _set_labels(node, apiserver_url, labels)
if res.get('status') == 409:
# there is an update during operation, need to retry
log.debug("Got 409, will try later")
ret['changes'] = {}
ret['comment'] = "Could not delete label {0}, please retry".format(name)
else:
ret['changes'] = {"deleted": name}
ret['comment'] = "Label {0} absent".format(name)
    return ret
def _join_paragraphs(cls, lines, use_indent=False, leading_blanks=False, trailing_blanks=False):
"""Join adjacent lines together into paragraphs using either a blank line or indent as separator."""
curr_para = []
paragraphs = []
for line in lines:
if use_indent:
if line.startswith(' '):
curr_para.append(line.lstrip())
continue
elif line == '':
continue
else:
if len(curr_para) > 0:
paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))
curr_para = [line.lstrip()]
else:
if len(line) != 0:
curr_para.append(line)
else:
paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))
curr_para = []
# Finish the last paragraph if there is one
if len(curr_para) > 0:
paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))
        return paragraphs
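# --- Illustrative grouping (not from the original source) ---
# Assuming the companion _join_paragraph() simply joins its lines, blank-line
# mode groups consecutive non-empty lines:
#   ['first line', 'still first', '', 'second paragraph']
#       -> ['first line still first', 'second paragraph']
# whereas use_indent=True starts a new paragraph at every non-indented line,
# folds indented continuation lines into the current one, and skips blanks.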
def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
"""
{_gate_plot_doc}
"""
        if ax is None:
ax = pl.gca()
if ax_channels is not None:
flip = self._find_orientation(ax_channels)
if flip:
vert = [v[::-1] for v in self.vert]
else:
vert = self.vert
kwargs.setdefault('fill', False)
kwargs.setdefault('color', 'black')
poly = pl.Polygon(vert, *args, **kwargs)
        return ax.add_artist(poly)
def get_stp_mst_detail_output_msti_port_admin_edge(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
admin_edge = ET.SubElement(port, "admin-edge")
admin_edge.text = kwargs.pop('admin_edge')
callback = kwargs.pop('callback', self._callback)
        return callback(config)
def translate(to_translate, to_language="auto", from_language="auto"):
"""Returns the translation using google translate
you must shortcut the language you define
(French = fr, English = en, Spanish = es, etc...)
if not defined it will detect it or use english by default
Example:
print(translate("salut tu vas bien?", "en"))
hello you alright?
"""
base_link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s"
if (sys.version_info[0] < 3):
to_translate = urllib.quote_plus(to_translate)
link = base_link % (to_language, from_language, to_translate)
request = urllib2.Request(link, headers=agent)
raw_data = urllib2.urlopen(request).read()
else:
to_translate = urllib.parse.quote(to_translate)
link = base_link % (to_language, from_language, to_translate)
request = urllib.request.Request(link, headers=agent)
raw_data = urllib.request.urlopen(request).read()
data = raw_data.decode("utf-8")
expr = r'class="t0">(.*?)<'
re_result = re.findall(expr, data)
if (len(re_result) == 0):
result = ""
else:
result = unescape(re_result[0])
    return (result)
def hash_footnote_reference(text, hashes, markdown_obj):
"""Hashes a footnote [^id] reference
This function converts footnote styles:
text here[^id]
Footnotes can be defined anywhere in the Markdown text.
"""
footnotes = markdown_obj.footnotes
numbers = {f: i+1 for i, f in enumerate(footnotes)}
def sub(match):
footnote_id = match.group(1)
if footnote_id not in footnotes:
return ''
number = numbers[footnote_id]
result = '<sup><a href="#fnref-{0}">{0}</a></sup>'.format(number)
hashed = hash_text(result, 'footnote')
hashes[hashed] = result
return hashed
    return re_footnote.sub(sub, text)
def get_configuration(basename='scriptabit.cfg', parents=None):
"""Parses and returns the program configuration options,
taken from a combination of ini-style config file, and
command line arguments.
Args:
basename (str): The base filename.
parents (list): A list of ArgumentParser objects whose arguments
should also be included in the configuration parsing. These
ArgumentParser instances **must** be instantiated with the
`add_help` argument set to `False`, otherwise the main
ArgumentParser instance will raise an exception due to duplicate
help arguments.
Returns:
The options object, and a function that can be called to print the help
text.
"""
copy_default_config_to_user_directory(basename)
parser = configargparse.ArgParser(
formatter_class=configargparse.ArgumentDefaultsRawHelpFormatter,
add_help=False,
parents=parents or [],
default_config_files=[
resource_filename(
Requirement.parse("scriptabit"),
os.path.join('scriptabit', basename)),
os.path.join(
os.path.expanduser("~/.config/scriptabit"),
basename),
os.path.join(os.curdir, basename)])
# logging config file
parser.add(
'-lc',
'--logging-config',
required=False,
default='scriptabit_logging.cfg',
metavar='FILE',
env_var='SCRIPTABIT_LOGGING_CONFIG',
help='Logging configuration file')
# Authentication file section
parser.add(
'-as',
'--auth-section',
required=False,
default='habitica',
help='''Name of the authentication file section containing the Habitica
credentials''')
parser.add(
'-url',
'--habitica-api-url',
required=False,
default='https://habitica.com/api/v3/',
help='''The base Habitica API URL''')
# plugins
parser.add(
'-r',
'--run',
required=False,
help='''Select the plugin to run. Note you can only run a single
plugin at a time. If you specify more than one, then only the
last one will be executed. To chain plugins together, create a
new plugin that combines the effects as required.''')
parser.add(
'-ls',
'--list-plugins',
required=False,
action='store_true',
help='''List available plugins''')
parser.add(
'-v',
'--version',
required=False,
action='store_true',
help='''Display scriptabit version''')
parser.add(
'-dr',
'--dry-run',
required=False,
action='store_true',
help='''Conduct a dry run. No changes are written to online services''')
parser.add(
'-n',
'--max-updates',
required=False,
type=int,
default=0,
help='''If > 0, this sets a limit on the number of plugin updates.
Note that plugins can still exit before the limit is reached.''')
parser.add(
'-uf',
'--update-frequency',
required=False,
type=int,
default=-1,
help='''If > 0, this specifies the preferred update frequency in minutes
for plugins that run in the update loop. Note that plugins may ignore or limit
this setting if the value is inappropriate for the specific plugin.''')
parser.add(
'-h',
'--help',
required=False,
action='store_true',
help='''Print help''')
return parser.parse_known_args()[0], parser.print_help | 0.000824 |
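A minimal usage sketch, assuming a caller wants plugin-specific options merged in: as the docstring requires, the parent parser must be built with add_help=False so it does not clash with the main parser's -h/--help (the option name below is illustrative).
import configargparse

def build_plugin_parser():
    # add_help=False is mandatory for parent parsers passed to get_configuration().
    parser = configargparse.ArgParser(add_help=False)
    parser.add(
        '--poll-interval',
        required=False,
        type=int,
        default=30,
        help='Illustrative plugin-specific option')
    return parser

# config, print_help = get_configuration(parents=[build_plugin_parser()])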
def __with_argument(node, value):
"""Modifies the flags in value if the node contains an Argument."""
arguments = node.getElementsByTagName('Argument')
if arguments:
logging.debug('Found argument within %s', value['name'])
value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue) | 0.003125 |
def transcribe(model_path, corpus):
""" Applies a trained model to untranscribed data in a Corpus. """
exp_dir = prep_exp_dir()
model = get_simple_model(exp_dir, corpus)
model.transcribe(model_path) | 0.004651 |
def _processCheckAuthResponse(self, response, server_url):
"""Process the response message from a check_authentication
request, invalidating associations if requested.
"""
is_valid = response.getArg(OPENID_NS, 'is_valid', 'false')
invalidate_handle = response.getArg(OPENID_NS, 'invalidate_handle')
if invalidate_handle is not None:
logging.info(
'Received "invalidate_handle" from server %s' % (server_url,))
if self.store is None:
logging.error('Unexpectedly got invalidate_handle without '
'a store!')
else:
self.store.removeAssociation(server_url, invalidate_handle)
if is_valid == 'true':
return True
else:
logging.error('Server responds that checkAuth call is not valid')
return False | 0.003319 |
def document_agents(p):
"""
Document agents in AIKIF (purpose and intent)
"""
p.comment('agent.py', 'base agent class')
p.comment('run_agents.py', 'Top level function to run the agents')
p.comment('agent_image_metadata.py', 'agent to collect file picture metadata')
p.comment('agent_learn_aixi.py', '')
p.comment('dummy_learn_1.py', 'sample (but stub only) learning algorithm to be called as test below')
p.comment('agent_explore_grid.py', 'working prototype of agent to move through a grid world, using very simple path finding.')
p.comment('agent_email.py', 'Agent that reads emails (currently only gmail)')
p.comment('agent_filelist.py', 'TOK - correctly scans and logs filelists from an agent')
p.comment('collect_Win_processes.py', 'script to collect windows processes. Currently not part of agent process, more an exercise on what can be logged')
p.comment('log_PC_usage.py', 'script to read current window title to be used as part of context to see what user is doing')
p.comment('log_browser_history.py', 'script to dump chrome browser history to CSV - not used')
p.comment('agg_context.py', 'detects context of user and computer') | 0.008299 |
def predictor(enc_flat,
action,
lstm_states,
pred_depth,
reuse=False,
scope_prefix='',
hparams=None):
"""LSTM predictor network."""
with tf.variable_scope(scope_prefix + 'predict', reuse=reuse):
enc_final_size = enc_flat.get_shape().as_list()[1]
action_size = action.get_shape().as_list()[1]
initial_size = (enc_final_size + action_size)
batch_size = tf.shape(enc_flat)[0]
init_stddev = 1e-2
pre_pred = tf.concat([enc_flat, action], 1)
pre_pred = tf.layers.dense(
pre_pred,
initial_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev))
    # This is only needed for the GAN version.
if hparams.pred_noise_std > 0:
# Add the noise like this so a pretrained model can be used.
pred_noise = tf.random_normal(
shape=[batch_size, 100], stddev=hparams.pred_noise_std)
pre_pred += tf.layers.dense(
pred_noise,
initial_size,
kernel_initializer=tf.truncated_normal_initializer(
stddev=init_stddev),
name='noise_dense')
pre_pred = tf.nn.relu(pre_pred)
if lstm_states[pred_depth - 2] is None:
back_connect = tf.tile(
tf.get_variable(
'back_connect_init',
shape=[1, initial_size * 2],
initializer=tf.truncated_normal_initializer(stddev=init_stddev))
, (batch_size, 1))
else:
back_connect = lstm_states[pred_depth - 2]
lstm_init_stddev = 1e-4
part_pred, lstm_states[0] = common_video.lstm_cell(
tf.concat([pre_pred, back_connect], 1),
lstm_states[0],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
part_pred = tf.contrib.layers.layer_norm(part_pred)
pred = part_pred
for pred_layer_num in range(1, pred_depth, 2):
part_pred, lstm_states[pred_layer_num] = common_video.lstm_cell(
pred,
lstm_states[pred_layer_num],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
pred += part_pred
part_pred, lstm_states[pred_layer_num + 1] = common_video.lstm_cell(
tf.concat([pred, pre_pred], 1),
lstm_states[pred_layer_num + 1],
initial_size,
use_peepholes=True,
initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev),
num_proj=initial_size)
part_pred = tf.contrib.layers.layer_norm(part_pred)
pred += part_pred
pred = tf.layers.dense(
pred,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev))
if hparams.enc_pred_use_l2norm:
pred = tf.nn.l2_normalize(pred, 1)
return pred | 0.005064 |
def validate_utterance(self, utterance):
"""
Validate the given utterance and return a list of uncovered segments (start, end).
"""
uncovered_segments = []
if self.label_list_idx in utterance.label_lists.keys():
start = 0
end = utterance.duration
ll = utterance.label_lists[self.label_list_idx]
ranges = list(ll.ranges(yield_ranges_without_labels=True))
# Check coverage at start
if ranges[0][0] - start > self.threshold:
uncovered_segments.append((start, ranges[0][0]))
# Check for empty ranges
for range in ranges:
if len(range[2]) == 0 and range[1] - range[0] > self.threshold:
uncovered_segments.append((range[0], range[1]))
# Check coverage at end
if ranges[-1][1] > 0 and end - ranges[-1][1] > self.threshold:
uncovered_segments.append((ranges[-1][1], end))
else:
uncovered_segments.append((utterance.start, utterance.end))
return uncovered_segments | 0.002686 |
def exists(Name, region=None, key=None, keyid=None, profile=None):
'''
Given a rule name, check to see if the given rule exists.
Returns True if the given rule exists and returns False if the given
rule does not exist.
CLI example::
salt myminion boto_cloudwatch_event.exists myevent region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
events = conn.list_rules(NamePrefix=Name)
if not events:
return {'exists': False}
for rule in events.get('Rules', []):
if rule.get('Name', None) == Name:
return {'exists': True}
return {'exists': False}
except ClientError as e:
err = __utils__['boto3.get_error'](e)
return {'error': err} | 0.001244 |
def add(self, agent_id, media_type, media_file):
"""
        Add permanent material of other types.
        For details see
        https://qydev.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E6%B0%B8%E4%B9%85%E7%B4%A0%E6%9D%90
        :param agent_id: id of the enterprise application
        :param media_type: media file type: image (image), voice (voice), video (video) or regular file (file)
        :param media_file: the file to upload, a file-like object
        :return: the JSON data packet returned by the API
"""
params = {
'agentid': agent_id,
'type': media_type,
}
return self._post(
url='material/add_material',
params=params,
files={
'media': media_file
}
) | 0.002959 |
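A hedged usage sketch: material_api stands in for an instance of the class that defines add() above, and the agent id and file name are made up.
# `material_api` is a hypothetical instance of the material API class above.
with open('logo.png', 'rb') as media_file:
    result = material_api.add(
        agent_id=1000002,
        media_type='image',
        media_file=media_file,
    )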
def _JRAxiIntegrand(r,E,L,pot):
"""The J_R integrand"""
return nu.sqrt(2.*(E-potentialAxi(r,pot))-L**2./r**2.) | 0.042373 |
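For reference, the integrand above is the radial velocity, and the radial action follows by integrating it between pericentre and apocentre; this is the standard definition, stated here for context rather than taken from this file:
v_r(r) = \sqrt{\,2\,[E - \Phi(r)] - L^2/r^2\,},
\qquad
J_R = \frac{1}{\pi} \int_{r_\mathrm{peri}}^{r_\mathrm{apo}} v_r(r)\,\mathrm{d}r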
def install_except_hook(except_hook=_hooks.except_hook):
"""
Install an except hook that will show the crash report dialog when an
    unhandled exception has occurred.
:param except_hook: except_hook function that will be called on the main
thread whenever an unhandled exception occured. The function takes
two parameters: the exception object and the traceback string.
"""
if not _backends:
raise ValueError('no backends found, you must at least install one '
'backend before calling this function')
global _except_hook
_except_hook = _hooks.QtExceptHook(except_hook) | 0.001546 |
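A minimal sketch of a hook matching the documented two-argument signature (exception object, traceback string); the reporting logic is only a placeholder.
def report_crash(exc, traceback_text):
    # Called on the main thread for every unhandled exception.
    print('Unhandled exception:', exc)
    print(traceback_text)

# After at least one backend has been installed:
# install_except_hook(except_hook=report_crash)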
def get_action(self, action_id):
"""Returns a specific Action by its ID.
Args:
action_id (int): id of action
"""
return Action.get_object(
api_token=self.token,
action_id=action_id
) | 0.007722 |
def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
""" Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = \
drop_item_from_tuple(error.document_path, dp_basedepth + i)
for i in sorted(sp_items, reverse=True):
error.schema_path = \
drop_item_from_tuple(error.schema_path, sp_basedepth + i)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors,
dp_items, sp_items) | 0.001815 |
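A sketch of the helper this method relies on, under the assumption that drop_item_from_tuple simply removes the element at the given index; the comment shows how a relative index maps to an absolute position.
def drop_item_from_tuple_demo(tp, index):
    # Assumed behaviour of the real helper: return `tp` without the element at `index`.
    return tp[:index] + tp[index + 1:]

# With a base document_path of ('field',) (dp_basedepth == 1) and a child error
# path of ('field', 0, 'nested'), dropping relative index 0 removes absolute index 1:
print(drop_item_from_tuple_demo(('field', 0, 'nested'), 1))  # ('field', 'nested')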
def process_data(name=None):
'''Fetch the current process local data dictionary.
    If ``name`` is not ``None`` it returns the value at ``name``,
    otherwise it returns the process data dictionary
'''
ct = current_process()
if not hasattr(ct, '_pulsar_local'):
ct._pulsar_local = {}
loc = ct._pulsar_local
return loc.get(name) if name else loc | 0.002653 |
def libvlc_media_library_new(p_instance):
    '''Create a new Media Library object.
@param p_instance: the libvlc instance.
@return: a new object or NULL on error.
'''
f = _Cfunctions.get('libvlc_media_library_new', None) or \
_Cfunction('libvlc_media_library_new', ((1,),), class_result(MediaLibrary),
ctypes.c_void_p, Instance)
return f(p_instance) | 0.007519 |
def facets_area(self):
"""
Return an array containing the area of each facet.
Returns
---------
area : (len(self.facets),) float
Total area of each facet (group of faces)
"""
# avoid thrashing the cache inside a loop
area_faces = self.area_faces
# sum the area of each group of faces represented by facets
# use native python sum in tight loop as opposed to array.sum()
# as in this case the lower function call overhead of
# native sum provides roughly a 50% speedup
areas = np.array([sum(area_faces[i])
for i in self.facets],
dtype=np.float64)
return areas | 0.002732 |
def process_action(self, request, queryset):
"""
Deletes the object(s). Successful deletes are logged.
Returns a 'render redirect' to the result of the
`get_done_url` method.
If a ProtectedError is raised, the `render` method
is called with message explaining the error added
to the context as `protected`.
"""
count = 0
try:
with transaction.commit_on_success():
for obj in queryset:
self.log_action(obj, CMSLog.DELETE)
count += 1
obj.delete()
msg = "%s object%s deleted." % (count, ('' if count ==1 else 's'))
url = self.get_done_url()
return self.render(request, redirect_url=url, message = msg)
        except ProtectedError as e:
protected = []
for x in e.protected_objects:
if hasattr(x, 'delete_blocked_message'):
protected.append(x.delete_blocked_message())
else:
protected.append(u"%s - %s" % (x._meta.verbose_name, x))
msg = "Cannot delete some objects because the following objects depend on them:"
return self.render(request, error_msg = msg, errors = protected) | 0.00771 |
def vcs_virtual_ip_address_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
virtual = ET.SubElement(vcs, "virtual")
ip = ET.SubElement(virtual, "ip")
address = ET.SubElement(ip, "address")
address = ET.SubElement(address, "address")
address.text = kwargs.pop('address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005535 |
def restore_descriptor(self, converted_descriptor):
"""Restore descriptor rom BigQuery
"""
# Convert
fields = []
for field in converted_descriptor['fields']:
field_type = self.restore_type(field['type'])
resfield = {
'name': field['name'],
'type': field_type,
}
if field.get('mode', 'NULLABLE') != 'NULLABLE':
resfield['constraints'] = {'required': True}
fields.append(resfield)
descriptor = {'fields': fields}
return descriptor | 0.003367 |
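A hedged input/output sketch, assuming restore_type maps BigQuery types such as 'INTEGER' and 'STRING' to 'integer' and 'string' respectively.
converted_descriptor = {
    'fields': [
        {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
        {'name': 'comment', 'type': 'STRING'},  # mode defaults to NULLABLE
    ]
}
# restore_descriptor(converted_descriptor) would then yield:
expected_descriptor = {
    'fields': [
        {'name': 'id', 'type': 'integer', 'constraints': {'required': True}},
        {'name': 'comment', 'type': 'string'},
    ]
}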
def print_variant(variant, include, exclude):
"""Print a vcf variant
Parameters
----------
variant: cyvcf2.Variant
include: tuple
set of strings with info fields that should be included
    exclude: tuple
        set of strings with info fields that should be excluded
"""
if include:
for info in variant.INFO:
key = info[0]
            if key not in include:
del variant.INFO[key]
if exclude:
for exc in exclude:
if variant.INFO.get(exc):
del variant.INFO[exc]
print_string = str(variant).rstrip()
click.echo(print_string) | 0.006015 |
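A hedged usage sketch with cyvcf2; the VCF path and INFO field names are illustrative.
from cyvcf2 import VCF

for variant in VCF('example.vcf'):
    # Keep only the CSQ and RankScore INFO fields on output.
    print_variant(variant, include=('CSQ', 'RankScore'), exclude=())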
def pack(self):
'''
Make ApptRec json/msgpack-friendly
'''
reqdictf = {k.name.lower(): v for (k, v) in self.reqdict.items()}
incunitf = None if self.incunit is None else self.incunit.name.lower()
return (reqdictf, incunitf, self.incval) | 0.007042 |
def _read(**kwargs):
"""Read csv file from local disk.
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv
"""
pd_obj = BaseFactory.read_csv(**kwargs)
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj) | 0.003035 |
def _hash_url(self, url):
"""
Hash the URL to an md5sum.
"""
if isinstance(url, six.text_type):
url = url.encode('utf-8')
return hashlib.md5(url).hexdigest() | 0.009479 |
def load(self, file_locations=[], **kwargs):
""" Loads the file_locations into the triplestores
args:
file_locations: list of tuples to load
[('vocabularies', [list of vocabs to load])
('directory', '/directory/path')
('filepath', '/path/to/a/file')
('package_all', 'name.of.a.package.with.defs')
('package_file','name.of.package', 'filename')]
custom: list of custom definitions to load
"""
self.set_load_state(**kwargs)
if file_locations:
self.__file_locations__ += file_locations
else:
file_locations = self.__file_locations__
conn = self.__get_conn__(**kwargs)
if file_locations:
log.info("Uploading files to conn '%s'", conn)
for item in file_locations:
log.info("loading '%s", item)
if item[0] == 'directory':
self.load_directory(item[1], **kwargs)
elif item[0] == 'filepath':
kwargs['is_file'] = True
self.load_file(item[1],**kwargs)
elif item[0].startswith('package'):
log.info("package: %s\nspec: %s",
item[1],
importlib.util.find_spec(item[1]))
try:
pkg_path = \
importlib.util.find_spec(\
item[1]).submodule_search_locations[0]
except TypeError:
pkg_path = importlib.util.find_spec(item[1]).origin
pkg_path = os.path.split(pkg_path)[0]
if item[0].endswith('_all'):
self.load_directory(pkg_path, **kwargs)
elif item[0].endswith('_file'):
filepath = os.path.join(pkg_path, item[2])
self.load_file(filepath, **kwargs)
else:
raise NotImplementedError
self.loaded_files(reset=True)
self.loaded_times = self.load_times(**kwargs) | 0.001876 |
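A usage sketch built from the tuple formats the docstring lists; the paths and package names are placeholders, and loader stands in for an instance of the defining class.
file_locations = [
    ('directory', '/srv/rdf/definitions'),
    ('filepath', '/srv/rdf/extra_vocab.ttl'),
    ('package_all', 'myproject.rdf_defs'),
    ('package_file', 'myproject.rdf_defs', 'schema.ttl'),
]
# loader.load(file_locations=file_locations)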
def get_parents():
"""Return sorted list of names of packages without dependants."""
distributions = get_installed_distributions(user_only=ENABLE_USER_SITE)
remaining = {d.project_name.lower() for d in distributions}
requirements = {r.project_name.lower() for d in distributions for
r in d.requires()}
return get_realnames(remaining - requirements) | 0.002571 |
def Ry(rads: Union[float, sympy.Basic]) -> YPowGate:
"""Returns a gate with the matrix e^{-i Y rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return YPowGate(exponent=rads / pi, global_shift=-0.5) | 0.004237 |
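For reference, the matrix the docstring describes, writing θ for rads (the standard single-qubit Y rotation, stated here for context):
R_y(\theta) = e^{-i\theta Y/2}
= \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}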
def get_model(self):
"""
Returns the fitted bayesian model
Example
----------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_model()
<pgmpy.models.BayesianModel.BayesianModel object at 0x7f20af154320>
"""
try:
model = BayesianModel()
model.add_nodes_from(self.variable_names)
model.add_edges_from(self.variable_edges)
model.name = self.network_name
tabular_cpds = []
for var in sorted(self.variable_cpds.keys()):
values = self.variable_cpds[var]
cpd = TabularCPD(var, len(self.variable_states[var]), values,
evidence=self.variable_parents[var],
evidence_card=[len(self.variable_states[evidence_var])
for evidence_var in self.variable_parents[var]])
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
for node, properties in self.variable_properties.items():
for prop in properties:
prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))
model.node[node][prop_name] = prop_value
return model
except AttributeError:
raise AttributeError('First get states of variables, edges, parents and network name') | 0.004013 |
def health(args):
""" Health FireCloud Server """
r = fapi.health()
fapi._check_response_code(r, 200)
return r.content | 0.007463 |
def depth(self):
"""Return the circuit depth.
Returns:
int: the circuit depth
Raises:
DAGCircuitError: if not a directed acyclic graph
"""
if not nx.is_directed_acyclic_graph(self._multi_graph):
raise DAGCircuitError("not a DAG")
depth = nx.dag_longest_path_length(self._multi_graph) - 1
return depth if depth != -1 else 0 | 0.004819 |
def build_from_issue_comment(gh_token, body):
"""Create a WebhookMetadata from a comment added to an issue.
"""
if body["action"] in ["created", "edited"]:
github_con = Github(gh_token)
repo = github_con.get_repo(body['repository']['full_name'])
issue = repo.get_issue(body['issue']['number'])
text = body['comment']['body']
try:
comment = issue.get_comment(body['comment']['id'])
except UnknownObjectException:
            # If the comment has already disappeared, skip the command
return None
return WebhookMetadata(repo, issue, text, comment)
return None | 0.001534 |
def Or(a: Bool, b: Bool) -> Bool:
"""Create an or expression.
:param a:
:param b:
:return:
"""
union = a.annotations + b.annotations
return Bool(z3.Or(a.raw, b.raw), annotations=union) | 0.004695 |
def show_help(fd=sys.stdout):
'''
Convenience wrapper around binwalk.core.module.Modules.help.
@fd - An object with a write method (e.g., sys.stdout, sys.stderr, etc).
Returns None.
'''
with Modules() as m:
fd.write(m.help()) | 0.003861 |
def network_lopf(network, snapshots=None, solver_name="glpk", solver_io=None,
skip_pre=False, extra_functionality=None, solver_logfile=None, solver_options={},
keep_files=False, formulation="angles", ptdf_tolerance=0.,
free_memory={},extra_postprocessing=None):
"""
Linear optimal power flow for a group of snapshots.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
solver_io : string, default None
Solver Input-Output option, e.g. "python" to use "gurobipy" for
solver_name="gurobi"
skip_pre: bool, default False
Skip the preliminary steps of computing topology, calculating
dependent values and finding bus controls.
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to
add/change constraints and add/change the objective function.
solver_logfile : None|string
If not None, sets the logfile option of the solver.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchhoff","ptdf"]
ptdf_tolerance : float
Value below which PTDF entries are ignored
free_memory : set, default {'pyomo'}
Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
data away while the solver runs (as a pickle to disk) and/or free
`pyomo` data after the solution has been extracted.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user to
extract further information about the solution, such as additional shadow prices.
Returns
-------
None
"""
snapshots = _as_snapshots(network, snapshots)
network_lopf_build_model(network, snapshots, skip_pre=skip_pre,
formulation=formulation, ptdf_tolerance=ptdf_tolerance)
if extra_functionality is not None:
extra_functionality(network,snapshots)
network_lopf_prepare_solver(network, solver_name=solver_name,
solver_io=solver_io)
return network_lopf_solve(network, snapshots, formulation=formulation,
solver_logfile=solver_logfile, solver_options=solver_options,
keep_files=keep_files, free_memory=free_memory,
extra_postprocessing=extra_postprocessing) | 0.002398 |
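A hedged usage sketch: the network file name is illustrative, while the solver_options value and the formulation name come from the docstring above.
import pypsa

network = pypsa.Network('my_network.nc')  # illustrative input file
network_lopf(
    network,
    snapshots=network.snapshots[:24],
    solver_name='gurobi',
    solver_options={'threads': 2},
    formulation='kirchhoff',
)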
def merge(self, pdf_files, output):
"""Merge list of PDF files to a single PDF file."""
        if self.method == 'pypdf3':
return self.pypdf3(pdf_files, output)
else:
return self.pdfrw(pdf_files, output) | 0.008197 |
def overhang(self, tile):
"""
Get the left and right absolute overflow -- the amount of box
overhanging `tile`, can be viewed as self \\ tile (set theory relative
complement, but in a bounding sense)
"""
ll = np.abs(amin(self.l - tile.l, aN(0, dim=self.dim)))
rr = np.abs(amax(self.r - tile.r, aN(0, dim=self.dim)))
return ll, rr | 0.005089 |
def _generate_one_fake(self, schema):
"""
Recursively traverse schema dictionary and for each "leaf node", evaluate the fake
value
Implementation:
For each key-value pair:
1) If value is not an iterable (i.e. dict or list), evaluate the fake data (base case)
2) If value is a dictionary, recurse
3) If value is a list, iteratively recurse over each item
"""
data = {}
for k, v in schema.items():
if isinstance(v, dict):
data[k] = self._generate_one_fake(v)
elif isinstance(v, list):
data[k] = [self._generate_one_fake(item) for item in v]
else:
data[k] = getattr(self._faker, v)()
return data | 0.005175 |
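A hedged usage sketch: the leaf values name standard Faker providers ('name', 'email', 'address'), and generator stands in for an instance of the defining class.
schema = {
    'user': {
        'full_name': 'name',
        'email': 'email',
    },
    'shipping_addresses': [
        {'address': 'address'},
        {'address': 'address'},
    ],
}
# generator._generate_one_fake(schema) would return something like:
# {'user': {'full_name': 'Jane Doe', 'email': 'jane@example.org'},
#  'shipping_addresses': [{'address': '...'}, {'address': '...'}]}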
def get_uncond_agent(agent):
"""Construct the unconditional state of an Agent.
The unconditional Agent is a copy of the original agent but
without any bound conditions and modification conditions.
Mutation conditions, however, are preserved since they are static.
"""
agent_uncond = ist.Agent(_n(agent.name), mutations=agent.mutations)
return agent_uncond | 0.002604 |