text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def get_vnetwork_dvs_output_vnetwork_dvs_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_dvs = ET.Element("get_vnetwork_dvs")
config = get_vnetwork_dvs
output = ET.SubElement(get_vnetwork_dvs, "output")
vnetwork_dvs = ET.SubElement(output, "vnetwork-dvs")
name = ET.SubElement(vnetwork_dvs, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003731 |
def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
        if self.report is None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED | 0.014019 |
def load_locale_prefixdata_file(prefixdata, filename, locale=None, overall_prefix=None, separator=None):
"""Load per-prefix data from the given file, for the given locale and prefix.
We assume that this file:
- is encoded in UTF-8
- may have comment lines (starting with #) and blank lines
- has data lines of the form '<prefix>|<stringdata>'
- contains only data for prefixes that are extensions of the filename.
If overall_prefix is specified, lines are checked to ensure their prefix falls within this value.
If locale is specified, prefixdata[prefix][locale] is filled in; otherwise, just prefixdata[prefix].
If separator is specified, the string data will be split on this separator, and the output values
in the dict will be tuples of strings rather than strings.
"""
with open(filename, "rb") as infile:
lineno = 0
for line in infile:
uline = line.decode('utf-8')
lineno += 1
dm = DATA_LINE_RE.match(uline)
if dm:
prefix = dm.group('prefix')
stringdata = dm.group('stringdata')
if stringdata != stringdata.rstrip():
print ("%s:%d: Warning: stripping trailing whitespace" % (filename, lineno))
stringdata = stringdata.rstrip()
if overall_prefix is not None and not prefix.startswith(overall_prefix):
raise Exception("%s:%d: Prefix %s is not within %s" %
(filename, lineno, prefix, overall_prefix))
if separator is not None:
stringdata = tuple(stringdata.split(separator))
if prefix not in prefixdata:
prefixdata[prefix] = {}
if locale is not None:
prefixdata[prefix][locale] = stringdata
else:
prefixdata[prefix] = stringdata
elif BLANK_LINE_RE.match(uline):
pass
elif COMMENT_LINE_RE.match(uline):
pass
else:
raise Exception("%s:%d: Unexpected line format: %s" %
(filename, lineno, line)) | 0.004027 |
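A rough illustration of the data-line format this loader expects and the structure it builds. The regex and the sample prefixes below are assumptions for the sketch, not the module's actual DATA_LINE_RE or data.

import re

# Simplified stand-in for the module's DATA_LINE_RE (an assumption for this sketch).
SKETCH_DATA_LINE_RE = re.compile(r"^(?P<prefix>\d+)\|(?P<stringdata>.*)$")

sample_lines = [
    "# comments and blank lines are skipped",
    "",
    "1201|New Jersey",
    "1212|New York, NY",
]

prefixdata = {}
for line in sample_lines:
    m = SKETCH_DATA_LINE_RE.match(line)
    if not m:
        continue  # comment or blank line
    # With locale='en', the loader fills prefixdata[prefix][locale].
    prefixdata.setdefault(m.group("prefix"), {})["en"] = m.group("stringdata")

print(prefixdata)  # {'1201': {'en': 'New Jersey'}, '1212': {'en': 'New York, NY'}}
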
def content_type(self) -> Optional[ContentTypeHeader]:
"""The ``Content-Type`` header."""
try:
return cast(ContentTypeHeader, self[b'content-type'][0])
except (KeyError, IndexError):
return None | 0.008264 |
def parse_options():
"""
parse_options() -> opts, args
Parse any command-line options given returning both
the parsed options and arguments.
"""
parser = optparse.OptionParser(usage=USAGE, version=VERSION)
parser.add_option("-o", "--ontology",
action="store", type="string", default="", dest="ontology",
help="Specifies which ontology to compare to.")
opts, args = parser.parse_args()
if len(args) < 1 or not opts.ontology:
parser.print_help()
raise SystemExit(1)
return opts, args | 0.033531 |
def forward(self, x):
"""Compute forward-pass of this module on ``x``.
Parameters
----------
x : `torch.autograd.variable.Variable`
Input of this layer. The contained tensor must have shape
``extra_shape + operator.domain.shape``, and
``len(extra_shape)`` must be at least 1 (batch axis).
Returns
-------
out : `torch.autograd.variable.Variable`
The computed output. Its tensor will have shape
``extra_shape + operator.range.shape``, where ``extra_shape``
are the extra axes of ``x``.
Examples
--------
Evaluating on a 2D tensor, where the operator expects a 1D input,
i.e., with extra batch axis only:
>>> matrix = np.array([[1, 0, 0],
... [0, 1, 1]], dtype='float32')
>>> odl_op = odl.MatrixOperator(matrix)
>>> odl_op.domain.shape
(3,)
>>> odl_op.range.shape
(2,)
>>> op_mod = OperatorAsModule(odl_op)
>>> t = torch.ones(3)
>>> x = autograd.Variable(t[None, :]) # "fake" batch axis
>>> op_mod(x)
Variable containing:
1 2
[torch.FloatTensor of size 1x2]
>>> t = torch.ones(3)
>>> x_tensor = torch.stack([0 * t, 1 * t])
>>> x = autograd.Variable(x_tensor) # batch of 2 inputs
>>> op_mod(x)
Variable containing:
0 0
1 2
[torch.FloatTensor of size 2x2]
An arbitrary number of axes is supported:
>>> x = autograd.Variable(t[None, None, :]) # "fake" batch and channel
>>> op_mod(x)
Variable containing:
(0 ,.,.) =
1 2
[torch.FloatTensor of size 1x1x2]
>>> x_tensor = torch.stack([torch.stack([0 * t, 1 * t]),
... torch.stack([2 * t, 3 * t]),
... torch.stack([4 * t, 5 * t])])
>>> x = autograd.Variable(x_tensor) # batch of 3x2 inputs
>>> op_mod(x)
Variable containing:
(0 ,.,.) =
0 0
1 2
<BLANKLINE>
(1 ,.,.) =
2 4
3 6
<BLANKLINE>
(2 ,.,.) =
4 8
5 10
[torch.FloatTensor of size 3x2x2]
"""
in_shape = x.data.shape
op_in_shape = self.op_func.operator.domain.shape
op_out_shape = self.op_func.operator.range.shape
extra_shape = in_shape[:-len(op_in_shape)]
if in_shape[-len(op_in_shape):] != op_in_shape or not extra_shape:
shp_str = str(op_in_shape).strip('()')
raise ValueError('expected input of shape (N, *, {}), got input '
'with shape {}'.format(shp_str, in_shape))
# Flatten extra axes, then do one entry at a time
newshape = (int(np.prod(extra_shape)),) + op_in_shape
x_flat_xtra = x.reshape(*newshape)
results = []
for i in range(x_flat_xtra.data.shape[0]):
results.append(self.op_func(x_flat_xtra[i]))
# Reshape the resulting stack to the expected output shape
stack_flat_xtra = torch.stack(results)
return stack_flat_xtra.view(extra_shape + op_out_shape) | 0.000608 |
def node_stat_copy(self, node_or_char, node=None):
"""Return a node's stats, prepared for pickling, in a dictionary."""
if node is None:
node = node_or_char
else:
node = self._real.character[node_or_char].node[node]
return {
k: v.unwrap() if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap') else v
for (k, v) in node.items() if k not in {
'character',
'name',
'arrival_time',
'next_arrival_time'
}
} | 0.005291 |
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variantAnnotation in gaObjects:
print(
variantAnnotation.id, variantAnnotation.variant_id,
variantAnnotation.variant_annotation_set_id,
variantAnnotation.created, sep="\t", end="\t")
for effect in variantAnnotation.transcript_effects:
print(effect.alternate_bases, sep="|", end="|")
for so in effect.effects:
print(so.term, sep="&", end="|")
print(so.term_id, sep="&", end="|")
print(effect.hgvs_annotation.transcript,
effect.hgvs_annotation.protein, sep="|", end="\t")
print() | 0.002469 |
def read(self, symbol, chunk_range=None, filter_data=True, **kwargs):
"""
Reads data for a given symbol from the database.
Parameters
----------
symbol: str, or list of str
the symbol(s) to retrieve
chunk_range: object
corresponding range object for the specified chunker (for
DateChunker it is a DateRange object or a DatetimeIndex,
            as returned by pandas.date_range)
filter_data: boolean
perform chunk level filtering on the data (see filter in _chunker)
only applicable when chunk_range is specified
kwargs: ?
values passed to the serializer. Varies by serializer
Returns
-------
DataFrame or Series, or in the case when multiple symbols are given,
returns a dict of symbols (symbol -> dataframe/series)
"""
if not isinstance(symbol, list):
symbol = [symbol]
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException('No data found for %s' % (symbol))
spec = {SYMBOL: {'$in': symbol}}
chunker = CHUNKER_MAP[sym[0][CHUNKER]]
deser = SER_MAP[sym[0][SERIALIZER]].deserialize
if chunk_range is not None:
spec.update(chunker.to_mongo(chunk_range))
by_start_segment = [(SYMBOL, pymongo.ASCENDING),
(START, pymongo.ASCENDING),
(SEGMENT, pymongo.ASCENDING)]
segment_cursor = self._collection.find(spec, sort=by_start_segment)
chunks = defaultdict(list)
for _, segments in groupby(segment_cursor, key=lambda x: (x[START], x[SYMBOL])):
segments = list(segments)
mdata = self._mdata.find_one({SYMBOL: segments[0][SYMBOL],
START: segments[0][START],
END: segments[0][END]})
# when len(segments) == 1, this is essentially a no-op
# otherwise, take all segments and reassemble the data to one chunk
chunk_data = b''.join([doc[DATA] for doc in segments])
chunks[segments[0][SYMBOL]].append({DATA: chunk_data, METADATA: mdata})
skip_filter = not filter_data or chunk_range is None
if len(symbol) > 1:
return {sym: deser(chunks[sym], **kwargs) if skip_filter else chunker.filter(deser(chunks[sym], **kwargs), chunk_range) for sym in symbol}
else:
return deser(chunks[symbol[0]], **kwargs) if skip_filter else chunker.filter(deser(chunks[symbol[0]], **kwargs), chunk_range) | 0.00226 |
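A hypothetical usage sketch: `lib` is assumed to be an already-populated chunkstore-style library containing the symbols shown, and the date-based chunk_range follows the pandas.date_range option mentioned in the docstring.

import pandas as pd

# `lib` is assumed to be a chunkstore library with 'AAPL' and 'MSFT' already written.
df = lib.read('AAPL')                    # single symbol -> DataFrame/Series
both = lib.read(['AAPL', 'MSFT'])        # list of symbols -> {symbol: DataFrame/Series}
jan = lib.read('AAPL', chunk_range=pd.date_range('2016-01-01', '2016-01-31'))
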
def is_primitive(value):
"""
Checks if value has primitive type.
Primitive types are: numbers, strings, booleans, date and time.
    Complex (non-primitive) types are: objects, maps and arrays
:param value: a value to check
:return: true if the value has primitive type and false if value type is complex.
"""
typeCode = TypeConverter.to_type_code(value)
return typeCode == TypeCode.String or typeCode == TypeCode.Enum or typeCode == TypeCode.Boolean \
or typeCode == TypeCode.Integer or typeCode == TypeCode.Long \
or typeCode == TypeCode.Float or typeCode == TypeCode.Double \
or typeCode == TypeCode.DateTime or typeCode == TypeCode.Duration | 0.010526 |
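A rough standalone approximation of the same check using plain Python types; the real function goes through TypeConverter/TypeCode, which are not shown here.

from datetime import datetime, timedelta

def is_primitive_approx(value):
    # Approximation of the check above with built-in types; booleans are a
    # subclass of int, so they pass the isinstance test as well.
    return isinstance(value, (str, int, float, datetime, timedelta))

assert is_primitive_approx(123)
assert is_primitive_approx("abc")
assert is_primitive_approx(datetime.utcnow())
assert not is_primitive_approx({"key": "value"})   # maps are complex
assert not is_primitive_approx([1, 2, 3])          # arrays are complex
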
def get_servers(self, topic):
"""We're assuming that the static list of servers can serve the given
        topic, since we have no preexisting knowledge about them.
"""
return (nsq.node.ServerNode(sh) for sh in self.__server_hosts) | 0.011628 |
def run(self, args):
"""
Deletes a single project specified by project_name in args.
:param args Namespace arguments parsed from the command line
"""
project = self.fetch_project(args, must_exist=True, include_children=False)
if not args.force:
delete_prompt = "Are you sure you wish to delete {} (y/n)?".format(project.name)
if not boolean_input_prompt(delete_prompt):
return
self.remote_store.delete_project(self.create_project_name_or_id_from_args(args)) | 0.009042 |
def QA_fetch_get_future_min(code, start, end, frequence='1min', ip=None, port=None):
    'Futures data: minute bars'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
type_ = ''
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens * 2.5
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens * 2.5
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens * 2.5
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens * 2.5
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens * 2.5
if lens > 20800:
lens = 20800
# print(lens)
with apix.connect(ip, port):
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
data = pd.concat([apix.to_df(apix.get_instrument_bars(frequence, int(code_market.market), str(
code), (int(lens / 700) - i) * 700, 700)) for i in range(int(lens / 700) + 1)], axis=0)
# print(data)
# print(data.datetime)
data = data \
.assign(tradetime=data['datetime'].apply(str), code=str(code)) \
.assign(datetime=pd.to_datetime(data['datetime'].apply(QA_util_future_to_realdatetime, 1))) \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1, inplace=False) \
.assign(date=data['datetime'].apply(lambda x: str(x)[0:10])) \
.assign(date_stamp=data['datetime'].apply(lambda x: QA_util_date_stamp(x))) \
.assign(time_stamp=data['datetime'].apply(lambda x: QA_util_time_stamp(x))) \
.assign(type=type_).set_index('datetime', drop=False, inplace=False)
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))[start:end].sort_index() | 0.004386 |
def start(self, http_daemon=None): # pylint: disable=unused-argument
"""Actually restart the process if the module is external
Try first to stop the process and create a new Process instance
with target start_module.
Finally start process.
:param http_daemon: Not used here but can be used in other modules
:type http_daemon: None | object
:return: None
"""
if not self.is_external:
return
if self.process:
self.stop_process()
logger.info("Starting external process for module %s...", self.name)
proc = Process(target=self.start_module, args=(), group=None)
        # Under Windows we should not call start() on an object that holds its
        # process as an attribute, so we remove it before start() and set it back afterwards
try:
del self.properties['process']
except KeyError:
pass
proc.start()
# We save the process data AFTER the fork()
self.process = proc
self.properties['process'] = proc
logger.info("%s is now started (pid=%d)", self.name, proc.pid) | 0.002602 |
def s_connect(self, server, port, r_server=None):
"""
Link a server.
Required arguments:
* server - Server to link with.
* port - Port to use.
Optional arguments:
* r_server=None - Link r_server with server.
"""
with self.lock:
if not r_server:
self.send('CONNECT %s %s' % (server, port), error_check=True)
else:
self.send('CONNECT %s %s %s' % (server, port, \
r_server), error_check=True) | 0.007233 |
def create_item(self, hash_key, start=0, extra_attrs=None):
'''
        Hook point for overriding how the CounterPool creates a DynamoDB item
for a given counter when an existing item can't be found.
'''
table = self.get_table()
now = datetime.utcnow().replace(microsecond=0).isoformat()
attrs = {
'created_on': now,
'modified_on': now,
'count': start,
}
if extra_attrs:
attrs.update(extra_attrs)
item = table.new_item(
hash_key=hash_key,
attrs=attrs,
)
return item | 0.00319 |
def optional(e, default=Ignore):
"""
Create a PEG function to optionally match an expression.
"""
def match_optional(s, grm=None, pos=0):
try:
return e(s, grm, pos)
except PegreError:
return PegreResult(s, default, (pos, pos))
return match_optional | 0.003247 |
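A self-contained toy showing how the returned matcher behaves. The PegreError/PegreResult stand-ins and the literal() matcher below are assumptions, and optional() is restated verbatim against them only so the sketch runs on its own.

from collections import namedtuple

class PegreError(Exception):
    """Stand-in for the library's parse error."""

PegreResult = namedtuple("PegreResult", "string value span")  # stand-in result type
Ignore = object()  # stand-in for the library's Ignore sentinel

def optional(e, default=Ignore):
    def match_optional(s, grm=None, pos=0):
        try:
            return e(s, grm, pos)
        except PegreError:
            return PegreResult(s, default, (pos, pos))
    return match_optional

def literal(text):
    """Toy expression: match `text` exactly at `pos`, else raise PegreError."""
    def match(s, grm=None, pos=0):
        if s.startswith(text, pos):
            return PegreResult(s, text, (pos, pos + len(text)))
        raise PegreError("expected %r at position %d" % (text, pos))
    return match

maybe_hello = optional(literal("hello"), default=None)
print(maybe_hello("hello world"))  # matched: value='hello', span=(0, 5)
print(maybe_hello("goodbye"))      # no match: value=None, empty span=(0, 0)
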
def export_public_key(user_id, env=None, sp=subprocess):
"""Export GPG public key for specified `user_id`."""
args = gpg_command(['--export', user_id])
result = check_output(args=args, env=env, sp=sp)
if not result:
log.error('could not find public key %r in local GPG keyring', user_id)
raise KeyError(user_id)
return result | 0.00277 |
def fix_axon_peri(hobj):
"""Replace reconstructed axon with a stub
:param hobj: hoc object
"""
for i,sec in enumerate(hobj.axon):
hobj.axon[i] = None
for i,sec in enumerate(hobj.all):
if 'axon' in sec.name():
hobj.all[i] = None
hobj.all = [sec for sec in hobj.all if sec is not None]
hobj.axon = None
#h.execute('create axon[2]', hobj)
hobj.axon = [h.Section(name='axon[0]'), h.Section(name='axon[1]')]
hobj.axonal = []
for sec in hobj.axon:
sec.L = 30
sec.diam = 1
hobj.axonal.append(sec)
hobj.all.append(sec) # need to remove this comment
hobj.axon[0].connect(hobj.soma[0], 0.5, 0)
hobj.axon[1].connect(hobj.axon[0], 1, 0)
h.define_shape() | 0.005229 |
def _parse(data, obj_name, attr_map):
"""parse xml data into a python map"""
parsed_xml = minidom.parseString(data)
parsed_objects = []
for obj in parsed_xml.getElementsByTagName(obj_name):
parsed_obj = {}
for (py_name, xml_name) in attr_map.items():
parsed_obj[py_name] = _get_minidom_tag_value(obj, xml_name)
parsed_objects.append(parsed_obj)
return parsed_objects | 0.00237 |
async def list_networks(request: web.Request) -> web.Response:
"""
Get request will return a list of discovered ssids:
GET /wifi/list
200 OK
{ "list": [
{
ssid: string // e.g. "linksys", name to connect to
signal: int // e.g. 100; arbitrary signal strength, more is better
active: boolean // e.g. true; whether there is a connection active
security: str // e.g. "WPA2 802.1X" raw nmcli security type output
securityType: str // e.g. "wpa-eap"; see below
}
]
}
The securityType field contains a value suitable for passing to the
securityType argument of /configure, or 'unsupported'. The security
field is mostly useful for debugging if you are unable to connect to
the network even though you think you are using the correct security
type.
"""
try:
networks = await nmcli.available_ssids()
except RuntimeError as e:
return web.json_response({'message': ' '.join(e.args)}, status=500)
else:
return web.json_response({'list': networks}, status=200) | 0.000901 |
def _mark_html_fields_as_safe(self, page):
"""
Mark the html content as safe so we don't have to use the safe
template tag in all cms templates:
"""
page.title = mark_safe(page.title)
page.content = mark_safe(page.content)
return page | 0.006897 |
def isSelfSigned(self):
"""
Return True if the certificate is self signed:
- issuer and subject are the same
- the signature of the certificate is valid.
"""
if self.issuer == self.subject:
return self.isIssuerCert(self)
return False | 0.006557 |
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
#print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt) | 0.014634 |
def threads():
'''
This tests the performance of the processor's scheduler
CLI Example:
.. code-block:: bash
salt '*' sysbench.threads
'''
# Test data
thread_yields = [100, 200, 500, 1000]
thread_locks = [2, 4, 8, 16]
# Initializing the test variables
test_command = 'sysbench --num-threads=64 --test=threads '
test_command += '--thread-yields={0} --thread-locks={1} run '
result = None
ret_val = {}
# Test begins!
for yields, locks in zip(thread_yields, thread_locks):
key = 'Yields: {0} Locks: {1}'.format(yields, locks)
run_command = test_command.format(yields, locks)
result = __salt__['cmd.run'](run_command)
ret_val[key] = _parser(result)
return ret_val | 0.001299 |
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
sqrtbz= nu.sqrt(self._b2+z**2.)
asqrtbz= self._a+sqrtbz
if isinstance(R,float) and sqrtbz == asqrtbz:
return -(3.*R*z/(R**2.+asqrtbz**2.)**2.5)
else:
return -(3.*R*z*asqrtbz
/sqrtbz/(R**2.+asqrtbz**2.)**2.5) | 0.014286 |
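For reference, the expression returned by the general branch corresponds to (with a and b the potential's shape parameters):

\frac{\partial^2\Phi}{\partial R\,\partial z}
    = -\frac{3 R z \left(a + \sqrt{b^2 + z^2}\right)}
            {\sqrt{b^2 + z^2}\,\left[R^2 + \left(a + \sqrt{b^2 + z^2}\right)^2\right]^{5/2}}

The first branch handles the case where asqrtbz equals sqrtbz (i.e. a = 0), so the ratio cancels and the expression reduces to -3Rz / (R^2 + b^2 + z^2)^{5/2}.
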
def process_form(self, instance, field, form, empty_marker = None,
emptyReturnsMarker = False):
""" Some special field handling for disabled fields, which don't
get submitted by the browser but still need to be written away.
"""
bsc = getToolByName(instance, 'bika_setup_catalog')
default = super(PartitionSetupWidget,self).process_form(
instance, field, form, empty_marker, emptyReturnsMarker)
if not default:
return [], {}
value = default[0]
kwargs = len(default) > 1 and default[1] or {}
newvalue = []
for v in value:
v = dict(v)
            if v.get('separate', '') == 'on' and 'preservation' not in v:
                container_uid = v.get('container', [''])[0]
                if container_uid:
                    container = bsc(UID=container_uid)[0].getObject()
if container.getPrePreserved():
pres = container.getPreservation()
if pres:
v['preservation'] = [pres.UID()]
newvalue.append(v)
return newvalue, kwargs | 0.009346 |
def metzner_mcmc_slow(Z, n_samples, n_thin=1, random_state=None):
"""Metropolis Markov chain Monte Carlo sampler for reversible transition
matrices
Parameters
----------
Z : np.array, shape=(n_states, n_states)
The effective count matrix, the number of observed transitions
between states plus the number of prior counts
n_samples : int
Number of steps to iterate the chain for
n_thin : int
Yield every ``n_thin``-th sample from the MCMC chain
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Notes
-----
The transition matrix posterior distribution is ::
        P(T | Z) \propto \prod_{ij} T_{ij}^{Z_{ij}}
and constrained to be reversible, such that there exists a \pi s.t. ::
\pi_i T_{ij} = \pi_j T_{ji}
Yields
------
T : np.array, shape=(n_states, n_states)
This generator yields samples from the transition matrix posterior
References
----------
.. [1] P. Metzner, F. Noe and C. Schutte, "Estimating the sampling error:
Distribution of transition matrices and functions of transition
matrices for given trajectory data." Phys. Rev. E 80 021106 (2009)
See Also
--------
metzner_mcmc_fast
"""
# Upper and lower bounds on the sum of the K matrix, to ensure proper
# proposal weights. See Eq. 17 of [1].
K_MINUS = 0.9
K_PLUS = 1.1
Z = np.asarray(Z)
n_states = Z.shape[0]
    if not (Z.ndim == 2 and Z.shape[1] == n_states):
raise ValueError("Z must be square. Z.shape=%s" % str(Z.shape))
K = 0.5 * (Z + Z.T) / np.sum(Z, dtype=float)
random = check_random_state(random_state)
n_accept = 0
for t in range(n_samples):
# proposal
# Select two indices in [0...n_states). We draw them by drawing a
# random floats in [0,1) and then rounding to int so that this method
# is exactly analogous to `metzner_mcmc_fast`, which, for each MCMC
# iteration, draws 4 random floats in [0,1) from the same numpy PSRNG,
# and then inside the C step kernel (src/metzner_mcmc.c) uses two of
# them like this. This ensures that this function and
# `metzner_mcmc_fast` give _exactly_ the same sequence of transition
# matricies, given the same random seed.
i, j = (random.rand(2) * n_states).astype(np.int)
sc = np.sum(K)
if i == j:
a, b = max(-K[i,j], K_MINUS - sc), K_PLUS - sc
else:
a, b = max(-K[i,j], 0.5*(K_MINUS - sc)), 0.5*(K_PLUS - sc)
epsilon = random.uniform(a, b)
K_proposal = np.copy(K)
K_proposal[i, j] += epsilon
if i != j:
K_proposal[j, i] += epsilon
# acceptance?
cutoff = np.exp(_logprob_T(_K_to_T(K_proposal), Z) -
_logprob_T(_K_to_T(K), Z))
r = random.rand()
# print 'i', i, 'j', j
# print 'a', a, 'b', b
# print 'cutoff', cutoff
# print 'r', r
# print 'sc', sc
if r < cutoff:
n_accept += 1
K = K_proposal
if (t+1) % n_thin == 0:
yield _K_to_T(K) | 0.002735 |
def check_bad_data(raw_data, prepend_data_headers=None, trig_count=None):
"""Checking FEI4 raw data array for corrupted data.
"""
consecutive_triggers = 16 if trig_count == 0 else trig_count
is_fe_data_header = logical_and(is_fe_word, is_data_header)
trigger_idx = np.where(is_trigger_word(raw_data) >= 1)[0]
fe_dh_idx = np.where(is_fe_data_header(raw_data) >= 1)[0]
n_triggers = trigger_idx.shape[0]
n_dh = fe_dh_idx.shape[0]
# get index of the last trigger
if n_triggers:
last_event_data_headers_cnt = np.where(fe_dh_idx > trigger_idx[-1])[0].shape[0]
if consecutive_triggers and last_event_data_headers_cnt == consecutive_triggers:
if not np.all(trigger_idx[-1] > fe_dh_idx):
trigger_idx = np.r_[trigger_idx, raw_data.shape]
last_event_data_headers_cnt = None
elif last_event_data_headers_cnt != 0:
fe_dh_idx = fe_dh_idx[:-last_event_data_headers_cnt]
elif not np.all(trigger_idx[-1] > fe_dh_idx):
trigger_idx = np.r_[trigger_idx, raw_data.shape]
# if any data header, add trigger for histogramming, next readout has to have trigger word
elif n_dh:
trigger_idx = np.r_[trigger_idx, raw_data.shape]
last_event_data_headers_cnt = None
# no trigger, no data header
# assuming correct data, return input values
else:
return False, prepend_data_headers, n_triggers, n_dh
# # no triggers, check for the right amount of data headers
# if consecutive_triggers and prepend_data_headers and prepend_data_headers + n_dh != consecutive_triggers:
# return True, n_dh, n_triggers, n_dh
n_triggers_cleaned = trigger_idx.shape[0]
n_dh_cleaned = fe_dh_idx.shape[0]
# check that trigger comes before data header
if prepend_data_headers is None and n_triggers_cleaned and n_dh_cleaned and not trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
# check that no trigger comes before the first data header
elif consecutive_triggers and prepend_data_headers is not None and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
# check for two consecutive triggers
elif consecutive_triggers is None and prepend_data_headers == 0 and n_triggers_cleaned and n_dh_cleaned and trigger_idx[0] < fe_dh_idx[0]:
return True, last_event_data_headers_cnt, n_triggers, n_dh # FIXME: 0?
elif prepend_data_headers is not None:
trigger_idx += (prepend_data_headers + 1)
fe_dh_idx += (prepend_data_headers + 1)
# for histogramming add trigger at index 0
trigger_idx = np.r_[0, trigger_idx]
fe_dh_idx = np.r_[range(1, prepend_data_headers + 1), fe_dh_idx]
event_hist, bins = np.histogram(fe_dh_idx, trigger_idx)
if consecutive_triggers is None and np.any(event_hist == 0):
return True, last_event_data_headers_cnt, n_triggers, n_dh
elif consecutive_triggers and np.any(event_hist != consecutive_triggers):
return True, last_event_data_headers_cnt, n_triggers, n_dh
return False, last_event_data_headers_cnt, n_triggers, n_dh | 0.002451 |
def _ord_to_str(ordinal, weights):
"""Reverse function of _str_to_ord."""
chars = []
for weight in weights:
if ordinal == 0:
return "".join(chars)
ordinal -= 1
index, ordinal = divmod(ordinal, weight)
chars.append(_ALPHABET[index])
return "".join(chars) | 0.021201 |
def get_sanitize_files(self):
"""
Return list of all sanitize files provided by the user on the command line.
N.B.: We only support one sanitize file at the moment, but
this is likely to change in the future
"""
if self.parent.config.option.sanitize_with is not None:
return [self.parent.config.option.sanitize_with]
else:
return [] | 0.007143 |
def handle(self, *args, **options):
"""
With no arguments, find the first user in the system with the
is_superuser or is_staff flag set to true, or just the first user in
the system period.
With a single argument, look for the user with that value as the
USERNAME_FIELD value.
When a user is found, print out a URL slug you can paste into your
browser to login as the user.
"""
user_model = get_user_model()
if len(args) == 0:
# find the first superuser, or staff member or user
filters = [{"is_superuser": True}, {"is_staff": True}, {}]
user = None
for f in filters:
try:
user = user_model._default_manager.filter(**f).order_by("pk").first()
if user:
break
except FieldError as e:
pass
if user is None:
raise CommandError("No users found!")
elif len(args) == 1:
# find the user with the USERNAME_FIELD equal to the command line
# argument
try:
user = user_model._default_manager.get_by_natural_key(args[0])
except user_model.DoesNotExist as e:
raise CommandError("The user does not exist")
else:
raise CommandError("You passed me too many arguments")
signer = TimestampSigner()
signature = signer.sign(str(user.pk))
self.stdout.write(reverse(login, args=(signature,))) | 0.001889 |
def _MultiStream(cls, fds):
"""Method overriden by subclasses to optimize the MultiStream behavior."""
for fd in fds:
fd.Seek(0)
while True:
chunk = fd.Read(cls.MULTI_STREAM_CHUNK_SIZE)
if not chunk:
break
yield fd, chunk, None | 0.014235 |
def master_using_raster(mdf, raster, endpoint=False):
""" get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master
"""
if not raster:
master = np.array([], dtype='<f8')
else:
t_min = []
t_max = []
for i, group in enumerate(mdf.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr:
master_min = mdf.get_master(
i,
record_offset=0,
record_count=1,
)
if len(master_min):
t_min.append(master_min[0])
mdf._master_channel_cache.clear()
master_max = mdf.get_master(
i,
record_offset=cycles_nr-1,
record_count=1,
)
if len(master_max):
t_max.append(master_max[0])
mdf._master_channel_cache.clear()
if t_min:
t_min = np.amin(t_min)
t_max = np.amax(t_max)
num = float(np.float32((t_max - t_min) / raster))
if int(num) == num:
master = np.linspace(t_min, t_max, int(num) + 1)
else:
master = np.arange(t_min, t_max, raster)
if endpoint:
master = np.concatenate([master, [t_max]])
else:
master = np.array([], dtype='<f8')
return master | 0.000588 |
def filter_lines(input_file, output_file, translate=lambda line: line):
""" Translate all the lines of a single file """
filepath, lines = get_lines([input_file])[0]
return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines] | 0.007752 |
def dchisq(psr,formbats=False,renormalize=True):
"""Return gradient of total chisq for the current timing solution,
after removing noise-averaged mean residual, and ignoring deleted points."""
if formbats:
psr.formbats()
res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0]
res -= numpy.sum(res/err**2) / numpy.sum(1/err**2)
# bats already updated by residuals(); skip constant-phase column
M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[psr.deleted==0,1:]
# renormalize design-matrix columns
if renormalize:
norm = numpy.sqrt(numpy.sum(M**2,axis=0))
M /= norm
else:
norm = 1.0
# compute chisq derivative, de-renormalize
dr = -2 * numpy.dot(M.T,res / (1e-12 * err**2)) * norm
return dr | 0.01771 |
def release():
"""
Create a new release and upload it to PyPI.
"""
if not is_working_tree_clean():
print('Your working tree is not clean. Refusing to create a release.')
return
print('Rebuilding the AUTHORS file to check for modifications...')
authors()
if not is_working_tree_clean():
print('Your working tree is not clean after the AUTHORS file was '
'rebuilt.')
print('Please commit the changes before continuing.')
return
if not is_manifest_up_to_date():
print('Manifest is not up to date.')
print('Please update MANIFEST.in or remove spurious files.')
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print('----------------------')
print('Proceeding will tag the release, push the repository upstream,')
print('and release a new version on PyPI.')
print()
print('Version: {}'.format(version))
print('Tag message: {}'.format(tag_message))
print()
if not confirm('Continue?', default=True):
print('Aborting.')
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push --tags origin develop')
# Package and upload to pypi
local('python setup.py sdist bdist_wheel upload') | 0.000649 |
def consumer_group(self, group, keys, consumer=None):
"""
Create a named :py:class:`ConsumerGroup` instance for the given key(s).
:param group: name of consumer group
:param keys: stream identifier(s) to monitor. May be a single stream
key, a list of stream keys, or a key-to-minimum id mapping. The
minimum id for each stream should be considered an exclusive
lower-bound. The '$' value can also be used to only read values
added *after* our command started blocking.
:param consumer: name for consumer within group
:returns: a :py:class:`ConsumerGroup` instance
"""
return ConsumerGroup(self, group, keys, consumer=consumer) | 0.002706 |
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
    A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
"""
# Do 'public_key' and 'private_key' have the correct format?
# This check will ensure that the arguments conform to
# 'securesystemslib.formats.PEMECDSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
# Is 'private_key' properly formatted?
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
# 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
# if-clause isn't strictly needed. Nevertheless, the conditional statement
# is included to accommodate multiple schemes that can potentially be added
# in the future.
if scheme == 'ecdsa-sha2-nistp256':
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
except TypeError as e:
raise securesystemslib.exceptions.CryptoError('Could not create'
' signature: ' + str(e))
# A defensive check for an invalid 'scheme'. The
# ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
return signature, scheme | 0.010388 |
def attitude_encode(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed):
'''
The attitude in the aeronautical frame (right-handed, Z-down, X-front,
Y-right).
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
roll : Roll angle (rad, -pi..+pi) (float)
pitch : Pitch angle (rad, -pi..+pi) (float)
yaw : Yaw angle (rad, -pi..+pi) (float)
rollspeed : Roll angular speed (rad/s) (float)
pitchspeed : Pitch angular speed (rad/s) (float)
yawspeed : Yaw angular speed (rad/s) (float)
'''
return MAVLink_attitude_message(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed) | 0.006445 |
def make_structure_from_geos(geos):
'''Creates a structure out of a list of geometry objects.'''
model_structure=initialize_res(geos[0])
for i in range(1,len(geos)):
model_structure=add_residue(model_structure, geos[i])
return model_structure | 0.014981 |
def gaussian(N=1000, draw=True, show=True, seed=42, color=None, marker='sphere'):
"""Show N random gaussian distributed points using a scatter plot."""
import ipyvolume as ipv
rng = np.random.RandomState(seed) # pylint: disable=no-member
x, y, z = rng.normal(size=(3, N))
if draw:
if color:
mesh = ipv.scatter(x, y, z, marker=marker, color=color)
else:
mesh = ipv.scatter(x, y, z, marker=marker)
if show:
# ipv.squarelim()
ipv.show()
return mesh
else:
return x, y, z | 0.003442 |
def filter(self, func):
"""Returns a packet list filtered by a truth function. This truth
function has to take a packet as the only argument and return a boolean value.""" # noqa: E501
return self.__class__([x for x in self.res if func(x)],
name="filtered %s" % self.listname) | 0.006042 |
def items(self):
"""Settings as key-value pair.
"""
        return [(section, dict(self.conf.items(section, raw=True)))
                for section in self.conf.sections()]
def ssh_session(key_filename,
username,
ip_address,
*cli):
""" opens a ssh shell to the host """
local('ssh -t -i %s %s@%s %s' % (key_filename,
username,
ip_address,
"".join(chain.from_iterable(cli)))) | 0.002717 |
def getuserinfo(self, default=None, encoding='utf-8', errors='strict'):
"""Return the decoded userinfo subcomponent of the URI authority, or
`default` if the original URI reference did not contain a
userinfo field.
"""
userinfo = self.userinfo
if userinfo is None:
return default
else:
return uridecode(userinfo, encoding, errors) | 0.004866 |
def get_blob(self, index):
"""Return a blob with the event at the given index"""
self.log.info("Retrieving blob #{}".format(index))
if index > len(self.event_offsets) - 1:
self.log.info("Index not in cache, caching offsets")
self._cache_offsets(index, verbose=False)
self.blob_file.seek(self.event_offsets[index], 0)
blob = self._create_blob()
if blob is None:
self.log.info("Empty blob created...")
raise IndexError
else:
self.log.debug("Applying parsers...")
for parser in self.parsers:
parser(blob)
self.log.debug("Returning the blob")
return blob | 0.002786 |
def __get_numbered_paths(filepath):
"""Append numbers in sequential order to the filename or folder name
Numbers should be appended before the extension on a filename."""
format = '%s (%%d)%s' % splitext_files_only(filepath)
return map(lambda n: format % n, itertools.count(1)) | 0.017544 |
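A rough standalone sketch of the naming pattern, approximating splitext_files_only with os.path.splitext (an assumption; the real helper presumably leaves folder names un-split).

import itertools
import os

def numbered_paths_sketch(filepath):
    # Same pattern as above, with os.path.splitext standing in for
    # splitext_files_only. In Python 3 map() is lazy, so the infinite
    # itertools.count is fine.
    fmt = '%s (%%d)%s' % os.path.splitext(filepath)
    return map(lambda n: fmt % n, itertools.count(1))

candidates = numbered_paths_sketch('report.txt')
print([next(candidates) for _ in range(3)])
# ['report (1).txt', 'report (2).txt', 'report (3).txt']
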
def subscribe(self, handler):
"""Adds a new event handler."""
assert callable(handler), "Invalid handler %s" % handler
self.handlers.append(handler) | 0.011628 |
def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
Null or empty list will be removed.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
field_mapping = self.field_mapping_widget.get_field_mapping()
for k, v in list(field_mapping['values'].items()):
if not v:
field_mapping['values'].pop(k)
for k, v in list(field_mapping['fields'].items()):
if not v:
field_mapping['fields'].pop(k)
return field_mapping | 0.003185 |
def populate_readme(
version, circleci_build, appveyor_build, coveralls_build, travis_build
):
"""Populates ``README.rst`` with release-specific data.
This is because ``README.rst`` is used on PyPI.
Args:
version (str): The current version.
circleci_build (Union[str, int]): The CircleCI build ID corresponding
to the release.
appveyor_build (str): The AppVeyor build ID corresponding to the
release.
coveralls_build (Union[str, int]): The Coveralls.io build ID
corresponding to the release.
travis_build (int): The Travis CI build ID corresponding to
the release.
"""
with open(RELEASE_README_FILE, "r") as file_obj:
template = file_obj.read()
contents = template.format(
version=version,
circleci_build=circleci_build,
appveyor_build=appveyor_build,
coveralls_build=coveralls_build,
travis_build=travis_build,
)
with open(README_FILE, "w") as file_obj:
file_obj.write(contents) | 0.000943 |
def setDatasets(self, datasets):
"""
Sets the dataset list for this chart to the inputed data.
:param datasets | [<XChartDataset>, ..]
"""
self.clearDatasets()
self._datasets = datasets
for dataset in datasets:
self._addDatasetAction(dataset)
self._dataChanged = True
self.recalculate() | 0.012048 |
def parse_bytes_str(value):
"""
Given a value return the integer number of bytes it represents.
Trailing "MB" causes the value multiplied by 1024*1024
:param value:
:return: int number of bytes represented by value.
"""
if type(value) == str:
if "MB" in value:
return int(value.replace("MB", "")) * MB_TO_BYTES
else:
return int(value)
else:
return value | 0.004124 |
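A few example calls, assuming the module-level constant MB_TO_BYTES equals 1024 * 1024 (its definition is not shown above).

# Assuming MB_TO_BYTES == 1024 * 1024 in the surrounding module.
print(parse_bytes_str("2MB"))   # 2097152 (2 * 1024 * 1024)
print(parse_bytes_str("4096"))  # 4096    (digit strings are converted as-is)
print(parse_bytes_str(1024))    # 1024    (non-strings pass through unchanged)
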
def gen_toyn(f, nsample, ntoy, bound, accuracy=10000, quiet=True, **kwd):
"""
    Alias of gen_toy that draws nsample * ntoy samples and reshapes the result to (ntoy, nsample).
    :param f:
    :param nsample:
    :param ntoy:
:param bound:
:param accuracy:
:param quiet:
:param kwd:
:return:
"""
return gen_toy(f, nsample * ntoy, bound, accuracy, quiet, **kwd).reshape((ntoy, nsample)) | 0.005362 |
def _add_arguments(self):
"""Adds arguments to parser."""
self._parser.add_argument(
'-v', '--version',
action='store_true',
help="show program's version number and exit")
self._parser.add_argument(
'-a', '--alias',
nargs='?',
const=get_alias(),
help='[custom-alias-name] prints alias for current shell')
self._parser.add_argument(
'-l', '--shell-logger',
action='store',
help='log shell output to the file')
self._parser.add_argument(
'--enable-experimental-instant-mode',
action='store_true',
help='enable experimental instant mode, use on your own risk')
self._parser.add_argument(
'-h', '--help',
action='store_true',
help='show this help message and exit')
self._add_conflicting_arguments()
self._parser.add_argument(
'-d', '--debug',
action='store_true',
help='enable debug output')
self._parser.add_argument(
'--force-command',
action='store',
help=SUPPRESS)
self._parser.add_argument(
'command',
nargs='*',
help='command that should be fixed') | 0.001503 |
def _ParseFileData(self, knowledge_base, file_object):
"""Parses file content (data) for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
plist_file = plist.PlistFile()
try:
plist_file.Read(file_object)
except IOError as exception:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: {1!s}'.format(
self.ARTIFACT_DEFINITION_NAME, exception))
if not plist_file.root_key:
raise errors.PreProcessFail((
'Unable to read: {0:s} with error: missing root key').format(
self.ARTIFACT_DEFINITION_NAME))
matches = []
self._FindKeys(plist_file.root_key, self._PLIST_KEYS, matches)
if not matches:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: no such keys: {1:s}.'.format(
self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))
name = None
value = None
for name, value in matches:
if value:
break
if value is None:
raise errors.PreProcessFail((
'Unable to read: {0:s} with error: no values found for keys: '
'{1:s}.').format(
self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS)))
self._ParsePlistKeyValue(knowledge_base, name, value) | 0.004605 |
def interfaces(self) -> GraphQLInterfaceList:
"""Get provided interfaces."""
try:
interfaces = resolve_thunk(self._interfaces)
except GraphQLError:
raise
except Exception as error:
raise TypeError(f"{self.name} interfaces cannot be resolved: {error}")
if interfaces is None:
interfaces = []
if not isinstance(interfaces, (list, tuple)):
raise TypeError(
f"{self.name} interfaces must be a list/tuple"
" or a function which returns a list/tuple."
)
if not all(isinstance(value, GraphQLInterfaceType) for value in interfaces):
raise TypeError(f"{self.name} interfaces must be GraphQLInterface objects.")
return interfaces[:] | 0.006234 |
def load(self, mdl_file):
"""
        Load model from file. fv_type is not set by this function; it is expected to be set beforehand.
"""
import dill as pickle
mdl_file_e = op.expanduser(mdl_file)
sv = pickle.load(open(mdl_file_e, "rb"))
self.mdl = sv["mdl"]
# self.mdl[2] = self.mdl[0]
# try:
# eval(sv['fv_extern_src'])
# eval("fv_extern_temp_name = " + sv['fv_extern_src_name'])
# sv['fv_extern'] = fv_extern_temp_name
# except:
# print "pomoc,necoje blbe"
# pass
self.modelparams.update(sv["modelparams"])
logger.debug("loaded model from path: " + mdl_file_e) | 0.004208 |
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
serv = _get_serv(ret=None)
serv.setex('load:{0}'.format(jid), _get_ttl(), salt.utils.json.dumps(load)) | 0.004878 |
def as_tuple(self):
"""
:rtype: (str, object)
"""
if self._as_tuple is None:
            # next(iter(...)) works with both Python 2 lists and Python 3 dict views
            self._as_tuple = next(iter(self.converted.items()))
return self._as_tuple | 0.010363 |
def errdp(marker, number):
"""
Substitute a double precision number for the first occurrence of
a marker found in the current long error message.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errdp_c.html
:param marker: A substring of the error message to be replaced.
:type marker: str
:param number: The d.p. number to substitute for marker.
:type number: float
"""
marker = stypes.stringToCharP(marker)
number = ctypes.c_double(number)
libspice.errdp_c(marker, number) | 0.001883 |
def vm_profiles_config(path,
providers,
env_var='SALT_CLOUDVM_CONFIG',
defaults=None):
'''
Read in the salt cloud VM config file
'''
if defaults is None:
defaults = VM_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_vm_profiles_config(providers, overrides, defaults) | 0.001238 |
def fd_decompress(amp, phase, sample_frequencies, out=None, df=None,
f_lower=None, interpolation='inline_linear'):
"""Decompresses an FD waveform using the given amplitude, phase, and the
frequencies at which they are sampled at.
Parameters
----------
amp : array
The amplitude of the waveform at the sample frequencies.
phase : array
The phase of the waveform at the sample frequencies.
sample_frequencies : array
The frequency (in Hz) of the waveform at the sample frequencies.
out : {None, FrequencySeries}
The output array to save the decompressed waveform to. If this contains
slots for frequencies > the maximum frequency in sample_frequencies,
the rest of the values are zeroed. If not provided, must provide a df.
df : {None, float}
The frequency step to use for the decompressed waveform. Must be
provided if out is None.
f_lower : {None, float}
The frequency to start the decompression at. If None, will use whatever
the lowest frequency is in sample_frequencies. All values at
frequencies less than this will be 0 in the decompressed waveform.
interpolation : {'inline_linear', str}
The interpolation to use for the amplitude and phase. Default is
'inline_linear'. If 'inline_linear' a custom interpolater is used.
Otherwise, ``scipy.interpolate.interp1d`` is used; for other options,
see possible values for that function's ``kind`` argument.
Returns
-------
out : FrequencySeries
If out was provided, writes to that array. Otherwise, a new
FrequencySeries with the decompressed waveform.
"""
precision = _precision_map[sample_frequencies.dtype.name]
if _precision_map[amp.dtype.name] != precision or \
_precision_map[phase.dtype.name] != precision:
raise ValueError("amp, phase, and sample_points must all have the "
"same precision")
if out is None:
if df is None:
raise ValueError("Either provide output memory or a df")
hlen = int(numpy.ceil(sample_frequencies.max()/df+1))
out = FrequencySeries(numpy.zeros(hlen,
dtype=_complex_dtypes[precision]), copy=False,
delta_f=df)
else:
# check for precision compatibility
if out.precision == 'double' and precision == 'single':
raise ValueError("cannot cast single precision to double")
df = out.delta_f
hlen = len(out)
if f_lower is None:
imin = 0 # pylint:disable=unused-variable
f_lower = sample_frequencies[0]
start_index = 0
else:
if f_lower >= sample_frequencies.max():
raise ValueError("f_lower is > than the maximum sample frequency")
if f_lower < sample_frequencies.min():
raise ValueError("f_lower is < than the minimum sample frequency")
imin = int(numpy.searchsorted(sample_frequencies, f_lower,
side='right')) - 1 # pylint:disable=unused-variable
start_index = int(numpy.ceil(f_lower/df))
if start_index >= hlen:
raise ValueError('requested f_lower >= largest frequency in out')
# interpolate the amplitude and the phase
if interpolation == "inline_linear":
# Call the scheme-dependent function
inline_linear_interp(amp, phase, sample_frequencies, out,
df, f_lower, imin, start_index)
else:
# use scipy for fancier interpolation
sample_frequencies = numpy.array(sample_frequencies)
amp = numpy.array(amp)
phase = numpy.array(phase)
outfreq = out.sample_frequencies.numpy()
amp_interp = interpolate.interp1d(sample_frequencies, amp,
kind=interpolation,
bounds_error=False,
fill_value=0.,
assume_sorted=True)
phase_interp = interpolate.interp1d(sample_frequencies, phase,
kind=interpolation,
bounds_error=False,
fill_value=0.,
assume_sorted=True)
A = amp_interp(outfreq)
phi = phase_interp(outfreq)
out.data[:] = A*numpy.cos(phi) + (1j)*A*numpy.sin(phi)
return out | 0.001333 |
def Up(self, n = 1, dl = 0):
"""上方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.up_key, n) | 0.044118 |
def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument") | 0.004813 |
def best_prefix(self, system=None):
"""Optional parameter, `system`, allows you to prefer NIST or SI in
the results. By default, the current system is used (Bit/Byte default
to NIST).
Logic discussion/notes:
Base-case, does it need converting?
If the instance is less than one Byte, return the instance as a Bit
instance.
Else, begin by recording the unit system the instance is defined
by. This determines which steps (NIST_STEPS/SI_STEPS) we iterate over.
If the instance is not already a ``Byte`` instance, convert it to one.
NIST units step up by powers of 1024, SI units step up by powers of
1000.
Take integer value of the log(base=STEP_POWER) of the instance's byte
value. E.g.:
>>> int(math.log(Gb(100).bytes, 1000))
3
This will return a value >= 0. The following determines the 'best
prefix unit' for representation:
* result == 0, best represented as a Byte
* result >= len(SYSTEM_STEPS), best represented as an Exbi/Exabyte
* 0 < result < len(SYSTEM_STEPS), best represented as SYSTEM_PREFIXES[result-1]
"""
# Use absolute value so we don't return Bit's for *everything*
# less than Byte(1). From github issue #55
if abs(self) < Byte(1):
return Bit.from_other(self)
else:
if type(self) is Byte: # pylint: disable=unidiomatic-typecheck
_inst = self
else:
_inst = Byte.from_other(self)
# Which table to consult? Was a preferred system provided?
if system is None:
# No preference. Use existing system
if self.system == 'NIST':
_STEPS = NIST_PREFIXES
_BASE = 1024
elif self.system == 'SI':
_STEPS = SI_PREFIXES
_BASE = 1000
# Anything else would have raised by now
else:
# Preferred system provided.
if system == NIST:
_STEPS = NIST_PREFIXES
_BASE = 1024
elif system == SI:
_STEPS = SI_PREFIXES
_BASE = 1000
else:
raise ValueError("Invalid value given for 'system' parameter."
" Must be one of NIST or SI")
# Index of the string of the best prefix in the STEPS list
_index = int(math.log(abs(_inst.bytes), _BASE))
# Recall that the log() function returns >= 0. This doesn't
# map to the STEPS list 1:1. That is to say, 0 is handled with
# special care. So if the _index is 1, we actually want item 0
# in the list.
if _index == 0:
# Already a Byte() type, so return it.
return _inst
elif _index >= len(_STEPS):
# This is a really big number. Use the biggest prefix we've got
_best_prefix = _STEPS[-1]
elif 0 < _index < len(_STEPS):
# There is an appropriate prefix unit to represent this
_best_prefix = _STEPS[_index - 1]
_conversion_method = getattr(
self,
'to_%sB' % _best_prefix)
return _conversion_method() | 0.000637 |
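A usage sketch, assuming the surrounding library exposes Byte and the NIST/SI prefixed unit classes the docstring mentions.

# Assumes Byte, the prefixed unit classes, and the SI constant from the
# surrounding module are importable.
print(Byte(102400).best_prefix())            # 100 KiB (NIST is the default here)
print(Byte(10 ** 9).best_prefix(system=SI))  # 1.0 GB under the SI system
print(Byte(0.5).best_prefix())               # values under one Byte come back as Bit(4.0)
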
def add_synchronous_cb(self, cb):
'''
Add an expectation of a callback to release a synchronous transaction.
'''
if self.connection.synchronous or self._synchronous:
wrapper = SyncWrapper(cb)
self._pending_events.append(wrapper)
while wrapper._read:
# Don't check that the channel has been closed until after
# reading frames, in the case that this is processing a clean
# channel closed. If there's a protocol error during
# read_frames, this will loop back around and result in a
# channel closed exception.
if self.closed:
if self.close_info and \
len(self.close_info['reply_text']) > 0:
raise ChannelClosed(
"channel %d is closed: %s : %s",
self.channel_id,
self.close_info['reply_code'],
self.close_info['reply_text'])
raise ChannelClosed()
self.connection.read_frames()
return wrapper._result
else:
self._pending_events.append(cb) | 0.001596 |
def lasso_leftdown(self, event=None):
"""leftdown event handler for lasso mode"""
try:
self.report_leftdown(event=event)
except:
return
if event.inaxes:
# set lasso color
color='goldenrod'
cmap = getattr(self.conf, 'cmap', None)
if isinstance(cmap, dict):
cmap = cmap['int']
try:
if cmap is not None:
rgb = (int(i*255)^255 for i in cmap._lut[0][:3])
color = '#%02x%02x%02x' % tuple(rgb)
except:
pass
self.lasso = Lasso(event.inaxes, (event.xdata, event.ydata),
self.lassoHandler)
self.lasso.line.set_color(color) | 0.007653 |
def broadcast_tx(self, address, amount, secret, secondsecret=None, vendorfield=''):
"""broadcasts a transaction to the peerslist using ark-js library"""
peer = random.choice(self.PEERS)
park = Park(
peer,
4001,
constants.ARK_NETHASH,
'1.1.1'
)
return park.transactions().create(address, str(amount), vendorfield, secret, secondsecret) | 0.009434 |
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
'''Add a vod, with one input.
@param p_instance: the instance.
@param psz_name: the name of the new vod media.
@param psz_input: the input MRL.
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new vod.
@param psz_mux: the muxer of the vod media.
@return: 0 on success, -1 on error.
'''
f = _Cfunctions.get('libvlc_vlm_add_vod', None) or \
_Cfunction('libvlc_vlm_add_vod', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p)
return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux) | 0.006645 |
def Animation_setPaused(self, animations, paused):
"""
Function path: Animation.setPaused
Domain: Animation
Method name: setPaused
Parameters:
Required arguments:
'animations' (type: array) -> Animations to set the pause state of.
'paused' (type: boolean) -> Paused state to set to.
No return value.
Description: Sets the paused state of a set of animations.
"""
assert isinstance(animations, (list, tuple)
), "Argument 'animations' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
animations)
assert isinstance(paused, (bool,)
), "Argument 'paused' must be of type '['bool']'. Received type: '%s'" % type(
paused)
subdom_funcs = self.synchronous_command('Animation.setPaused', animations
=animations, paused=paused)
return subdom_funcs | 0.044471 |
def setup(working_dir, interactive=False):
"""
Do one-time initialization.
Call this to set up global state.
"""
# set up our implementation
log.debug("Working dir: {}".format(working_dir))
if not os.path.exists( working_dir ):
os.makedirs( working_dir, 0700 )
node_config = load_configuration(working_dir)
if node_config is None:
sys.exit(1)
log.debug("config\n{}".format(json.dumps(node_config, indent=4, sort_keys=True)))
return node_config | 0.011881 |
def tags(self):
'Return a thread local :class:`dossier.web.Tags` client.'
if self._tags is None:
config = global_config('dossier.tags')
self._tags = self.create(Tags, config=config)
return self._tags | 0.008097 |
def find_hass_config():
"""Try to find HASS config."""
if "HASSIO_TOKEN" in os.environ:
return "/config"
config_dir = default_hass_config_dir()
if os.path.isdir(config_dir):
return config_dir
raise ValueError(
"Unable to automatically find the location of Home Assistant "
"config. Please pass it in."
) | 0.002762 |
def _reconstruct(keyvals, dialect, keep_order=False,
sort_attribute_values=False):
"""
Reconstructs the original attributes string according to the dialect.
Parameters
==========
keyvals : dict
Attributes from a GFF/GTF feature
dialect : dict
Dialect containing info on how to reconstruct a string version of the
attributes
keep_order : bool
If True, then perform sorting of attribute keys to ensure they are in
the same order as those provided in the original file. Default is
False, which saves time especially on large data sets.
sort_attribute_values : bool
If True, then sort values to ensure they will always be in the same
order. Mostly only useful for testing; default is False.
"""
if not dialect:
raise AttributeStringError()
if not keyvals:
return ""
parts = []
# Re-encode when reconstructing attributes
if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3':
attributes = keyvals
else:
attributes = {}
for k, v in keyvals.items():
attributes[k] = []
for i in v:
attributes[k].append(''.join([quoter[j] for j in i]))
# May need to split multiple values into multiple key/val pairs
if dialect['repeated keys']:
items = []
for key, val in attributes.items():
if len(val) > 1:
for v in val:
items.append((key, [v]))
else:
items.append((key, val))
else:
items = list(attributes.items())
def sort_key(x):
# sort keys by their order in the dialect; anything not in there will
# be in arbitrary order at the end.
try:
return dialect['order'].index(x[0])
except ValueError:
return 1e6
if keep_order:
items.sort(key=sort_key)
for key, val in items:
# Multival sep is usually a comma:
if val:
if sort_attribute_values:
val = sorted(val)
val_str = dialect['multival separator'].join(val)
if val_str:
# Surround with quotes if needed
if dialect['quoted GFF2 values']:
val_str = '"%s"' % val_str
# Typically "=" for GFF3 or " " otherwise
part = dialect['keyval separator'].join([key, val_str])
else:
if dialect['fmt'] == 'gtf':
part = dialect['keyval separator'].join([key, '""'])
else:
part = key
parts.append(part)
# Typically ";" or "; "
parts_str = dialect['field separator'].join(parts)
# Sometimes need to add this
if dialect['trailing semicolon']:
parts_str += ';'
return parts_str | 0.000347 |
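As an illustration of the dialect fields read above, here is a hypothetical GFF3-style attribute dict and dialect dict; the key names mirror those accessed in _reconstruct, and the expected output is noted in a comment (a sketch, not part of the original library).
# Hypothetical inputs for _reconstruct(), built only from the dialect keys used above.
keyvals = {'ID': ['gene1'], 'Name': ['abc-1']}
dialect = {
    'fmt': 'gff3',
    'repeated keys': False,
    'order': ['ID', 'Name'],
    'multival separator': ',',
    'quoted GFF2 values': False,
    'keyval separator': '=',
    'field separator': ';',
    'trailing semicolon': False,
}
# With keep_order=True this dialect would reconstruct the string "ID=gene1;Name=abc-1".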
def get_payload(self):
"""Return Payload."""
payload = bytes([self.node_id])
payload += bytes([self.state])
payload += bytes(self.current_position.raw)
payload += bytes(self.target.raw)
payload += bytes(self.current_position_fp1.raw)
payload += bytes(self.current_position_fp2.raw)
payload += bytes(self.current_position_fp3.raw)
payload += bytes(self.current_position_fp4.raw)
payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255])
payload += struct.pack(">I", self.timestamp)
return payload | 0.00491 |
def run(dest, router, args, deadline=None, econtext=None):
"""
    Run the command specified by `args` such that any ``PATH`` search for SSH
    performed by the command causes its attempt to use SSH to execute a remote
    program to be redirected to use Mitogen to execute that program using the
    context `dest` instead.
    :param list args:
        Command line arguments for the local program, e.g.
        ``['rsync', '/tmp', 'remote:/tmp']``
    :param mitogen.core.Context dest:
        The destination context to execute the SSH command line in.
    :param mitogen.core.Router router:
:returns:
Exit status of the child process.
"""
if econtext is not None:
mitogen.parent.upgrade_router(econtext)
context_id = router.allocate_id()
fakessh = mitogen.parent.Context(router, context_id)
fakessh.name = u'fakessh.%d' % (context_id,)
sock1, sock2 = socket.socketpair()
stream = mitogen.core.Stream(router, context_id)
stream.name = u'fakessh'
stream.accept(sock1.fileno(), sock1.fileno())
router.register(fakessh, stream)
# Held in socket buffer until process is booted.
fakessh.call_async(_fakessh_main, dest.context_id)
tmp_path = tempfile.mkdtemp(prefix='mitogen_fakessh')
try:
ssh_path = os.path.join(tmp_path, 'ssh')
fp = open(ssh_path, 'w')
try:
fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),))
fp.write(inspect.getsource(mitogen.core))
fp.write('\n')
fp.write('ExternalContext(%r).main()\n' % (
_get_econtext_config(context, sock2),
))
finally:
fp.close()
os.chmod(ssh_path, int('0755', 8))
env = os.environ.copy()
env.update({
'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')),
'ARGV0': mitogen.parent.get_sys_executable(),
'SSH_PATH': ssh_path,
})
proc = subprocess.Popen(args, env=env)
return proc.wait()
finally:
shutil.rmtree(tmp_path) | 0.000473 |
def request(schema):
"""
Decorate a function with a request schema.
"""
def wrapper(func):
setattr(func, REQUEST, schema)
return func
return wrapper | 0.005405 |
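A minimal usage sketch of the decorator above; the schema value is a placeholder, and the module-level REQUEST constant it relies on is assumed rather than shown here.
# Hypothetical usage: attach a request schema to a handler function.
@request({'type': 'object', 'properties': {'name': {'type': 'string'}}})
def create_user(data):
    ...
# The schema is then retrievable via getattr(create_user, REQUEST).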
def write_header(self, chunk):
"""Write to header.
        Note: the header stream can only be written to before the body is written.
:param chunk: content to write to header
:except TChannelError:
Raise TChannelError if the response's flush() has been called
"""
if self.serializer:
header = self.serializer.serialize_header(chunk)
else:
header = chunk
if self.flushed:
raise TChannelError("write operation invalid after flush call")
if (self.argstreams[0].state != StreamState.completed and
self.argstreams[0].auto_close):
self.argstreams[0].close()
return self.argstreams[1].write(header) | 0.002699 |
async def executor(func, *args, **kwargs):
'''
Execute a function in an executor thread.
    Args:
        func: The callable to run in the executor thread.
        *args: Positional arguments passed to ``func``.
        **kwargs: Keyword arguments passed to ``func``.
'''
def syncfunc():
return func(*args, **kwargs)
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, syncfunc) | 0.003185 |
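A small usage sketch for the helper above: a blocking call is handed off to the default thread-pool executor so the event loop stays responsive.
# Hypothetical usage of executor(): run a blocking function off the event loop.
import asyncio
import time

async def main():
    await executor(time.sleep, 0.1)   # time.sleep would otherwise block the loop

asyncio.run(main())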
def _deep_type(obj, checked, checked_len, depth = None, max_sample = None, get_type = None):
"""checked_len allows to operate with a fake length for checked.
This is necessary to ensure that each depth level operates based
on the same checked list subset. Otherwise our recursion detection
mechanism can fall into false-positives.
"""
if depth is None:
depth = pytypes.default_typecheck_depth
if max_sample is None:
max_sample = pytypes.deep_type_samplesize
if -1 != max_sample < 2:
max_sample = 2
if get_type is None:
get_type = type
try:
res = obj.__orig_class__
except AttributeError:
res = get_type(obj)
if depth == 0 or util._is_in(obj, checked[:checked_len]):
return res
elif not util._is_in(obj, checked[checked_len:]):
checked.append(obj)
# We must operate with a consistent checked list for one certain depth level
# to avoid issues with a list, tuple, dict, etc containing the same element
# multiple times. This could otherwise be misconcepted as a recursion.
# Using a fake len checked_len2 ensures this. Each depth level operates with
# a common fake length of checked list:
checked_len2 = len(checked)
if res == tuple:
res = Tuple[tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj)]
elif res == list:
if len(obj) == 0:
return Empty[List]
if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:
tpl = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj)
else:
# In case of lists I somehow feel it's better to ensure that
# first and last element are part of the sample
sample = [0, len(obj)-1]
try:
rsmp = random.sample(xrange(1, len(obj)-1), max_sample-2)
except NameError:
rsmp = random.sample(range(1, len(obj)-1), max_sample-2)
sample.extend(rsmp)
tpl = tuple(_deep_type(obj[t], checked, checked_len2, depth-1, None, get_type) for t in sample)
res = List[Union[tpl]]
elif res == dict:
if len(obj) == 0:
return Empty[Dict]
if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:
try:
# We prefer a view (avoid copy)
tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) \
for t in obj.viewkeys())
tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) \
for t in obj.viewvalues())
except AttributeError:
# Python 3 gives views like this:
tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj.keys())
tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in obj.values())
else:
try:
kitr = iter(obj.viewkeys())
vitr = iter(obj.viewvalues())
except AttributeError:
kitr = iter(obj.keys())
vitr = iter(obj.values())
ksmpl = []
vsmpl = []
block = (len(obj) // max_sample)-1
# I know this method has some bias towards beginning of iteration
# sequence, but it's still more random than just taking the
# initial sample and better than O(n) random.sample.
while len(ksmpl) < max_sample:
if block > 0:
j = random.randint(0, block)
k = random.randint(0, block)
while j > 0:
next(vitr) # discard
j -= 1
while k > 0:
next(kitr) # discard
k -= 1
ksmpl.append(next(kitr))
vsmpl.append(next(vitr))
tpl1 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in ksmpl)
tpl2 = tuple(_deep_type(t, checked, checked_len2, depth-1, None, get_type) for t in vsmpl)
res = Dict[Union[tpl1], Union[tpl2]]
elif res == set or res == frozenset:
if res == set:
typ = Set
else:
typ = FrozenSet
if len(obj) == 0:
return Empty[typ]
if max_sample == -1 or max_sample >= len(obj)-1 or len(obj) <= 2:
tpl = tuple(_deep_type(t, checked, depth-1, None, None, get_type) for t in obj)
else:
itr = iter(obj)
smpl = []
block = (len(obj) // max_sample)-1
# I know this method has some bias towards beginning of iteration
# sequence, but it's still more random than just taking the
# initial sample and better than O(n) random.sample.
while len(smpl) < max_sample:
if block > 0:
j = random.randint(0, block)
while j > 0:
next(itr) # discard
j -= 1
smpl.append(next(itr))
tpl = tuple(_deep_type(t, checked, depth-1, None, None, get_type) for t in smpl)
res = typ[Union[tpl]]
elif res == types.GeneratorType:
res = get_generator_type(obj)
elif sys.version_info.major == 2 and isinstance(obj, types.InstanceType):
# For old-style instances return the actual class:
return obj.__class__
elif _has_base(res, Container) and len(obj) == 0:
return Empty[res]
elif hasattr(res, '__origin__') and _has_base(res.__origin__, Container) and len(obj) == 0:
return Empty[res.__origin__]
return res | 0.005172 |
def lsumdiffsquared(x,y):
"""
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds | 0.006431 |
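Worked example: lsumdiffsquared([1, 2, 3], [0, 2, 5]) computes (1-0)**2 + (2-2)**2 + (3-5)**2 = 1 + 0 + 4 = 5.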
def sample_posterior(x,post,nsamples=1):
""" Returns nsamples from a tabulated posterior (not necessarily normalized)
"""
cdf = post.cumsum()
cdf /= cdf.max()
u = rand.random(size=nsamples)
inds = np.digitize(u,cdf)
return x[inds] | 0.01938 |
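A usage sketch, assuming numpy is available and `rand` in the snippet refers to numpy.random: draw samples from an unnormalized Gaussian posterior tabulated on a grid.
# Hypothetical usage of sample_posterior(): inverse-CDF sampling from a tabulated posterior.
import numpy as np
x = np.linspace(-5, 5, 1001)
post = np.exp(-0.5 * x**2)                         # unnormalized posterior on the grid
draws = sample_posterior(x, post, nsamples=200)    # approximately standard-normal samples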
def update_custom_service_account(self, account, nickname, password):
"""
        Update a customer service account.
        :param account: username of the customer service account
        :param nickname: nickname of the customer service account
        :param password: password of the customer service account
        :return: the returned JSON data packet
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/update",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
) | 0.004016 |
def set_status(self, instance, status):
"""Sets the field status for up to 5 minutes."""
status_key = self.get_status_key(instance)
cache.set(status_key, status, timeout=300) | 0.010101 |
def _read(self):
"""Get next packet from transport.
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple`
"""
raw_response = self.transport.receive()
response = Packet.parse(raw_response)
# FIXME
if response.response_type == Packet.EVENT and response.event_type == "log":
# queue up any debug log messages, and get next
self.log_events.append(response)
# do something?
self._read()
else:
return response | 0.005085 |
def do_rating_by_request(parser, token):
"""
Retrieves the ``Vote`` cast by a user on a particular object and
stores it in a context variable. If the user has not voted, the
context variable will be 0.
Example usage::
{% rating_by_request request on instance as vote %}
"""
bits = token.contents.split()
if len(bits) != 6:
raise template.TemplateSyntaxError("'%s' tag takes exactly five arguments" % bits[0])
if bits[2] != 'on':
raise template.TemplateSyntaxError("second argument to '%s' tag must be 'on'" % bits[0])
if bits[4] != 'as':
raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0])
return RatingByRequestNode(bits[1], bits[3], bits[5]) | 0.009079 |
def seoify_hyperlink(hyperlink):
"""Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from """
last_slash = hyperlink.rfind('/')
return re.sub(r' +|-', ' ', hyperlink[last_slash + 1:]) | 0.003268 |
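For example, seoify_hyperlink('https://example.com/blog/my-seo-friendly-title') returns 'my seo friendly title': everything after the last slash is kept, and hyphens and runs of spaces are collapsed to single spaces.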
def _lookup_key_parse(table_keys):
"""Return the order in which the stacks should be executed.
Args:
dependencies (dict): a dictionary where each key should be the
fully qualified name of a stack whose value is an array of
fully qualified stack names that the stack depends on. This is
used to generate the order in which the stacks should be
executed.
Returns:
dict: includes a dict of lookup types with data types ('new_keys')
and a list of the lookups with without ('clean_table_keys')
"""
# we need to parse the key lookup passed in
    regex_matcher = r'\[([^\]]+)]'
valid_dynamodb_datatypes = ['M', 'S', 'N', 'L']
clean_table_keys = []
new_keys = []
for key in table_keys:
match = re.search(regex_matcher, key)
if match:
# the datatypes are pulled from the dynamodb docs
if match.group(1) in valid_dynamodb_datatypes:
match_val = str(match.group(1))
key = key.replace(match.group(0), '')
new_keys.append({match_val: key})
clean_table_keys.append(key)
else:
raise ValueError(
('Stacker does not support looking up the datatype: {}')
.format(str(match.group(1))))
else:
new_keys.append({'S': key})
clean_table_keys.append(key)
key_dict = {}
key_dict['new_keys'] = new_keys
key_dict['clean_table_keys'] = clean_table_keys
return key_dict | 0.001901 |
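A worked example of the parsing above, using hypothetical keys: one key carries an explicit DynamoDB datatype annotation, the other falls back to the string type.
result = _lookup_key_parse(['[N]customer_id', 'name'])
# result == {'new_keys': [{'N': 'customer_id'}, {'S': 'name'}],
#            'clean_table_keys': ['customer_id', 'name']}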
def nfilter4(consens, hidx, arrayed):
""" applies max haplotypes filter returns pass and consens"""
## if less than two Hs then there is only one allele
if len(hidx) < 2:
return consens, 1
## store base calls for hetero sites
harray = arrayed[:, hidx]
## remove any reads that have N or - base calls at hetero sites
## these cannot be used when calling alleles currently.
harray = harray[~np.any(harray == "-", axis=1)]
harray = harray[~np.any(harray == "N", axis=1)]
## get counts of each allele (e.g., AT:2, CG:2)
ccx = Counter([tuple(i) for i in harray])
## Two possibilities we would like to distinguish, but we can't. Therefore,
## we just throw away low depth third alleles that are within seq. error.
## 1) a third base came up as a sequencing error but is not a unique allele
## 2) a third or more unique allele is there but at low frequency
## remove low freq alleles if more than 2, since they may reflect
## sequencing errors at hetero sites, making a third allele, or a new
## allelic combination that is not real.
if len(ccx) > 2:
totdepth = harray.shape[0]
cutoff = max(1, totdepth // 10)
alleles = [i for i in ccx if ccx[i] > cutoff]
else:
alleles = ccx.keys()
## how many high depth alleles?
nalleles = len(alleles)
## if 2 alleles then save the phase using lowercase coding
if nalleles == 2:
try:
consens = storealleles(consens, hidx, alleles)
except (IndexError, KeyError):
## the H sites do not form good alleles
LOGGER.info("failed at phasing loc, skipping")
LOGGER.info("""
consens %s
hidx %s
alleles %s
""", consens, hidx, alleles)
return consens, nalleles
## just return the info for later filtering
else:
return consens, nalleles | 0.008887 |
def translations_lists(self):
'''Iterator over lists of content translations'''
return (getattr(self.generator, name) for name in
self.info.get('translations_lists', [])) | 0.009901 |
def find(pattern, root=os.curdir):
'''Helper around 'locate' '''
hits = ''
for F in locate(pattern, root):
hits = hits + F + '\n'
l = hits.split('\n')
if(not len(l[-1])): l.pop()
if len(l) == 1 and not len(l[0]):
return None
else:
return l | 0.010309 |
def _auth_key(nonce, username, password):
"""Get an auth key to use for authentication.
"""
digest = _password_digest(username, password)
md5hash = hashlib.md5()
data = "%s%s%s" % (nonce, username, digest)
md5hash.update(data.encode('utf-8'))
return _unicode(md5hash.hexdigest()) | 0.003257 |
def post(self, request):
"""
Save the provided data using the class' serializer.
Args:
request:
The request being made.
Returns:
An ``APIResponse`` instance. If the request was successful
the response will have a 200 status code and contain the
serializer's data. Otherwise a 400 status code and the
request's errors will be returned.
"""
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | 0.002829 |
def handle_integrity_error(cls, exception):
"""Handle integrity error exceptions."""
m = re.match(cls.MYSQL_INSERT_ERROR_REGEX,
exception.statement)
if not m:
raise exception
model = find_model_by_table_name(m.group('table'))
if not model:
raise exception
m = re.match(cls.MYSQL_DUPLICATE_ENTRY_ERROR_REGEX,
exception.orig.args[1])
if not m:
raise exception
entity = model.__name__
eid = m.group('value')
raise AlreadyExistsError(entity=entity, eid=eid) | 0.003231 |
def crop_or_pad(im, size, value=0):
"""
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
        Final size after cropping.
"""
diff = [im.shape[index] - size[index] for index in (0, 1)]
im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]]
return im2 | 0.002907 |
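A quick usage sketch, assuming a NumPy image array: the crop is taken symmetrically around the center.
# Hypothetical usage of crop_or_pad(): center-crop a 100x80 RGB image to 60x40.
import numpy as np
im = np.zeros((100, 80, 3))
cropped = crop_or_pad(im, (60, 40))
assert cropped.shape[:2] == (60, 40)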
def send_music_message(
self,
user_id,
url,
hq_url,
thumb_media_id,
title=None,
description=None,
kf_account=None
):
"""
        Send a music message.
        Note: if the thumbnail does not display correctly, don't panic; this
        currently appears to be a problem on WeChat's server side, and there is
        nothing we can do about it ( `#197 <https://github.com/whtsky/WeRoBot/issues/197>`_ )
        :param user_id: user ID, i.e. the source of the `Message` you received
        :param url: music URL
        :param hq_url: high-quality music URL, used preferentially when playing over Wi-Fi
        :param thumb_media_id: media ID of the thumbnail; it can be uploaded via :func:`upload_media`
        :param title: music title
        :param description: music description
        :param kf_account: customer service account to send the message from; defaults to None, meaning none is specified
        :return: the returned JSON data packet
"""
music_data = {
"musicurl": url,
"hqmusicurl": hq_url,
"thumb_media_id": thumb_media_id
}
if title:
music_data["title"] = title
if description:
music_data["description"] = description
data = {"touser": user_id, "msgtype": "music", "music": music_data}
if kf_account is not None:
data['customservice'] = {'kf_account': kf_account}
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data=data
) | 0.002335 |
def forget(self,rs):
"""
Remove a room from the list of managed rooms.
:Parameters:
- `rs`: the state object of the room.
:Types:
- `rs`: `MucRoomState`
"""
try:
del self.rooms[rs.room_jid.bare().as_unicode()]
except KeyError:
pass | 0.008929 |
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
"""
Try, in order:
- The key passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key.)
The password is required for two-factor authentication.
"""
saved_exception = None
two_factor = False
allowed_types = []
if pkey is not None:
try:
self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
allowed_types = self._transport.auth_publickey(username, pkey)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
except SSHException, e:
saved_exception = e
if not two_factor:
for key_filename in key_filenames:
for pkey_class in (RSAKey, DSSKey):
try:
key = pkey_class.from_private_key_file(key_filename, password)
self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
if not two_factor and allow_agent:
if self._agent == None:
self._agent = Agent()
for key in self._agent.get_keys():
try:
self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
if not two_factor:
keyfiles = []
rsa_key = os.path.expanduser('~/.ssh/id_rsa')
dsa_key = os.path.expanduser('~/.ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
# look in ~/ssh/ for windows users:
rsa_key = os.path.expanduser('~/ssh/id_rsa')
dsa_key = os.path.expanduser('~/ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
if not look_for_keys:
keyfiles = []
for pkey_class, filename in keyfiles:
try:
key = pkey_class.from_private_key_file(filename, password)
self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
except IOError, e:
saved_exception = e
if password is not None:
try:
self._transport.auth_password(username, password)
return
except SSHException, e:
saved_exception = e
elif two_factor:
raise SSHException('Two-factor authentication requires a password')
# if we got an auth-failed exception earlier, re-raise it
if saved_exception is not None:
raise saved_exception
raise SSHException('No authentication methods available') | 0.003794 |
def wrap_viscm(cmap, dpi=100, saveplot=False):
'''Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not.
'''
from viscm import viscm
viscm(cmap)
fig = plt.gcf()
fig.set_size_inches(22, 10)
plt.show()
if saveplot:
fig.savefig('figures/eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)
fig.savefig('figures/eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi) | 0.005338 |