| text (string, lengths 78–104k) | score (float64, 0–0.18) |
|---|---|
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
"""Set standardized axis formatting for figure."""
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize) | 0.00149 |
def process_post_tags(self, bulk_mode, api_post, post_tags):
"""
Create or update Tags related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the post
:param post_tags: a mapping of Tags keyed by post ID
:return: None
"""
post_tags[api_post["ID"]] = []
for api_tag in six.itervalues(api_post["tags"]):
tag = self.process_post_tag(bulk_mode, api_tag)
if tag:
post_tags[api_post["ID"]].append(tag) | 0.005119 |
def add_node(self, node_id, name, labels):
""" Add a node to the graph with name and labels.
Args:
node_id: the unique node_id e.g. 'www.evil4u.com'
name: the display name of the node e.g. 'evil4u'
labels: a list of labels e.g. ['domain','evil']
Returns:
Nothing
"""
self.neo_db.add_node(node_id, name, labels) | 0.004739 |
def load_data(path, dense=False):
"""Load data from a CSV, LibSVM or HDF5 file based on the file extension.
Args:
path (str): A path to the CSV, LibSVM or HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y
"""
catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
ext = os.path.splitext(path)[1]
func = catalog[ext]
X, y = func(path)
if dense and sparse.issparse(X):
X = X.todense()
return X, y | 0.003044 |
def _get_grammar_errors(self,pos,text,tokens):
"""
Internal function to get the number of grammar errors in given text
pos - part of speech tagged text (list)
text - normal text (list)
tokens - list of lists of tokenized text
"""
word_counts = [max(len(t),1) for t in tokens]
good_pos_tags = []
min_pos_seq=2
max_pos_seq=4
bad_pos_positions=[]
for i in xrange(0, len(text)):
pos_seq = [tag[1] for tag in pos[i]]
pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)
long_pos_ngrams=[z for z in pos_ngrams if z.count(' ')==(max_pos_seq-1)]
bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(0,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]
bad_pos_tuples.sort(key=operator.itemgetter(1))
to_delete=[]
for m in reversed(xrange(len(bad_pos_tuples)-1)):
start, end = bad_pos_tuples[m]
for j in xrange(m+1, len(bad_pos_tuples)):
lstart, lend = bad_pos_tuples[j]
if lstart >= start and lstart <= end:
bad_pos_tuples[m][1]=bad_pos_tuples[j][1]
to_delete.append(j)
fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(0,len(bad_pos_tuples)) if z not in to_delete]
bad_pos_positions.append(fixed_bad_pos_tuples)
overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]
if (len(pos_ngrams)-len(overlap_ngrams))>0:
divisor=len(pos_ngrams)/len(pos_seq)
else:
divisor=1
if divisor == 0:
divisor=1
good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor
good_pos_tags.append(good_grammar_ratio)
return good_pos_tags,bad_pos_positions | 0.014418 |
def _cast(self, _input, _output):
"""
        Transforms a pair of input/output into the real output.
:param _input: Bag
:param _output: mixed
:return: Bag
"""
if isenvelope(_output):
_output, _flags, _options = _output.unfold()
else:
_flags, _options = [], {}
if len(_flags):
# TODO: parse flags to check constraints are respected (like not modified alone, etc.)
if F_NOT_MODIFIED in _flags:
return _input
if F_INHERIT in _flags:
if self._output_type is None:
self._output_type = concat_types(
self._input_type, self._input_length, self._output_type, len(_output)
)
_output = _input + ensure_tuple(_output)
if not self._output_type:
if issubclass(type(_output), tuple):
self._output_type = type(_output)
return ensure_tuple(_output, cls=self._output_type) | 0.00381 |
def moist_amplification_factor(Tkelvin, relative_humidity=0.8):
'''Compute the moisture amplification factor for the moist diffusivity
given relative humidity and reference temperature profile.'''
deltaT = 0.01
# slope of saturation specific humidity at 1000 hPa
dqsdTs = (qsat(Tkelvin+deltaT/2, 1000.) - qsat(Tkelvin-deltaT/2, 1000.)) / deltaT
return const.Lhvap / const.cp * relative_humidity * dqsdTs | 0.004673 |
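The slope dqs/dT above is taken with a centered finite difference over a small interval deltaT. Below is a minimal standalone sketch of that stencil, using a plain polynomial in place of the qsat helper used above so the exact slope is known; the names here are illustrative only.
# Hypothetical check of the centered-difference stencil used above,
# with f(T) = T**3 standing in for qsat so the exact slope (3*T**2) is known.
deltaT = 0.01
T = 290.0
approx_slope = ((T + deltaT / 2) ** 3 - (T - deltaT / 2) ** 3) / deltaT
exact_slope = 3 * T ** 2   # approx_slope matches this to within deltaT**2 / 4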
def select_links(xmrs, start=None, end=None, rargname=None, post=None):
"""
Return the list of matching links for *xmrs*.
:class:`~delphin.mrs.components.Link` objects for *xmrs* match if
their `start` matches *start*, `end` matches *end*, `rargname`
matches *rargname*, and `post` matches *post*. The *start*, *end*,
*rargname*, and *post* filters are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
start (optional): link start nodeid to match
end (optional): link end nodeid to match
rargname (str, optional): role name to match
post (str, optional): Link post-slash label to match
Returns:
list: matching links
"""
linkmatch = lambda l: (
(start is None or l.start == start) and
(end is None or l.end == end) and
(rargname is None or l.rargname == rargname) and
(post is None or l.post == post))
return list(filter(linkmatch, links(xmrs))) | 0.001932 |
def find(self):
"""Find slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show()
editorstack.find_widget.search_text.setFocus() | 0.010526 |
def getAccessURL(self, CorpNum, UserID):
""" ํ๋น ๋ก๊ทธ์ธ URL
args
CorpNum : ํ์ ์ฌ์
์๋ฒํธ
UserID : ํ์ ํ๋น์์ด๋
return
30์ด ๋ณด์ ํ ํฐ์ ํฌํจํ url
raise
PopbillException
"""
result = self._httpget('/?TG=LOGIN', CorpNum, UserID)
return result.url | 0.005634 |
def _state_removal_solve(self):
"""The State Removal Operation"""
initial = sorted(
self.mma.states,
key=attrgetter('initial'),
reverse=True)[0].stateid
for state_k in self.mma.states:
if state_k.final:
continue
if state_k.stateid == initial:
continue
self._state_removal_remove(state_k.stateid)
print self.l_transitions
return self.l_transitions | 0.004073 |
def send_discovery_packet(self):
""" Send discovery packet for QTM to respond to """
if self.port is None:
return
self.transport.sendto(
QRTDiscoveryP1.pack(
QRTDiscoveryPacketSize, QRTPacketType.PacketDiscover.value
)
+ QRTDiscoveryP2.pack(self.port),
("<broadcast>", 22226),
) | 0.005168 |
async def start(self, remoteParameters):
"""
Start DTLS transport negotiation with the parameters of the remote
DTLS transport.
:param: remoteParameters: An :class:`RTCDtlsParameters`.
"""
assert self._state == State.NEW
assert len(remoteParameters.fingerprints)
if self.transport.role == 'controlling':
self._role = 'server'
lib.SSL_set_accept_state(self.ssl)
else:
self._role = 'client'
lib.SSL_set_connect_state(self.ssl)
self._set_state(State.CONNECTING)
try:
while not self.encrypted:
result = lib.SSL_do_handshake(self.ssl)
await self._write_ssl()
if result > 0:
self.encrypted = True
break
error = lib.SSL_get_error(self.ssl, result)
if error == lib.SSL_ERROR_WANT_READ:
await self._recv_next()
else:
self.__log_debug('x DTLS handshake failed (error %d)', error)
for info in get_error_queue():
self.__log_debug('x %s', ':'.join(info))
self._set_state(State.FAILED)
return
except ConnectionError:
self.__log_debug('x DTLS handshake failed (connection error)')
self._set_state(State.FAILED)
return
# check remote fingerprint
x509 = lib.SSL_get_peer_certificate(self.ssl)
remote_fingerprint = certificate_digest(x509)
fingerprint_is_valid = False
for f in remoteParameters.fingerprints:
if f.algorithm.lower() == 'sha-256' and f.value.lower() == remote_fingerprint.lower():
fingerprint_is_valid = True
break
if not fingerprint_is_valid:
self.__log_debug('x DTLS handshake failed (fingerprint mismatch)')
self._set_state(State.FAILED)
return
# generate keying material
buf = ffi.new('unsigned char[]', 2 * (SRTP_KEY_LEN + SRTP_SALT_LEN))
extractor = b'EXTRACTOR-dtls_srtp'
_openssl_assert(lib.SSL_export_keying_material(
self.ssl, buf, len(buf), extractor, len(extractor), ffi.NULL, 0, 0) == 1)
view = ffi.buffer(buf)
if self._role == 'server':
srtp_tx_key = get_srtp_key_salt(view, 1)
srtp_rx_key = get_srtp_key_salt(view, 0)
else:
srtp_tx_key = get_srtp_key_salt(view, 0)
srtp_rx_key = get_srtp_key_salt(view, 1)
rx_policy = Policy(key=srtp_rx_key, ssrc_type=Policy.SSRC_ANY_INBOUND)
rx_policy.allow_repeat_tx = True
rx_policy.window_size = 1024
self._rx_srtp = Session(rx_policy)
tx_policy = Policy(key=srtp_tx_key, ssrc_type=Policy.SSRC_ANY_OUTBOUND)
tx_policy.allow_repeat_tx = True
tx_policy.window_size = 1024
self._tx_srtp = Session(tx_policy)
# start data pump
self.__log_debug('- DTLS handshake complete')
self._set_state(State.CONNECTED)
self._task = asyncio.ensure_future(self.__run()) | 0.001561 |
def update_labels(repo):
"""Update labels."""
updated = set()
for label in repo.get_labels():
edit = find_label(label.name, label.color, label.description)
if edit is not None:
print(' Updating {}: #{} "{}"'.format(edit.new, edit.color, edit.description))
label.edit(edit.new, edit.color, edit.description)
updated.add(edit.old)
updated.add(edit.new)
else:
if DELETE_UNSPECIFIED:
print(' Deleting {}: #{} "{}"'.format(label.name, label.color, label.description))
label.delete()
else:
print(' Skipping {}: #{} "{}"'.format(label.name, label.color, label.description))
updated.add(label.name)
for name, values in label_list.items():
color, description = values
if isinstance(name, tuple):
new_name = name[1]
else:
new_name = name
if new_name not in updated:
print(' Creating {}: #{} "{}"'.format(new_name, color, description))
repo.create_label(new_name, color, description) | 0.004401 |
def get(self, block=True, timeout=None):
"""Gets an item from the queue.
Uses polling if block=True, so there is no guarantee of order if
multiple consumers get from the same empty queue.
Returns:
The next item in the queue.
Raises:
Empty if the queue is empty and blocking is False.
"""
if not block:
success, item = ray.get(self.actor.get.remote())
if not success:
raise Empty
elif timeout is None:
# Polling
# Use a not_empty condition variable or return a promise?
success, item = ray.get(self.actor.get.remote())
while not success:
# Consider adding time.sleep here
success, item = ray.get(self.actor.get.remote())
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time.time() + timeout
# Polling
# Use a not_full condition variable or return a promise?
success = False
while not success and time.time() < endtime:
success, item = ray.get(self.actor.get.remote())
if not success:
raise Empty
return item | 0.001535 |
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
LEN_TYPE = {
7: 'GMI',
4: 'POW',
2: 'WOJ',
}
for ja in orm.JednostkaAdministracyjna.objects.all():
ja.typ = LEN_TYPE[len(ja.id)]
ja.save() | 0.006012 |
def add_load_constant(self, name, output_name, constant_value, shape):
"""
Add a load constant layer.
Parameters
----------
name: str
The name of this layer.
output_name: str
The output blob name of this layer.
constant_value: numpy.array
value of the constant as a numpy array.
shape: [int]
List of ints representing the shape of the constant. Must be of length 3: [C,H,W]
See Also
--------
add_elementwise
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.loadConstant
data = spec_layer_params.data
data.floatValue.extend(map(float, constant_value.flatten()))
spec_layer_params.shape.extend(shape)
if len(data.floatValue) != np.prod(shape):
raise ValueError("Dimensions of 'shape' do not match the size of the provided constant")
if len(shape) != 3:
raise ValueError("'shape' must be of length 3") | 0.0033 |
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
Rodrigo Nemmen, http://goo.gl/8S1Oo
"""
if type(v)==list:
vboot=[] # list of boostrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot | 0.043646 |
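As a usage sketch (assuming numpy is available and the bootstrap function above is importable), resampling a single array and a pair of arrays that must stay aligned might look like this:
import numpy as np

x = np.random.randn(100)
y = 2.0 * x + np.random.randn(100)

xb = bootstrap(x)             # one bootstrapped copy of x
xb2, yb2 = bootstrap([x, y])  # paired resampling: the same random indexes are applied to x and y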
def SETNG(cpu, dest):
"""
Sets byte if not greater.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.ZF, cpu.SF != cpu.OF), 1, 0)) | 0.012097 |
def del_fields(self, *names):
"""Delete data fields from this struct instance"""
cls = type(self)
self.__class__ = cls
for n in names:
# don't raise error if a field is absent
if isinstance(getattr(cls, n, None), DataField):
if n in self._field_values:
del self._field_values[n]
delattr(cls, n) | 0.004975 |
def get_routes(
feed: "Feed", date: Optional[str] = None, time: Optional[str] = None
) -> DataFrame:
"""
Return a subset of ``feed.routes``
Parameters
-----------
feed : Feed
date : string
YYYYMMDD date string restricting routes to only those active on
the date
time : string
HH:MM:SS time string, possibly with HH > 23, restricting routes
to only those active during the time
Returns
-------
DataFrame
A subset of ``feed.routes``
Notes
-----
Assume the following feed attributes are not ``None``:
- ``feed.routes``
- Those used in :func:`.trips.get_trips`.
"""
if date is None:
return feed.routes.copy()
trips = feed.get_trips(date, time)
R = trips["route_id"].unique()
return feed.routes[feed.routes["route_id"].isin(R)] | 0.001163 |
def OnFindToolbarToggle(self, event):
"""Search toolbar toggle event handler"""
self.main_window.find_toolbar.SetGripperVisible(True)
find_toolbar_info = self.main_window._mgr.GetPane("find_toolbar")
self._toggle_pane(find_toolbar_info)
event.Skip() | 0.006826 |
def get_all_analytics(user, job_id):
"""Get all analytics of a job."""
args = schemas.args(flask.request.args.to_dict())
v1_utils.verify_existence_and_get(job_id, models.JOBS)
query = v1_utils.QueryBuilder(_TABLE, args, _A_COLUMNS)
# If not admin nor rh employee then restrict the view to the team
if user.is_not_super_admin() and not user.is_read_only_user():
query.add_extra_condition(_TABLE.c.team_id.in_(user.teams_ids))
query.add_extra_condition(_TABLE.c.job_id == job_id)
nb_rows = query.get_number_of_rows()
rows = query.execute(fetchall=True)
rows = v1_utils.format_result(rows, _TABLE.name)
return flask.jsonify({'analytics': rows, '_meta': {'count': nb_rows}}) | 0.001376 |
def log(args):
"""
%prog log logfile
    Prepare a log of created files, ordered by their creation date. The purpose
    of this script is to touch these files sequentially to reflect their build
order. On the JCVI scratch area, the files are touched regularly to avoid
getting deleted, losing their respective timestamps. However, this created a
problem for the make system adopted by ALLPATHS.
An example block to be extracted ==>
[PC] Calling PreCorrect to create 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_prec.fastb
[PC] $(RUN)/frag_reads_prec.qualb
[PC]
[PC] from 2 file(s):
[PC]
[PC] $(RUN)/frag_reads_filt.fastb
[PC] $(RUN)/frag_reads_filt.qualb
"""
from jcvi.algorithms.graph import nx, topological_sort
p = OptionParser(log.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
g = nx.DiGraph()
logfile, = args
fp = open(logfile)
row = fp.readline()
incalling = False
basedb = {}
while row:
atoms = row.split()
if len(atoms) < 3:
row = fp.readline()
continue
tag, token, trailing = atoms[0], atoms[1], atoms[-1]
if trailing == 'file(s):':
numfiles = int(atoms[-2])
row = fp.readline()
assert row.strip() == tag
if token == "Calling" and not incalling:
createfiles = []
for i in xrange(numfiles):
row = fp.readline()
createfiles.append(row.split()[-1])
incalling = True
if token == "from" and incalling:
fromfiles = []
for i in xrange(numfiles):
row = fp.readline()
fromfiles.append(row.split()[-1])
for a in fromfiles:
for b in createfiles:
ba, bb = op.basename(a), op.basename(b)
basedb[ba] = a
basedb[bb] = b
g.add_edge(ba, bb)
incalling = False
if token == "ln":
fromfile, createfile = atoms[-2:]
ba, bb = op.basename(fromfile), op.basename(createfile)
#print ba, "-->", bb
if ba != bb:
g.add_edge(ba, bb)
row = fp.readline()
ts = [basedb[x] for x in topological_sort(g) if x in basedb]
print("\n".join(ts)) | 0.001202 |
def showtraceback(self, *args, **kwargs):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list) | 0.002632 |
def add(self, *nodes):
""" Adds nodes as siblings
:param nodes: GraphNode(s)
"""
for node in nodes:
node.set_parent(self)
self.add_sibling(node) | 0.01 |
def _set_fcoe_config(self, v, load=False):
"""
Setter method for fcoe_config, mapped from YANG variable /rbridge_id/fcoe_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_config() directly.
YANG Description: This provides the grouping of all FCoE map configuration
elements.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fcoe_config.fcoe_config, is_container='container', presence=False, yang_name="fcoe-config", rest_name="fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'FCoE configuration commands', u'display-when': u'(/vcsmode/vcs-mode = "true")', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'fcoe', u'cli-mode-name': u'config-rbridge-fcoe'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fcoe_config.fcoe_config, is_container='container', presence=False, yang_name="fcoe-config", rest_name="fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'FCoE configuration commands', u'display-when': u'(/vcsmode/vcs-mode = "true")', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'fcoe', u'cli-mode-name': u'config-rbridge-fcoe'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fcoe_config = t
if hasattr(self, '_set'):
self._set() | 0.004606 |
def _setup_notification_listener(self, topic_name, url):
"""Setup notification listener for a service."""
self.notify_listener = rpc.DfaNotifcationListener(
topic_name, url, rpc.DfaNotificationEndpoints(self)) | 0.008403 |
def server_show(endpoint_id, server_id):
"""
Executor for `globus endpoint server show`
"""
client = get_client()
server_doc = client.get_endpoint_server(endpoint_id, server_id)
if not server_doc["uri"]: # GCP endpoint server
fields = (("ID", "id"),)
text_epilog = dedent(
"""
This server is for a Globus Connect Personal installation.
For its connection status, try:
globus endpoint show {}
""".format(
endpoint_id
)
)
else:
def advertised_port_summary(server):
def get_range_summary(start, end):
return (
"unspecified"
if not start and not end
else "unrestricted"
if start == 1024 and end == 65535
else "{}-{}".format(start, end)
)
return "incoming {}, outgoing {}".format(
get_range_summary(
server["incoming_data_port_start"], server["incoming_data_port_end"]
),
get_range_summary(
server["outgoing_data_port_start"], server["outgoing_data_port_end"]
),
)
fields = (
("ID", "id"),
("URI", "uri"),
("Subject", "subject"),
("Data Ports", advertised_port_summary),
)
text_epilog = None
formatted_print(
server_doc,
text_format=FORMAT_TEXT_RECORD,
fields=fields,
text_epilog=text_epilog,
) | 0.001847 |
def play(self):
"""
Sends a "play" command to the player.
"""
msg = cr.Message()
msg.type = cr.PLAY
self.send_message(msg) | 0.011765 |
def SESSION_TIME(stats, info):
"""Total time of this session.
Reports the time elapsed from the construction of the `Stats` object to
this `submit()` call.
This is a flag you can pass to `Stats.submit()`.
"""
duration = time.time() - stats.started_time
secs = int(duration)
msecs = int((duration - secs) * 1000)
info.append(('session_time', '%d.%d' % (secs, msecs))) | 0.002475 |
def scanned(self):
"""Number of items that DynamoDB evaluated, before any filter was applied."""
if self.request["Select"] == "COUNT":
while not self.exhausted:
next(self, None)
return self._scanned | 0.012 |
def qsub(script, job_name, dryrun=False, *args, **kwargs):
"""Submit a job via qsub."""
print("Preparing job script...")
job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
env = os.environ.copy()
if dryrun:
print(
"This is a dry run! Here is the generated job file, which will "
"not be submitted:"
)
print(job_string)
else:
print("Calling qsub with the generated job script.")
p = subprocess.Popen(
'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
)
p.communicate(input=bytes(job_string.encode('ascii'))) | 0.001546 |
def _configure_port_entries(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan):
"""Create a nexus switch entry.
if needed, create a VLAN in the appropriate switch or port and
configure the appropriate interfaces for this VLAN.
Called during update postcommit port event.
"""
connections = self._get_active_port_connections(port, host_id)
# (nexus_port,switch_ip) will be unique in each iteration.
# But switch_ip will repeat if host has >1 connection to same switch.
# So track which switch_ips already have vlan created in this loop.
vlan_already_created = []
starttime = time.time()
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
try:
all_bindings = nxos_db.get_nexusvlan_binding(
vlan_id, switch_ip)
except excep.NexusPortBindingNotFound:
LOG.warning("Switch %(switch_ip)s and Vlan "
"%(vlan_id)s not found in port binding "
"database. Skipping this update",
{'switch_ip': switch_ip, 'vlan_id': vlan_id})
continue
previous_bindings = [row for row in all_bindings
if row.instance_id != device_id]
if previous_bindings and (switch_ip in vlan_already_created):
duplicate_type = const.DUPLICATE_VLAN
else:
vlan_already_created.append(switch_ip)
duplicate_type = const.NO_DUPLICATE
port_starttime = time.time()
try:
self._configure_port_binding(
is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port,
vni)
except Exception:
with excutils.save_and_reraise_exception():
self.driver.capture_and_print_timeshot(
port_starttime, "port_configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
port_starttime, "port_config",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "config") | 0.001577 |
def interpolate_nearest(self, lons, lats, data):
"""
Interpolate using nearest-neighbour approximation
Returns the same as interpolate(lons,lats,data,order=0)
"""
return self.interpolate(lons, lats, data, order=0) | 0.007905 |
def message(self_,msg,*args,**kw):
"""
Print msg merged with args as a message.
See Python's logging module for details of message formatting.
"""
self_.__db_print(INFO,msg,*args,**kw) | 0.035556 |
def visit_While(self, node: ast.While) -> Optional[ast.AST]:
"""Eliminate dead code from while bodies."""
new_node = self.generic_visit(node)
assert isinstance(new_node, ast.While)
return ast.copy_location(
ast.While(
test=new_node.test,
body=_filter_dead_code(new_node.body),
orelse=_filter_dead_code(new_node.orelse),
),
new_node,
) | 0.004367 |
def _get_mapreduce_spec(cls, mr_id):
"""Get Mapreduce spec from mr id."""
key = 'GAE-MR-spec: %s' % mr_id
spec_json = memcache.get(key)
if spec_json:
return cls.from_json(spec_json)
state = MapreduceState.get_by_job_id(mr_id)
spec = state.mapreduce_spec
spec_json = spec.to_json()
memcache.set(key, spec_json)
return spec | 0.00551 |
def add(name, num, minimum=0, maximum=0, ref=None):
'''
Adds together the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.add:
- name: myregentry
- num: 5
'''
return calc(
name=name,
num=num,
oper='add',
minimum=minimum,
maximum=maximum,
ref=ref
) | 0.002481 |
def to_bytes(data: Union[str, bytes]) -> bytes:
"""
:param data: Data to convert to bytes.
:type data: Union[str, bytes]
:return: `data` encoded to UTF8.
:rtype: bytes
"""
if isinstance(data, bytes):
return data
return data.encode('utf-8') | 0.003584 |
def headers(self) -> Optional[List[str]]:
"""
        Generate headers.
"""
if len(self) == 0:
return None
headers = cast(List[str], [])
for cookie in self.values():
headers.append(cookie.OutputString())
return headers | 0.007067 |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Mount ISO Image (requires DPM mode)."""
assert wait_for_completion is True # synchronous operation
partition_oid = uri_parms[0]
partition_uri = '/api/partitions/' + partition_oid
try:
partition = hmc.lookup_by_uri(partition_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = partition.manager.parent
assert cpc.dpm_enabled
check_valid_cpc_status(method, uri, cpc)
check_partition_status(method, uri, partition,
invalid_statuses=['starting', 'stopping'])
# Parse and check required query parameters
query_parms = parse_query_parms(method, uri, uri_parms[1])
try:
image_name = query_parms['image-name']
except KeyError:
raise BadRequestError(
method, uri, reason=1,
message="Missing required URI query parameter 'image-name'")
try:
ins_file_name = query_parms['ins-file-name']
except KeyError:
raise BadRequestError(
method, uri, reason=1,
message="Missing required URI query parameter 'ins-file-name'")
# Reflect the effect of mounting in the partition properties
partition.properties['boot-iso-image-name'] = image_name
partition.properties['boot-iso-ins-file'] = ins_file_name
return {} | 0.001944 |
def terminate(self, devices):
"""Terminate one or more running or stopped instances.
"""
for device in devices:
self.logger.info('Terminating: %s', device.id)
try:
device.delete()
except packet.baseapi.Error:
raise PacketManagerException('Unable to terminate instance "{}"'.format(device.id)) | 0.007813 |
def _slugify_internal_collection_name(self, json_repr):
"""Parse the JSON, find its name, return a slug of its name"""
collection = self._coerce_json_to_collection(json_repr)
if collection is None:
return None
internal_name = collection['name']
return slugify(internal_name) | 0.006135 |
def fill_masked(self, value=-1, copy=True):
"""Fill masked genotype calls with a given value.
Parameters
----------
value : int, optional
The fill value.
copy : bool, optional
If False, modify the array in place.
Returns
-------
g : GenotypeArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 1], [1, 1]],
... [[0, 2], [-1, -1]]], dtype='i1')
>>> mask = [[True, False], [False, True], [False, False]]
>>> g.mask = mask
>>> g.fill_masked().values
array([[[-1, -1],
[ 0, 1]],
[[ 0, 1],
[-1, -1]],
[[ 0, 2],
[-1, -1]]], dtype=int8)
"""
if self.mask is None:
raise ValueError('no mask is set')
# apply the mask
data = np.array(self.values, copy=copy)
data[self.mask, ...] = value
if copy:
out = type(self)(data) # wrap
out.is_phased = self.is_phased
# don't set mask because it has been filled in
else:
out = self
out.mask = None # reset mask
return out | 0.001493 |
def generate_synth_data(nsta, ntemplates, nseeds, samp_rate, t_length,
max_amp, max_lag, debug=0):
"""
Generate a synthetic dataset to be used for testing.
This will generate both templates and data to scan through.
Templates will be generated using the utils.synth_seis functions.
The day of data will be random noise, with random signal-to-noise
ratio copies of the templates randomly seeded throughout the day.
It also returns the seed times and signal-to-noise ratios used.
:type nsta: int
:param nsta: Number of stations to generate data for < 15.
:type ntemplates: int
:param ntemplates: Number of templates to generate, will be generated \
with random arrival times.
:type nseeds: int
:param nseeds: Number of copies of the template to seed within the \
day of noisy data for each template.
:type samp_rate: float
:param samp_rate: Sampling rate to use in Hz
:type t_length: float
:param t_length: Length of templates in seconds.
:type max_amp: float
:param max_amp: Maximum signal-to-noise ratio of seeds.
:param max_lag: Maximum lag time in seconds (randomised).
:type max_lag: float
:type debug: int
:param debug: Debug level, bigger the number, the more plotting/output.
:returns: Templates: List of :class:`obspy.core.stream.Stream`
:rtype: list
:returns: Data: :class:`obspy.core.stream.Stream` of seeded noisy data
:rtype: :class:`obspy.core.stream.Stream`
:returns: Seeds: dictionary of seed SNR and time with time in samples.
:rtype: dict
"""
# Generate random arrival times
t_times = np.abs(np.random.random([nsta, ntemplates])) * max_lag
# Generate random node locations - these do not matter as they are only
# used for naming
lats = np.random.random(ntemplates) * 90.0
lons = np.random.random(ntemplates) * 90.0
depths = np.abs(np.random.random(ntemplates) * 40.0)
nodes = zip(lats, lons, depths)
# Generating a 5x3 array to make 3 templates
stations = ['ALPH', 'BETA', 'GAMM', 'KAPP', 'ZETA', 'BOB', 'MAGG',
'ALF', 'WALR', 'ALBA', 'PENG', 'BANA', 'WIGG', 'SAUS',
'MALC']
if debug > 1:
print(nodes)
print(t_times)
print(stations[0:nsta])
templates = template_grid(stations=stations[0:nsta], nodes=nodes,
travel_times=t_times, phase='S',
samp_rate=samp_rate,
flength=int(t_length * samp_rate))
if debug > 2:
for template in templates:
print(template)
# Now we want to create a day of synthetic data
seeds = []
data = templates[0].copy() # Copy a template to get the correct length
# and stats for data, we will overwrite the data on this copy
for tr in data:
tr.data = np.zeros(86400 * int(samp_rate))
# Set all the traces to have a day of zeros
tr.stats.starttime = UTCDateTime(0)
for i, template in enumerate(templates):
impulses = np.zeros(86400 * int(samp_rate))
# Generate a series of impulses for seeding
        # Need three separate impulse traces for each of the three templates,
# all will be convolved within the data though.
impulse_times = np.random.randint(86400 * int(samp_rate),
size=nseeds)
impulse_amplitudes = np.random.randn(nseeds) * max_amp
# Generate amplitudes up to maximum amplitude in a normal distribution
seeds.append({'SNR': impulse_amplitudes,
'time': impulse_times})
for j in range(nseeds):
impulses[impulse_times[j]] = impulse_amplitudes[j]
# We now have one vector of impulses, we need nsta numbers of them,
# shifted with the appropriate lags
mintime = min([template_tr.stats.starttime
for template_tr in template])
for j, template_tr in enumerate(template):
offset = int((template_tr.stats.starttime - mintime) * samp_rate)
pad = np.zeros(offset)
tr_impulses = np.append(pad, impulses)[0:len(impulses)]
# Convolve this with the template trace to give the daylong seeds
data[j].data += np.convolve(tr_impulses,
template_tr.data)[0:len(impulses)]
# Add the noise
for tr in data:
noise = np.random.randn(86400 * int(samp_rate))
tr.data += noise / max(noise)
return templates, data, seeds | 0.000218 |
def get_content_type (headers):
"""
Get the MIME type from the Content-Type header value, or
'application/octet-stream' if not found.
@return: MIME type
@rtype: string
"""
ptype = headers.get('Content-Type', 'application/octet-stream')
if ";" in ptype:
# split off not needed extension info
ptype = ptype.split(';')[0]
return ptype.strip().lower() | 0.005 |
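A quick usage sketch of the header handling above (a plain dict standing in for the headers mapping):
headers = {'Content-Type': 'text/HTML; charset=UTF-8'}
assert get_content_type(headers) == 'text/html'            # extension info after ';' is dropped
assert get_content_type({}) == 'application/octet-stream'  # fallback when the header is missing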
def curve_reduce(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
figure, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
curve.plot(256, ax=ax1)
color = ax1.lines[-1].get_color()
add_patch(ax1, curve._nodes, color)
reduced.plot(256, ax=ax2)
color = ax2.lines[-1].get_color()
add_patch(ax2, reduced._nodes, color)
ax1.axis("scaled")
ax2.axis("scaled")
_plot_helpers.add_plot_boundary(ax2)
save_image(figure, "curve_reduce.png") | 0.001845 |
def addUser(self, invitationList,
subject, html):
"""
adds a user without sending an invitation email
Inputs:
invitationList - InvitationList class used to add users without
sending an email
subject - email subject
html - email message sent to users in invitation list object
"""
url = self._url + "/invite"
params = {"f" : "json"}
if isinstance(invitationList, parameters.InvitationList):
params['invitationList'] = invitationList.value()
params['html'] = html
params['subject'] = subject
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | 0.008889 |
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip() | 0.004132 |
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
"""
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred | 0.001996 |
def set_content(self, content):
"""Set textual content for the object/node.
Verifies the node is allowed to contain content, and throws an
exception if not.
"""
if self.allows_content:
self.content = content.strip()
else:
raise UNTLStructureException(
'Element "%s" does not allow textual content' % (self.tag,)
) | 0.004831 |
def save_heterozygosity(heterozygosity, samples, out_prefix):
"""Saves the heterozygosity data.
:param heterozygosity: the heterozygosity data.
:param samples: the list of samples.
:param out_prefix: the prefix of the output files.
:type heterozygosity: numpy.array
:type samples: list of tuples of str
:type out_prefix: str
"""
# The output file
o_file = None
try:
o_file = open(out_prefix + ".het", 'wb')
except IOError:
msg = "{}.het: can't write file".format(out_prefix)
raise ProgramError(msg)
# Saving the data
for (fid, iid), het in zip(samples, heterozygosity):
print >>o_file, "\t".join([fid, iid, str(het)])
# Closing the file
o_file.close() | 0.00133 |
def walk(self, visitor):
"""
Walk the branch and call the visitor function
on each node.
@param visitor: A function.
@return: self
@rtype: L{Element}
"""
visitor(self)
for c in self.children:
c.walk(visitor)
return self | 0.006431 |
def is_numeric(obj):
"""
This detects whether an input object is numeric or not.
:param obj: object to be tested.
"""
try:
obj+obj, obj-obj, obj*obj, obj**obj, obj/obj
except ZeroDivisionError:
return True
except Exception:
return False
else:
return True | 0.003135 |
def get_log_entries(self):
"""Gets the log entry list resulting from a search.
return: (osid.logging.LogEntryList) - the log entry list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.LogEntryList(self._results, runtime=self._runtime) | 0.004065 |
def is_real_floating_dtype(dtype):
"""Return ``True`` if ``dtype`` is a real floating point type."""
dtype = np.dtype(dtype)
return np.issubsctype(getattr(dtype, 'base', None), np.floating) | 0.004975 |
def elements_equal(first, *others):
"""
Check elements for equality
"""
f = first
lf = list(f)
for e in others:
le = list(e)
if (len(lf) != len(le)
or f.tag != e.tag
or f.text != e.text
or f.tail != e.tail
or f.attrib != e.attrib
or (not all(map(elements_equal, lf, le)))
):
return False
return True | 0.004435 |
def _check_requirements(self):
"""
Checks the IOU image.
"""
if not self._path:
raise IOUError("IOU image is not configured")
if not os.path.isfile(self._path) or not os.path.exists(self._path):
if os.path.islink(self._path):
raise IOUError("IOU image '{}' linked to '{}' is not accessible".format(self._path, os.path.realpath(self._path)))
else:
raise IOUError("IOU image '{}' is not accessible".format(self._path))
try:
with open(self._path, "rb") as f:
# read the first 7 bytes of the file.
elf_header_start = f.read(7)
except OSError as e:
raise IOUError("Cannot read ELF header for IOU image '{}': {}".format(self._path, e))
# IOU images must start with the ELF magic number, be 32-bit or 64-bit, little endian
        # and have an ELF version of 1. Normal IOS images are big endian!
if elf_header_start != b'\x7fELF\x01\x01\x01' and elf_header_start != b'\x7fELF\x02\x01\x01':
raise IOUError("'{}' is not a valid IOU image".format(self._path))
if not os.access(self._path, os.X_OK):
raise IOUError("IOU image '{}' is not executable".format(self._path)) | 0.006206 |
def disk2ram(self):
"""Move internal data from disk to RAM."""
values = self.series
self.deactivate_disk()
self.ramflag = True
self.__set_array(values)
self.update_fastaccess() | 0.008929 |
def _summarize_accessible_fields(field_descriptions, width=40,
section_title='Accessible fields'):
"""
Create a summary string for the accessible fields in a model. Unlike
`_toolkit_repr_print`, this function does not look up the values of the
fields, it just formats the names and descriptions.
Parameters
----------
field_descriptions : dict{str: str}
Name of each field and its description, in a dictionary. Keys and
values should be strings.
width : int, optional
Width of the names. This is usually determined and passed by the
calling `__repr__` method.
section_title : str, optional
Name of the accessible fields section in the summary string.
Returns
-------
out : str
"""
key_str = "{:<{}}: {}"
items = []
items.append(section_title)
items.append("-" * len(section_title))
for field_name, field_desc in field_descriptions.items():
items.append(key_str.format(field_name, width, field_desc))
return "\n".join(items) | 0.00092 |
def bulk_edit(self, _fields, ids=None, filter=None, type=None, all=False, testvars=None): # pylint: disable=redefined-builtin
"""Bulk edit a set of configs.
:param _fields: :class:`configs.Config <configs.Config>` object
:param ids: (optional) Int list of config IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
:param testvars: (optional) :class:`configs.ConfigTestvars <configs.ConfigTestvars>` list
"""
schema = self.EDIT_SCHEMA
_fields = self.service.encode(schema, _fields, skip_none=True)
return self.service.bulk_edit(self.base, self.RESOURCE,
_fields, ids=ids, filter=filter, type=type, all=all, testvars=testvars) | 0.006969 |
def folding_model_energy(rvec, rcut):
r"""computes the potential energy at point rvec"""
r = np.linalg.norm(rvec) - rcut
rr = r ** 2
if r < 0.0:
return -2.5 * rr
return 0.5 * (r - 2.0) * rr | 0.004608 |
def generate_from_text(self, text):
"""Generate wordcloud from text.
The input "text" is expected to be a natural text. If you pass a sorted
list of words, words will appear in your output twice. To remove this
duplication, set ``collocations=False``.
Calls process_text and generate_from_frequencies.
..versionchanged:: 1.2.2
Argument of generate_from_frequencies() is not return of
process_text() any more.
Returns
-------
self
"""
words = self.process_text(text)
self.generate_from_frequencies(words)
return self | 0.003091 |
def get_image(row, output_dir):
"""Downloads the image that corresponds to the given row.
Prints a notification if the download fails."""
if not download_image(image_id=row[0],
url=row[1],
x1=float(row[2]),
y1=float(row[3]),
x2=float(row[4]),
y2=float(row[5]),
output_dir=output_dir):
print("Download failed: " + str(row[0])) | 0.006263 |
def Vdiff(D1, D2):
"""
finds the vector difference between two directions D1,D2
"""
A = dir2cart([D1[0], D1[1], 1.])
B = dir2cart([D2[0], D2[1], 1.])
C = []
for i in range(3):
C.append(A[i] - B[i])
return cart2dir(C) | 0.003906 |
async def login_user(self, password, **kwds):
"""
This function handles the registration of the given user credentials in the database
"""
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(
self.model.user == user_data[root_query()][0]['pk']
)[0]
# if we couldn't acess the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user')
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials") | 0.004049 |
def get(self, adgroup_id, nick=None):
'''xxxxx.xxxxx.adgroup.catmatch.get
===================================
        Get the category bids for an ad group.'''
request = TOPRequest('xxxxx.xxxxx.adgroup.catmatch.get')
request['adgroup_id'] = adgroup_id
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':ADGroupCatmatch})
return self.result | 0.023013 |
def get_components(A, no_depend=False):
'''
Returns the components of an undirected graph specified by the binary and
undirected adjacency matrix adj. Components and their constitutent nodes
are assigned the same index and stored in the vector, comps. The vector,
comp_sizes, contains the number of nodes beloning to each component.
Parameters
----------
A : NxN np.ndarray
binary undirected adjacency matrix
no_depend : Any
Does nothing, included for backwards compatibility
Returns
-------
comps : Nx1 np.ndarray
vector of component assignments for each node
comp_sizes : Mx1 np.ndarray
vector of component sizes
Notes
-----
Note: disconnected nodes will appear as components with a component
size of 1
Note: The identity of each component (i.e. its numerical value in the
result) is not guaranteed to be identical the value returned in BCT,
matlab code, although the component topology is.
Many thanks to Nick Cullen for providing this implementation
'''
if not np.all(A == A.T): # ensure matrix is undirected
raise BCTParamError('get_components can only be computed for undirected'
' matrices. If your matrix is noisy, correct it with np.around')
A = binarize(A, copy=True)
n = len(A)
np.fill_diagonal(A, 1)
edge_map = [{u,v} for u in range(n) for v in range(n) if A[u,v] == 1]
union_sets = []
for item in edge_map:
temp = []
for s in union_sets:
if not s.isdisjoint(item):
item = s.union(item)
else:
temp.append(s)
temp.append(item)
union_sets = temp
comps = np.array([i+1 for v in range(n) for i in
range(len(union_sets)) if v in union_sets[i]])
comp_sizes = np.array([len(s) for s in union_sets])
return comps, comp_sizes | 0.004128 |
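For illustration, here is a minimal call on a 3-node graph where nodes 0 and 1 are joined and node 2 is isolated, assuming get_components and its helpers from the same module are importable; component numbering follows the insertion order described in the notes.
import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 0],
              [0, 0, 0]])
comps, comp_sizes = get_components(A)
# comps      -> array([1, 1, 2])   (nodes 0 and 1 share component 1, node 2 is component 2)
# comp_sizes -> array([2, 1])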
def safe_dump_all(documents, stream=None, **kwds):
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=SafeDumper, **kwds) | 0.003472 |
def save_model(self, request, obj, form, change):
"""
Save model for every language so that field auto-population
is done for every each of it.
"""
super(DisplayableAdmin, self).save_model(request, obj, form, change)
if settings.USE_MODELTRANSLATION:
lang = get_language()
for code in OrderedDict(settings.LANGUAGES):
if code != lang: # Already done
try:
activate(code)
except:
pass
else:
obj.save()
activate(lang) | 0.004637 |
def last(conf):
"""How long you have kept signing in."""
try:
v2ex = V2ex(conf.config)
v2ex.login()
last_date = v2ex.get_last()
click.echo(last_date)
except KeyError:
click.echo('Keyerror, please check your config file.')
except IndexError:
click.echo('Please check your username and password.') | 0.002786 |
def isNonNull(requestContext, seriesList):
"""
Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList | 0.001214 |
def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):
"""Return optimised pore diameter and it's COM."""
args = elements, coordinates
if com is not None:
pass
else:
com = center_of_mass(elements, coordinates)
if bounds is None:
pore_r = pore_diameter(elements, coordinates, com=com)[0] / 2
bounds = (
(com[0]-pore_r, com[0]+pore_r),
(com[1]-pore_r, com[1]+pore_r),
(com[2]-pore_r, com[2]+pore_r)
)
minimisation = minimize(
correct_pore_diameter, x0=com, args=args, bounds=bounds)
pored = pore_diameter(elements, coordinates, com=minimisation.x)
return (pored[0], pored[1], minimisation.x) | 0.001372 |
def lset(self, name, index, value):
"Set ``position`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value) | 0.012821 |
def get_next_future_timerange_invalid(self, timestamp):
"""Get next invalid time for timeranges
:param timestamp: time to check
:type timestamp: int
:return: next time when a timerange is not valid
:rtype: None | int
"""
sec_from_morning = get_sec_from_morning(timestamp)
ends = []
for timerange in self.timeranges:
tr_end = timerange.hend * 3600 + timerange.mend * 60
if tr_end >= sec_from_morning:
# Remove the last second of the day for 00->24h"
if tr_end == 86400:
tr_end = 86399
ends.append(tr_end)
if ends != []:
return min(ends)
return None | 0.002699 |
def remove_all_logger_handlers(logger: logging.Logger) -> None:
"""
Remove all handlers from a logger.
Args:
logger: logger to modify
"""
while logger.handlers:
h = logger.handlers[0]
logger.removeHandler(h) | 0.003968 |
def derivative(self, point):
"""Derivative of this operator at ``point``.
For the particular case of constant padding with non-zero
constant, the derivative is the corresponding zero-padding
variant. In all other cases, this operator is linear, i.e.
the derivative is equal to ``self``.
"""
if self.pad_mode == 'constant' and self.pad_const != 0:
return ResizingOperator(
domain=self.domain, range=self.range, pad_mode='constant',
pad_const=0.0)
else: # operator is linear
return self | 0.0033 |
def delete_virtualip(self, loadbalancer, vip):
"""Deletes the VirtualIP from its load balancer."""
lb = vip.parent
if not lb:
raise exc.UnattachedVirtualIP("No parent Load Balancer for this "
"VirtualIP could be determined.")
resp, body = self.api.method_delete("/loadbalancers/%s/virtualips/%s" %
(lb.id, vip.id))
return resp, body | 0.009524 |
def pages(self):
"""The total number of pages"""
if self.per_page == 0 or self.total is None:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages | 0.008658 |
def to_json(df, x, y):
"""Format output for json response."""
values = []
for i, row in df.iterrows():
values.append({
"x": row[x],
"y": row[y]
})
if df.empty:
return {"result": [{"x": 0, "y": 0}], "date": False}
return {"result": values, "date": False} | 0.00545 |
def get_code_breakpoint(self, dwProcessId, address):
"""
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint},
L{erase_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{CodeBreakpoint}
@return: The code breakpoint object.
"""
key = (dwProcessId, address)
if key not in self.__codeBP:
msg = "No breakpoint at process %d, address %s"
address = HexDump.address(address)
raise KeyError(msg % (dwProcessId, address))
return self.__codeBP[key] | 0.001797 |
def split(self, tValues):
"""
Split the segment according the t values
"""
if self.segmentType == "curve":
on1 = self.previousOnCurve
off1 = self.points[0].coordinates
off2 = self.points[1].coordinates
on2 = self.points[2].coordinates
return bezierTools.splitCubicAtT(on1, off1, off2, on2, *tValues)
elif self.segmentType == "line":
segments = []
x1, y1 = self.previousOnCurve
x2, y2 = self.points[0].coordinates
dx = x2 - x1
dy = y2 - y1
pp = x1, y1
for t in tValues:
np = (x1+dx*t, y1+dy*t)
segments.append([pp, np])
pp = np
segments.append([pp, (x2, y2)])
return segments
elif self.segmentType == "qcurve":
raise NotImplementedError
else:
raise NotImplementedError | 0.002077 |
def list_prefix(self, auth, spec = None):
""" List prefixes matching the `spec`.
* `auth` [BaseAuth]
AAA options.
* `spec` [prefix_spec]
Specifies prefixes to list. If omitted, all will be listed.
Returns a list of dicts.
This is a quite blunt tool for finding prefixes, mostly useful for
fetching data about a single prefix. For more capable alternatives,
see the :func:`search_prefix` or :func:`smart_search_prefix` functions.
This is the documentation of the internal backend function. It's
exposed over XML-RPC, please also see the XML-RPC documentation for
:py:func:`nipap.xmlrpc.NipapXMLRPC.list_prefix` for full
understanding.
"""
self._logger.debug("list_prefix called; spec: %s" % unicode(spec))
if type(spec) is dict:
where, params = self._expand_prefix_spec(spec.copy(), 'inp.')
else:
raise NipapError("invalid prefix specification")
if where != '':
where = ' WHERE ' + where
sql = """SELECT
inp.id,
vrf.id AS vrf_id,
vrf.rt AS vrf_rt,
vrf.name AS vrf_name,
family(prefix) AS family,
inp.prefix,
inp.display_prefix,
inp.description,
COALESCE(inp.inherited_tags, '{}') AS inherited_tags,
COALESCE(inp.tags, '{}') AS tags,
inp.node,
inp.comment,
pool.id AS pool_id,
pool.name AS pool_name,
inp.type,
inp.indent,
inp.country,
inp.order_id,
inp.customer_id,
inp.external_key,
inp.authoritative_source,
inp.alarm_priority,
inp.monitor,
inp.vlan,
inp.added,
inp.last_modified,
inp.total_addresses,
inp.used_addresses,
inp.free_addresses,
inp.status,
inp.avps,
inp.expires
FROM ip_net_plan inp
JOIN ip_net_vrf vrf ON (inp.vrf_id = vrf.id)
LEFT JOIN ip_net_pool pool ON (inp.pool_id = pool.id) %s
ORDER BY vrf.rt NULLS FIRST, prefix""" % where
self._execute(sql, params)
res = list()
for row in self._curs_pg:
pref = dict(row)
pref['display_prefix'] = unicode(pref['display_prefix'])
res.append(pref)
return res | 0.002335 |
def from_dict(data, ctx):
"""
Instantiate a new Position from a dict (generally from loading a JSON
response). The data used to instantiate the Position is a shallow copy
of the dict passed in, with any complex child types instantiated
appropriately.
"""
data = data.copy()
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(
data.get('pl')
)
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('marginUsed') is not None:
data['marginUsed'] = ctx.convert_decimal_number(
data.get('marginUsed')
)
if data.get('resettablePL') is not None:
data['resettablePL'] = ctx.convert_decimal_number(
data.get('resettablePL')
)
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('guaranteedExecutionFees') is not None:
data['guaranteedExecutionFees'] = ctx.convert_decimal_number(
data.get('guaranteedExecutionFees')
)
if data.get('long') is not None:
data['long'] = \
ctx.position.PositionSide.from_dict(
data['long'], ctx
)
if data.get('short') is not None:
data['short'] = \
ctx.position.PositionSide.from_dict(
data['short'], ctx
)
return Position(**data) | 0.001062 |
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
get_overlapping=True):
"""Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dcit)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
}
"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if not variant_obj:
variant_obj = store.variant(variant_id)
if add_case:
# fill in information for pilup view
variant_case(store, case_obj, variant_obj)
# frequencies
variant_obj['frequencies'] = [
('1000G', variant_obj.get('thousand_genomes_frequency')),
('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),
('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),
('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),
('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),
('ClinGen NGI', variant_obj.get('clingen_ngi')),
('SweGen', variant_obj.get('swegen')),
('Decipher', variant_obj.get('decipher')),
]
variant_obj['callers'] = callers(variant_obj, category='sv')
overlapping_snvs = []
if get_overlapping:
overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant) for variant in
store.overlapping(variant_obj))
# parse_gene function is not called for SVs, but a link to ensembl gene is required
for gene_obj in variant_obj['genes']:
if gene_obj.get('common'):
ensembl_id = gene_obj['common']['ensembl_id']
try:
build = int(gene_obj['common'].get('build','37'))
except Exception:
build = 37
gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)
variant_obj['comments'] = store.events(institute_obj, case=case_obj,
variant_id=variant_obj['variant_id'], comments=True)
case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))
if variant_id in case_clinvars:
variant_obj['clinvar_clinsig'] = case_clinvars.get(variant_id)['clinsig']
if not 'end_chrom' in variant_obj:
variant_obj['end_chrom'] = variant_obj['chromosome']
return {
'institute': institute_obj,
'case': case_obj,
'variant': variant_obj,
'overlapping_snvs': overlapping_snvs,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
} | 0.003211 |
def get_tissue_in_references(self, entry):
"""
get list of models.TissueInReference from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.TissueInReference` objects
"""
tissue_in_references = []
query = "./reference/source/tissue"
tissues = {x.text for x in entry.iterfind(query)}
for tissue in tissues:
if tissue not in self.tissues:
self.tissues[tissue] = models.TissueInReference(tissue=tissue)
tissue_in_references.append(self.tissues[tissue])
return tissue_in_references | 0.004658 |
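
The query above is a plain ElementTree path lookup; here is a minimal, self-contained sketch of the same `iterfind` pattern using the standard library, with an invented XML fragment for illustration:

```python
# Hedged sketch: standalone ElementTree equivalent of the tissue query above.
# The XML fragment is invented for illustration only.
import xml.etree.ElementTree as ET

xml = """
<entry>
  <reference><source><tissue>Liver</tissue></source></reference>
  <reference><source><tissue>Brain</tissue></source></reference>
  <reference><source><tissue>Liver</tissue></source></reference>
</entry>
"""

entry = ET.fromstring(xml)
tissues = {node.text for node in entry.iterfind("./reference/source/tissue")}
print(tissues)  # {'Liver', 'Brain'} -- duplicates collapse because a set is used
```
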
def compute_cyclomatic_complexity(function):
"""
Compute the cyclomatic complexity of a function
Args:
function (core.declarations.function.Function)
Returns:
int
"""
# from https://en.wikipedia.org/wiki/Cyclomatic_complexity
# M = E - N + 2P
# where M is the complexity
# E number of edges
# N number of nodes
# P number of connected components
E = compute_number_edges(function)
N = len(function.nodes)
P = len(compute_strongly_connected_components(function))
return E - N + 2 * P | 0.001789 |
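
For a quick sanity check of the formula, here is a worked example on a hand-built control-flow graph for a single if/else (four nodes, four edges, one component). This is a hedged sketch using networkx directly, and it takes P as the number of weakly connected components per the textbook definition rather than calling the project's helpers:

```python
# Hedged worked example of M = E - N + 2P on a tiny if/else control-flow graph.
import networkx as nx

cfg = nx.DiGraph()
cfg.add_edges_from([
    ("entry", "then"),   # true branch
    ("entry", "else"),   # false branch
    ("then", "exit"),
    ("else", "exit"),
])

E = cfg.number_of_edges()                       # 4
N = cfg.number_of_nodes()                       # 4
P = nx.number_weakly_connected_components(cfg)  # 1
print(E - N + 2 * P)                            # 2: one decision point
```
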
def all(self):
"""Returns a chained generator response containing all matching records
:return:
- Iterable response
"""
if self._stream:
return chain.from_iterable(self._get_streamed_response())
return self._get_buffered_response()[0] | 0.006645 |
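
The streaming branch relies on `itertools.chain.from_iterable` to flatten page-sized chunks into one iterator; a minimal standalone sketch of that pattern, with a fabricated page generator for illustration:

```python
# Hedged sketch of flattening streamed pages into a single iterator,
# mirroring the chain.from_iterable call above. Pages are fabricated.
from itertools import chain

def fetch_pages():
    yield [{"id": 1}, {"id": 2}]   # page 1
    yield [{"id": 3}]              # page 2

records = chain.from_iterable(fetch_pages())
print([r["id"] for r in records])  # [1, 2, 3]
```
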
def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: A GPU memory allocation unit
:type src: pycuda.driver.DeviceAllocation
"""
if isinstance(src, drv.DeviceAllocation):
drv.memcpy_dtoh(dest, src)
else:
dest = src | 0.004739 |
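
A minimal round-trip sketch of the PyCUDA calls involved (host-to-device allocation and copy, then the device-to-host copy shown above). This assumes a CUDA-capable GPU and an installed pycuda:

```python
# Hedged sketch: allocate on the GPU, copy data up, then copy it back down
# with drv.memcpy_dtoh, as in the method above. Requires a CUDA device.
import numpy as np
import pycuda.autoinit          # creates a context on the default device
import pycuda.driver as drv

src_host = np.arange(16, dtype=np.float32)
dev_buf = drv.mem_alloc(src_host.nbytes)   # pycuda.driver.DeviceAllocation
drv.memcpy_htod(dev_buf, src_host)

dest = np.empty_like(src_host)
drv.memcpy_dtoh(dest, dev_buf)             # device-to-host copy
assert np.allclose(dest, src_host)
```
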
def slugs_navigation_encode(self, u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c):
'''
Data used in the navigation algorithm.
u_m : Measured Airspeed prior to the nav filter in m/s (float)
phi_c : Commanded Roll (float)
theta_c : Commanded Pitch (float)
psiDot_c : Commanded Turn rate (float)
ay_body : Y component of the body acceleration (float)
totalDist : Total Distance to Run on this leg of Navigation (float)
dist2Go : Remaining distance to Run on this leg of Navigation (float)
fromWP : Origin WP (uint8_t)
toWP : Destination WP (uint8_t)
h_c : Commanded altitude in 0.1 m (uint16_t)
'''
return MAVLink_slugs_navigation_message(u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c) | 0.007745 |
def aggr(self, group, *attributes, keep_all_rows=False, **named_attributes):
"""
Aggregation/projection operator
:param group: an entity set whose entities will be grouped per entity of `self`
:param attributes: attributes of self to include in the result
:param keep_all_rows: True = preserve the number of elements in the result (equivalent of LEFT JOIN in SQL)
:param named_attributes: renamings and computations on attributes of self and group
:return: an entity set representing the result of the aggregation/projection operator of entities from `group`
per entity of `self`
"""
return GroupBy.create(self, group, keep_all_rows=keep_all_rows,
attributes=attributes, named_attributes=named_attributes) | 0.008578 |
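
A hedged usage sketch, assuming `Session` and `Trial` are table classes in the same framework (both names are hypothetical); it counts trials per session while keeping sessions that have no trials:

```python
# Hedged usage sketch; Session and Trial are hypothetical table classes.
# keep_all_rows=True keeps sessions with no trials (LEFT JOIN semantics).
per_session = Session.aggr(Trial, n_trials='count(trial_id)', keep_all_rows=True)
print(per_session)  # resulting entity set, one entity per session
```
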
def get_first(self, attr, value, e=0.000001,
sort_by="__name__", reverse=False):
"""Get the first nested Constant class that met ``klass.attr == value``.
:param attr: attribute name.
:param value: value.
:param e: used for float value comparison.
        :param sort_by: nested classes are ordered by the <sort_by> attribute.
.. versionchanged:: 0.0.5
"""
for _, klass in self.subclasses(sort_by, reverse):
try:
if getattr(klass, attr) == approx(value, e):
return klass
except:
pass
return None | 0.007716 |
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
    x: numpy array, uncorrupted image, assumed to have uint8 pixels in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | 0.011494 |
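
The corruption itself is salt-and-pepper noise from scikit-image followed by clipping; a standalone sketch without the wrapper (the input image is random data and the severity is picked arbitrarily for illustration):

```python
# Hedged standalone sketch of the same salt-and-pepper corruption using
# scikit-image directly. The input image is random data, for illustration.
import numpy as np
from skimage.util import random_noise

severity = 3
amount = [.03, .06, .09, 0.17, 0.27][severity - 1]

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
noisy = random_noise(img / 255., mode='s&p', amount=amount)
noisy_uint8 = np.round(np.clip(noisy, 0, 1) * 255).astype(np.uint8)
print(noisy_uint8.shape, noisy_uint8.dtype)  # (32, 32, 3) uint8
```
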
def delete_account_certificate(self, account_id, cert_id, **kwargs): # noqa: E501
"""Delete trusted certificate by ID. # noqa: E501
An endpoint for deleting the trusted certificate. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be deleted. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
return data | 0.001531 |
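
A hedged call sketch showing the synchronous and asynchronous forms described in the docstring; `api` is assumed to be an already-configured client exposing this endpoint, and both IDs are placeholders:

```python
# Hedged usage sketch; 'api' is an assumed, already-configured client and
# both identifiers below are placeholders.
api.delete_account_certificate("0123456789abcdef", "cert-0001")           # blocks

thread = api.delete_account_certificate("0123456789abcdef", "cert-0001",
                                         asynchronous=True)               # returns a thread
thread.get()                                                              # wait for completion
```
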
def get_all_celcius_commands():
"""Query cron for all celcius commands"""
p = subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return [x for x in out.split('\n') if 'CJOBID' in x] | 0.007782 |
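
The same crontab listing can be written with `subprocess.run`; a hedged sketch that keeps the 'CJOBID' marker used above and decodes the output as text:

```python
# Hedged sketch of listing the current user's crontab and filtering marked jobs.
# text=True makes stdout a str rather than bytes.
import subprocess

proc = subprocess.run(["crontab", "-l"], capture_output=True, text=True)
celcius_jobs = [line for line in proc.stdout.splitlines() if "CJOBID" in line]
print(celcius_jobs)
```
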
def _parse_tag(self, tag):
"""
Given a tag string (characters enclosed by []), this function will
parse any options and return a tuple of the form:
(valid, tag_name, closer, options)
"""
if not tag.startswith(self.tag_opener) or not tag.endswith(self.tag_closer) or ('\n' in tag) or ('\r' in tag):
return (False, tag, False, None)
tag_name = tag[len(self.tag_opener):-len(self.tag_closer)].strip()
if not tag_name:
return (False, tag, False, None)
closer = False
opts = {}
if tag_name[0] == '/':
tag_name = tag_name[1:]
closer = True
# Parse options inside the opening tag, if needed.
if (('=' in tag_name) or (' ' in tag_name)) and not closer:
tag_name, opts = self._parse_opts(tag_name)
return (True, tag_name.strip().lower(), closer, opts) | 0.003268 |
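
Assuming a parser instance configured with `tag_opener='['` and `tag_closer=']'` (BBCode-style tags), the method would behave roughly as follows; the option-bearing case also depends on `_parse_opts`, which is not shown above:

```python
# Hedged behaviour sketch; 'parser' is an assumed instance with
# tag_opener='[' and tag_closer=']'.
parser._parse_tag("[b]")        # -> (True, 'b', False, {})
parser._parse_tag("[/b]")       # -> (True, 'b', True, {})
parser._parse_tag("not a tag")  # -> (False, 'not a tag', False, None)
# Tags with options delegate to _parse_opts (not shown), e.g.:
# parser._parse_tag("[url=http://example.com]")
```
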
def get_input_nodes(G: nx.DiGraph) -> List[str]:
""" Get all input nodes from a network. """
return [n for n, d in G.in_degree() if d == 0] | 0.006803 |
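
A quick check with a three-node chain: only the node with no incoming edges is reported.

```python
# Small self-contained check of get_input_nodes on a toy graph.
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([("a", "b"), ("b", "c")])
print(get_input_nodes(G))  # ['a'] -- the only node with in-degree 0
```
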
def path_to_ls(fn):
""" Converts an absolute path to an entry resembling the output of
the ls command on most UNIX systems."""
st = os.stat(fn)
full_mode = 'rwxrwxrwx'
mode = ''
file_time = ''
d = ''
for i in range(9):
# Incrementally builds up the 9 character string, using characters from the
        # full_mode (defined above) and mode bits from the stat() system call.
mode += ((st.st_mode >> (8 - i)) & 1) and full_mode[i] or '-'
d = (os.path.isdir(fn)) and 'd' or '-'
file_time = time.strftime(' %b %d %H:%M ', time.gmtime(st.st_mtime))
list_format = '{0}{1} 1 ftp ftp {2}\t{3}{4}'.format(d, mode, str(st.st_size), file_time, os.path.basename(fn))
return list_format | 0.004011 |
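
A hedged usage sketch; the path is a placeholder and the printed values depend on the local file:

```python
# Hedged usage sketch; the path is a placeholder.
print(path_to_ls("/etc/hostname"))
# e.g. "-rw-r--r-- 1 ftp ftp 10\t Jan 01 00:00 hostname" (values vary by system)
```
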
def get_descriptor_by_id(self, id, is_master_id=None):
"""GetDescriptorById.
[Preview API]
:param str id:
:param bool is_master_id:
:rtype: :class:`<str> <azure.devops.v5_0.identity.models.str>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if is_master_id is not None:
query_parameters['isMasterId'] = self._serialize.query('is_master_id', is_master_id, 'bool')
response = self._send(http_method='GET',
location_id='a230389a-94f2-496c-839f-c929787496dd',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response) | 0.004449 |