text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def handle_ping(self, payload):
"""\
Respond to periodic PING messages from server
"""
self.logger.info('server ping: %s' % payload)
self.send('PONG %s' % payload, True) | 0.009569 |
def apply_vcc(self,vcc):
"""
Applies velocity contrast curve constraint to each population
See :func:`vespa.stars.StarPopulation.apply_vcc`;
all arguments passed to that function for each population.
"""
if 'secondary spectrum' not in self.constraints:
self.constraints.append('secondary spectrum')
for pop in self.poplist:
if not pop.is_specific:
try:
pop.apply_vcc(vcc)
except Exception:
logging.info('VCC constraint not applied to %s model' % (pop.model)) | 0.008306 |
def node_set_to_surface(self, tag):
"""
Converts a node set to surface.
"""
# Create a dummy node with label 0
nodes = self.nodes.copy()
dummy = nodes.iloc[0].copy()
dummy["coords"] *= np.nan
dummy["sets"] = True
nodes.loc[0] = dummy
# Getting element surfaces
element_surfaces = self.split("surfaces").unstack()
# killer hack !
surf = pd.DataFrame(
nodes.sets[tag].loc[element_surfaces.values.flatten()]
.values.reshape(element_surfaces.shape)
.prod(axis = 1)
.astype(bool),
index = element_surfaces.index).unstack().fillna(False)
for k in surf.keys():
self.elements["surfaces", tag, "f{0}".format(k[1]+1) ] = surf.loc[:, k] | 0.011658 |
def load(self, code, setup='', teardown=''):
"""Prepares a set of setup, test, and teardown code to be
run in the console.
PARAMETERS:
code -- list; processed lines of code. Elements in the list are
either strings (input) or CodeAnswer objects (output)
setup -- str; raw setup code
teardown -- str; raw teardown code
"""
self._setup = textwrap.dedent(setup).splitlines()
self._code = code
self._teardown = textwrap.dedent(teardown).splitlines() | 0.003623 |
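A minimal usage sketch of the dedent/splitlines normalization used above; the indented triple-quoted block and variable names here are illustrative only.
import textwrap

setup = """
    import math
    x = math.sqrt(2)
"""
# dedent strips the common leading whitespace, splitlines yields one entry per line
print(textwrap.dedent(setup).splitlines())
# ['', 'import math', 'x = math.sqrt(2)']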
def serialize_break(ctx, document, elem, root):
"Serialize break element."
if elem.break_type == u'textWrapping':
_div = etree.SubElement(root, 'br')
else:
_div = etree.SubElement(root, 'span')
if ctx.options['embed_styles']:
_div.set('style', 'page-break-after: always;')
fire_hooks(ctx, document, elem, _div, ctx.get_hook('page_break'))
return root | 0.002445 |
def on_lstLayerModes_itemSelectionChanged(self):
"""Update layer mode description label and unit widgets.
.. note:: This is an automatic Qt slot
executed when the subcategory selection changes.
"""
self.clear_further_steps()
# Set widgets
layer_mode = self.selected_layermode()
# Exit if no selection
if not layer_mode:
self.lblDescribeLayerMode.setText('')
return
# Set description label
self.lblDescribeLayerMode.setText(layer_mode['description'])
# Enable the next button
self.parent.pbnNext.setEnabled(True) | 0.00311 |
def top_charts(self):
"""Get a listing of the default top charts."""
response = self._call(mc_calls.BrowseTopChart)
top_charts = response.body
return top_charts | 0.035294 |
def initialize():
"""
Initializes the cauldron library by confirming that it can be imported
by the importlib library. If the attempt to import it fails, the system
path will be modified and the attempt retried. If both attempts fail, an
import error will be raised.
"""
cauldron_module = get_cauldron_module()
if cauldron_module is not None:
return cauldron_module
sys.path.append(ROOT_DIRECTORY)
cauldron_module = get_cauldron_module()
if cauldron_module is not None:
return cauldron_module
raise ImportError(' '.join((
'Unable to import cauldron.',
'The package was not installed in a known location.'
))) | 0.001439 |
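The same import-then-retry pattern in a standalone, hedged form; `get_cauldron_module` and `ROOT_DIRECTORY` belong to the surrounding module, so the names below are stand-ins.
import importlib
import sys

ROOT_DIRECTORY = '/path/to/project'  # hypothetical install location

def get_module(name):
    """Return the module if it can be imported, otherwise None."""
    try:
        return importlib.import_module(name)
    except ImportError:
        return None

def initialize(name='cauldron'):
    module = get_module(name)
    if module is not None:
        return module
    # Retry once after extending the search path, then give up loudly.
    sys.path.append(ROOT_DIRECTORY)
    module = get_module(name)
    if module is not None:
        return module
    raise ImportError(
        'Unable to import {}. The package was not installed '
        'in a known location.'.format(name))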
def set_room_name(self, room_id, name, timestamp=None):
"""Perform PUT /rooms/$room_id/state/m.room.name
Args:
room_id (str): The room ID
name (str): The new room name
timestamp (int): Set origin_server_ts (For application services only)
"""
body = {
"name": name
}
return self.send_state_event(room_id, "m.room.name", body, timestamp=timestamp) | 0.00907 |
def run(self):
""" Run reinforcement learning algorithm """
device = self.model_config.torch_device()
# Reinforcer is the learner for the reinforcement learning model
reinforcer = self.reinforcer.instantiate(device)
optimizer = self.optimizer_factory.instantiate(reinforcer.model)
# All callbacks used for learning
callbacks = self.gather_callbacks(optimizer)
# Metrics to track through this training
metrics = reinforcer.metrics()
training_info = self.resume_training(reinforcer, callbacks, metrics)
reinforcer.initialize_training(training_info)
training_info.on_train_begin()
if training_info.optimizer_initial_state:
optimizer.load_state_dict(training_info.optimizer_initial_state)
global_epoch_idx = training_info.start_epoch_idx + 1
while training_info['frames'] < self.total_frames:
epoch_info = EpochInfo(
training_info,
global_epoch_idx=global_epoch_idx,
batches_per_epoch=self.batches_per_epoch,
optimizer=optimizer,
)
reinforcer.train_epoch(epoch_info)
if self.openai_logging:
self._openai_logging(epoch_info.result)
self.storage.checkpoint(epoch_info, reinforcer.model)
global_epoch_idx += 1
training_info.on_train_end()
return training_info | 0.001365 |
def to_timestamp(dt, timestamp):
"""Convert datetime to google.protobuf.Timestamp.
Args:
dt: a timezone naive datetime.
timestamp: a google.protobuf.Timestamp to populate.
Raises:
TypeError: if a timezone aware datetime was provided.
"""
if dt.tzinfo:
# this is an "aware" datetime with an explicit timezone. Throw an error.
raise TypeError('Cannot store a timezone aware datetime. '
'Convert to UTC and store the naive datetime.')
timestamp.seconds = calendar.timegm(dt.timetuple())
timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO | 0.00846 |
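A usage sketch for the converter above, assuming the `protobuf` package is available and that `_NANOS_PER_MICRO` is 1000 (as the microsecond-to-nanosecond conversion implies).
import calendar
from datetime import datetime
from google.protobuf.timestamp_pb2 import Timestamp

_NANOS_PER_MICRO = 1000

dt = datetime(2020, 1, 1, 12, 30, 45, 123456)  # timezone-naive datetime
ts = Timestamp()
to_timestamp(dt, ts)
print(ts.seconds, ts.nanos)  # epoch seconds and 123456000 nanoseconds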
def read(filename, attrs=None):
"""This will read any VTK file! It will figure out what reader to use
then wrap the VTK object for use in ``vtki``.
Parameters
----------
filename : str
The path of the file to read. The file extension determines which reader is used.
attrs : dict, optional
A dictionary of attributes to call on the reader. Keys of dictionary are
the attribute/method names and values are the arguments passed to those
calls. If you do not have any attributes to call, pass ``None`` as the
value.
"""
filename = os.path.abspath(os.path.expanduser(filename))
ext = get_ext(filename)
# From the extension, decide which reader to use
if attrs is not None:
reader = get_reader(filename)
return standard_reader_routine(reader, filename, attrs=attrs)
elif ext in '.vti': # ImageData
return vtki.UniformGrid(filename)
elif ext in '.vtr': # RectilinearGrid
return vtki.RectilinearGrid(filename)
elif ext in '.vtu': # UnstructuredGrid
return vtki.UnstructuredGrid(filename)
elif ext in ['.ply', '.obj', '.stl']: # PolyData
return vtki.PolyData(filename)
elif ext in '.vts': # StructuredGrid
return vtki.StructuredGrid(filename)
elif ext in ['.vtm', '.vtmb']:
return vtki.MultiBlock(filename)
elif ext in ['.e', '.exo']:
return read_exodus(filename)
elif ext in ['.vtk']:
# Attempt to use the legacy reader...
return read_legacy(filename)
else:
# Attempt find a reader in the readers mapping
try:
reader = get_reader(filename)
return standard_reader_routine(reader, filename)
except KeyError:
pass
raise IOError("This file was not able to be automatically read by vtki.") | 0.004018 |
def SetAlpha(self, alpha):
'''
Change the window's transparency
:param alpha: From 0 to 1 with 0 being completely transparent
:return:
'''
self._AlphaChannel = alpha
if self._AlphaChannel is not None:
self.QT_QMainWindow.setWindowOpacity(self._AlphaChannel) | 0.006154 |
def calc_environment_entropy(world, world_size=(60, 60),
exclude_desert=False):
"""
Calculate the Shannon entropy of a given environment, treating each niche
(where niches are defined by regions in which different sets of resources
are rewarded) as a category. The environment is specified with the
following inputs:
world - a list of lists of sets of resources (strings) indicating
the set of resources in every cell in the world.
world_size - a tuple indicating the dimensions of the world.
Default = 60x60, because that's the default Avida world size.
exclude_desert - an optional argument which defaults to False. If True is
specified, niches in which no tasks are rewarded
will not be considered in the calculation.
"""
niches = make_niche_dictionary(world, world_size)
if exclude_desert and frozenset([]) in niches:
del niches[frozenset([])]
# Calculate entropy
return entropy(niches) | 0.000977 |
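A minimal sketch of the `entropy` helper used above, under the assumption that `make_niche_dictionary` maps each niche (a frozenset of resources) to the list of world cells assigned to it; the Shannon entropy is then taken over niche sizes.
import math

def entropy(niches):
    counts = [len(cells) for cells in niches.values()]
    total = float(sum(counts))
    # H = -sum(p_i * log2(p_i)) over non-empty niches
    return -sum((c / total) * math.log(c / total, 2) for c in counts if c > 0)

# Two equally sized niches carry exactly one bit of entropy
print(entropy({frozenset(['not']): [(0, 0), (0, 1)],
               frozenset(['nand']): [(1, 0), (1, 1)]}))  # 1.0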
def filter_on_wire_representation(ava, acs, required=None, optional=None):
"""
:param ava: A dictionary with attributes and values
:param acs: List of tuples (Attribute Converter name,
Attribute Converter instance)
:param required: A list of saml.Attributes
:param optional: A list of saml.Attributes
:return: Dictionary of expected/wanted attributes and values
"""
acsdic = dict([(ac.name_format, ac) for ac in acs])
if required is None:
required = []
if optional is None:
optional = []
res = {}
for attr, val in ava.items():
done = False
for req in required:
try:
_name = acsdic[req.name_format]._to[attr]
if _name == req.name:
res[attr] = val
done = True
except KeyError:
pass
if done:
continue
for opt in optional:
try:
_name = acsdic[opt.name_format]._to[attr]
if _name == opt.name:
res[attr] = val
break
except KeyError:
pass
return res | 0.000841 |
def dfs_inorder(self, reverse=False):
"""Generator that returns each element of the tree in Inorder order.
Keyword arguments:
reverse -- if true, the search is done from right to left."""
stack = deque()
visited = set()
visited.add(self)
if reverse:
stack.append(self.childs[0])
stack.append(self)
stack.extend(self.childs[1:])
else:
stack.extend(self.childs[1:])
stack.append(self)
stack.append(self.childs[0])
while stack:
node = stack.pop()
if node in visited or not node.childs:
yield node
else:
stack.append(node)
visited.add(node)
if hasattr(node, "childs"):
if reverse:
stack.extend(node.childs)
else:
stack.extend(node.childs[::-1]) | 0.002058 |
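A usage sketch, assuming a minimal node type that keeps its children in a `childs` list and picks up the generator above as a method; the tree and values are illustrative.
from collections import deque

class Node(object):
    def __init__(self, value, childs=()):
        self.value = value
        self.childs = list(childs)

Node.dfs_inorder = dfs_inorder  # attach the generator defined above

# 1 + 2 as a one-level expression tree: inorder visits left child, node, right child
root = Node('+', [Node(1), Node(2)])
print([n.value for n in root.dfs_inorder()])  # [1, '+', 2]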
def once(ctx, name):
"""Run kibitzr checks once and exit"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name)) | 0.004902 |
def accuracy(current, predicted):
"""
Computes the accuracy of the TM at time-step t based on the prediction
at time-step t-1 and the current active columns at time-step t.
@param current (array) binary vector containing current active columns
@param predicted (array) binary vector containing predicted active columns
@return acc (float) prediction accuracy of the TM at time-step t
"""
acc = 0
if np.count_nonzero(predicted) > 0:
acc = float(np.dot(current, predicted))/float(np.count_nonzero(predicted))
return acc | 0.014599 |
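A quick check with binary column vectors: three of the four predicted columns are also active at the current time-step, so the accuracy is 0.75.
import numpy as np

current = np.array([1, 1, 0, 1, 1, 0])
predicted = np.array([1, 1, 1, 1, 0, 0])
print(accuracy(current, predicted))  # 0.75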
def getcomponentdetails(self, product, component, force_refresh=False):
"""
Helper for accessing a single component's info. This is a wrapper
around getcomponentsdetails, see that for explanation
"""
d = self.getcomponentsdetails(product, force_refresh)
return d[component] | 0.006231 |
def cert(name,
aliases=None,
email=None,
webroot=None,
test_cert=False,
renew=None,
keysize=None,
server=None,
owner='root',
group='root',
mode='0640',
certname=None,
preferred_challenges=None,
tls_sni_01_port=None,
tls_sni_01_address=None,
http_01_port=None,
http_01_address=None,
dns_plugin=None,
dns_plugin_credentials=None,
dns_plugin_propagate_seconds=10):
'''
Obtain/renew a certificate from an ACME CA, probably Let's Encrypt.
:param name: Common Name of the certificate (DNS name of certificate)
:param aliases: subjectAltNames (Additional DNS names on certificate)
:param email: e-mail address for interaction with ACME provider
:param webroot: True or a full path to use webroot. Otherwise use standalone mode
:param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server')
:param renew: True/'force' to force a renewal, or a window of renewal before expiry in days
:param keysize: RSA key bits
:param server: API endpoint to talk to
:param owner: owner of the private key file
:param group: group of the private key file
:param mode: mode of the private key file
:param certname: Name of the certificate to save
:param preferred_challenges: A sorted, comma delimited list of the preferred
challenge to use during authorization with the
most preferred challenge listed first.
:param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 443.
:param tls_sni_01_address: The address the server listens to during tls-sni-01
challenge.
:param http_01_port: Port used in the http-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 80.
:param http_01_address: The address the server listens to during http-01 challenge.
:param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare' or 'digitalocean')
:param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin
:param dns_plugin_propagate_seconds: Number of seconds to wait for DNS propagation before
asking ACME servers to verify the DNS record. (default 10)
:return: dict with 'result' True/False/None, 'comment' and certificate's expiry date ('not_after')
CLI example:
.. code-block:: bash
salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public
'''
cmd = [LEA, 'certonly', '--non-interactive', '--agree-tos']
supported_dns_plugins = ['cloudflare', 'digitalocean']
cert_file = _cert_file(name, 'cert')
if not __salt__['file.file_exists'](cert_file):
log.debug('Certificate %s does not exist (yet)', cert_file)
renew = False
elif needs_renewal(name, renew):
log.debug('Certificate %s will be renewed', cert_file)
cmd.append('--renew-by-default')
renew = True
if server:
cmd.append('--server {0}'.format(server))
if certname:
cmd.append('--cert-name {0}'.format(certname))
if test_cert:
if server:
return {'result': False, 'comment': 'Use either server or test_cert, not both'}
cmd.append('--test-cert')
if webroot:
cmd.append('--authenticator webroot')
if webroot is not True:
cmd.append('--webroot-path {0}'.format(webroot))
elif dns_plugin in supported_dns_plugins:
if dns_plugin == 'cloudflare':
cmd.append('--dns-cloudflare')
cmd.append('--dns-cloudflare-credentials {0}'.format(dns_plugin_credentials))
cmd.append('--dns-cloudflare-propagation-seconds {0}'.format(dns_plugin_propagate_seconds))
elif dns_plugin == 'digitalocean':
cmd.append('--dns-digitalocean')
cmd.append('--dns-digitalocean-credentials {0}'.format(dns_plugin_credentials))
cmd.append('--dns-digitalocean-propagation-seconds {0}'.format(dns_plugin_propagate_seconds))
else:
return {'result': False, 'comment': 'DNS plugin \'{0}\' is not supported'.format(dns_plugin)}
else:
cmd.append('--authenticator standalone')
if email:
cmd.append('--email {0}'.format(email))
if keysize:
cmd.append('--rsa-key-size {0}'.format(keysize))
cmd.append('--domains {0}'.format(name))
if aliases is not None:
for dns in aliases:
cmd.append('--domains {0}'.format(dns))
if preferred_challenges:
cmd.append('--preferred-challenges {}'.format(preferred_challenges))
if tls_sni_01_port:
cmd.append('--tls-sni-01-port {}'.format(tls_sni_01_port))
if tls_sni_01_address:
cmd.append('--tls-sni-01-address {}'.format(tls_sni_01_address))
if http_01_port:
cmd.append('--http-01-port {}'.format(http_01_port))
if http_01_address:
cmd.append('--http-01-address {}'.format(http_01_address))
res = __salt__['cmd.run_all'](' '.join(cmd))
if res['retcode'] != 0:
if 'expand' in res['stderr']:
cmd.append('--expand')
res = __salt__['cmd.run_all'](' '.join(cmd))
if res['retcode'] != 0:
return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])}
else:
return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])}
if 'no action taken' in res['stdout']:
comment = 'Certificate {0} unchanged'.format(cert_file)
result = None
elif renew:
comment = 'Certificate {0} renewed'.format(name)
result = True
else:
comment = 'Certificate {0} obtained'.format(name)
result = True
ret = {'comment': comment, 'not_after': expires(name), 'changes': {}, 'result': result}
ret, _ = __salt__['file.check_perms'](_cert_file(name, 'privkey'),
ret,
owner, group, mode,
follow_symlinks=True)
return ret | 0.003599 |
def update_sources(self):
"""Update local sources based on response from Elasticsearch"""
ES_documents = self.get_docs_sources_from_ES()
for doc, update_spec, action_buffer_index, get_from_ES in self.doc_to_update:
if get_from_ES:
# Update source based on response from ES
ES_doc = next(ES_documents)
if ES_doc["found"]:
source = ES_doc["_source"]
else:
# Document not found in elasticsearch,
# Seems like something went wrong during replication
LOG.error(
"mGET: Document id: %s has not been found "
"in Elasticsearch. Due to that "
"following update failed: %s",
doc["_id"],
update_spec,
)
self.reset_action(action_buffer_index)
continue
else:
# Get source stored locally before applying update
# as it is up-to-date
source = self.get_from_sources(doc["_index"], doc["_type"], doc["_id"])
if not source:
LOG.error(
"mGET: Document id: %s has not been found "
"in local sources. Due to that following "
"update failed: %s",
doc["_id"],
update_spec,
)
self.reset_action(action_buffer_index)
continue
updated = self.docman.apply_update(source, update_spec)
# Remove _id field from source
if "_id" in updated:
del updated["_id"]
# Every time, update locally stored sources to keep them up-to-date
self.add_to_sources(doc, updated)
self.action_buffer[action_buffer_index][
"_source"
] = self.docman._formatter.format_document(updated)
# Remove empty actions if there were errors
self.action_buffer = [
each_action for each_action in self.action_buffer if each_action
] | 0.001772 |
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist. (User/IP pair)
in order to provide whitelisting for the API similar to the
master, but over the API.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user %s from IP %s")
success_str = ("[api_acl] Authentication successful for "
"user %s from IP %s")
pass_str = ("[api_acl] Authentication not checked for "
"user %s from IP %s")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str, username, ip)
return True
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(failure_str, username, ip)
return False
else:
logger.info(pass_str, username, ip)
return True | 0.000446 |
def _check_and_uninstall_ruby(ret, ruby, user=None):
'''
Verify that ruby is uninstalled
'''
ret = _ruby_installed(ret, ruby, user=user)
if ret['result']:
if ret['default']:
__salt__['rbenv.default']('system', runas=user)
if __salt__['rbenv.uninstall_ruby'](ruby, runas=user):
ret['result'] = True
ret['changes'][ruby] = 'Uninstalled'
ret['comment'] = 'Successfully removed ruby'
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to uninstall ruby'
return ret
else:
ret['result'] = True
ret['comment'] = 'Ruby {0} is already absent'.format(ruby)
return ret | 0.001359 |
def update(gandi, resource, memory, cores, console, password, background,
reboot):
"""Update a virtual machine.
Resource can be a Hostname or an ID
"""
pwd = None
if password:
pwd = click.prompt('password', hide_input=True,
confirmation_prompt=True)
max_memory = None
if memory:
max_memory = gandi.iaas.required_max_memory(resource, memory)
if max_memory and not reboot:
gandi.echo('memory update must be done offline.')
if not click.confirm("reboot machine %s?" % resource):
return
result = gandi.iaas.update(resource, memory, cores, console, pwd,
background, max_memory)
if background:
gandi.pretty_echo(result)
return result | 0.001255 |
def send(self, message_type, message, connection_id, one_way=False):
"""Sends a message via the network.
Args:
message_type (str): The type of the message.
message (bytes): The message to be sent.
connection_id (str): The connection to send it to.
"""
try:
self._network.send(message_type, message, connection_id,
one_way=one_way)
except ValueError:
LOGGER.debug("Connection %s is no longer valid. "
"Removing from list of peers.",
connection_id)
if connection_id in self._peers:
del self._peers[connection_id] | 0.002778 |
def set_frequency(self, host, sem=None, interval=None):
"""Set frequency for host with sem and interval."""
# single sem or global sem
sem = sem or self.sem
interval = self.interval if interval is None else interval
frequency = Frequency(sem, interval, host)
frequencies = {host: frequency}
self.update_frequency(frequencies)
return frequency | 0.004926 |
def get_settings(application, force_instance=False):
"""
Retrieve the media type settings for an application.
:param tornado.web.Application application:
:keyword bool force_instance: if :data:`True` then create the
instance if it does not exist
:return: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings
"""
try:
return application.settings[SETTINGS_KEY]
except KeyError:
if not force_instance:
return None
return install(application, None) | 0.001792 |
def set_status(self, new_status, notes=None):
'''Save all changes and set to the given new_status'''
self.status_id = new_status
try:
self.status['id'] = self.status_id
# We don't have the id to name mapping, so blank the name
self.status['name'] = None
except Exception:
pass
self.save(notes) | 0.008086 |
def cmp_public_numbers(pn1, pn2):
"""
Compare 2 sets of public numbers. This is a way to compare
2 public RSA keys. If the sets are the same then the keys are the same.
:param pn1: The set of values belonging to the 1st key
:param pn2: The set of values belonging to the 2nd key
:return: True if the sets are the same, otherwise False.
"""
if pn1.n == pn2.n:
if pn1.e == pn2.e:
return True
return False | 0.002179 |
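A small illustration; any objects exposing RSA `n` and `e` attributes work here, for example the `RSAPublicNumbers` values produced by the `cryptography` package. The named tuples below are stand-ins.
from collections import namedtuple

PublicNumbers = namedtuple('PublicNumbers', ['n', 'e'])

key_a = PublicNumbers(n=3233, e=17)
key_b = PublicNumbers(n=3233, e=17)
key_c = PublicNumbers(n=3233, e=65537)
print(cmp_public_numbers(key_a, key_b))  # True  (same modulus and exponent)
print(cmp_public_numbers(key_a, key_c))  # False (different exponent)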
def replace_seqs(ol,value,indexes,**kwargs):
'''
from elist.elist import *
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
new = replace_seqs(ol,'AAA',[1,3,7])
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
rslt = replace_seqs(ol,'AAA',[1,3,7],mode="original")
ol
rslt
id(ol)
id(rslt)
#replace_indexes = replace_seqs
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
indexes = list(indexes)
new = []
length = ol.__len__()
cpol = copy.deepcopy(ol)
for i in range(0,length):
if(i in indexes):
new.append(value)
else:
new.append(cpol[i])
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) | 0.005549 |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Activate Logical Partition (requires classic mode)."""
assert wait_for_completion is True # async not supported yet
lpar_oid = uri_parms[0]
lpar_uri = '/api/logical-partitions/' + lpar_oid
try:
lpar = hmc.lookup_by_uri(lpar_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = lpar.manager.parent
assert not cpc.dpm_enabled
status = lpar.properties.get('status', None)
force = body.get('force', False) if body else False
if status == 'operating' and not force:
raise ServerError(method, uri, reason=263,
message="LPAR {!r} could not be activated "
"because the LPAR is in status {} "
"(and force was not specified).".
format(lpar.name, status))
act_profile_name = body.get('activation-profile-name', None)
if not act_profile_name:
act_profile_name = lpar.properties.get(
'next-activation-profile-name', None)
if act_profile_name is None:
act_profile_name = ''
# Perform the check between LPAR name and profile name
if act_profile_name != lpar.name:
raise ServerError(method, uri, reason=263,
message="LPAR {!r} could not be activated "
"because the name of the image activation "
"profile {!r} is different from the LPAR name.".
format(lpar.name, act_profile_name))
# Reflect the activation in the resource
lpar.properties['status'] = LparActivateHandler.get_status()
lpar.properties['last-used-activation-profile'] = act_profile_name | 0.001544 |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
xmlstr = '<OnEvent port="{0}"'.format(self.port)
chxmlstr = ''
for action in self.actions:
chxmlstr += action.toxml()
if chxmlstr:
xmlstr += '>' + chxmlstr + '</OnEvent>'
else:
xmlstr += '/>'
return xmlstr | 0.005181 |
def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
self._profile_list = yield gen.maybe_future(self.profile_list(self))
else:
self._profile_list = self.profile_list
if self._profile_list:
yield self._load_profile(self.user_options.get('profile', None)) | 0.004754 |
def getElementsByType(self, type):
"""
retrieves all Elements that are of type type
@type type: class
@param type: type of the element
"""
foundElements=[]
for element in self.getAllElementsOfHirarchy():
if isinstance(element, type):
foundElements.append(element)
return foundElements | 0.016949 |
def pull(options):
"""
pull all remote programs to a local directory
"""
configuration = config.get_default()
app_url = configuration['app_url']
if options.deployment != None:
deployment_name = options.deployment
else:
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if options.all == True:
account_id = None
else:
account_id = accounts.get_logged_in_account_id(token_manager=token_manager,
app_url=app_url)
programs_details = programs.get_programs(deployment_name,
token_manager=token_manager,
created_by=account_id,
app_url=app_url)
if not os.path.exists(options.directory):
os.mkdir(options.directory)
account_ids = set()
for program in programs_details:
account_ids.add(program['createdBy'])
accounts_details = accounts.get_accounts(account_ids,
token_manager=token_manager,
app_url=app_url)
account_lookup = {}
for account in accounts_details['accounts']:
account_lookup[account['id']] = account
decision = None
for program in programs_details:
program_name = program['name']
juttle_filename = '%s.juttle' % escape_filename(program_name)
if options.per_user_directory:
username = account_lookup[program['createdBy']]['username']
userdir = os.path.join(options.directory, username)
if not os.path.exists(userdir):
os.mkdir(userdir)
juttle_filepath = os.path.join(userdir, juttle_filename)
else:
juttle_filepath = os.path.join(options.directory, juttle_filename)
if os.path.exists(juttle_filepath) and decision != 'A':
program_code = None
with codecs.open(juttle_filepath, 'r', encoding='UTF-8') as program_file:
program_code = program_file.read()
local_last_edited = int(os.stat(juttle_filepath).st_mtime)
remote_last_edited = dates.iso8601_to_epoch(program['lastEdited'])
if local_last_edited != remote_last_edited:
info('Juttle changed since last pull for "%s"' % program_name)
decision = console.prompt('Would you like to '
'(O - Override,'
' S - Skip,'
' R - Review Changes,'
' A - override All)?')
if decision == 'R':
info('Following is what would change if we overrode using your copy:')
info('*'*80)
for line in difflib.ndiff(program['code'].split('\n'),
program_code.split('\n')):
info(line)
info('*'*80)
decision = console.prompt('Would you like to '
'(O - Override,'
' S - Skip)?')
if decision == 'S':
# jump to the next file
continue
elif decision == 'O':
pass
elif decision == 'A':
pass
else:
raise JutException('Unexpected option "%s"' % decision)
info('importing program "%s" to %s' % (program['name'], juttle_filepath))
with codecs.open(juttle_filepath, 'w', encoding='UTF-8') as program_file:
program_file.write(program['code'])
# update creation time to match the lastEdited field
epoch = dates.iso8601_to_epoch(program['lastEdited'])
os.utime(juttle_filepath, (epoch, epoch)) | 0.001863 |
def plaintext(cls):
"""Uses only authentication mechanisms that provide the credentials in
un-hashed form, typically meaning
:attr:`~pysasl.AuthenticationCredentials.has_secret` is True.
Returns:
A new :class:`SASLAuth` object.
"""
builtin_mechs = cls._get_builtin_mechanisms()
plaintext_mechs = [mech for _, mech in builtin_mechs.items()
if mech.insecure and mech.priority is not None]
return SASLAuth(plaintext_mechs) | 0.003817 |
def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
Only collects information, delegates handling further to _handle_config_update
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1] if "key" not in info['kwargs'] else info['kwargs']['key']
# config_value = info['args'][-1] if "value" not in info['kwargs'] else info['kwargs']['value']
self._handle_config_update(config_m, config_key) | 0.007364 |
def pivot_table(expr, values=None, rows=None, columns=None, aggfunc='mean',
fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame.
:param expr: collection
:param values (optional): column to aggregate
:param rows: rows to group
:param columns: keys to group by on the pivot table column
:param aggfunc: aggregate function or functions
:param fill_value (optional): value to replace missing value with, default None
:return: collection
:Example:
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = df.pivot_table(values='D', rows=['A', 'B'], columns='C', aggfunc='sum')
>>> table
A B large_D_sum small_D_sum
0 bar one 4.0 5.0
1 bar two 7.0 6.0
2 foo one 4.0 1.0
3 foo two NaN 6.0
"""
def get_names(iters):
return [r if isinstance(r, six.string_types) else r.name
for r in iters]
def get_aggfunc_name(f):
if isinstance(f, six.string_types):
if '(' in f:
f = re.sub(r' *\( *', '_', f)
f = re.sub(r' *[+\-\*/,] *', '_', f)
f = re.sub(r' *\) *', '', f)
f = f.replace('.', '_')
return f
if isinstance(f, FunctionWrapper):
return f.output_names[0]
return 'aggregation'
if not rows:
raise ValueError('No group keys passed')
rows = utils.to_list(rows)
rows_names = get_names(rows)
rows = [expr._get_field(r) for r in rows]
if isinstance(aggfunc, dict):
agg_func_names = lkeys(aggfunc)
aggfunc = lvalues(aggfunc)
else:
aggfunc = utils.to_list(aggfunc)
agg_func_names = [get_aggfunc_name(af) for af in aggfunc]
if not columns:
if values is None:
values = [n for n in expr.schema.names if n not in rows_names]
else:
values = utils.to_list(values)
values = [expr._get_field(v) for v in values]
names = rows_names
types = [r.dtype for r in rows]
for func, func_name in zip(aggfunc, agg_func_names):
for value in values:
if isinstance(func, six.string_types):
seq = value.eval(func, rewrite=False)
if isinstance(seq, ReprWrapper):
seq = seq()
else:
seq = value.agg(func)
seq = seq.rename('{0}_{1}'.format(value.name, func_name))
names.append(seq.name)
types.append(seq.dtype)
schema = Schema.from_lists(names, types)
return PivotTableCollectionExpr(_input=expr, _group=rows, _values=values,
_fill_value=fill_value, _schema=schema,
_agg_func=aggfunc, _agg_func_names=agg_func_names)
else:
columns = [expr._get_field(c) for c in utils.to_list(columns)]
if values:
values = utils.to_list(values)
else:
names = set(c.name for c in rows + columns)
values = [n for n in expr.schema.names if n not in names]
if not values:
raise ValueError('No values found for pivot_table')
values = [expr._get_field(v) for v in values]
if len(columns) > 1:
raise ValueError('More than one `columns` are not supported yet')
schema = DynamicSchema.from_lists(rows_names, [r.dtype for r in rows])
base_tp = PivotTableCollectionExpr
tp = type(base_tp.__name__, (DynamicCollectionExpr, base_tp), dict())
return tp(_input=expr, _group=rows, _values=values,
_columns=columns, _agg_func=aggfunc,
_fill_value=fill_value, _schema=schema,
_agg_func_names=agg_func_names) | 0.001207 |
def runRmFile(self, path, timeout=None, **kwargs):
""" remove a file from the worker """
cmd_args = {'path': path, 'logEnviron': self.logEnviron}
if timeout:
cmd_args['timeout'] = timeout
if self.workerVersionIsOlderThan('rmfile', '3.1'):
cmd_args['dir'] = os.path.abspath(path)
return self.runRemoteCommand('rmdir', cmd_args, **kwargs)
return self.runRemoteCommand('rmfile', cmd_args, **kwargs) | 0.004246 |
def log_p(self,z):
"""
The unnormalized log posterior components (the quantity we want to approximate)
RAO-BLACKWELLIZED!
"""
return np.array([self.log_p_blanket(i) for i in z]) | 0.022422 |
def parse_xml(self, xml):
'''
:param xml: lxml.etree.Element representing a single VocabularyCodeSet
'''
xmlutils = XmlUtils(xml)
self.name = xmlutils.get_string_by_xpath('name')
self.family = xmlutils.get_string_by_xpath('family')
self.version = xmlutils.get_string_by_xpath('version')
for item in xml.xpath('code-item'):
self.code_item.append(VocabularyCodeItem(item))
self.is_vocab_truncated = xmlutils.get_bool_by_xpath('is-vocab-truncated')
self.language = xmlutils.get_lang() | 0.006885 |
def _plot_time_list(sdat, lovs, tseries, metas, times=None):
"""Plot requested profiles"""
if times is None:
times = {}
for vfig in lovs:
fig, axes = plt.subplots(nrows=len(vfig), sharex=True,
figsize=(12, 2 * len(vfig)))
axes = [axes] if len(vfig) == 1 else axes
fname = ['time']
for iplt, vplt in enumerate(vfig):
ylabel = None
for ivar, tvar in enumerate(vplt):
fname.append(tvar)
time = times[tvar] if tvar in times else tseries['t']
axes[iplt].plot(time, tseries[tvar],
conf.time.style,
label=metas[tvar].description)
lbl = metas[tvar].kind
if ylabel is None:
ylabel = lbl
elif ylabel != lbl:
ylabel = ''
if ivar == 0:
ylabel = metas[tvar].description
if ylabel:
_, unit = sdat.scale(1, metas[tvar].dim)
if unit:
ylabel += ' ({})'.format(unit)
axes[iplt].set_ylabel(ylabel)
if vplt[0][:3] == 'eta': # list of log variables
axes[iplt].set_yscale('log')
axes[iplt].set_ylim(bottom=conf.plot.vmin, top=conf.plot.vmax)
if ivar:
axes[iplt].legend()
axes[iplt].tick_params()
_, unit = sdat.scale(1, 's')
if unit:
unit = ' ({})'.format(unit)
axes[-1].set_xlabel('Time' + unit)
axes[-1].set_xlim((tseries['t'].iloc[0], tseries['t'].iloc[-1]))
axes[-1].tick_params()
misc.saveplot(fig, '_'.join(fname)) | 0.000569 |
def meta_(cls, **kwargs):
"""
Meta allows you to add meta data to the site
:param **kwargs:
meta keys we're expecting:
title (str)
description (str)
url (str) (Will pick it up by itself if not set)
image (str)
site_name (str) (but can pick it up from config file)
object_type (str)
keywords (list)
locale (str)
**Boolean By default these keys are True
use_opengraph
use_twitter
use_googleplus
"""
_name_ = "META"
meta_data = cls._context.get(_name_, {})
for k, v in kwargs.items():
# Prepend/Append string
if (k.endswith("__prepend") or k.endswith("__append")) \
and isinstance(v, str):
k, position = k.split("__", 2)
_v = meta_data.get(k, "")
if position == "prepend":
v += _v
elif position == "append":
v = _v + v
if k == "keywords" and not isinstance(k, list):
raise ValueError("Meta keyword must be a list")
meta_data[k] = v
cls.context_(_name_=meta_data) | 0.001584 |
def maskrcnn_loss(mask_logits, fg_labels, fg_target_masks):
"""
Args:
mask_logits: #fg x #category x h x w
fg_labels: #fg, in 1~#class, int64
fg_target_masks: #fg x h x w, float32
"""
num_fg = tf.size(fg_labels, out_type=tf.int64)
indices = tf.stack([tf.range(num_fg), fg_labels - 1], axis=1) # #fgx2
mask_logits = tf.gather_nd(mask_logits, indices) # #fgxhxw
mask_probs = tf.sigmoid(mask_logits)
# add some training visualizations to tensorboard
with tf.name_scope('mask_viz'):
viz = tf.concat([fg_target_masks, mask_probs], axis=1)
viz = tf.expand_dims(viz, 3)
viz = tf.cast(viz * 255, tf.uint8, name='viz')
tf.summary.image('mask_truth|pred', viz, max_outputs=10)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=fg_target_masks, logits=mask_logits)
loss = tf.reduce_mean(loss, name='maskrcnn_loss')
pred_label = mask_probs > 0.5
truth_label = fg_target_masks > 0.5
accuracy = tf.reduce_mean(
tf.cast(tf.equal(pred_label, truth_label), tf.float32),
name='accuracy')
pos_accuracy = tf.logical_and(
tf.equal(pred_label, truth_label),
tf.equal(truth_label, True))
pos_accuracy = tf.reduce_mean(tf.cast(pos_accuracy, tf.float32), name='pos_accuracy')
fg_pixel_ratio = tf.reduce_mean(tf.cast(truth_label, tf.float32), name='fg_pixel_ratio')
add_moving_summary(loss, accuracy, fg_pixel_ratio, pos_accuracy)
return loss | 0.002011 |
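The tf.stack/tf.gather_nd pair above selects, for each foreground RoI, the predicted mask of its ground-truth class (labels are 1-based). A numpy sketch of that selection, with made-up shapes:
import numpy as np

num_fg, num_category, h, w = 3, 4, 2, 2
mask_logits = np.arange(num_fg * num_category * h * w, dtype=np.float32)
mask_logits = mask_logits.reshape(num_fg, num_category, h, w)
fg_labels = np.array([2, 1, 4])  # ground-truth classes in 1..num_category

# equivalent of tf.gather_nd(mask_logits, tf.stack([range, labels - 1], axis=1))
selected = mask_logits[np.arange(num_fg), fg_labels - 1]
print(selected.shape)  # (3, 2, 2): one h x w mask per foreground RoI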
def close(self):
"""close all pooled connections"""
print("PGPooledTransaction - shutting down connection pool")
for name, conn in self.pool.iteritems():
conn.close()
print("PGPooledTransaction - connection %s closed" % name) | 0.007326 |
def get_gzh_by_search(text):
"""从搜索公众号获得的文本 提取公众号信息
Parameters
----------
text : str or unicode
搜索公众号获得的文本
Returns
-------
list[dict]
{
'open_id': '', # 微信号唯一ID
'profile_url': '', # 最近10条群发页链接
'headimage': '', # 头像
'wechat_name': '', # 名称
'wechat_id': '', # 微信id
'post_perm': '', # 最近一月群发数
'view_perm': '', # 最近一月阅读量
'qrcode': '', # 二维码
'introduction': '', # 介绍
'authentication': '' # 认证
}
"""
post_view_perms = WechatSogouStructuring.__get_post_view_perm(text)
page = etree.HTML(text)
lis = page.xpath('//ul[@class="news-list2"]/li')
relist = []
for li in lis:
url = get_first_of_element(li, 'div/div[1]/a/@href')
headimage = format_image_url(get_first_of_element(li, 'div/div[1]/a/img/@src'))
wechat_name = get_elem_text(get_first_of_element(li, 'div/div[2]/p[1]'))
info = get_elem_text(get_first_of_element(li, 'div/div[2]/p[2]'))
qrcode = get_first_of_element(li, 'div/div[3]/span/img[1]/@src')
introduction = get_elem_text(get_first_of_element(li, 'dl[1]/dd'))
authentication = get_first_of_element(li, 'dl[2]/dd/text()')
relist.append({
'open_id': headimage.split('/')[-1],
'profile_url': url,
'headimage': headimage,
'wechat_name': wechat_name.replace('red_beg', '').replace('red_end', ''),
'wechat_id': info.replace('微信号:', ''),
'qrcode': qrcode,
'introduction': introduction.replace('red_beg', '').replace('red_end', ''),
'authentication': authentication,
'post_perm': -1,
'view_perm': -1,
})
if post_view_perms:
for i in relist:
if i['open_id'] in post_view_perms:
post_view_perm = post_view_perms[i['open_id']].split(',')
if len(post_view_perm) == 2:
i['post_perm'] = int(post_view_perm[0])
i['view_perm'] = int(post_view_perm[1])
return relist | 0.002533 |
def proof_req_pred_referents(proof_req: dict) -> dict:
"""
Given a proof request with all requested predicates having cred def id restrictions,
return its predicate referents by cred def id and attribute, mapping a predicate and a limit.
The returned structure can be useful in downstream processing to filter cred-infos for predicates.
:param proof_req: proof request with all requested predicate specifications having cred def id restriction; e.g.,
::
{
'name': 'proof_req',
'version': '0.0',
'requested_attributes': {
...
}
'requested_predicates': {
'194_highscore_GE_uuid': {
'name': 'highscore',
'p_type': '>=',
'p_value': '100000',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_level_GE_uuid': {
'name': 'level',
'p_type': '>=',
'p_value': '10',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'194_attempts_LE_uuid': {
'name': 'attempts',
'p_type': '<=',
'p_value': '3',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_LT_uuid': {
'name': 'employees',
'p_type': '<',
'p_value': '100',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
'198_employees_GE_uuid': {
'name': 'employees',
'p_type': '>=',
'p_value': '50',
'restrictions': [
{
'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag'
}
],
'non_revoked': {
...
}
},
}
}
:return: nested dict mapping cred def id to name to proof request referent to predicate and limit; e.g.,
::
{
'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': {
'highscore': {
'194_level_GE_uuid': ['>=', 100000]
},
'level': {
'194_level_GE_uuid': ['>=', 10]
},
'attempts': {
'194_attempts_LE_uuid': ['<=', 3]
}
},
'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag': {
'employees': { # may have many preds per attr, but always 1 uuid and 1 relation per pred
'198_LT_employees_uuid': ['<=', 100]
'198_GE_employees_uuid': ['>=', 50]
}
}
}
"""
rv = {}
for uuid, spec in proof_req['requested_predicates'].items():
cd_id = None
for restriction in spec.get('restrictions', []):
cd_id = restriction.get('cred_def_id', None)
if cd_id:
break
if not cd_id:
continue
if cd_id not in rv: # cd_id of None is not OK
rv[cd_id] = {}
if spec['name'] not in rv[cd_id]:
rv[cd_id][spec['name']] = {}
rv[cd_id][spec['name']][uuid] = [spec['p_type'], Predicate.to_int(spec['p_value'])]
return rv | 0.002979 |
def findBinomialNsWithExpectedSampleMinimum(desiredValuesSorted, p, numSamples, nMax):
"""
For each desired value, find an approximate n for which the sample minimum
has an expected value equal to this value.
For each value, find an adjacent pair of n values whose expected sample minima
are below and above the desired value, respectively, and return a
linearly-interpolated n between these two values.
@param p (float)
The p of the binomial distribution.
@param numSamples (int)
The number of samples in the sample minimum distribution.
@return
A list of results. Each result contains
(interpolated_n, lower_value, upper_value).
where each lower_value and upper_value are the expected sample minimum for
floor(interpolated_n) and ceil(interpolated_n)
"""
# mapping from n -> expected value
actualValues = [
getExpectedValue(
SampleMinimumDistribution(numSamples,
BinomialDistribution(n, p, cache=True)))
for n in xrange(nMax + 1)]
results = []
n = 0
for desiredValue in desiredValuesSorted:
while n + 1 <= nMax and actualValues[n + 1] < desiredValue:
n += 1
if n + 1 > nMax:
break
interpolated = n + ((desiredValue - actualValues[n]) /
(actualValues[n+1] - actualValues[n]))
result = (interpolated, actualValues[n], actualValues[n + 1])
results.append(result)
return results | 0.008368 |
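The interpolation step in isolation, with illustrative numbers: if the expected sample minimum is 4.0 at n=10 and 6.0 at n=11, a desired value of 4.5 maps to an interpolated n of 10.25.
n, lower, upper, desired = 10, 4.0, 6.0, 4.5
interpolated = n + (desired - lower) / (upper - lower)
print(interpolated)  # 10.25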
def batches(iterable, n=1):
"""
From http://stackoverflow.com/a/8290508/270334
:param n:
:param iterable:
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)] | 0.00885 |
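Slicing a list into chunks of three with the generator above; the last batch is simply shorter when the length is not a multiple of n.
for batch in batches([1, 2, 3, 4, 5, 6, 7], n=3):
    print(batch)
# [1, 2, 3]
# [4, 5, 6]
# [7]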
def cancel_confirmation(self, confirmation_id):
"""
Cancels a confirmation
:param confirmation_id: the confirmation id
:return Response
"""
return self._create_put_request(
resource=CONFIRMATIONS,
billomat_id=confirmation_id,
command=CANCEL,
) | 0.0059 |
def rootChild_resetPassword(self, req, webViewer):
"""
Redirect authenticated users to their settings page (hopefully they
have one) when they try to reset their password.
This is the wrong way for this functionality to be implemented. See
#2524.
"""
from xmantissa.ixmantissa import IWebTranslator, IPreferenceAggregator
return URL.fromString(
IWebTranslator(self.store).linkTo(
IPreferenceAggregator(self.store).storeID)) | 0.003868 |
def esc(self, val):
"""
Returns the given object in the appropriate wrapper class from esc_types.py.
In most cases, you will not need to call this directly. However, if you are
passing a string to the interp method that should be used as an SQL bind value
and not raw SQL, you must pass it to this method to avoid a SQL injection
vulnerability. For example:
>>> sqli = SQLInterp()
>>> first_name = 'John'
The following is wrong! This could lead to a SQL injection attack.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", first_name)
('SELECT * FROM table WHERE first_name = John', ())
This is the correct way.
>>> sqli.interp("SELECT * FROM table WHERE first_name =", sqli.esc(first_name))
('SELECT * FROM table WHERE first_name = ?', ('John',))
"""
if type(val) in self.type_map:
return self.type_map[type(val)](val)
else:
return Esc(val) | 0.006883 |
def down(self, h, cr=True):
"""moves current vertical position h mm down
cr True will navigate to the left margin
"""
if cr:
self.oPdf.ln(h=0)
self.oPdf.set_y(self.oPdf.get_y() + h) | 0.008584 |
def set_value(self, key, value):
# type: (str, Any) -> None
"""Modify a value in the configuration.
"""
self._ensure_have_load_only()
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser) | 0.005085 |
def _CreateNewSeasonDir(self, seasonNum):
"""
Creates a new season directory name in the form 'Season <NUM>'.
If skipUserInput is True this will be accepted by default otherwise the
user can choose to accept this, use the base show directory or enter
a different name.
Parameters
----------
seasonNum : int
Season number.
Returns
----------
string or None
If the user accepts the generated directory name or gives a new name
this will be returned. If it the user chooses to use the base
directory an empty string is returned. If the user chooses to skip at
this input stage None is returned.
"""
seasonDirName = "Season {0}".format(seasonNum)
goodlogging.Log.Info("RENAMER", "Generated directory name: '{0}'".format(seasonDirName))
if self._skipUserInput is False:
response = goodlogging.Log.Input("RENAMER", "Enter 'y' to accept this directory, 'b' to use base show directory, 'x' to skip this file or enter a new directory name to use: ")
response = util.CheckEmptyResponse(response)
else:
response = 'y'
if response.lower() == 'b':
return ''
elif response.lower() == 'y':
return seasonDirName
elif response.lower() == 'x':
return None
else:
return response | 0.007519 |
def process_dynamic_completion(self, completion):
""" how to validate and generate completion for dynamic params """
if len(completion.split()) > 1:
completion = '\"' + completion + '\"'
if self.validate_completion(completion):
yield Completion(completion, -len(self.unfinished_word)) | 0.006006 |
def reset(self):
""" reset filter back to state at time of construction"""
self.n = 0 # nth step in the recursion
self.x = np.zeros(self._order + 1)
self.K = np.zeros(self._order + 1)
self.y = 0 | 0.008475 |
def attn(image_feat, query, hparams, name="attn"):
"""Attention on image feature with question as query."""
with tf.variable_scope(name, "attn", values=[image_feat, query]):
attn_dim = hparams.attn_dim
num_glimps = hparams.num_glimps
num_channels = common_layers.shape_list(image_feat)[-1]
if len(common_layers.shape_list(image_feat)) == 4:
image_feat = common_layers.flatten4d3d(image_feat)
query = tf.expand_dims(query, 1)
image_proj = common_attention.compute_attention_component(
image_feat, attn_dim, name="image_proj")
query_proj = common_attention.compute_attention_component(
query, attn_dim, name="query_proj")
h = tf.nn.relu(image_proj + query_proj)
h_proj = common_attention.compute_attention_component(
h, num_glimps, name="h_proj")
p = tf.nn.softmax(h_proj, axis=1)
image_ave = tf.matmul(image_feat, p, transpose_a=True)
image_ave = tf.reshape(image_ave, [-1, num_channels*num_glimps])
return image_ave | 0.003992 |
def restore(self, state):
"""Restore the state of this InMemoryStorageEngine from a dict."""
storage_data = state.get(u'storage_data', [])
streaming_data = state.get(u'streaming_data', [])
if len(storage_data) > self.storage_length or len(streaming_data) > self.streaming_length:
raise ArgumentError("Cannot restore InMemoryStorageEngine, too many readings",
storage_size=len(storage_data), storage_max=self.storage_length,
streaming_size=len(streaming_data), streaming_max=self.streaming_length)
self.storage_data = [IOTileReading.FromDict(x) for x in storage_data]
self.streaming_data = [IOTileReading.FromDict(x) for x in streaming_data] | 0.009126 |
def channel(self, channel_id=None, synchronous=False):
"""
Fetch a Channel object identified by the numeric channel_id, or
create that object if it doesn't already exist. If channel_id is not
None but no channel exists for that id, will raise InvalidChannel. If
there are already too many channels open, will raise TooManyChannels.
If synchronous=True, then the channel will act synchronous in all cases
where a protocol method supports `nowait=False`, or where there is an
implied callback in the protocol.
"""
if channel_id is None:
# adjust for channel 0
if len(self._channels) - 1 >= self._channel_max:
raise Connection.TooManyChannels(
"%d channels already open, max %d",
len(self._channels) - 1,
self._channel_max)
channel_id = self._next_channel_id()
while channel_id in self._channels:
channel_id = self._next_channel_id()
elif channel_id in self._channels:
return self._channels[channel_id]
else:
raise Connection.InvalidChannel(
"%s is not a valid channel id", channel_id)
# Call open() here so that ConnectionChannel doesn't have it called.
# Could also solve this other ways, but it's a HACK regardless.
rval = Channel(
self, channel_id, self._class_map, synchronous=synchronous)
self._channels[channel_id] = rval
rval.add_close_listener(self._channel_closed)
rval.open()
return rval | 0.001217 |
def space_before(self):
"""
The EMU equivalent of the centipoints value in
`./a:spcBef/a:spcPts/@val`.
"""
spcBef = self.spcBef
if spcBef is None:
return None
spcPts = spcBef.spcPts
if spcPts is None:
return None
return spcPts.val | 0.006135 |
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetype.split(";")[0] | 0.009091 |
def match(self, row, template_row=None):
"""
When matching a template, only the starting row is compared; comparing only key fields may be supported in the future, but for now every field of the starting row must match.
:param row:
:return:
"""
if not template_row:
template_cols = self.template[0]['cols']
else:
template_cols = template_row['cols']
# check if template_cols is longer than row; if so, the match fails
if len(template_cols)>len(row):
return False
for c in template_cols:
# if the coordinate exceeds the template's maximum length, it does not match, so exit
text = c['value']
if not text or (text and not (text.startswith('{{') and text.endswith('}}'))
and row[c['col']-1].value == text):
pass
else:
return False
# print 'Matched'
return True | 0.01005 |
def login(self, user: str, passwd: str) -> None:
"""Log in to instagram with given username and password and internally store session object.
:raises InvalidArgumentException: If the provided username does not exist.
:raises BadCredentialsException: If the provided password is wrong.
:raises ConnectionException: If connection to Instagram failed.
:raises TwoFactorAuthRequiredException: First step of 2FA login done, now call :meth:`Instaloader.two_factor_login`."""
self.context.login(user, passwd) | 0.009091 |
def from_args(cls, **kwargs):
"""
Generates one or more VSGSuite instances from command line arguments.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSuite defined in the :meth:`~VSGSuite.make_parser` method.
"""
# Create a VSGSuite for each filename on the command line.
if kwargs.get('suite_commands', None) == 'generate':
filenames = kwargs.pop('configuration_filenames', [])
return [cls.from_file(f) for f in filenames]
# Create a VSGSuite from the target directory and override commands
if kwargs.get('suite_commands', None) == 'auto':
type = kwargs.get('suite_type', None)
return [cls.from_directory('', type, **kwargs)]
# Create nothing.
return [] | 0.003667 |
def delete_plate(self, plate_id, delete_meta_data=False):
"""
Delete a plate from the database
:param plate_id: The plate id
:param delete_meta_data: Optionally delete all meta data associated with this plate as well
:return: None
"""
if plate_id not in self.plates:
logging.info("Plate {} not found for deletion".format(plate_id))
return
plate = self.plates[plate_id]
if delete_meta_data:
for pv in plate.values:
identifier = ".".join(map(lambda x: "_".join(x), pv))
self.meta_data_manager.delete(identifier=identifier)
with switch_db(PlateDefinitionModel, "hyperstream"):
try:
p = PlateDefinitionModel.objects.get(plate_id=plate_id)
p.delete()
del self.plates[plate_id]
except DoesNotExist as e:
logging.warn(e)
logging.info("Plate {} deleted".format(plate_id)) | 0.003922 |
def get_announce_filename( working_dir ):
"""
Get the path to the file that stores all of the announcements.
"""
announce_filepath = os.path.join( working_dir, get_default_virtualchain_impl().get_virtual_chain_name() ) + '.announce'
return announce_filepath | 0.033088 |
def set_size(self, data_size):
"""
Set the data slice size.
"""
if len(str(data_size)) > self.first:
raise ValueError(
'Send size is too large for message size-field width!')
self.data_size = data_size | 0.01087 |
def list_available_work_units(self, work_spec_name, start=0, limit=None):
"""Get a dictionary of available work units for some work spec.
The dictionary is from work unit name to work unit definition.
Only work units that have not been started, or units that were
started but did not complete in a timely fashion, are
included.
"""
return self.registry.filter(WORK_UNITS_ + work_spec_name,
priority_max=time.time(),
start=start, limit=limit) | 0.003509 |
def read(domain, key, user=None):
'''
Read a default from the system
CLI Example:
.. code-block:: bash
salt '*' macdefaults.read com.apple.CrashReporter DialogType
salt '*' macdefaults.read NSGlobalDomain ApplePersistence
domain
The name of the domain to read from
key
The key of the given domain to read from
user
The user to read the defaults as
'''
cmd = 'defaults read "{0}" "{1}"'.format(domain, key)
return __salt__['cmd.run'](cmd, runas=user) | 0.001862 |
def __create_point_comparator(self, type_point):
"""!
@brief Create point comparator.
@details In case of numpy.array specific comparator is required.
@param[in] type_point (data_type): Type of point that is stored in KD-node.
@return (callable) Callable point comparator to compare to points.
"""
if type_point == numpy.ndarray:
return lambda obj1, obj2: numpy.array_equal(obj1, obj2)
return lambda obj1, obj2: obj1 == obj2 | 0.005803 |
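Why the numpy special case matters: `==` on arrays is element-wise and does not reduce to a single truth value, whereas `numpy.array_equal` does. A short, standalone illustration:
import numpy

a = numpy.array([1.0, 2.0])
b = numpy.array([1.0, 2.0])
print(numpy.array_equal(a, b))   # True: one boolean for the whole point
print(a == b)                    # [ True  True ]: element-wise, not directly usable
print([1.0, 2.0] == [1.0, 2.0])  # True: plain lists compare fine with ==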
def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
details, incident_key) | 0.007614 |
def _get_isolated(self, hostport):
"""Get a Peer for the given destination for a request.
A new Peer is added and returned if one does not already exist for the
given host-port. Otherwise, the existing Peer is returned.
**NOTE** new peers will not be added to the peer heap.
"""
assert hostport, "hostport is required"
if hostport not in self._peers:
# Add a peer directly from a hostport, do NOT add it to the peer
# heap
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
)
self._peers[peer.hostport] = peer
return self._peers[hostport] | 0.002805 |
def check_type_keywords(self, schema, rule, path):
"""
All supported keywords:
- allowempty_map
- assertion
- class
- date
- default
- desc
- enum
- example
- extensions
- func
- ident
- include_name
- map_regex_rule
- mapping
- matching
- matching_rule
- name
- nullable
- pattern
- pattern_regexp
- range
- regex_mappings
- required
- schema
- sequence
- type
- type_class
- unique
- version
"""
if not self.strict_rule_validation:
return
global_keywords = ['type', 'desc', 'example', 'extensions', 'name', 'nullable', 'version', 'func', 'include']
all_allowed_keywords = {
'str': global_keywords + ['default', 'pattern', 'range', 'enum', 'required', 'unique', 'req'],
'int': global_keywords + ['default', 'range', 'enum', 'required', 'unique'],
'float': global_keywords + ['default', 'enum', 'range', 'required'],
'number': global_keywords + ['default', 'enum'],
'bool': global_keywords + ['default', 'enum'],
'map': global_keywords + ['allowempty_map', 'mapping', 'map', 'allowempty', 'required', 'matching-rule', 'range', 'class'],
'seq': global_keywords + ['sequence', 'seq', 'required', 'range', 'matching'],
'sequence': global_keywords + ['sequence', 'seq', 'required'],
'mapping': global_keywords + ['mapping', 'seq', 'required'],
'timestamp': global_keywords + ['default', 'enum'],
'date': global_keywords + ['default', 'enum'],
'symbol': global_keywords + ['default', 'enum'],
'scalar': global_keywords + ['default', 'enum'],
'text': global_keywords + ['default', 'enum', 'pattern'],
'any': global_keywords + ['default', 'enum'],
'enum': global_keywords + ['default', 'enum'],
'none': global_keywords + ['default', 'enum', 'required'],
}
rule_type = schema.get('type')
if not rule_type:
# Special cases for the "shortcut methods"
if 'sequence' in schema or 'seq' in schema:
rule_type = 'sequence'
elif 'mapping' in schema or 'map' in schema:
rule_type = 'mapping'
allowed_keywords = all_allowed_keywords.get(rule_type)
if not allowed_keywords and 'sequence' not in schema and 'mapping' not in schema and 'seq' not in schema and 'map' not in schema:
raise RuleError('No allowed keywords found for type: {0}'.format(rule_type))
for k, v in schema.items():
if k not in allowed_keywords:
raise RuleError('Keyword "{0}" is not supported for type: "{1}" '.format(k, rule_type)) | 0.003709 |
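A small illustration (hypothetical schema dict) of the kind of input this validation rejects: 'pattern' is not an allowed keyword for type 'int'.
schema = {'type': 'int', 'pattern': '[0-9]+'}
# check_type_keywords(schema, rule, path) would raise:
# RuleError: Keyword "pattern" is not supported for type: "int"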
def init(self):
"""
Creates the virtual environment.
"""
r = self.local_renderer
# if self.virtualenv_exists():
# print('virtualenv exists')
# return
print('Creating new virtual environment...')
with self.settings(warn_only=True):
cmd = '[ ! -d {virtualenv_dir} ] && virtualenv --no-site-packages {virtualenv_dir} || true'
if self.is_local:
r.run_or_local(cmd)
else:
r.sudo(cmd) | 0.005682 |
def del_preset(self, name):
"""Delete a named command line preset.
:param name: the name of the preset to delete
:returns: True on success or False otherwise
"""
policy = self.policy
if not policy.find_preset(name):
self.ui_log.error("Preset '%s' not found" % name)
return False
try:
policy.del_preset(name=name)
except Exception as e:
self.ui_log.error(str(e) + "\n")
return False
self.ui_log.info("Deleted preset '%s'\n" % name)
return True | 0.003373 |
def update(did):
"""Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
"""
required_attributes = ['@context', 'created', 'id', 'publicKey', 'authentication', 'proof',
'service']
required_metadata_base_attributes = ['name', 'dateCreated', 'author', 'license',
'price', 'encryptedFiles', 'type', 'checksum']
required_metadata_curation_attributes = ['rating', 'numVotes']
assert isinstance(request.json, dict), 'invalid payload format.'
data = request.json
if not data:
logger.error(f'request body seems empty, expecting {required_attributes}')
return 400
msg, status = check_required_attributes(required_attributes, data, 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data['service']), 'update')
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register')
if msg:
return msg, status
msg, status = validate_date_format(data['created'])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ')
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record['service']:
service_id = int(service['serviceDefinitionId'])
if service['type'] == 'Metadata':
_record['service'][service_id]['metadata']['base']['datePublished'] = _get_date(
dao.get(did)['service'])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type='application/json')
except Exception as err:
return f'Some error: {str(err)}', 500 | 0.002428 |
def clusterflow_commands_table (self):
""" Make a table of the Cluster Flow commands """
# I wrote this when I was tired. Sorry if it's incomprehensible.
desc = '''Every Cluster Flow run will have many different commands.
MultiQC splits these by whitespace, collects by the tool name
and shows the first command found. Any terms not found in <em>all</em> subsequent
calls are replaced with <code>[variable]</code>
                    <em>(typically input and output filenames)</em>. Each column is for one Cluster Flow run.'''
# Loop through pipelines
tool_cmds = OrderedDict()
headers = dict()
for pipeline_id, commands in self.clusterflow_commands.items():
headers[pipeline_id] = {'scale': False}
self.var_html = '<span style="background-color:#dedede; color:#999;">[variable]</span>'
tool_cmd_parts = OrderedDict()
for cmd in commands:
s = cmd.split()
tool = self._guess_cmd_name(s)
if tool not in tool_cmd_parts.keys():
tool_cmd_parts[tool] = list()
tool_cmd_parts[tool].append(s)
for tool, cmds in tool_cmd_parts.items():
cons_cmd = self._replace_variable_chunks(cmds)
# Try again with first two blocks if all variable
variable_count = cons_cmd.count(self.var_html)
if variable_count == len(cmds[0]) - 1 and len(cmds[0]) > 2:
for subcmd in set([x[1] for x in cmds]):
sub_cons_cmd = self._replace_variable_chunks([cmd for cmd in cmds if cmd[1] == subcmd])
tool = "{} {}".format(tool, subcmd)
if tool not in tool_cmds:
tool_cmds[tool] = dict()
tool_cmds[tool][pipeline_id] = '<code style="white-space:nowrap;">{}</code>'.format(" ".join(sub_cons_cmd) )
else:
if tool not in tool_cmds:
tool_cmds[tool] = dict()
tool_cmds[tool][pipeline_id] = '<code style="white-space:nowrap;">{}</code>'.format(" ".join(cons_cmd) )
table_config = {
'namespace': 'Cluster Flow',
'id': 'clusterflow-commands-table',
'table_title': 'Cluster Flow Commands',
'col1_header': 'Tool',
'sortRows': False,
'no_beeswarm': True
}
self.add_section (
name = 'Commands',
anchor = 'clusterflow-commands',
description = desc,
plot = table.plot(tool_cmds, headers, table_config)
) | 0.007718 |
def stream_filter(self, delegate, follow=None, track=None, locations=None,
stall_warnings=None):
"""
Streams public messages filtered by various parameters.
https://dev.twitter.com/docs/api/1.1/post/statuses/filter
At least one of ``follow``, ``track``, or ``locations`` must be
provided. See the API documentation linked above for details on these
parameters and the various limits on this API.
:param delegate:
A delegate function that will be called for each message in the
stream and will be passed the message dict as the only parameter.
The message dicts passed to this function may represent any message
type and the delegate is responsible for any dispatch that may be
required. (:mod:`txtwitter.messagetools` may be helpful here.)
:param list follow:
A list of user IDs, indicating the users to return statuses for in
the stream.
:param list track:
List of keywords to track.
:param list locations:
List of location bounding boxes to track.
XXX: Currently unsupported.
:param bool stall_warnings:
Specifies whether stall warnings should be delivered.
:returns: An unstarted :class:`TwitterStreamService`.
"""
params = {}
if follow is not None:
params['follow'] = ','.join(follow)
if track is not None:
params['track'] = ','.join(track)
if locations is not None:
raise NotImplementedError(
"The `locations` parameter is not yet supported.")
set_bool_param(params, 'stall_warnings', stall_warnings)
svc = TwitterStreamService(
lambda: self._post_stream('statuses/filter.json', params),
delegate)
return svc | 0.001569 |
def enabled(name, **kwargs):
'''
Return True if the named service is enabled at boot and the provided
flags match the configured ones (if any). Return False otherwise.
name
Service name
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
salt '*' service.enabled <service name> flags=<flags>
'''
cmd = '{0} get {1} status'.format(_cmd(), name)
if not __salt__['cmd.retcode'](cmd):
# also consider a service disabled if the current flags are different
# than the configured ones so we have a chance to update them
flags = _get_flags(**kwargs)
cur_flags = __salt__['cmd.run_stdout']('{0} get {1} flags'.format(_cmd(), name))
if format(flags) == format(cur_flags):
return True
if not flags:
def_flags = __salt__['cmd.run_stdout']('{0} getdef {1} flags'.format(_cmd(), name))
if format(cur_flags) == format(def_flags):
return True
return False | 0.002913 |
def _integrateRZOrbit(vxvv,pot,t,method,dt):
"""
NAME:
_integrateRZOrbit
PURPOSE:
integrate an orbit in a Phi(R,z) potential in the (R,z) plane
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,z,vz]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt - if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,5] array of [R,vR,vT,z,vz] at each t
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
#First check that the potential has C
if '_c' in method:
if not ext_loaded or not _check_c(pot):
if ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
else:
method= 'odeint'
if not ext_loaded: # pragma: no cover
warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning)
else:
warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning)
if method.lower() == 'leapfrog' \
or method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' or method.lower() == 'dop853_c':
#We hack this by upgrading to a FullOrbit
this_vxvv= nu.zeros(len(vxvv)+1)
this_vxvv[0:len(vxvv)]= vxvv
tmp_out= _integrateFullOrbit(this_vxvv,pot,t,method,dt)
#tmp_out is (nt,6)
out= tmp_out[:,0:5]
elif method.lower() == 'odeint' or method.lower() == 'dop853':
l= vxvv[0]*vxvv[2]
l2= l**2.
init= [vxvv[0],vxvv[1],vxvv[3],vxvv[4]]
if method.lower() == "dop853":
intOut = dop853(_RZEOM, init, t, args=(pot, l2))
else:
intOut = integrate.odeint(_RZEOM, init, t, args=(pot, l2),
rtol=10. ** -8.) # ,mxstep=100000000)
out= nu.zeros((len(t),5))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,3]= intOut[:,2]
out[:,4]= intOut[:,3]
out[:,2]= l/out[:,0]
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
return out | 0.021086 |
def _get_reader(self, network_reader):
"""
Get a reader or None if another reader is already reading.
"""
with (yield from self._lock):
if self._reader_process is None:
self._reader_process = network_reader
if self._reader:
if self._reader_process == network_reader:
self._current_read = asyncio.async(self._reader.read(READ_SIZE))
return self._current_read
return None | 0.007921 |
def make_uhs(hmap, info):
"""
Make Uniform Hazard Spectra curves for each location.
:param hmap:
array of shape (N, M, P)
:param info:
a dictionary with keys poes, imtls, uhs_dt
:returns:
a composite array containing uniform hazard spectra
"""
uhs = numpy.zeros(len(hmap), info['uhs_dt'])
for p, poe in enumerate(info['poes']):
for m, imt in enumerate(info['imtls']):
if imt.startswith(('PGA', 'SA')):
uhs[str(poe)][imt] = hmap[:, m, p]
return uhs | 0.001838 |
def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback) | 0.020833 |
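A minimal usage sketch (hypothetical caller; assumes six is installed and reraise is in scope) showing the intended pattern of adding context while preserving the original traceback:
def load_settings(path):
    try:
        with open(path) as handle:
            return handle.read()
    except OSError:
        # The original traceback is kept; only the message gains context.
        reraise("while loading settings from %r" % path)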
def plot_state_hinton(rho, title='', figsize=None):
"""Plot a hinton diagram for the quanum state.
Args:
rho (ndarray): Numpy array for state vector or density matrix.
title (str): a string that represents the plot title
figsize (tuple): Figure size in inches.
Returns:
matplotlib.Figure: The matplotlib.Figure of the visualization
Raises:
ImportError: Requires matplotlib.
"""
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
rho = _validate_input_state(rho)
if figsize is None:
figsize = (8, 5)
num = int(np.log2(len(rho)))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
max_weight = 2 ** np.ceil(np.log(np.abs(rho).max()) / np.log(2))
datareal = np.real(rho)
dataimag = np.imag(rho)
column_names = [bin(i)[2:].zfill(num) for i in range(2**num)]
row_names = [bin(i)[2:].zfill(num) for i in range(2**num)]
lx = len(datareal[0]) # Work out matrix dimensions
ly = len(datareal[:, 0])
# Real
ax1.patch.set_facecolor('gray')
ax1.set_aspect('equal', 'box')
ax1.xaxis.set_major_locator(plt.NullLocator())
ax1.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(datareal):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax1.add_patch(rect)
ax1.set_xticks(np.arange(0, lx+0.5, 1))
ax1.set_yticks(np.arange(0, ly+0.5, 1))
ax1.set_yticklabels(row_names, fontsize=14)
ax1.set_xticklabels(column_names, fontsize=14, rotation=90)
ax1.autoscale_view()
ax1.invert_yaxis()
ax1.set_title('Real[rho]', fontsize=14)
# Imaginary
ax2.patch.set_facecolor('gray')
ax2.set_aspect('equal', 'box')
ax2.xaxis.set_major_locator(plt.NullLocator())
ax2.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(dataimag):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax2.add_patch(rect)
if np.any(dataimag != 0):
ax2.set_xticks(np.arange(0, lx+0.5, 1))
ax2.set_yticks(np.arange(0, ly+0.5, 1))
ax2.set_yticklabels(row_names, fontsize=14)
ax2.set_xticklabels(column_names, fontsize=14, rotation=90)
ax2.autoscale_view()
ax2.invert_yaxis()
ax2.set_title('Imag[rho]', fontsize=14)
if title:
fig.suptitle(title, fontsize=16)
plt.tight_layout()
plt.close(fig)
return fig | 0.00036 |
def create_item(self, name):
"""
create a new todo list item
"""
elem = self.controlled_list.create_item(name)
if elem:
return TodoElementUX(parent=self, controlled_element=elem) | 0.008696 |
def _Download(campaign, subcampaign):
'''
Download all stars from a given campaign. This is
called from ``missions/k2/download.pbs``
'''
# Are we doing a subcampaign?
if subcampaign != -1:
campaign = campaign + 0.1 * subcampaign
# Get all star IDs for this campaign
stars = [s[0] for s in GetK2Campaign(campaign)]
nstars = len(stars)
# Download the TPF data for each one
for i, EPIC in enumerate(stars):
print("Downloading data for EPIC %d (%d/%d)..." %
(EPIC, i + 1, nstars))
if not os.path.exists(os.path.join(EVEREST_DAT, 'k2',
'c%02d' % int(campaign),
('%09d' % EPIC)[:4] + '00000',
('%09d' % EPIC)[4:],
'data.npz')):
try:
GetData(EPIC, season=campaign, download_only=True)
except KeyboardInterrupt:
sys.exit()
except:
# Some targets could be corrupted...
print("ERROR downloading EPIC %d." % EPIC)
exctype, value, tb = sys.exc_info()
for line in traceback.format_exception_only(exctype, value):
ln = line.replace('\n', '')
print(ln)
continue | 0.001439 |
def maybe_reshape_4d_to_3d(x):
"""Reshape input from 4D to 3D if necessary."""
x_shape = common_layers.shape_list(x)
is_4d = False
if len(x_shape) == 4:
x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]])
is_4d = True
return x, x_shape, is_4d | 0.021818 |
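For intuition, the same 4D-to-3D collapse expressed in plain NumPy (an assumed equivalent; the original relies on TensorFlow and common_layers.shape_list):
import numpy as np

x = np.zeros((2, 3, 4, 8))            # [batch, height, width, channels]
batch, height, width, channels = x.shape
x3d = x.reshape(batch, height * width, channels)
assert x3d.shape == (2, 12, 8)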
def set_doc_data_lics(self, doc, lics):
"""Sets the document data license.
        Raises SPDXValueError if the value is malformed, CardinalityError
        if it is already defined.
"""
if not self.doc_data_lics_set:
self.doc_data_lics_set = True
if validations.validate_data_lics(lics):
doc.data_license = document.License.from_identifier(lics)
return True
else:
raise SPDXValueError('Document::DataLicense')
else:
raise CardinalityError('Document::DataLicense') | 0.003472 |
def create_key(self, title, key):
"""Create a new key for the authenticated user.
:param str title: (required), key title
:param key: (required), actual key contents, accepts path as a string
or file-like object
:returns: :class:`Key <github3.users.Key>`
"""
created = None
if title and key:
url = self._build_url('user', 'keys')
req = self._post(url, data={'title': title, 'key': key})
json = self._json(req, 201)
if json:
created = Key(json, self)
return created | 0.0033 |
def make_pubmed_gene_group(entrez_ids: Iterable[Union[str, int]]) -> Iterable[str]:
"""Builds a skeleton for gene summaries
:param entrez_ids: A list of Entrez Gene identifiers to query the PubMed service
:return: An iterator over statement lines for NCBI Entrez Gene summaries
"""
url = PUBMED_GENE_QUERY_URL.format(','.join(str(x).strip() for x in entrez_ids))
response = requests.get(url)
tree = ElementTree.fromstring(response.content)
for x in tree.findall('./DocumentSummarySet/DocumentSummary'):
yield '\n# {}'.format(x.find('Description').text)
yield 'SET Citation = {{"Other", "PubMed Gene", "{}"}}'.format(x.attrib['uid'])
yield 'SET Evidence = "{}"'.format(x.find('Summary').text.strip().replace('\n', ''))
yield '\nUNSET Evidence\nUNSET Citation' | 0.007282 |
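A hedged usage sketch (requires network access to the NCBI eutils endpoint; the Entrez Gene identifiers below are illustrative):
for line in make_pubmed_gene_group([5594, 5595]):   # MAPK1, MAPK3
    print(line)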
def robust_init(stochclass, tries, *args, **kwds):
"""Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True)
"""
# Find the direct parents
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
# Find the extended parents
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
# Select the parents with a random method.
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc) | 0.001688 |
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
"""
Evaluate the accuracy of the model on adversarial examples
:param x: symbolic input to model.
:param y: symbolic variable for the label.
:param preds_adv: symbolic variable for the prediction on an
adversarial example.
:param X_test: NumPy array of test set inputs.
:param Y_test: NumPy array of test set labels.
:param att_type: name of the attack.
"""
end = (len(X_test) // self.batch_size) * self.batch_size
if self.hparams.fast_tests:
end = 10*self.batch_size
acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
Y_test[:end], args=self.eval_params)
self.log_value('test_accuracy_%s' % att_type, acc,
'Test accuracy on adversarial examples')
return acc | 0.002334 |
def GetLayerFromFeatureService(self, fs, layerName="", returnURLOnly=False):
"""Obtains a layer from a feature service by feature service reference.
Args:
fs (FeatureService): The feature service from which to obtain the layer.
layerName (str): The name of the layer. Defaults to ``""``.
returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``.
Returns:
When ``returnURLOnly`` is ``True``, the URL of the layer is returned.
When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
"""
layers = None
table = None
layer = None
sublayer = None
try:
layers = fs.layers
if (layers is None or len(layers) == 0) and fs.url is not None:
fs = arcrest.ags.FeatureService(
url=fs.url)
layers = fs.layers
if layers is not None:
for layer in layers:
if layer.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(layer.id)
else:
return layer
elif not layer.subLayers is None:
for sublayer in layer.subLayers:
if sublayer == layerName:
return sublayer
if fs.tables is not None:
for table in fs.tables:
if table.name == layerName:
if returnURLOnly:
return fs.url + '/' + str(layer.id)
else:
return table
return None
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "GetLayerFromFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
layers = None
table = None
layer = None
sublayer = None
del layers
del table
del layer
del sublayer
gc.collect() | 0.003205 |
def get_connections(app):
"""
Return all Heroku Connect connections setup with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON.
"""
payload = {'app': app}
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json()['results'] | 0.003521 |
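A hedged usage sketch (the application name is a placeholder, and valid Heroku Connect credentials are assumed to be configured in settings):
connections = get_connections('my-heroku-app')
for connection in connections:
    print(connection['id'], connection.get('resource_name'))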
def _assemble_regulate_activity(self, stmt):
"""Example: p(HGNC:MAP2K1) => act(p(HGNC:MAPK1))"""
act_obj = deepcopy(stmt.obj)
act_obj.activity = stmt._get_activity_condition()
# We set is_active to True here since the polarity is encoded
# in the edge (decreases/increases)
act_obj.activity.is_active = True
activates = isinstance(stmt, Activation)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, act_obj, relation, stmt.evidence) | 0.003766 |
def session_end_pb(status, end_time_secs=None):
"""Constructs a SessionEndInfo protobuffer.
Creates a summary that contains status information for a completed
training session. Should be exported after the training session is completed.
One such summary per training session should be created. Each should have
a different run.
Args:
status: A tensorboard.hparams.Status enumeration value denoting the
status of the session.
end_time_secs: float. The time to use as the session end time. Represented
as seconds since the unix epoch. If None uses the current time.
Returns:
The summary protobuffer mentioned above.
"""
if end_time_secs is None:
end_time_secs = time.time()
session_end_info = plugin_data_pb2.SessionEndInfo(status=status,
end_time_secs=end_time_secs)
return _summary(metadata.SESSION_END_INFO_TAG,
plugin_data_pb2.HParamsPluginData(
session_end_info=session_end_info)) | 0.005808 |
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0) | 0.011194 |
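For reference, the little-endian double-word read is equivalent to this standalone struct call (a sketch, not the module's own get_dword_from_data implementation):
import struct

data = b'\x78\x56\x34\x12'
value, = struct.unpack('<I', data)   # little-endian unsigned 32-bit integer
assert value == 0x12345678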
def and_(cls, *queries):
"""
        Construct a new AND query from the given Query objects.
        :param queries: list of sub-queries to combine
:rtype: Query
"""
if len(queries) < 2:
raise ValueError('and_ need two queries at least')
if not all(x._query_class._class_name == queries[0]._query_class._class_name for x in queries):
raise TypeError('All queries must be for the same class')
query = Query(queries[0]._query_class._class_name)
query._and_query(queries)
return query | 0.005725 |
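A usage sketch under the assumption that Query exposes a LeanCloud-style fluent API (the method and field names below are hypothetical):
q1 = Query('Todo').equal_to('status', 'open')
q2 = Query('Todo').greater_than('priority', 3)
combined = Query.and_(q1, q2)   # matches objects satisfying both sub-queries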
def _run(self, *args: Any, **kwargs: Any) -> None:
"""
        Wraps the process body (the function that implements a process within the simulation) so as to catch any
        Interrupt that may terminate the process.
"""
try:
self._body(*args, **kwargs)
if _logger is not None:
_log(INFO, "Process", self.local.name, "die-finish")
except Interrupt:
if _logger is not None:
_log(INFO, "Process", self.local.name, "die-interrupt") | 0.005505 |