text stringlengths 78–104k | score float64 0–0.18 |
---|---|
def parse_result(result):
"""parse_result(json result) -- print the web query according to the type
of result from duckduckgo.
"""
if(result['Type'] == 'D'):
print """There is more than one answer for this. Try making your query\
more specific. For example, if you want to learn about apple the company\
and not apple the fruit, try something like apple inc or apple computers.
"""
elif(result['Type'] == 'A'):
print result['AbstractText']
print '\nResults from DuckDuckGo'
elif(result['Type'] == 'C'):
for entry in result['RelatedTopics']:
print entry['Text']
print "\n"
else:
print "I do not know how to process this query at the moment." | 0.035451 |
def delete_file(db, user_id, api_path):
"""
Delete a file.
TODO: Consider making this a soft delete.
"""
result = db.execute(
files.delete().where(
_file_where(user_id, api_path)
)
)
rowcount = result.rowcount
if not rowcount:
raise NoSuchFile(api_path)
return rowcount | 0.002907 |
def show(args):
"""Convert and print JSON.
Argument:
args: arguments object
"""
domain = check_infile(args.infile)
action = True
try:
print(json.dumps(set_json(domain, action, filename=args.infile),
sort_keys=True, indent=2))
except UnicodeDecodeError as e:
sys.stderr.write("ERROR: \"%s\" is invalid format file.\n"
% args.infile)
exit(1) | 0.002217 |
def search(self, id_egroup):
"""Search Group Equipament from by the identifier.
:param id_egroup: Identifier of the Group Equipament. Integer value and greater than zero.
:return: Following dictionary:
::
{‘group_equipament’: {‘id’: < id_egrupo >,
‘nome’: < nome >} }
:raise InvalidParameterError: Group Equipament identifier is null and invalid.
:raise GrupoEquipamentoNaoExisteError: Group Equipament not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_egroup):
raise InvalidParameterError(
u'The identifier of the Group Equipment is invalid or was not provided.')
url = 'egroup/' + str(id_egroup) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) | 0.005165 |
def get_url_endpoint(self):
"""
Returns the Hypermap endpoint for a layer.
This endpoint is the WMTS MapProxy endpoint; only for WorldMap (WM) layers do we use the original endpoint.
"""
endpoint = self.url
if self.type not in ('Hypermap:WorldMap',):
endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % (
self.catalog.slug,
self.id
)
return endpoint | 0.008602 |
def add_monitor(self, pattern, callback, limit=80):
"""
Calls the given function whenever the given pattern matches the
buffer.
Arguments passed to the callback are the index of the match, and
the match object of the regular expression.
:type pattern: str|re.RegexObject|list(str|re.RegexObject)
:param pattern: One or more regular expressions.
:type callback: callable
:param callback: The function that is called.
:type limit: int
:param limit: The maximum size of the tail of the buffer
that is searched, in number of bytes.
"""
self.monitors.append([to_regexs(pattern), callback, 0, limit]) | 0.002759 |
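A hedged usage sketch for add_monitor; the connection object conn and the prompt pattern are illustrative assumptions, and the callback signature follows the docstring (index of the match plus the re match object):
def on_more_prompt(index, match):
    # illustrative callback: react to a pager prompt seen in the buffer
    print("pager prompt #%d matched: %r" % (index, match.group(0)))
conn.add_monitor(r'--More--', on_more_prompt, limit=120)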
def list_snapshots(kwargs=None, call=None):
'''
List snapshots either for all VMs and templates or for a specific VM/template
in this VMware environment
To list snapshots for all VMs and templates:
CLI Example:
.. code-block:: bash
salt-cloud -f list_snapshots my-vmware-config
To list snapshots for a specific VM/template:
CLI Example:
.. code-block:: bash
salt-cloud -f list_snapshots my-vmware-config name="vmname"
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_snapshots function must be called with '
'-f or --function.'
)
ret = {}
vm_properties = [
"name",
"rootSnapshot",
"snapshot"
]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
if vm["rootSnapshot"]:
if kwargs and kwargs.get('name') == vm["name"]:
return {vm["name"]: _get_snapshots(vm["snapshot"].rootSnapshotList)}
else:
ret[vm["name"]] = _get_snapshots(vm["snapshot"].rootSnapshotList)
else:
if kwargs and kwargs.get('name') == vm["name"]:
return {}
return ret | 0.003928 |
def encoded_class(block, offset=0):
"""
return the class associated with the magic number found in a block of memory
"""
if not block:
raise InvalidFileFormatNull
for key in __magicmap__:
if block.find(key, offset, offset + len(key)) > -1:
return __magicmap__[key]
raise InvalidFileFormat | 0.005376 |
def create_message(self):
"""Create and send a unique message for this service."""
self.counter += 1
self._transport.send(
"transient.transaction",
"TXMessage #%d\n++++++++Produced@ %f"
% (self.counter, (time.time() % 1000) * 1000),
)
self.log.info("Created message %d", self.counter) | 0.005556 |
def rbeta(alpha, beta, size=None):
"""
Random beta variates.
"""
from scipy.stats.distributions import beta as sbeta
return sbeta.ppf(np.random.random(size), alpha, beta) | 0.005263 |
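A small usage sketch for rbeta, assuming numpy is imported as np in the same module and scipy is installed:
draws = rbeta(2.0, 5.0, size=3)  # three Beta(2, 5) variates via the inverse-CDF trick
print(draws.shape)               # (3,)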
def _figure_data(self, plot, fmt='html', doc=None, as_script=False, **kwargs):
"""
Given a plot instance, an output format and an optional bokeh
document, return the corresponding data. If as_script is True,
the content will be split in an HTML and a JS component.
"""
model = plot.state
if doc is None:
doc = plot.document
else:
plot.document = doc
for m in model.references():
m._document = None
doc.theme = self.theme
doc.add_root(model)
# Bokeh raises warnings about duplicate tools and empty subplots
# but at the holoviews level these are not issues
logger = logging.getLogger(bokeh.core.validation.check.__file__)
logger.disabled = True
if fmt == 'png':
from bokeh.io.export import get_screenshot_as_png
img = get_screenshot_as_png(plot.state, None)
imgByteArr = BytesIO()
img.save(imgByteArr, format='PNG')
data = imgByteArr.getvalue()
if as_script:
b64 = base64.b64encode(data).decode("utf-8")
(mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt]
src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
div = tag.format(src=src, mime_type=mime_type, css='')
js = ''
else:
try:
with silence_warnings(EMPTY_LAYOUT, MISSING_RENDERERS):
js, div, _ = notebook_content(model)
html = NOTEBOOK_DIV.format(plot_script=js, plot_div=div)
data = encode_utf8(html)
doc.hold()
except:
logger.disabled = False
raise
logger.disabled = False
plot.document = doc
if as_script:
return div, js
return data | 0.001566 |
def is_sequence_match(pattern: list, instruction_list: list, index: int) -> bool:
"""Checks if the instructions starting at index follow a pattern.
:param pattern: List of lists describing a pattern, e.g. [["PUSH1", "PUSH2"], ["EQ"]] where ["PUSH1", "EQ"] satisfies pattern
:param instruction_list: List of instructions
:param index: Index to check for
:return: Pattern matched
"""
for index, pattern_slot in enumerate(pattern, start=index):
try:
if instruction_list[index]["opcode"] not in pattern_slot:
return False
except IndexError:
return False
return True | 0.004615 |
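A minimal usage sketch for is_sequence_match; the instruction dicts below are made up for illustration:
instructions = [{"opcode": "PUSH1"}, {"opcode": "EQ"}, {"opcode": "STOP"}]
pattern = [["PUSH1", "PUSH2"], ["EQ"]]
print(is_sequence_match(pattern, instructions, 0))  # True: PUSH1 then EQ matches the two slots
print(is_sequence_match(pattern, instructions, 1))  # False: EQ does not satisfy the first slot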
def make_sudouser(c):
"""
Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work.
"""
user = c.travis.sudo.user
password = c.travis.sudo.password
# --create-home because we need a place to put conf files, keys etc
# --groups travis because we must be in the Travis group to access the
# (created by Travis for us) virtualenv and other contents within
# /home/travis.
c.sudo("useradd {0} --create-home --groups travis".format(user))
# Password 'mypass' also arbitrary
c.run("echo {0}:{1} | sudo chpasswd".format(user, password))
# Set up new (glob-sourced) sudoers conf file for our user; easier than
# attempting to mutate or overwrite main sudoers conf.
conf = "/etc/sudoers.d/passworded"
cmd = "echo '{0} ALL=(ALL:ALL) PASSWD:ALL' > {1}".format(user, conf)
c.sudo('sh -c "{0}"'.format(cmd))
# Grant travis group write access to /home/travis as some integration tests
# may try writing conf files there. (TODO: shouldn't running the tests via
# 'sudo -H' mean that's no longer necessary?)
c.sudo("chmod g+w /home/travis") | 0.000861 |
def command_help_long(self):
"""
Return command help for use in global parser usage string
@TODO update to support self.current_indent from formatter
"""
indent = " " * 2 # replace with current_indent
help = "Command must be one of:\n"
for action_name in self.parser.valid_commands:
help += "%s%-10s %-70s\n" % (indent, action_name, self.parser.commands[action_name].desc_short.capitalize())
help += '\nSee \'%s help COMMAND\' for help and information on a command' % self.parser.prog
return help | 0.010929 |
def _local_sym_to_py_ast(
ctx: GeneratorContext, node: Local, is_assigning: bool = False
) -> GeneratedPyAST:
"""Generate a Python AST node for accessing a locally defined Python variable."""
assert node.op == NodeOp.LOCAL
sym_entry = ctx.symbol_table.find_symbol(sym.symbol(node.name))
assert sym_entry is not None
if node.local == LocalType.FIELD:
this_entry = ctx.symbol_table.find_symbol(ctx.current_this)
assert this_entry is not None, "Field type local must have this"
return GeneratedPyAST(
node=_load_attr(
f"{this_entry.munged}.{sym_entry.munged}",
ctx=ast.Store() if is_assigning else ast.Load(),
)
)
else:
return GeneratedPyAST(
node=ast.Name(
id=sym_entry.munged, ctx=ast.Store() if is_assigning else ast.Load()
)
) | 0.003326 |
def five_crop(img, size):
"""Crop the given PIL Image into four corners and the central crop.
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if isinstance(size, numbers.Number):
size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
w, h = img.size
crop_h, crop_w = size
if crop_w > w or crop_h > h:
raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
(h, w)))
tl = img.crop((0, 0, crop_w, crop_h))
tr = img.crop((w - crop_w, 0, w, crop_h))
bl = img.crop((0, h - crop_h, crop_w, h))
br = img.crop((w - crop_w, h - crop_h, w, h))
center = center_crop(img, (crop_h, crop_w))
return (tl, tr, bl, br, center) | 0.003808 |
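A brief usage sketch for five_crop, assuming Pillow is installed and that center_crop and the numbers import live in the same module as the snippet:
from PIL import Image
img = Image.new("RGB", (100, 80))            # width 100, height 80
tl, tr, bl, br, center = five_crop(img, 32)  # five 32x32 crops
print(tl.size)                               # (32, 32)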
async def subscriptions(self, request):
"""
Handles requests for new subscription websockets.
Args:
request (aiohttp.Request): the incoming request
Returns:
aiohttp.web.WebSocketResponse: the websocket response, when the
resulting websocket is closed
"""
if not self._accepting:
return web.Response(status=503)
web_sock = web.WebSocketResponse()
await web_sock.prepare(request)
async for msg in web_sock:
if msg.type == aiohttp.WSMsgType.TEXT:
await self._handle_message(web_sock, msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
LOGGER.warning(
'Web socket connection closed with exception %s',
web_sock.exception())
await web_sock.close()
await self._handle_unsubscribe(web_sock)
return web_sock | 0.002086 |
def handler(event, context):
"""
Historical {{cookiecutter.technology_name}} event collector.
This collector is responsible for processing Cloudwatch events and polling events.
"""
records = deserialize_records(event['Records'])
# Split records into two groups, update and delete.
# We don't want to query for deleted records.
update_records, delete_records = group_records_by_type(records)
capture_delete_records(delete_records)
# filter out error events
update_records = [e for e in update_records if not e['detail'].get('errorCode')]
# group records by account for more efficient processing
log.debug('Update Records: {records}'.format(records=records))
capture_update_records(update_records) | 0.003974 |
def ping(self, endpoint=''):
"""
Ping the server to make sure that you can access the base URL.
Arguments:
None
Returns:
`int` The HTTP status code returned by the server
"""
r = requests.get(self.url() + "/" + endpoint)
return r.status_code | 0.006116 |
def _parse_message(self, data):
"""
Parse the raw message from the device.
:param data: message data
:type data: string
:raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
"""
try:
header, values = data.split(':')
address, channel, value = values.split(',')
self.address = int(address)
self.channel = int(channel)
self.value = int(value)
except ValueError:
raise InvalidMessageError('Received invalid message: {0}'.format(data))
if header == '!EXP':
self.type = ExpanderMessage.ZONE
elif header == '!REL':
self.type = ExpanderMessage.RELAY
else:
raise InvalidMessageError('Unknown expander message header: {0}'.format(data)) | 0.004802 |
def partition(f, xs):
"""
Works similarly to filter, except it returns a two-item tuple where the
first item is the sequence of items that passed the filter and the
second is a sequence of items that didn't pass the filter
"""
t = type(xs)
true = [x for x in xs if f(x)]
false = [x for x in xs if not f(x)]
return t(true), t(false) | 0.00271 |
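A quick usage sketch for partition:
evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
print(evens, odds)  # [2, 4] [1, 3, 5]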
def ping(self, id):
""" Pings the motor with the specified id.
.. note:: The motor id should always be included in [0, 253]. 254 is used for broadcast.
"""
pp = self._protocol.DxlPingPacket(id)
try:
self._send_packet(pp, error_handler=None)
return True
except DxlTimeoutError:
return False | 0.007813 |
def update(self, ptime):
"""Update tween with the time since the last frame"""
delta = self.delta + ptime
total_duration = self.delay + self.duration
if delta > total_duration:
delta = total_duration
if delta < self.delay:
pass
elif delta == total_duration:
for key, tweenable in self.tweenables:
setattr(self.target, key, tweenable.target_value)
else:
fraction = self.ease((delta - self.delay) / (total_duration - self.delay))
for key, tweenable in self.tweenables:
res = tweenable.update(fraction)
if isinstance(res, float) and self.round:
res = int(res)
setattr(self.target, key, res)
if delta == total_duration or len(self.tweenables) == 0:
self.complete = True
self.delta = delta
if self.on_update:
self.on_update(self.target)
return self.complete | 0.002956 |
def lset(self, key, index, value):
"""Emulate lset."""
redis_list = self._get_list(key, 'LSET')
if redis_list is None:
raise ResponseError("no such key")
try:
redis_list[index] = self._encode(value)
except IndexError:
raise ResponseError("index out of range") | 0.00597 |
def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers | 0.006342 |
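A hedged usage sketch for detect_file_triggers; the trigger names and glob patterns are hypothetical:
trigger_patterns = {
    "minor": "newsfragments/*.feature",
    "patch": "newsfragments/*.bugfix",
}
bumps = detect_file_triggers(trigger_patterns)  # e.g. {"patch"} if only bugfix fragments exist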
def start(self):
"""
Starts this router.
At least the IOS image must be set before starting it.
"""
# trick: we must send sensors and power supplies info after starting the router
# otherwise they are not taken into account (Dynamips bug?)
yield from Router.start(self)
if self._sensors != [22, 22, 22, 22]:
yield from self.set_sensors(self._sensors)
if self._power_supplies != [1, 1]:
yield from self.set_power_supplies(self._power_supplies) | 0.005566 |
def list(self, limit=None, marker=None, name=None, visibility=None,
member_status=None, owner=None, tag=None, status=None,
size_min=None, size_max=None, sort_key=None, sort_dir=None,
return_raw=False):
"""
Returns a list of resource objects. Pagination is supported through the
optional 'marker' and 'limit' parameters. Filtering the returned value
is possible by specifying values for any of the other parameters.
"""
uri = "/%s" % self.uri_base
qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name,
visibility=visibility, member_status=member_status,
owner=owner, tag=tag, status=status, size_min=size_min,
size_max=size_max, sort_key=sort_key, sort_dir=sort_dir))
if qs:
uri = "%s?%s" % (uri, qs)
return self._list(uri, return_raw=return_raw) | 0.008649 |
def __field_to_subfields(self, field):
"""Fully describes data represented by field, including the nested case.
In the case that the field is not a message field, we have no fields nested
within a message definition, so we can simply return that field. However, in
the nested case, we can't simply describe the data with one field or even
with one chain of fields.
For example, if we have a message field
m_field = messages.MessageField(RefClass, 1)
which references a class with two fields:
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
then we would need to include both one and two to represent all the
data contained.
Calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">],
]
If the second field was instead a message field
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
referencing another class with two fields
class OtherRefClass(messages.Message):
three = messages.BooleanField(1)
four = messages.FloatField(2)
then we would need to recurse one level deeper for two.
With this change, calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">, <StringField "three">],
[<MessageField "m_field">, <StringField "two">, <StringField "four">],
]
Args:
field: An instance of a subclass of messages.Field.
Returns:
A list of lists, where each sublist is a list of fields.
"""
# Termination condition
if not isinstance(field, messages.MessageField):
return [[field]]
result = []
for subfield in sorted(field.message_type.all_fields(),
key=lambda f: f.number):
subfield_results = self.__field_to_subfields(subfield)
for subfields_list in subfield_results:
subfields_list.insert(0, field)
result.append(subfields_list)
return result | 0.002247 |
def add(user_id, resource_policy, admin, inactive, rate_limit):
'''
Add a new keypair.
USER_ID: User ID of a new key pair.
RESOURCE_POLICY: resource policy for new key pair.
'''
try:
user_id = int(user_id)
except ValueError:
pass # string-based user ID for Backend.AI v1.4+
with Session() as session:
try:
data = session.KeyPair.create(
user_id,
is_active=not inactive,
is_admin=admin,
resource_policy=resource_policy,
rate_limit=rate_limit)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
print_fail('KeyPair creation has failed: {0}'.format(data['msg']))
sys.exit(1)
item = data['keypair']
print('Access Key: {0}'.format(item['access_key']))
print('Secret Key: {0}'.format(item['secret_key'])) | 0.001047 |
def _wait_for_spot_instance(update_callback,
update_args=None,
update_kwargs=None,
timeout=10 * 60,
interval=30,
interval_multiplier=1,
max_failures=10):
'''
Helper function that waits for a spot instance request to become active
for a specific maximum amount of time.
:param update_callback: callback function which queries the cloud provider
for spot instance request. It must return None if
the required data, running instance included, is
not available yet.
:param update_args: Arguments to pass to update_callback
:param update_kwargs: Keyword arguments to pass to update_callback
:param timeout: The maximum amount of time(in seconds) to wait for the IP
address.
:param interval: The looping interval, i.e., the amount of time to sleep
before the next iteration.
:param interval_multiplier: Increase the interval by this multiplier after
each request; helps with throttling
:param max_failures: If update_callback returns ``False`` it's considered
query failure. This value is the amount of failures
accepted before giving up.
:returns: The update_callback returned data
:raises: SaltCloudExecutionTimeout
'''
if update_args is None:
update_args = ()
if update_kwargs is None:
update_kwargs = {}
duration = timeout
while True:
log.debug(
'Waiting for spot instance reservation. Giving up in '
'00:%02d:%02d', int(timeout // 60), int(timeout % 60)
)
data = update_callback(*update_args, **update_kwargs)
if data is False:
log.debug(
'update_callback has returned False which is considered a '
'failure. Remaining Failures: %s', max_failures
)
max_failures -= 1
if max_failures <= 0:
raise SaltCloudExecutionFailure(
'Too many failures occurred while waiting for '
'the spot instance reservation to become active.'
)
elif data is not None:
return data
if timeout < 0:
raise SaltCloudExecutionTimeout(
'Unable to get an active spot instance request for '
'00:{0:02d}:{1:02d}'.format(
int(duration // 60),
int(duration % 60)
)
)
time.sleep(interval)
timeout -= interval
if interval_multiplier > 1:
interval *= interval_multiplier
if interval > timeout:
interval = timeout + 1
log.info('Interval multiplier in effect; interval is '
'now %ss', interval) | 0.000327 |
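A minimal sketch of driving the polling helper above with a fake update callback that becomes ready on the third poll; the callback and its return dict are illustrative assumptions:
attempts = {"count": 0}
def fake_update():
    # pretend the spot request becomes active on the third poll
    attempts["count"] += 1
    return {"state": "active"} if attempts["count"] >= 3 else None
data = _wait_for_spot_instance(fake_update, timeout=30, interval=1)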
def _navigator_or_thunk(self, link):
'''Crafts a navigator or thunk from a hal-json link dict.
If the link is relative, the returned navigator will have a
uri that is relative to this navigator's uri.
If the link passed in is templated, a PartialNavigator will be
returned instead.
'''
# resolve relative uris against the current uri
uri = urlparse.urljoin(self.uri, link['href'])
link_obj = Link(uri=uri, properties=link)
if link.get('templated'):
# Can expand into a real HALNavigator
return PartialNavigator(link_obj, core=self._core)
else:
return HALNavigator(link_obj, core=self._core) | 0.002829 |
def solve(grid):
"""
solve a Sudoku grid inplace
"""
clauses = sudoku_clauses()
for i in range(1, 10):
for j in range(1, 10):
d = grid[i - 1][j - 1]
# For each digit already known, a clause (with one literal).
# Note:
# We could also remove all variables for the known cells
# altogether (which would be more efficient). However, for
# the sake of simplicity, we decided not to do that.
if d:
clauses.append([v(i, j, d)])
# solve the SAT problem
sol = set(pycosat.solve(clauses))
def read_cell(i, j):
# return the digit of cell i, j according to the solution
for d in range(1, 10):
if v(i, j, d) in sol:
return d
for i in range(1, 10):
for j in range(1, 10):
grid[i - 1][j - 1] = read_cell(i, j) | 0.001088 |
def get_translation_dicts(self):
"""
Returns dictionaries for the translation of keysyms to strings and from
strings to keysyms.
"""
keysym_to_string_dict = {}
string_to_keysym_dict = {}
#XK loads latin1 and miscellany on its own; load latin2-4 and greek
Xlib.XK.load_keysym_group('latin2')
Xlib.XK.load_keysym_group('latin3')
Xlib.XK.load_keysym_group('latin4')
Xlib.XK.load_keysym_group('greek')
#Make a standard dict and the inverted dict
for string, keysym in Xlib.XK.__dict__.items():
if string.startswith('XK_'):
string_to_keysym_dict[string[3:]] = keysym
keysym_to_string_dict[keysym] = string[3:]
return keysym_to_string_dict, string_to_keysym_dict | 0.00492 |
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
"""Creates a fused batch normalization op."""
# Store the original shape of the mean and variance.
mean_shape = mean.get_shape()
variance_shape = variance.get_shape()
# The fused batch norm expects the mean, variance, gamma and beta
# tensors to have dimension 1, so we flatten them to remove the
# extra dimensions. In addition, it expects the input_batch to have
# dimension 4, so we reshape it accordingly.
gamma_flatten = tf.reshape(self._gamma, shape=(self._num_channels,))
beta_flatten = tf.reshape(self._beta, shape=(self._num_channels,))
flatten_mean = tf.reshape(mean, shape=(self._num_channels,))
flatten_variance = tf.reshape(variance, shape=(self._num_channels,))
use_batch_stats = tf.convert_to_tensor(use_batch_stats)
input_shape = input_batch.get_shape()
output_shape = [-1] + input_shape.as_list()[1:]
flat_image_size = np.prod(self._image_shape, dtype=np.int32)
if len(self._data_format) == 4:
fusable_data_format = self._data_format
fusable_batch = input_batch
elif self._channel_index == 1 and self._image_shape:
fusable_data_format = "NCHW"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, self._num_channels, 1, flat_image_size))
else:
# The CPU implementation of FusedBatchNorm only supports NHWC tensor
# format for now.
fusable_data_format = "NHWC"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, 1, flat_image_size, self._num_channels))
common_args = {
"scale": gamma_flatten,
"offset": beta_flatten,
"epsilon": self._eps,
"data_format": fusable_data_format,
"name": "batch_norm"
}
def use_batch_stats_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=None,
variance=None,
is_training=True,
**common_args)
def moving_average_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=flatten_mean,
variance=flatten_variance,
is_training=False,
**common_args)
batch_norm_op, mean, variance = utils.smart_cond(
use_batch_stats, use_batch_stats_fused_batch_norm,
moving_average_fused_batch_norm)
if len(self._data_format) != 4:
batch_norm_op = tf.reshape(batch_norm_op, output_shape)
mean = tf.reshape(mean, mean_shape)
variance = tf.reshape(variance, variance_shape)
return batch_norm_op, mean, variance | 0.00457 |
def k_fold_cross_validation(
fitters,
df,
duration_col,
event_col=None,
k=5,
evaluation_measure=concordance_index,
predictor="predict_expectation",
predictor_kwargs={},
fitter_kwargs={},
): # pylint: disable=dangerous-default-value,too-many-arguments,too-many-locals
"""
Perform cross validation on a dataset. If multiple models are provided,
all models will train on each of the k subsets.
Parameters
----------
fitters: model
one or several objects which possess a method: ``fit(self, data, duration_col, event_col)``
Note that the last two arguments will be given as keyword arguments,
and that event_col is optional. The objects must also have
the "predictor" method defined below.
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and (optional) `event_col`, plus
other covariates. `duration_col` refers to the lifetimes of the subjects. `event_col`
refers to whether the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: (n,) array
the column in DataFrame that contains the subjects lifetimes.
event_col: (n,) array
the column in DataFrame that contains the subject's death observation. If left
as None, assumes all individuals are non-censored.
k: int
the number of folds to perform. n/k data will be withheld for testing on.
evaluation_measure: function
a function that accepts either (event_times, predicted_event_times),
or (event_times, predicted_event_times, event_observed)
and returns something (could be anything).
Default: statistics.concordance_index: (C-index)
between two series of event times
predictor: string
a string that matches a prediction method on the fitter instances.
For example, ``predict_expectation`` or ``predict_percentile``.
Default is "predict_expectation"
The interface for the method is: ``predict(self, data, **optional_kwargs)``
fitter_kwargs:
keyword args to pass into fitter.fit method
predictor_kwargs:
keyword args to pass into predictor-method.
Returns
-------
results: list
(k,1) list of scores for each fold. The scores can be anything.
"""
# Make sure fitters is a list
try:
fitters = list(fitters)
except TypeError:
fitters = [fitters]
# Each fitter has its own scores
fitterscores = [[] for _ in fitters]
n, _ = df.shape
df = df.copy()
if event_col is None:
event_col = "E"
df[event_col] = 1.0
df = df.reindex(np.random.permutation(df.index)).sort_values(event_col)
assignments = np.array((n // k + 1) * list(range(1, k + 1)))
assignments = assignments[:n]
testing_columns = df.columns.drop([duration_col, event_col])
for i in range(1, k + 1):
ix = assignments == i
training_data = df.loc[~ix]
testing_data = df.loc[ix]
T_actual = testing_data[duration_col].values
E_actual = testing_data[event_col].values
X_testing = testing_data[testing_columns]
for fitter, scores in zip(fitters, fitterscores):
# fit the fitter to the training data
fitter.fit(training_data, duration_col=duration_col, event_col=event_col, **fitter_kwargs)
T_pred = getattr(fitter, predictor)(X_testing, **predictor_kwargs).values
try:
scores.append(evaluation_measure(T_actual, T_pred, E_actual))
except TypeError:
scores.append(evaluation_measure(T_actual, T_pred))
# If a single fitter was given as argument, return a single result
if len(fitters) == 1:
return fitterscores[0]
return fitterscores | 0.002644 |
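This reads like lifelines' cross-validation utility; assuming that, a short usage sketch with the bundled Rossi recidivism dataset:
from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi
df = load_rossi()
scores = k_fold_cross_validation(CoxPHFitter(), df, duration_col="week",
                                 event_col="arrest", k=3)
print(scores)  # three concordance-index values, one per fold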
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
"""A wrapper around _concat_ixes that turns a list or string
into a list of C preprocessor command-line definitions.
"""
return c(prefix, env.subst_path(processDefines(defs)), suffix, env) | 0.003817 |
def outputtemplate(self, template_id):
"""Get an output template by ID"""
for profile in self.profiles:
for outputtemplate in profile.outputtemplates():
if outputtemplate.id == template_id:
return outputtemplate
raise KeyError("Outputtemplate " + template_id + " not found") | 0.005764 |
def send_request(self, request):
"""
Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction
"""
logger.debug("send_request - " + str(request))
assert isinstance(request, Request)
try:
host, port = request.destination
except AttributeError:
return
request.timestamp = time.time()
transaction = Transaction(request=request, timestamp=request.timestamp)
if transaction.request.type is None:
transaction.request.type = defines.Types["CON"]
if transaction.request.mid is None:
transaction.request.mid = self.fetch_mid()
key_mid = str_append_hash(host, port, request.mid)
self._transactions[key_mid] = transaction
key_token = str_append_hash(host, port, request.token)
self._transactions_token[key_token] = transaction
return self._transactions[key_mid] | 0.00185 |
def build_info_string(info):
"""
Build a new vcf INFO string based on the information in the info_dict.
The info is a dictionary with vcf info keys as keys and lists of vcf values
as values. If an annotation has no value, its value in info is False.
Args:
info (dict): A dictionary with information from the vcf file
Returns:
String: A string that is on the proper vcf format for the INFO column
"""
info_list = []
for annotation in info:
if info[annotation]:
info_list.append('='.join([annotation, ','.join(info[annotation])]))
else:
info_list.append(annotation)
return ';'.join(info_list) | 0.012676 |
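A small usage sketch for build_info_string (Python 3.7+ insertion-ordered dicts assumed for the output order):
info = {"DP": ["14"], "AF": ["0.5", "0.1"], "DB": False}
print(build_info_string(info))  # DP=14;AF=0.5,0.1;DB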
def _render_ngram_row(self, ngram, ngram_group, row_template, labels):
"""Returns the HTML for an n-gram row."""
cell_data = {'ngram': ngram}
label_data = {}
for label in labels:
label_data[label] = []
work_grouped = ngram_group.groupby(constants.WORK_FIELDNAME)
for work, group in work_grouped:
min_count = group[constants.COUNT_FIELDNAME].min()
max_count = group[constants.COUNT_FIELDNAME].max()
if min_count == max_count:
count = min_count
else:
count = '{}\N{EN DASH}{}'.format(min_count, max_count)
label_data[group[constants.LABEL_FIELDNAME].iloc[0]].append(
'{} ({})'.format(work, count))
for label, data in label_data.items():
label_data[label] = '; '.join(data)
cell_data.update(label_data)
html = row_template.format(**cell_data)
return '<tr>\n{}\n</tr>'.format(html) | 0.002022 |
def removeTab(self, index):
"""
Removes tab at index ``index``.
This method emits tab_closed for the removed tab.
:param index: index of the tab to remove.
"""
widget = self.widget(index)
try:
self._widgets.remove(widget)
except ValueError:
pass
self.tab_closed.emit(widget)
self._del_code_edit(widget)
QTabWidget.removeTab(self, index)
if widget == self._current:
self._current = None | 0.003817 |
def _add_match(self, match):
"""
Add a match
:param match:
:type match: Match
"""
if self.__name_dict is not None:
if match.name:
_BaseMatches._base_add(self._name_dict[match.name], (match))
if self.__tag_dict is not None:
for tag in match.tags:
_BaseMatches._base_add(self._tag_dict[tag], match)
if self.__start_dict is not None:
_BaseMatches._base_add(self._start_dict[match.start], match)
if self.__end_dict is not None:
_BaseMatches._base_add(self._end_dict[match.end], match)
if self.__index_dict is not None:
for index in range(*match.span):
_BaseMatches._base_add(self._index_dict[index], match)
if match.end > self._max_end:
self._max_end = match.end | 0.002309 |
def regions():
"""
Get all available regions for the SNS service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return [RegionInfo(name='us-east-1',
endpoint='sns.us-east-1.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='eu-west-1',
endpoint='sns.eu-west-1.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='us-west-1',
endpoint='sns.us-west-1.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='sa-east-1',
endpoint='sns.sa-east-1.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='us-west-2',
endpoint='sns.us-west-2.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='ap-northeast-1',
endpoint='sns.ap-northeast-1.amazonaws.com',
connection_cls=SNSConnection),
RegionInfo(name='ap-southeast-1',
endpoint='sns.ap-southeast-1.amazonaws.com',
connection_cls=SNSConnection),
] | 0.000765 |
def list_object_names(self, container, marker=None, limit=None, prefix=None,
delimiter=None, end_marker=None, full_listing=False):
"""
Return a list of the names of the objects in this container. You can
use the marker, end_marker, and limit params to handle pagination, and
the prefix and delimiter params to filter the objects returned. By
default only the first 10,000 objects are returned; if you need to
access more than that, set the 'full_listing' parameter to True.
"""
return container.list_object_names(marker=marker, limit=limit,
prefix=prefix, delimiter=delimiter, end_marker=end_marker,
full_listing=full_listing) | 0.006803 |
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid using the A* algorithm
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start_open_list = [start]
start.g = 0
start.f = 0
start.opened = BY_START
end_open_list = [end]
end.g = 0
end.f = 0
end.opened = BY_END
while len(start_open_list) > 0 and len(end_open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, start_open_list,
open_value=BY_START,
backtrace_by=BY_END)
if path:
return path, self.runs
self.runs += 1
self.keep_running()
path = self.check_neighbors(end, start, grid, end_open_list,
open_value=BY_END,
backtrace_by=BY_START)
if path:
return path, self.runs
# failed to find path
return [], self.runs | 0.001461 |
def load_rdf(self,
uri_or_path=None,
data=None,
file_obj=None,
rdf_format="",
verbose=False,
hide_base_schemas=True,
hide_implicit_types=True,
hide_implicit_preds=True):
"""Load an RDF source into an ontospy/rdflib graph"""
loader = RDFLoader(verbose=verbose)
loader.load(uri_or_path, data, file_obj, rdf_format)
self.rdflib_graph = loader.rdflib_graph
self.sources = loader.sources_valid
self.sparqlHelper = SparqlHelper(self.rdflib_graph)
self.namespaces = sorted(self.rdflib_graph.namespaces()) | 0.014514 |
def expected_bar_value(asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date | 0.002674 |
def set_precision(self, precision, persist=False):
"""
Set the precision of the sensor for the next readings.
If the ``persist`` argument is set to ``False`` this value
is "only" stored in the volatile SRAM, so it is reset when
the sensor gets power-cycled.
If the ``persist`` argument is set to ``True`` the current set
precision is stored into the EEPROM. Since the EEPROM has a limited
amount of writes (>50k), this command should be used wisely.
Note: root permissions are required to change the sensors precision.
Note: This function is supported since kernel 4.7.
:param int precision: the sensor precision in bits.
Valid values are between 9 and 12
:param bool persist: if the sensor precision should be written
to the EEPROM.
:returns: if the sensor precision could be set or not.
:rtype: bool
"""
if not 9 <= precision <= 12:
raise ValueError(
"The given sensor precision '{0}' is out of range (9-12)".format(
precision
)
)
exitcode = subprocess.call(
"echo {0} > {1}".format(precision, self.sensorpath), shell=True
)
if exitcode != 0:
raise W1ThermSensorError(
"Failed to change resolution to {0} bit. "
"You might have to be root to change the precision".format(precision)
)
if persist:
exitcode = subprocess.call(
"echo 0 > {0}".format(self.sensorpath), shell=True
)
if exitcode != 0:
raise W1ThermSensorError(
"Failed to write precision configuration to sensor EEPROM"
)
return True | 0.002579 |
def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) | 0.001027 |
def rmlinematch(oldstr, infile, dryrun=False):
"""
Sed-like line deletion function based on a given string.
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file.
"""
linelist = []
with open(infile) as reader:
for item in reader:
rmitem = re.match(r'.*{}'.format(oldstr), item)
# if isinstance(rmitem) == isinstance(None): Not quite sure the intent here
if rmitem is None:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit("""Unknown option specified to 'dryrun' argument,
Usage: dryrun=<True|False>.""") | 0.001854 |
def delete_entity(self, entity: int, immediate=False) -> None:
"""Delete an Entity from the World.
Delete an Entity and all of it's assigned Component instances from
the world. By default, Entity deletion is delayed until the next call
to *World.process*. You can request immediate deletion, however, by
passing the "immediate=True" parameter. This should generally not be
done during Entity iteration (calls to World.get_component/s).
Raises a KeyError if the given entity does not exist in the database.
:param entity: The Entity ID you wish to delete.
:param immediate: If True, delete the Entity immediately.
"""
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity) | 0.001838 |
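This looks like the esper ECS library's World.delete_entity (older releases that expose a World class); assuming that, a hedged usage sketch:
import esper
class Position:  # minimal illustrative component
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y
world = esper.World()
ent = world.create_entity(Position(1, 2))
world.delete_entity(ent, immediate=True)  # removed right away, no World.process() needed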
def get_build_info_for_index(self, build_index=None):
"""Get additional information for the build at the given index."""
url = urljoin(self.base_url, self.build_list_regex)
self.logger.info('Retrieving list of builds from %s' % url)
parser = self._create_directory_parser(url)
parser.entries = parser.filter(r'^\d+$')
if self.timestamp:
# If a timestamp is given, retrieve the folder with the timestamp
# as name
parser.entries = self.timestamp in parser.entries and \
[self.timestamp]
elif self.date:
# If date is given, retrieve the subset of builds on that date
parser.entries = filter(self.date_matches, parser.entries)
if not parser.entries:
message = 'No builds have been found'
raise errors.NotFoundError(message, url)
self.show_matching_builds(parser.entries)
# If no index has been given, set it to the last build of the day.
if build_index is None:
# Find the most recent non-empty entry.
build_index = len(parser.entries)
for build in reversed(parser.entries):
build_index -= 1
if not build_index or self.is_build_dir(build):
break
self.logger.info('Selected build: %s' % parser.entries[build_index])
return (parser.entries, build_index) | 0.001381 |
def ping(self, data):
"""PING reply"""
self.bot.send('PONG :' + data)
self.pong(event='PING', data=data) | 0.015625 |
def prepare(args):
"""
%prog prepare mcscanfile cdsfile [options]
Pick sequences from cdsfile to form fasta files, according to multiple
alignment in the mcscanfile.
The fasta sequences can then be used to construct phylogenetic tree.
Use --addtandem=tandemfile to collapse tandems of anchors into single row.
The tandemfile must be provided with *ALL* genomes involved, otherwise
result will be incomplete and redundant.
"""
from jcvi.graphics.base import discrete_rainbow
p = OptionParser(prepare.__doc__)
p.add_option("--addtandem", help="path to tandemfile [default: %default]")
p.add_option("--writecolors", default=False, action="store_true", \
help="generate a gene_name to color mapping file which will be taken " \
"by jcvi.apps.phylo.draw [default: %default]")
p.set_outdir(outdir="sequences")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mcscanfile, cdsfile = args
if opts.addtandem:
tandemfile = opts.addtandem
mcscanfile_with_tandems = add_tandems(mcscanfile, tandemfile)
mcscanfile = mcscanfile_with_tandems
seqdir = opts.outdir
mkdir(seqdir)
f = Fasta(cdsfile)
fp = must_open(mcscanfile)
if opts.writecolors:
fc = must_open("leafcolors.txt", "w")
n = 0
for i, row in enumerate(fp):
row = row.strip().split("\t")
if i == 0:
l = len(row)
if l <= 20:
colors = discrete_rainbow(l, shuffle=False)[1]
else:
colors = discrete_rainbow(l, usepreset=False, shuffle=False)[1]
warnings.warn("*** WARNING ***\n" \
"Too many columns. Colors may not be all distinctive.")
assert len(row)==l, "All rows should have same number of fields."
anchors = set()
for j, atom in enumerate(row):
color = "%s,%s,%s" % colors[j]
if atom == ".":
continue
elif "," in atom:
atom = atom.split(",")
for a in atom:
fc.write("{0}\t{1}\n".format(a, color))
anchors.add(a)
else:
fc.write("{0}\t{1}\n".format(atom, color))
anchors.add(atom)
if len(anchors) <= 3:
print("Not enough seqs to build trees for {0}".format(anchors), file=sys.stderr)
continue
pivot = row[0]
fw = must_open("%s/%s.cds" % (seqdir, pivot), "w")
for a in anchors:
if a not in f:
print(a)
a = find_first_isoform(a, f)
assert a, a
arec = f[a]
SeqIO.write((arec), fw, "fasta")
fw.close()
n+=1
if opts.writecolors:
fc.close()
logging.debug("leaf colors written to `{0}`".format(fc.name))
logging.debug("cds of {0} syntelog groups written to {1}/".format(n, seqdir))
return seqdir | 0.004622 |
def json(self, branch='master', filename=''):
"""Retrieve _filename_ from GitLab.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve.
Returns:
dict: Decoded JSON.
Raises:
SystemExit: Invalid JSON provided.
"""
file_contents = self.get(branch=branch, filename=filename)
try:
json_dict = json.loads(file_contents)
# TODO: Use json.JSONDecodeError when Python 3.4 has been deprecated
except ValueError as error:
msg = ('"{filename}" appears to be invalid json. '
'Please validate it with http://jsonlint.com. '
'JSON decoder error:\n'
'{error}').format(
filename=filename, error=error)
raise SystemExit(msg)
LOG.debug('JSON object:\n%s', json_dict)
return json_dict | 0.002094 |
def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: a Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device | 0.003672 |
def verify(full, dataset_uri):
"""Verify the integrity of a dataset.
"""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
all_okay = True
generated_manifest = dataset.generate_manifest()
generated_identifiers = set(generated_manifest["items"].keys())
manifest_identifiers = set(dataset.identifiers)
for i in generated_identifiers.difference(manifest_identifiers):
message = "Unknown item: {} {}".format(
i,
generated_manifest["items"][i]["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.difference(generated_identifiers):
message = "Missing item: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.intersection(generated_identifiers):
generated_hash = generated_manifest["items"][i]["size_in_bytes"]
manifest_hash = dataset.item_properties(i)["size_in_bytes"]
if generated_hash != manifest_hash:
message = "Altered item size: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if full:
for i in manifest_identifiers.intersection(generated_identifiers):
generated_hash = generated_manifest["items"][i]["hash"]
manifest_hash = dataset.item_properties(i)["hash"]
if generated_hash != manifest_hash:
message = "Altered item hash: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if not all_okay:
sys.exit(1)
else:
click.secho("All good :)", fg="green") | 0.000519 |
def raise_for_redefined_annotation(self, line: str, position: int, annotation: str) -> None:
"""Raise an exception if the given annotation is already defined.
:raises: RedefinedAnnotationError
"""
if self.disallow_redefinition and self.has_annotation(annotation):
raise RedefinedAnnotationError(self.get_line_number(), line, position, annotation) | 0.01023 |
def run(provider,
job_resources,
job_params,
task_descriptors,
name=None,
dry_run=False,
command=None,
script=None,
user=None,
user_project=None,
wait=False,
retries=0,
poll_interval=10,
after=None,
skip=False,
project=None,
disable_warning=False,
unique_job_id=False):
"""Actual dsub body, post-stdout-redirection."""
if not dry_run:
provider_base.emit_provider_message(provider)
if not disable_warning:
raise ValueError('Do not use this unstable API component!')
if command and script:
raise ValueError('Cannot supply both a command and script value.')
if command:
if name:
command_name = name
else:
command_name = _name_for_command(command)
# Add the shebang line to ensure the command is treated as Bash
script = job_model.Script(command_name, '#!/usr/bin/env bash\n' + command)
elif script:
# Read the script file
script_file = dsub_util.load_file(script)
script = job_model.Script(os.path.basename(script), script_file.read())
else:
raise ValueError('One of --command or a script name must be supplied')
if retries and not wait:
raise ValueError('Requesting retries requires requesting wait')
# The contract with providers and downstream code is that the job_params
# and task_params contain 'labels', 'envs', 'inputs', and 'outputs'.
job_model.ensure_job_params_are_complete(job_params)
job_model.ensure_task_params_are_complete(task_descriptors)
task_ids = {
task_descriptor.task_metadata.get('task-id')
for task_descriptor in task_descriptors
if task_descriptor.task_metadata.get('task-id') is not None
}
# Job and task parameters from the user have been validated.
# We can now compute some job and task properties, including:
# job_metadata such as the job-id, create-time, user-id, etc.
# task_resources such as the logging_path (which may include job-id, task-id)
job_metadata = _get_job_metadata(provider, user, name, script, task_ids,
user_project, unique_job_id)
_resolve_task_resources(job_metadata, job_resources, task_descriptors)
# Job and task properties are now all resolved. Begin execution!
if not dry_run:
print('Job: %s' % job_metadata['job-id'])
# Wait for predecessor jobs (if any)
if after:
if dry_run:
print('(Pretend) waiting for: %s.' % after)
else:
print('Waiting for predecessor jobs to complete...')
error_messages = _wait_after(provider, after, poll_interval, True)
if error_messages:
for msg in error_messages:
print_error(msg)
raise dsub_errors.PredecessorJobFailureError(
'One or more predecessor jobs completed but did not succeed.',
error_messages, None)
# Launch all the job tasks!
job_descriptor = job_model.JobDescriptor(job_metadata, job_params,
job_resources, task_descriptors)
launched_job = provider.submit_job(job_descriptor, skip)
if not dry_run:
if launched_job['job-id'] == dsub_util.NO_JOB:
print('Job output already present, skipping new job submission.')
return {'job-id': dsub_util.NO_JOB}
print('Launched job-id: %s' % launched_job['job-id'])
if launched_job.get('task-id'):
print('%s task(s)' % len(launched_job['task-id']))
print('To check the status, run:')
print(" dstat %s --jobs '%s' --users '%s' --status '*'" %
(provider_base.get_dstat_provider_args(provider, project),
launched_job['job-id'], launched_job['user-id']))
print('To cancel the job, run:')
print(" ddel %s --jobs '%s' --users '%s'" %
(provider_base.get_ddel_provider_args(provider, project),
launched_job['job-id'], launched_job['user-id']))
# Poll for job completion
if wait:
print('Waiting for job to complete...')
if retries:
error_messages = _wait_and_retry(provider, job_metadata['job-id'],
poll_interval, retries, job_descriptor)
else:
error_messages = _wait_after(provider, [job_metadata['job-id']],
poll_interval, False)
if error_messages:
for msg in error_messages:
print_error(msg)
raise dsub_errors.JobExecutionError(
'One or more jobs finished with status FAILURE or CANCELED'
' during wait.', error_messages, launched_job)
return launched_job | 0.010061 |
def create_child(self, nurest_object, response_choice=None, async=False, callback=None, commit=True):
""" Add given nurest_object to the current object
For example, to add a child into a parent, you can call
parent.create_child(nurest_object=child)
Args:
nurest_object (bambou.NURESTObject): the NURESTObject object to add
response_choice (int): Automatically send a response choice when confirmation is needed
async (bool): should the request be done asynchronously or not
callback (function): callback containing the object and the connection
Returns:
Returns the object and connection (object, connection)
Example:
>>> entity = NUEntity(name="Super Entity")
>>> parent_entity.create_child(entity) # the new entity as been created in the parent_entity
"""
# if nurest_object.id:
# raise InternalConsitencyError("Cannot create a child that already has an ID: %s." % nurest_object)
return self._manage_child_object(nurest_object=nurest_object,
async=async,
method=HTTP_METHOD_POST,
callback=callback,
handler=self._did_create_child,
response_choice=response_choice,
commit=commit) | 0.007106 |
def _update_lru_unlocked(self, new_context, spec, via):
"""
Update the LRU ("MRU"?) list associated with the connection described
by `kwargs`, destroying the most recently created context if the list
is full. Finally add `new_context` to the list.
"""
self._via_by_context[new_context] = via
lru = self._lru_by_via.setdefault(via, [])
if len(lru) < self.max_interpreters:
lru.append(new_context)
return
for context in reversed(lru):
if self._refs_by_context[context] == 0:
break
else:
LOG.warning('via=%r reached maximum number of interpreters, '
'but they are all marked as in-use.', via)
return
self._shutdown_unlocked(context, lru=lru, new_context=new_context) | 0.002342 |
def export_as_package(self, package_path, cv_source):
"""Exports the ensemble as a Python package and saves it to `package_path`.
Args:
package_path (str, unicode): Absolute/local path of place to save package in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Raises:
exceptions.UserError: If os.path.join(path, name) already exists.
"""
if os.path.exists(package_path):
raise exceptions.UserError('{} already exists'.format(package_path))
package_name = os.path.basename(os.path.normpath(package_path))
os.makedirs(package_path)
# Write __init__.py
with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
# Create package baselearners with each base learner having its own module
os.makedirs(os.path.join(package_path, 'baselearners'))
open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()
for idx, base_learner in enumerate(self.base_learners):
base_learner.export_as_file(os.path.join(package_path,
'baselearners',
'baselearner' + str(idx)))
# Create metalearner.py containing secondary learner
self.base_learner_origin.export_as_file(
os.path.join(package_path, 'metalearner'),
self.secondary_learner_hyperparameters
)
# Create cv.py containing CV method for getting meta-features
with open(os.path.join(package_path, 'cv.py'), 'wb') as f:
f.write(cv_source.encode('utf8'))
# Create stacker.py containing class for Xcessiv ensemble
ensemble_source = ''
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f:
ensemble_source += f.read()
ensemble_source += '\n\n' \
' def {}(self, X):\n' \
' return self._process_using_' \
'meta_feature_generator(X, "{}")\n\n'\
.format(self.base_learner_origin.meta_feature_generator,
self.base_learner_origin.meta_feature_generator)
with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:
f.write(ensemble_source.encode('utf8'))
# Create builder.py containing file where `xcessiv_ensemble` is instantiated for import
builder_source = ''
for idx, base_learner in enumerate(self.base_learners):
builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx)
builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name)
builder_source += 'from {} import metalearner\n'.format(package_name)
builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name)
builder_source += '\nbase_learners = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.base_learner,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nmeta_feature_generators = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \
' meta_feature_generators=meta_feature_generators,' \
' secondary_learner=metalearner.base_learner,' \
' cv_function=return_splits_iterable)\n'
with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
f.write(builder_source.encode('utf8')) | 0.003894 |
def half_mag_amplitude_ratio2(self, mag, avg):
"""
Return ratio of amplitude of higher and lower magnitudes.
A ratio of the amplitudes of magnitudes fainter and brighter than the
average. This ratio, by definition, should be higher
for EB (eclipsing binaries) than for others.
Parameters
----------
mag : array_like
An array of magnitudes.
avg : float
An average value of magnitudes.
Returns
-------
hl_ratio : float
Ratio of amplitude of higher and lower magnitudes than average.
"""
# For lower (fainter) magnitude than average.
index = np.where(mag > avg)
fainter_mag = mag[index]
lower_sum = np.sum((fainter_mag - avg) ** 2) / len(fainter_mag)
# For higher (brighter) magnitude than average.
index = np.where(mag <= avg)
brighter_mag = mag[index]
higher_sum = np.sum((avg - brighter_mag) ** 2) / len(brighter_mag)
# Return ratio.
return np.sqrt(lower_sum / higher_sum) | 0.001838 |
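A short usage sketch; extractor stands in for whatever instance provides this method, and numpy is assumed:
import numpy as np
mag = np.array([10.4, 10.1, 9.6, 10.5, 9.5, 10.0])
avg = np.median(mag)
ratio = extractor.half_mag_amplitude_ratio2(mag, avg)  # > 1 when the faint half scatters more than the bright half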
def popen(fn, *args, **kwargs) -> subprocess.Popen:
"""
Please ensure you're not killing the process before it has started properly
:param fn:
:param args:
:param kwargs:
:return:
"""
args = popen_encode(fn, *args, **kwargs)
logging.getLogger(__name__).debug('Start %s', args)
p = subprocess.Popen(args)
return p | 0.002786 |
def _handle_intermediate_metric_data(self, data):
"""Call assessor to process intermediate results
"""
if data['type'] != 'PERIODICAL':
return
if self.assessor is None:
return
trial_job_id = data['trial_job_id']
if trial_job_id in _ended_trials:
return
history = _trial_history[trial_job_id]
history[data['sequence']] = data['value']
ordered_history = _sort_history(history)
if len(ordered_history) < data['sequence']: # no user-visible update since last time
return
try:
result = self.assessor.assess_trial(trial_job_id, ordered_history)
        except Exception:
            _logger.exception('Assessor error')
            return  # without an assessment result there is nothing to act on
if isinstance(result, bool):
result = AssessResult.Good if result else AssessResult.Bad
elif not isinstance(result, AssessResult):
msg = 'Result of Assessor.assess_trial must be an object of AssessResult, not %s'
raise RuntimeError(msg % type(result))
if result is AssessResult.Bad:
_logger.debug('BAD, kill %s', trial_job_id)
send(CommandType.KillTrialJob, json_tricks.dumps(trial_job_id))
# notify tuner
_logger.debug('env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]', dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS)
if dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS == 'true':
self._earlystop_notify_tuner(data)
else:
_logger.debug('GOOD') | 0.003161 |
def where(self, other, cond, align=True, errors='raise',
try_cast=False, axis=0, transpose=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
import pandas.core.computation.expressions as expressions
assert errors in ['raise', 'ignore']
values = self.values
orig_other = other
if transpose:
values = values.T
other = getattr(other, '_values', getattr(other, 'values', other))
cond = getattr(cond, 'values', cond)
# If the default broadcasting would go in the wrong direction, then
# explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
elif transpose and values.ndim == self.ndim - 1:
cond = cond.T
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, other = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other))
except Exception as detail:
if errors == 'raise':
raise TypeError(
'Could not operate [{other!r}] with block values '
'[{detail!s}]'.format(other=other, detail=detail))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
# we are explicitly ignoring errors
block = self.coerce_to_target_dtype(other)
blocks = block.where(orig_other, cond, align=align,
errors=errors,
try_cast=try_cast, axis=axis,
transpose=transpose)
return self._maybe_downcast(blocks, 'infer')
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks | 0.000763 |
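Although this is an internal Block method, its behaviour is easiest to see through the public DataFrame.where API, which ultimately dispatches down to it; a minimal, runnable illustration:
import pandas as pd

df = pd.DataFrame({"a": [1, -2, 3], "b": [4.0, 5.0, -6.0]})
# Keep values where the condition holds; replace everything else with 0.
print(df.where(df > 0, other=0))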
def execute(self, dataman):
'''
run the task
:type dataman: :class:`~kitty.data.data_manager.DataManager`
:param dataman: the executing data manager
'''
self._event.clear()
try:
self._result = self._task(dataman, *self._args)
#
# We are going to re-throw this exception from get_results,
        # so we are doing such general exception handling at this point.
        # However, we do want to print it here as well.
#
except Exception as ex: # pylint: disable=W0703
self._exception = ex
KittyObject.get_logger().error(traceback.format_exc())
self._event.set() | 0.002886 |
def _build_context(self, request, enterprise_customer_uuid):
"""
Build common context parts used by different handlers in this view.
"""
enterprise_customer = EnterpriseCustomer.objects.get(uuid=enterprise_customer_uuid) # pylint: disable=no-member
context = {
self.ContextParameters.ENTERPRISE_CUSTOMER: enterprise_customer,
}
context.update(admin.site.each_context(request))
context.update(self._build_admin_context(request, enterprise_customer))
return context | 0.005464 |
def load_frame_building_sample_data():
"""
Sample data for the BuildingFrame object
:return:
"""
number_of_storeys = 6
interstorey_height = 3.4 # m
masses = 40.0e3 # kg
n_bays = 3
fb = models.BuildingFrame(number_of_storeys, n_bays)
fb.interstorey_heights = interstorey_height * np.ones(number_of_storeys)
fb.floor_length = 18.0 # m
fb.floor_width = 16.0 # m
fb.storey_masses = masses * np.ones(number_of_storeys) # kg
fb.bay_lengths = [6., 6.0, 6.0]
fb.set_beam_prop("depth", [0.5, 0.5, 0.5], repeat="up")
fb.set_beam_prop("width", [0.4, 0.4, 0.4], repeat="up")
fb.set_column_prop("width", [0.5, 0.5, 0.5, 0.5], repeat="up")
fb.set_column_prop("depth", [0.5, 0.5, 0.5, 0.5], repeat="up")
fb.n_seismic_frames = 3
fb.n_gravity_frames = 0
return fb | 0.001193 |
def set_yearly(self, interval, month, *, day_of_month=None,
days_of_week=None, index=None, **kwargs):
""" Set to repeat every month on specified days for every x no. of days
:param int interval: no. of days to repeat at
:param int month: month to repeat
:param int day_of_month: repeat day of a month
:param list[str] days_of_week: list of days of the week to repeat
:param index: index
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs)
"""
self.set_monthly(interval, day_of_month=day_of_month,
days_of_week=days_of_week, index=index, **kwargs)
self.__month = month | 0.003681 |
def sections_list(self, cmd=None):
"""List of config sections used by a command.
Args:
cmd (str): command name, set to ``None`` or ``''`` for the bare
command.
Returns:
list of str: list of configuration sections used by that command.
"""
sections = list(self.common.sections)
if not cmd:
if self.bare is not None:
sections.extend(self.bare.sections)
return sections
return []
sections.extend(self.subcmds[cmd].sections)
if cmd in self._conf:
sections.append(cmd)
return sections | 0.003026 |
def draw_network(self, anim):
"""Draws solution's graph using networkx
Parameters
----------
AnimationDing0
AnimationDing0 object
"""
g = nx.Graph()
ntemp = []
nodes_pos = {}
demands = {}
demands_pos = {}
for no, node in self._nodes.items():
g.add_node(node)
ntemp.append(node)
coord = self._problem._coord[no]
nodes_pos[node] = tuple(coord)
demands[node] = 'd=' + str(node.demand())
demands_pos[node] = tuple([a+b for a, b in zip(coord, [2.5]*len(coord))])
depot = self._nodes[self._problem._depot._name]
for r in self.routes():
n1 = r._nodes[0:len(r._nodes)-1]
n2 = r._nodes[1:len(r._nodes)]
e = list(zip(n1, n2))
e.append((depot, r._nodes[0]))
e.append((r._nodes[-1], depot))
g.add_edges_from(e)
plt.figure()
ax = plt.gca()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if anim is not None:
nx.draw_networkx(g, nodes_pos, with_labels=False, node_size=50)
plt.savefig(anim.file_path +
anim.file_prefix +
(4 - len(str(anim.counter))) * '0' +
str(anim.counter) + '.png',
dpi=150,
bbox_inches='tight')
anim.counter += 1
plt.close()
else:
nx.draw_networkx(g, nodes_pos)
nx.draw_networkx_labels(g, demands_pos, labels=demands)
plt.show() | 0.002952 |
def list(cls, service, ops_filter, page_size=0):
"""Gets the list of operations for the specified filter.
Args:
service: Google Genomics API service object
ops_filter: string filter of operations to return
page_size: the number of operations to requested on each list operation to
the pipelines API (if 0 or None, the API default is used)
Yields:
Operations matching the filter criteria.
"""
page_token = None
more_operations = True
documented_default_page_size = 256
documented_max_page_size = 2048
if not page_size:
page_size = documented_default_page_size
page_size = min(page_size, documented_max_page_size)
while more_operations:
api = service.operations().list(
name='operations',
filter=ops_filter,
pageToken=page_token,
pageSize=page_size)
response = google_base.Api.execute(api)
ops = response.get('operations', [])
for op in ops:
if cls.is_dsub_operation(op):
yield GoogleOperation(op)
page_token = response.get('nextPageToken')
more_operations = bool(page_token) | 0.008651 |
def remove_isolated_nodes(graph):
"""Remove isolated nodes from the network, in place.
:param pybel.BELGraph graph: A BEL graph
"""
nodes = list(nx.isolates(graph))
graph.remove_nodes_from(nodes) | 0.00463 |
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE | 0.004545 |
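A self-contained sketch of the index arithmetic; the six-character grouping below (grave, acute, hook, tilde, dot, bare) is an assumption about how utils.VOWELS is laid out, not the library's actual table:
# Hypothetical VOWELS layout: each vowel family occupies six consecutive slots.
VOWELS = "àáảãạa" "èéẻẽẹe"  # truncated for illustration

def accent_index(char):
    index = VOWELS.find(char.lower())
    return 5 - index % 6 if index != -1 else None

print(accent_index("á"))  # -> 4 (second slot of its six-character group)
print(accent_index("e"))  # -> 0 (the bare vowel)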
def load(self, file_key):
"""Load the data."""
var = self.sd.select(file_key)
data = xr.DataArray(from_sds(var, chunks=CHUNK_SIZE),
dims=['y', 'x']).astype(np.float32)
data = data.where(data != var._FillValue)
try:
data = data * np.float32(var.scale_factor)
except AttributeError:
pass
return data | 0.004938 |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
Rforce
PURPOSE:
evaluate radial force K_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
K_R (R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
DOCTEST:
"""
if True:
if isinstance(R,nu.ndarray):
if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z
out= nu.array([self._Rforce(rr,zz) for rr,zz in zip(R,z)])
return out
if (R > 16.*self._hr or R > 6.) and hasattr(self,'_kp'): return self._kp.Rforce(R,z)
if R < 1.: R4max= 1.
else: R4max= R
kmax= self._kmaxFac*self._beta
kmax= 2.*self._kmaxFac*self._beta
maxj1zeroIndx= nu.argmin((self._j1zeros-kmax*R4max)**2.) #close enough
ks= nu.array([0.5*(self._glx+1.)*self._dj1zeros[ii+1] + self._j1zeros[ii] for ii in range(maxj1zeroIndx)]).flatten()
weights= nu.array([self._glw*self._dj1zeros[ii+1] for ii in range(maxj1zeroIndx)]).flatten()
evalInt= ks*special.jn(1,ks*R)*(self._alpha**2.+ks**2.)**-1.5*(self._beta*nu.exp(-ks*nu.fabs(z))-ks*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks**2.)
return -2.*nu.pi*self._alpha*nu.sum(weights*evalInt) | 0.024424 |
def uri_to_regexp(self, uri):
"""converts uri w/ placeholder to regexp
          '/cars/{carName}/drivers/{DriverName}'
        -> '^/cars/(?P<carName>.*)/drivers/(?P<DriverName>[^/]*)$'
          '/cars/{carName}/drivers/{DriverName}/drive'
        -> '^/cars/(?P<carName>.*)/drivers/(?P<DriverName>.*)/drive$'
"""
def _convert(elem, is_last):
if not re.match('^{.*}$', elem):
return elem
name = elem.replace('{', '').replace('}', '')
if is_last:
return '(?P<%s>[^/]*)' % name
return '(?P<%s>.*)' % name
elems = uri.split('/')
num_elems = len(elems)
regexp = '^{}$'.format('/'.join([_convert(elem, (i == num_elems - 1)) for i, elem in enumerate(elems)]))
return regexp | 0.004 |
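A quick check of the generated pattern, assuming handler is an instance of the class defining this method:
import re

pattern = handler.uri_to_regexp('/cars/{carName}/drivers/{DriverName}')
match = re.match(pattern, '/cars/tesla/drivers/alice')
print(match.group('carName'), match.group('DriverName'))  # tesla alice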
def _ces_distance_simple(C1, C2):
"""Return the distance between two cause-effect structures.
Assumes the only difference between them is that some concepts have
disappeared.
"""
# Make C1 refer to the bigger CES.
if len(C2) > len(C1):
C1, C2 = C2, C1
destroyed = [c1 for c1 in C1 if not any(c1.emd_eq(c2) for c2 in C2)]
return sum(c.phi * concept_distance(c, c.subsystem.null_concept)
for c in destroyed) | 0.002169 |
def cleanup(self):
"""
This function is called when the service has finished running
regardless of intentionally or not.
"""
# if an event broker has been created for this service
if self.event_broker:
# stop the event broker
self.event_broker.stop()
# attempt
try:
# close the http server
self._server_handler.close()
self.loop.run_until_complete(self._server_handler.wait_closed())
self.loop.run_until_complete(self._http_handler.finish_connections(shutdown_timeout))
# if there was no handler
except AttributeError:
# keep going
pass
# more cleanup
self.loop.run_until_complete(self.app.shutdown())
self.loop.run_until_complete(self.app.cleanup()) | 0.003484 |
def _arg_to_str(arg):
"""Convert argument to a string."""
if isinstance(arg, str):
return _sugar(repr(arg))
elif arg is Empty:
return '\u2014'
else:
return _sugar(str(arg)) | 0.004717 |
def get_histories_fix_params(self, exp, rep, tag, **kwargs):
""" this function uses get_history(..) but returns all histories where the
subexperiments match the additional kwargs arguments. if alpha=1.0,
beta = 0.01 is given, then only those experiment histories are returned,
as a list.
"""
subexps = self.get_exps(exp)
tagvalues = [re.sub("0+$", '0', '%s%f'%(k, kwargs[k])) for k in kwargs]
histories = [self.get_history(se, rep, tag) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
params = [self.get_params(se) for se in subexps if all(map(lambda tv: tv in se, tagvalues))]
return histories, params | 0.011236 |
def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
region=None, key=None, keyid=None, profile=None):
'''
Accept a VPC pending requested peering connection between two VPCs.
name
Name of this state
conn_id
The connection ID to accept. Exclusive with conn_name. String type.
conn_name
The name of the VPC peering connection to accept. Exclusive with conn_id. String type.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
.. versionadded:: 2016.11.0
Example:
.. code-block:: yaml
boto_vpc.accept_vpc_peering_connection:
- conn_name: salt_peering_connection
# usage with vpc peering connection id and region
boto_vpc.accept_vpc_peering_connection:
- conn_id: pbx-1873d472
- region: us-west-2
'''
log.debug('Called state to accept VPC peering connection')
pending = __salt__['boto_vpc.is_peering_connection_pending'](
conn_id=conn_id, conn_name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
ret = {
'name': name,
'result': True,
'changes': {},
'comment': 'Boto VPC peering state'
}
if not pending:
ret['result'] = True
ret['changes'].update({'old':
'No pending VPC peering connection found. Nothing to be done.'})
return ret
if __opts__['test']:
ret['changes'].update({'old':
'Pending VPC peering connection found and can be accepted'})
return ret
fun = 'boto_vpc.accept_vpc_peering_connection'
log.debug('Calling `%s()` to accept this VPC peering connection', fun)
result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
if 'error' in result:
ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error'])
ret['result'] = False
return ret
ret['changes'].update({'old': '', 'new': result['msg']})
return ret | 0.003495 |
def request(self, command=None):
"""Run CLI -show commands
*command* (show) command to run
"""
node = new_ele('get')
filter = sub_ele(node, 'filter')
block = sub_ele(filter, 'oper-data-format-cli-block')
sub_ele(block, 'cli-show').text = command
return self._request(node) | 0.005917 |
def start(self):
"""
Invokes the mod-host process.
mod-host requires JACK to be running.
        mod-host does not start JACK automatically, so you need to start it before running mod-host.
.. note::
This function is experimental. There is no guarantee that the process will actually be initiated.
"""
if self.address != 'localhost':
            raise ModHostError('The host configured in the constructor isn\'t "localhost". '
'It is not possible to start a process on another device.')
try:
subprocess.call([self.process, '-p', str(self.port)])
except FileNotFoundError as e:
exception = ModHostError(
'mod-host not found. Did you install it? '
'(https://github.com/moddevices/mod-host#building)'
)
raise exception from e
self._started_with_this_api = True | 0.006243 |
def getskyimg(self,chip):
"""
Notes
=====
Return an array representing the sky image for the detector. The value
of the sky is what would actually be subtracted from the exposure by
the skysub step.
:units: electrons
"""
sci_chip = self._image[self.scienceExt,chip]
return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.subtractedSky | 0.013793 |
def post_manager_view(model, view="PostManager", template_dir=None):
"""
:param PostStruct:
"""
PostStruct = model.PostStruct
Pylot.context_(COMPONENT_POST_MANAGER=True)
if not template_dir:
template_dir = "Pylot/PostManager"
template_page = template_dir + "/%s.html"
class PostManager(StorageUploadView):
route_base = "post-manager"
__session_edit_key = "post_manager_edit_post"
def __init__(self):
self.meta_(title=" | Post Manager", description="Post Manager to admin site")
self.per_page = 25
def index(self):
"""
List all posts
"""
self.meta_(title__prepend="All Posts")
page = request.args.get("page", 1)
id = request.args.get("id", None)
slug = request.args.get("slug", None)
status = request.args.get("status", "all")
user_id = request.args.get("user_id", None)
type_id = request.args.get("type_id", None)
category_id = request.args.get("category_id", None)
posts = PostStruct.Post.all()
if id:
posts = posts.filter(PostStruct.Post.id == id)
if slug:
posts = posts.filter(PostStruct.Post.slug == slug)
if user_id:
posts = posts.filter(PostStruct.Post.user_id == user_id)
if type_id:
posts = posts.filter(PostStruct.Post.type_id == type_id)
if category_id:
posts = posts.join(PostStruct.PostCategory)\
.join(PostStruct.Category)\
.filter(PostStruct.Category.id == category_id)
if status == "publish":
posts = posts.filter(PostStruct.Post.is_published == True)
elif status == "draft":
posts = posts.filter(PostStruct.Post.is_draft == True)
elif status == "revision":
posts = posts.filter(PostStruct.Post.is_revision == True)
posts = posts.order_by(PostStruct.Post.id.desc())
posts = posts.paginate(page=page, per_page=self.per_page)
return self.render(posts=posts,
query_vars={
"id": id,
"slug": slug,
"user_id": user_id,
"type_id": type_id,
"status": status
},
view_template=template_page % "index")
def read(self, id):
"""
Read Post
"""
post = PostStruct.Post.get(id)
if not post:
abort(404, "Post doesn't exist")
self.meta_(title__prepend=post.title)
self.meta_(title__prepend="Read Post: ")
return self.render(post=post,
view_template=template_page % "read")
@route("upload-image", methods=["POST"])
def upload_image(self):
"""
Placeholder for markdown
"""
url = ""
if request.files.get("file"):
url = storage.put(request.files.get('file'))
else:
return "Couldn't upload file. No file exist", 401
return self.render(file_url=url)
# For when there is an error
error = False
if error:
return "error message", 401
return jsonify({
"id": "",
"url": "", # full image url
})
@route("new", defaults={"id": None}, endpoint="PostManager:new")
@route("edit/<id>", endpoint="PostManager:edit")
def edit(self, id):
"""
Create / Edit Post
"""
self.meta_(title__prepend="Edit Post")
types = [(t.id, t.name) for t in PostStruct.Type.all().order_by(PostStruct.Type.name.asc())]
categories = [(c.id, c.name) for c in PostStruct.Category.all().order_by(PostStruct.Category.name.asc())]
checked_cats = []
post = {
"id": 0,
"title": "",
"content": "",
"slug": "",
"type_id": 0
}
# saved in session
if request.args.get("error") and self.__session_edit_key in session:
post = session[self.__session_edit_key]
checked_cats = post["post_categories"]
del session[self.__session_edit_key]
elif id:
post = PostStruct.Post.get(id)
if not post or post.is_revision:
abort(404, "Post doesn't exist")
checked_cats = [c.id for c in post.categories]
return self.render(post=post,
types=types,
categories=categories,
checked_categories=checked_cats,
view_template=template_page % "edit"
)
def post(self):
"""
Submit
"""
id = request.form.get("id", None)
title = request.form.get("title", None)
slug = request.form.get("slug", None)
content = request.form.get("content", None)
type_id = request.form.get("type_id", None)
post_categories = request.form.getlist("post_categories")
published_date = request.form.get("published_date", None)
status = request.form.get("status", "draft")
is_published = True if status == "publish" else False
is_draft = True if status == "draft" else False
is_public = request.form.get("is_public", True)
if status in ["draft", "publish"] and (not title or not type_id):
if not title:
self.error_("Post Title is missing ")
if not type_id:
self.error_("Post type is missing")
session[self.__session_edit_key] = {
"id": id,
"title": title,
"content": content,
"slug": slug,
"type_id": type_id,
"published_date": published_date,
"post_categories": post_categories
}
if id:
url = url_for("PostManager:edit", id=id, error=1)
else:
url = url_for("PostManager:new", error=1)
return redirect(url)
data = {
"title": title,
"content": content,
"type_id": type_id
}
if published_date:
published_date = datetime.datetime.strptime(published_date, "%Y-%m-%d %H:%M:%S")
else:
published_date = datetime.datetime.now()
if id and status in ["delete", "revision"]:
post = PostStruct.Post.get(id)
if not post:
abort(404, "Post '%s' doesn't exist" % id)
if status == "delete":
post.delete()
self.success_("Post deleted successfully!")
return redirect(url_for("%s:index" % view ))
elif status == "revision":
data.update({
"user_id": current_user.id,
"parent_id": id,
"is_revision": True,
"is_draft": False,
"is_published": False,
"is_public": False
})
post = PostStruct.Post.create(**data)
return jsonify({"revision_id": post.id})
elif status in ["draft", "publish"]:
data.update({
"is_published": is_published,
"is_draft": is_draft,
"is_revision": False,
"is_public": is_public
})
if id:
post = PostStruct.Post.get(id)
if not post:
abort(404, "Post '%s' doesn't exist" % id)
elif post.is_revision:
abort(403, "Can't access this post")
else:
post.update(**data)
else:
data["user_id"] = current_user.id
if is_published:
data["published_date"] = published_date
post = PostStruct.Post.create(**data)
post.set_slug(slug or title)
post.replace_categories(map(int, post_categories))
if post.is_published and not post.published_date:
post.update(published_date=published_date)
self.success_("Post saved successfully!")
endpoint = "read" if post.is_published else "edit"
return redirect(url_for("%s:%s" % (view, endpoint), id=post.id))
else:
abort(400, "Invalid post status")
@route("categories", methods=["GET", "POST"])
def categories(self):
self.meta_(title__prepend="Post Categories")
if request.method == "POST":
id = request.form.get("id", None)
action = request.form.get("action")
name = request.form.get("name")
slug = request.form.get("slug", None)
ajax = request.form.get("ajax", False)
try:
if not id:
cat = PostStruct.Category.new(name=name, slug=slug)
if ajax:
return jsonify({
"id": cat.id,
"name": cat.name,
"slug": cat.slug,
"status": "OK"
})
self.success_("New category '%s' added" % name)
else:
post_cat = PostStruct.Category.get(id)
if post_cat:
if action == "delete":
post_cat.delete()
self.success_("Category '%s' deleted successfully!" % post_cat.name)
else:
post_cat.update(name=name, slug=slug)
self.success_("Category '%s' updated successfully!" % post_cat.name)
except Exception as ex:
if ajax:
return jsonify({
"error": True,
"error_message": ex.message
})
self.error_("Error: %s" % ex.message)
return redirect(url_for("%s:categories" % view))
else:
cats = PostStruct.Category.all().order_by(PostStruct.Category.name.asc())
return self.render(categories=cats,
view_template=template_page % "categories")
@route("types", methods=["GET", "POST"])
def types(self):
self.meta_(title__prepend="Post Types")
if request.method == "POST":
try:
id = request.form.get("id", None)
action = request.form.get("action")
name = request.form.get("name")
slug = request.form.get("slug", None)
if not id:
PostStruct.Type.new(name=name, slug=slug)
self.success_("New type '%s' added" % name)
else:
post_type = PostStruct.Type.get(id)
if post_type:
if action == "delete":
post_type.delete()
self.success_("Type '%s' deleted successfully!" % post_type.name)
else:
post_type.update(name=name, slug=slug)
self.success_("Type '%s' updated successfully!" % post_type.name)
except Exception as ex:
self.error_("Error: %s" % ex.message)
return redirect(url_for("%s:types" % view))
else:
types = PostStruct.Type.all().order_by(PostStruct.Type.name.asc())
return self.render(types=types,
view_template=template_page % "types")
return PostManager | 0.001466 |
def _open_connection(self):
"""
Open the connection to the database based on the configuration file.
"""
if self._connection:
try:
self._connection.close()
except Exception:
pass
db = self._get_db()
self._connection = db.open()
self._connection.onCloseCallback(self._on_close_callback) | 0.005013 |
def get_item2(self, tablename, key, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Fetch a single item from a table
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
kwargs = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ConsistentRead': consistent,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
if not isinstance(attributes, six.string_types):
attributes = ', '.join(attributes)
kwargs['ProjectionExpression'] = attributes
if alias:
kwargs['ExpressionAttributeNames'] = alias
data = self.call('get_item', **kwargs)
return Result(self.dynamizer, data, 'Item') | 0.001866 |
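A hedged usage sketch; the table name, key, and conn object are illustrative, and a real call needs valid AWS credentials:
# Illustrative call against a hypothetical 'users' table.
result = conn.get_item2(
    'users',
    {'id': 'abc-123'},
    attributes='#n, email',
    alias={'#n': 'name'},
    consistent=True,
)
print(result)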
def detect_client_auth_request(server_handshake_bytes):
"""
Determines if a CertificateRequest message is sent from the server asking
the client for a certificate
:param server_handshake_bytes:
A byte string of the handshake data received from the server
:return:
A boolean - if a client certificate request was found
"""
for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
if record_type != b'\x16':
continue
for message_type, message_data in parse_handshake_messages(record_data):
if message_type == b'\x0d':
return True
return False | 0.004491 |
def cross_validate(model, X, y, k_folds=5, metric="auto", shuffle=True):
"""Cross Validation
Evaluates the given model using the given data
repetitively fitting and predicting on different
chunks (folds) from the data.
Parameters:
-----------
model : dojo-model, the model to be evaluated
X : matrix, shape (n_samples, n_features), the data used for evaluation
y : vector, shape (n_samples, ), the desired labels
k_folds : integer, optional, the number of iterations/folds
metric : the single value error/accuracy metric, optional
shuffle : boolean, whether to shuffle the data before
splitting it or not
Returns:
--------
dict_scores : dictionary with train scores and test scores
"""
train_scores = []
test_scores = []
folds = KFolds(X, y, k=k_folds, shuffle=shuffle)
for X_train, X_test, y_train, y_test in folds:
model.fit(X_train, y_train)
if metric is None or metric == "auto":
train_scores.append(model.evaluate(X_train, y_train))
test_scores.append(model.evaluate(X_test, y_test))
else:
train_scores.append(
metric(y_train, model.predict(X_train))
)
test_scores.append(
metric(y_test, model.predict(X_test))
)
return {
"train_scores": np.array(train_scores),
"test_scores": np.array(test_scores),
} | 0.002736 |
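A minimal end-to-end sketch; DummyMean and mse below are stand-ins introduced for illustration, not part of the dojo package:
import numpy as np

class DummyMean:
    """Predicts the training-set mean for every sample."""
    def fit(self, X, y):
        self.mean_ = float(np.mean(y))
        return self
    def predict(self, X):
        return np.full(len(X), self.mean_)

def mse(y_true, y_pred):
    return float(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2))

X = np.random.rand(120, 3)
y = X @ np.array([1.0, 2.0, 3.0])
scores = cross_validate(DummyMean(), X, y, k_folds=4, metric=mse)
print(scores["test_scores"].mean())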
def run(self, bundle,
container_id=None,
log_path=None,
pid_file=None,
log_format="kubernetes"):
''' run is a wrapper to create, start, attach, and delete a container.
Equivalent command line example:
singularity oci run -b ~/bundle mycontainer
Parameters
==========
bundle: the full path to the bundle folder
container_id: an optional container_id. If not provided, use same
container_id used to generate OciImage instance
log_path: the path to store the log.
pid_file: specify the pid file path to use
log_format: defaults to kubernetes. Can also be "basic" or "json"
'''
return self._run(bundle,
container_id=container_id,
log_path=log_path,
pid_file=pid_file,
command="run",
log_format=log_format) | 0.006129 |
def cli(env,
format='table',
config=None,
verbose=0,
proxy=None,
really=False,
demo=False,
**kwargs):
"""Main click CLI entry-point."""
    # Populate environment with the client and set it as the context object
env.skip_confirmations = really
env.config_file = config
env.format = format
env.ensure_client(config_file=config, is_demo=demo, proxy=proxy)
env.vars['_start'] = time.time()
logger = logging.getLogger()
if demo is False:
logger.addHandler(logging.StreamHandler())
else:
# This section is for running CLI tests.
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger.addHandler(logging.NullHandler())
logger.setLevel(DEBUG_LOGGING_MAP.get(verbose, logging.DEBUG))
env.vars['_timings'] = SoftLayer.DebugTransport(env.client.transport)
env.client.transport = env.vars['_timings'] | 0.001073 |
def _isInt(x, precision = 0.0001):
"""
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer
"""
xInt = int(round(x))
return (abs(x - xInt) < precision * x, xInt) | 0.012739 |
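Two quick worked values (assuming _isInt is importable from its module):
print(_isInt(3.00002))  # (True, 3): |3.00002 - 3| = 2e-5 < 0.0001 * 3.00002
print(_isInt(3.2))      # (False, 3): 0.2 exceeds the relative tolerance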
def call_cc(fn: Callable) -> 'Observable':
r"""call-with-current-continuation.
Haskell: callCC f = Cont $ \c -> runCont (f (\a -> Cont $ \_ -> c a )) c
"""
def subscribe(on_next):
return fn(lambda a: Observable(lambda _: on_next(a))).subscribe(on_next)
return Observable(subscribe) | 0.01194 |
def eclipse_tt(p0,b,aR,P=1,ecc=0,w=0,npts=100,u1=0.394,u2=0.261,conv=True,
cadence=1626./86400,frac=1,sec=False,pars0=None,tol=1e-4,width=3):
"""
Trapezoidal parameters for simulated orbit.
All arguments passed to :func:`eclipse` except the following:
:param pars0: (optional)
Initial guess for least-sq optimization for trapezoid parameters.
:return dur,dep,slope:
Best-fit duration, depth, and T/tau for eclipse shape.
"""
ts,fs = eclipse(p0=p0,b=b,aR=aR,P=P,ecc=ecc,w=w,npts=npts,u1=u1,u2=u2,
conv=conv,cadence=cadence,frac=frac,sec=sec,tol=tol,width=width)
#logging.debug('{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}'.format(p0,b,aR,P,ecc,w,xmax,npts,u1,u2,leastsq,conv,cadence,frac,sec,new))
#logging.debug('ts: {} fs: {}'.format(ts,fs))
if pars0 is None:
depth = 1 - fs.min()
duration = (fs < (1-0.01*depth)).sum()/float(len(fs)) * (ts[-1] - ts[0])
tc0 = ts[fs.argmin()]
pars0 = np.array([duration,depth,5.,tc0])
dur,dep,slope,epoch = fit_traptransit(ts,fs,pars0)
return dur,dep,slope | 0.042132 |
def _baseattrs(self):
"""A dict of members expressed in literals"""
result = super()._baseattrs
result["static_spaces"] = self.static_spaces._baseattrs
result["dynamic_spaces"] = self.dynamic_spaces._baseattrs
result["cells"] = self.cells._baseattrs
result["refs"] = self.refs._baseattrs
if self.has_params():
result["params"] = ", ".join(self.parameters)
else:
result["params"] = ""
return result | 0.004032 |
def phone():
"""Return a random phone number in `#-(###)###-####` format."""
format = '#-(###)###-####'
result = ''
for item in format:
if item == '#':
result += str(random.randint(0, 9))
else:
result += item
return result | 0.003521 |
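Because the digits are random, the simplest sanity check is against the '#-(###)###-####' shape itself:
import re

assert re.fullmatch(r"\d-\(\d{3}\)\d{3}-\d{4}", phone())
print(phone())  # e.g. 7-(305)412-9981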
def _process_handler_result(self, response):
"""Examines out the response returned by a stanza handler and sends all
stanzas provided.
:Parameters:
- `response`: the response to process. `None` or `False` means 'not
handled'. `True` means 'handled'. Stanza or stanza list means
handled with the stanzas to send back
:Types:
- `response`: `bool` or `Stanza` or iterable of `Stanza`
:Returns:
- `True`: if `response` is `Stanza`, iterable or `True` (meaning
the stanza was processed).
- `False`: when `response` is `False` or `None`
:returntype: `bool`
"""
if response is None or response is False:
return False
if isinstance(response, Stanza):
self.send(response)
return True
try:
response = iter(response)
except TypeError:
return bool(response)
for stanza in response:
if isinstance(stanza, Stanza):
self.send(stanza)
else:
logger.warning(u"Unexpected object in stanza handler result:"
u" {0!r}".format(stanza))
return True | 0.00232 |
def batch_process(
self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1
):
"""
Process a large batch of tiles.
Parameters
----------
process : MapcheteProcess
process to be run
zoom : list or int
either single zoom level or list of minimum and maximum zoom level;
None processes all (default: None)
tile : tuple
zoom, row and column of tile to be processed (cannot be used with
zoom)
multi : int
number of workers (default: number of CPU cores)
max_chunksize : int
maximum number of process tiles to be queued for each worker;
(default: 1)
"""
list(self.batch_processor(zoom, tile, multi, max_chunksize)) | 0.003699 |
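A hedged usage sketch; the .mapchete configuration path is illustrative and assumes mapchete.open() returns an object exposing this method:
import mapchete

# Illustrative: run the configured process over zoom levels 0-8 with 4 workers.
with mapchete.open("my_process.mapchete") as mp:
    mp.batch_process(zoom=[0, 8], multi=4)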