Columns:
text (string, lengths 78 to 104k)
score (float64, values 0 to 0.18)
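The header above describes a simple two-column schema. Purely as an illustrative sketch (the dataset path and the use of the Hugging Face `datasets` library are assumptions, not something stated by this dump), rows with this schema could be loaded and inspected roughly like this:

# Hypothetical example only: "user/code-quality-scores" is a placeholder path,
# not the real name of this dataset.
from datasets import load_dataset

ds = load_dataset("user/code-quality-scores", split="train")
print(ds.features)  # expected: a string 'text' column and a float64 'score' column
for row in ds.select(range(3)):
    # print the score and the first 60 characters of each snippet on one line
    print(round(row["score"], 4), row["text"][:60].replace("\n", " "))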
def _notify_change(self):
    """ After all changes have settled, tell Java it changed """
    d = self.declaration
    self._notify_count -= 1
    if self._notify_count == 0:
        #: Tell the UI we made changes
        self.adapter.notifyDataSetChanged(now=True)
        self.get_context().timed_call(
            500, self._queue_pending_calls)
0.005249
def is_list_of_list(item):
    """ check whether the item is list (tuple)
    and consist of list (tuple) elements
    """
    if (
        type(item) in (list, tuple)
        and len(item)
        and isinstance(item[0], (list, tuple))
    ):
        return True
    return False
0.003521
def res(self):
    """Get all target values found and corresponding parameters."""
    params = [dict(zip(self.keys, p)) for p in self.params]
    return [
        {"target": target, "params": param}
        for target, param in zip(self.target, params)
    ]
0.007067
def get_logger(level=None, name=None, filename=None): """ Create a logger or return the current one if already instantiated. Parameters ---------- level : int one of the logger.level constants name : string name of the logger filename : string name of the log file Returns ------- logger.logger """ if level is None: level = settings.log_level if name is None: name = settings.log_name if filename is None: filename = settings.log_filename logger = lg.getLogger(name) # if a logger with this name is not already set up if not getattr(logger, 'handler_set', None): # get today's date and construct a log filename todays_date = dt.datetime.today().strftime('%Y_%m_%d') log_filename = os.path.join(settings.logs_folder, '{}_{}.log'.format(filename, todays_date)) # if the logs folder does not already exist, create it if not os.path.exists(settings.logs_folder): os.makedirs(settings.logs_folder) # create file handler and log formatter and set them up handler = lg.FileHandler(log_filename, encoding='utf-8') formatter = lg.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(level) logger.handler_set = True return logger
0.002083
def step(self, actions): """Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded. """ if self._store_rollouts and \ self._rollouts_by_epoch_and_split[self.current_epoch]: raise ValueError( "Data for current epoch has already been loaded from disk." ) (obs, unclipped_rewards, dones) = self._step(actions) obs = self._preprocess_observations(obs) (min_reward, max_reward) = self.reward_range rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward)) if self._store_rollouts: unclipped_rewards = unclipped_rewards.astype(np.float64) encoded_obs = self._encode_observations(obs) for (rollout, frame, action) in zip( self._current_batch_rollouts, self._current_batch_frames, actions ): rollout.append(frame._replace(action=action)) # orud = (observation, reward, unclipped_reward, done) self._current_batch_frames = [ Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones) ] return (obs, rewards, dones)
0.005852
def _dopost(url, params=None, payload=None, progress_cb=None): """Do a HTTP post to upload some data. If progress_cb is not None, it should be a callable which takes two arguments: the total number of bytes to upload and the number of bytes which have been uploaded. The callable is called periodically to show the progress of the upload. """ if params: encoded = urlencode(params) else: encoded = '' url = str(url + encoded) if payload is None: payload = '' # Try to make use of PyCURL if we have it try: c = pycurl.Curl() except NameError: c = None if c is not None: response = [ ] def append_to_response(buf): response.append(buf.decode()) def progress(download_t, download_d, upload_t, upload_d): if progress_cb is not None: progress_cb(upload_d, upload_t) c = pycurl.Curl() c.setopt(pycurl.URL, url) c.setopt(pycurl.POST, 1) c.setopt(pycurl.POSTFIELDS, payload) c.setopt(pycurl.INFILESIZE, len(payload)) c.setopt(pycurl.WRITEFUNCTION, append_to_response) c.setopt(pycurl.NOPROGRESS, 0) c.setopt(pycurl.PROGRESSFUNCTION, progress) c.perform() c.close() return json.loads(''.join(response)) else: if progress_cb is not None: progress_cb(0, len(payload)) response = [x.decode() for x in urlopen(url, payload).readlines()] if progress_cb is not None: progress_cb(len(payload), len(payload)) return json.loads(''.join(response))
0.001829
def items(self):
    """
    A generator yielding ``(key, value)`` attribute pairs, sorted by key name.
    """
    for key in sorted(self.attrs):
        yield key, self.attrs[key]
0.014925
def plot_divers(self, iabscissa=1, foffset=1e-19):
    """plot fitness, sigma, axis ratio...

    :param iabscissa: 0 means vs evaluations, 1 means vs iterations
    :param foffset: added to f-value

    :See: `plot()`

    """
    from matplotlib.pyplot import semilogy, hold, grid, \
        axis, title, text
    fontsize = pyplot.rcParams['font.size']

    if not hasattr(self, 'f'):
        self.load()
    dat = self

    minfit = min(dat.f[:, 5])
    dfit = dat.f[:, 5] - minfit  # why not using idx?
    dfit[dfit < 1e-98] = np.NaN

    self._enter_plotting()
    if dat.f.shape[1] > 7:
        # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
        semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k')
        hold(True)

    # (larger indices): additional fitness data, for example constraints values
    if dat.f.shape[1] > 8:
        # dd = abs(dat.f[:,7:]) + 10*foffset
        # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
        semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'y')
        hold(True)

    idx = np.where(dat.f[:, 5] > 1e-98)[0]  # positive values
    semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b')
    hold(True)
    grid(True)

    semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b')
    text(dat.f[-1, iabscissa], abs(dat.f[-1, 5]) + foffset,
         r'$|f_\mathsf{best}|$', fontsize=fontsize + 2)

    # negative f-values, dots
    sgn = np.sign(dat.f[:, 5])
    sgn[np.abs(dat.f[:, 5]) < 1e-98] = 0
    idx = np.where(sgn < 0)[0]
    semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
             '.m')  # , markersize=5

    # lines between negative f-values
    dsgn = np.diff(sgn)
    start_idx = 1 + np.where((dsgn < 0) * (sgn[1:] < 0))[0]
    stop_idx = 1 + np.where(dsgn > 0)[0]
    if sgn[0] < 0:
        start_idx = np.concatenate(([0], start_idx))
    for istart in start_idx:
        istop = stop_idx[stop_idx > istart]
        istop = istop[0] if len(istop) else 0
        idx = xrange(istart, istop if istop else dat.f.shape[0])
        if len(idx) > 1:
            semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
                     'm')  # , markersize=5
        # lines between positive and negative f-values
        # TODO: the following might plot values very close to zero
        if istart > 0:  # line to the left of istart
            semilogy(dat.f[istart-1:istart+1, iabscissa],
                     abs(dat.f[istart-1:istart+1, 5]) + foffset, '--m')
        if istop:  # line to the left of istop
            semilogy(dat.f[istop-1:istop+1, iabscissa],
                     abs(dat.f[istop-1:istop+1, 5]) + foffset, '--m')
            # mark the respective first positive values
            semilogy(dat.f[istop, iabscissa], abs(dat.f[istop, 5]) + foffset,
                     '.b', markersize=7)
        # mark the respective first negative values
        semilogy(dat.f[istart, iabscissa], abs(dat.f[istart, 5]) + foffset,
                 '.r', markersize=7)

    # standard deviations std
    semilogy(dat.std[:-1, iabscissa],
             np.vstack([list(map(max, dat.std[:-1, 5:])),
                        list(map(min, dat.std[:-1, 5:]))]).T,
             '-m', linewidth=2)
    text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std',
         fontsize=fontsize)
    text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std',
         fontsize=fontsize)

    # delta-fitness in cyan
    idx = isfinite(dfit)
    if 1 < 3:
        idx_nan = np.where(idx == False)[0]  # gaps
        if not len(idx_nan):  # should never happen
            semilogy(dat.f[:, iabscissa][idx], dfit[idx], '-c')
        else:
            i_start = 0
            for i_end in idx_nan:
                if i_end > i_start:
                    semilogy(dat.f[:, iabscissa][i_start:i_end],
                             dfit[i_start:i_end], '-c')
                i_start = i_end + 1
            if len(dfit) > idx_nan[-1] + 1:
                semilogy(dat.f[:, iabscissa][idx_nan[-1]+1:],
                         dfit[idx_nan[-1]+1:], '-c')
    text(dat.f[idx, iabscissa][-1], dfit[idx][-1],
         r'$f_\mathsf{best} - \min(f)$', fontsize=fontsize + 2)

    # overall minimum
    i = np.argmin(dat.f[:, 5])
    semilogy(dat.f[i, iabscissa], np.abs(dat.f[i, 5]), 'ro', markersize=9)
    semilogy(dat.f[i, iabscissa], dfit[idx][np.argmin(dfit[idx])] + 1e-98,
             'ro', markersize=9)
    # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')

    # AR and sigma
    semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r')  # AR
    semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g')  # sigma
    text(dat.f[-1, iabscissa], dat.f[-1, 3], r'axis ratio',
         fontsize=fontsize)
    text(dat.f[-1, iabscissa], dat.f[-1, 2] / 1.5, r'$\sigma$',
         fontsize=fontsize+3)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    text(ax[0] + 0.01, ax[2],  # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
         '.min($f$)=' + repr(minfit))  #'.f_recent=' + repr(dat.f[-1, 5]))

    # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
    # title(r'blue:$\mathrm{abs}(f)$, cyan:$f - \min(f)$, green:$\sigma$, red:axis ratio',
    #       fontsize=fontsize - 0.0)
    title(r'$|f_{\mathrm{best},\mathrm{med},\mathrm{worst}}|$, $f - \min(f)$, $\sigma$, axis ratio')

    # if __name__ != 'cma':  # should be handled by the caller
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
0.003257
def mapping_match(uri, mapping):
    """Determine whether the given URI matches one of the given mappings.

    Returns True if a match was found, False otherwise.
    """
    try:
        val = mapping_get(uri, mapping)
        return True
    except KeyError:
        return False
0.003534
def set_var_log_arr(self, value):
    ''' setter '''
    if isinstance(value, np.ndarray):
        self.__var_log_arr = value
    else:
        raise TypeError()
0.01105
def create_img():
    """Creates an image, as a `peri.util.Image`, which is similar
    to the image in the tutorial"""
    # 1. particles + coverslip
    rad = 0.5 * np.random.randn(POS.shape[0]) + 4.5  # 4.5 +- 0.5 px particles
    part = objs.PlatonicSpheresCollection(POS, rad, zscale=0.89)
    slab = objs.Slab(zpos=4.92, angles=(-4.7e-3, -7.3e-4))
    objects = comp.ComponentCollection([part, slab], category='obj')

    # 2. psf, ilm
    p = exactpsf.FixedSSChebLinePSF(kfki=1.07, zslab=-29.3, alpha=1.17,
                                    n2n1=0.98, sigkf=-0.33, zscale=0.89,
                                    laser_wavelength=0.45)
    i = ilms.BarnesStreakLegPoly2P1D(npts=(16, 10, 8, 4), zorder=8)
    b = ilms.LegendrePoly2P1D(order=(7, 2, 2), category='bkg')
    off = comp.GlobalScalar(name='offset', value=-2.11)
    mdl = models.ConfocalImageModel()
    st = states.ImageState(util.NullImage(shape=[48, 64, 64]),
                           [objects, p, i, b, off], mdl=mdl, model_as_data=True)
    b.update(b.params, BKGVALS)
    i.update(i.params, ILMVALS)
    im = st.model + np.random.randn(*st.model.shape) * 0.03
    return util.Image(im)
0.009242
def check_passwd(guess, passwd):
    """
    Tests to see if the guess, after salting and hashing, matches the
    passwd from the database.

    @param guess: incoming password trying to be used for authentication
    @param passwd: already encrypted password from the database
    @returns: boolean
    """
    m = sha1()
    salt = passwd[:salt_len * 2]  # salt_len * 2 due to encode('hex_codec')
    m.update(unicode2bytes(guess) + unicode2bytes(salt))
    crypted_guess = bytes2unicode(salt) + m.hexdigest()

    return (crypted_guess == bytes2unicode(passwd))
0.001767
def getservers(self, vhost=None):
    '''
    Return current servers

    :param vhost: return only servers of vhost if specified. '' to return
                  only default servers. None for all servers.
    '''
    if vhost is not None:
        return [s for s in self.connections if s.protocol.vhost == vhost]
    else:
        return list(self.connections)
0.014742
def _recv(self, rm_colon=False, blocking=True, expected_replies=None, default_rvalue=[''], ignore_unexpected_replies=True, rm_first=True, recur_limit=10): """ Receives and processes an IRC protocol message. Optional arguments: * rm_colon=False - If True: If the message is > 3 items long: Remove the colon(if found) from the [3] item. Else: Remove the colon(if found) from the [2] item. * blocking=True - Should this call block? * expected_replies=None - If specified: If no matching reply is found: Return the default_rvalue. Else: Return the message. * default_rvalue=[''] - If no message or a matching message; is found, return default_rvalue. * ignore_unexpected_replies=True - If an, unexpected reply is encountered, should we keep going, until we get a valid reply? If False, it will just return default_rvalue(If a valid reply isn't found). * rm_first=True - If True, remove [0] from the message before returning it. """ append = False if expected_replies: if len(expected_replies) > 1: append = True if self.readable(): msg = self._raw_recv() else: if not blocking: return default_rvalue else: msg = self._raw_recv() msg = msg.split(None, 3) if msg[1] in self.error_dictionary: self.exception(msg[1]) if rm_colon: if len(msg) > 3: if msg[3][0] == ':': msg[3] = msg[3][1:] elif len(msg) > 2: if msg[2][0] == ':': msg[2] = msg[2][1:] if expected_replies: if msg[1] not in expected_replies: self.stepback(append) if ignore_unexpected_replies and recur_limit > 0: recur_limit -= 1 return self._recv(rm_colon=rm_colon, blocking=blocking, \ expected_replies=expected_replies, \ default_rvalue=default_rvalue, \ ignore_unexpected_replies=ignore_unexpected_replies, rm_first=rm_first, recur_limit=recur_limit) return default_rvalue if rm_first: return msg[1:] return msg
0.00399
def setLevel(self, level):
    r"""Overrides the parent method to adapt the formatting string to the level.

    Parameters
    ----------
    level : int
        The new log level to set. See the logging levels in the logging
        module for details.

    Examples
    --------
    >>> import logging
    >>> Logger.setLevel(logging.DEBUG)
    """
    if logging.DEBUG >= level:
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)",
            "%d.%m.%Y %H:%M:%S")
        self._handler.setFormatter(formatter)
    else:
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)-8s] %(message)s",
            "%d.%m.%Y %H:%M:%S")
        self._handler.setFormatter(formatter)
    NativeLogger.setLevel(self, level)
0.013655
def from_str(cls, version_str: str):
    """
    Alternate constructor that accepts a string SemVer.
    """
    o = cls()
    o.version = version_str
    return o
0.010695
def cli(env): """Server order options for a given chassis.""" hardware_manager = hardware.HardwareManager(env.client) options = hardware_manager.get_create_options() tables = [] # Datacenters dc_table = formatting.Table(['datacenter', 'value']) dc_table.sortby = 'value' for location in options['locations']: dc_table.add_row([location['name'], location['key']]) tables.append(dc_table) # Presets preset_table = formatting.Table(['size', 'value']) preset_table.sortby = 'value' for size in options['sizes']: preset_table.add_row([size['name'], size['key']]) tables.append(preset_table) # Operating systems os_table = formatting.Table(['operating_system', 'value']) os_table.sortby = 'value' for operating_system in options['operating_systems']: os_table.add_row([operating_system['name'], operating_system['key']]) tables.append(os_table) # Port speed port_speed_table = formatting.Table(['port_speed', 'value']) port_speed_table.sortby = 'value' for speed in options['port_speeds']: port_speed_table.add_row([speed['name'], speed['key']]) tables.append(port_speed_table) # Extras extras_table = formatting.Table(['extras', 'value']) extras_table.sortby = 'value' for extra in options['extras']: extras_table.add_row([extra['name'], extra['key']]) tables.append(extras_table) env.fout(formatting.listing(tables, separator='\n'))
0.000668
def parse_bulk_delete(prs, conn):
    """Delete bulk_records.

    Arguments:
        prs:  parser object of argparse
        conn: dictionary of connection information
    """
    prs_delete = prs.add_parser(
        'bulk_delete', help='delete bulk records of specific zone')
    set_option(prs_delete, 'infile')
    conn_options(prs_delete, conn)
    prs_delete.add_argument('--domain', action='store',
                            help='delete records with specify zone')
    prs_delete.set_defaults(func=delete)
0.001938
def get_overlapping_ranges(self, collection_link, partition_key_ranges):
    '''
    Given a partition key range and a collection, returns the list of
    overlapping partition key ranges.

    :param str collection_link: The name of the collection.
    :param list partition_key_range: List of partition key range.

    :return: List of overlapping partition key ranges.
    :rtype: list
    '''
    cl = self._documentClient

    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    collection_routing_map = self._collection_routing_map_by_item.get(collection_id)
    if collection_routing_map is None:
        collection_pk_ranges = list(cl._ReadPartitionKeyRanges(collection_link))
        # for large collections, a split may complete between the read partition key ranges query page responses,
        # causing the partitionKeyRanges to have both the children ranges and their parents. Therefore, we need
        # to discard the parent ranges to have a valid routing map.
        collection_pk_ranges = _PartitionKeyRangeCache._discard_parent_ranges(collection_pk_ranges)
        collection_routing_map = _CollectionRoutingMap.CompleteRoutingMap(
            [(r, True) for r in collection_pk_ranges], collection_id)
        self._collection_routing_map_by_item[collection_id] = collection_routing_map
    return collection_routing_map.get_overlapping_ranges(partition_key_ranges)
0.011628
def _set_extended(self, v, load=False): """ Setter method for extended, mapped from YANG variable /overlay/access_list/type/vxlan/extended (list) If this variable is read-only (config: false) in the source YANG file, then _set_extended is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_extended() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("ext_user_acl_name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ext-user-acl-name', extensions={u'tailf-common': {u'info': u'extended <user-acl-name>', u'cli-sequence-commands': None, u'callpoint': u'VxlanVisibilityExtendedCallpoint', u'cli-mode-name': u'config-overlay-vxlan-ext-$(ext-user-acl-name)'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'extended <user-acl-name>', u'cli-sequence-commands': None, u'callpoint': u'VxlanVisibilityExtendedCallpoint', u'cli-mode-name': u'config-overlay-vxlan-ext-$(ext-user-acl-name)'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """extended must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("ext_user_acl_name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ext-user-acl-name', extensions={u'tailf-common': {u'info': u'extended <user-acl-name>', u'cli-sequence-commands': None, u'callpoint': u'VxlanVisibilityExtendedCallpoint', u'cli-mode-name': u'config-overlay-vxlan-ext-$(ext-user-acl-name)'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'extended <user-acl-name>', u'cli-sequence-commands': None, u'callpoint': u'VxlanVisibilityExtendedCallpoint', u'cli-mode-name': u'config-overlay-vxlan-ext-$(ext-user-acl-name)'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='list', is_config=True)""", }) self.__extended = t if hasattr(self, '_set'): self._set()
0.004061
def next_offsets(self):
    # type: (Descriptor) -> Offsets
    """Retrieve the next offsets
    :param Descriptor self: this
    :rtype: Offsets
    :return: download offsets
    """
    resume_bytes = self._resume()
    with self._meta_lock:
        if self._chunk_num >= self._total_chunks:
            return None, resume_bytes
        if self._chunk_size == -1 and self._src_block_list is not None:
            num_bytes = self._src_block_list[self._chunk_num].size
        else:
            if self._offset + self._chunk_size > self._src_ase.size:
                num_bytes = self._src_ase.size - self._offset
            else:
                num_bytes = self._chunk_size
        chunk_num = self._chunk_num
        range_start = self._offset
        range_end = self._offset + num_bytes - 1
        self._offset += num_bytes
        self._chunk_num += 1
        return Offsets(
            chunk_num=chunk_num,
            num_bytes=num_bytes,
            range_start=range_start,
            range_end=range_end,
        ), resume_bytes
0.002593
def get_port_profile_status_output_port_profile_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_port_profile_status = ET.Element("get_port_profile_status")
    config = get_port_profile_status
    output = ET.SubElement(get_port_profile_status, "output")
    port_profile = ET.SubElement(output, "port-profile")
    name = ET.SubElement(port_profile, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003503
def add(message, level=0, user=None, message_short=None,log_file_name='%s/pyscada_daemon.log' % settings.BASE_DIR,): """ add a new massage/error notice to the log <0 - Debug 1 - Emergency 2 - Critical 3 - Errors 4 - Alerts 5 - Warnings 6 - Notification (webnotice) 7 - Information (webinfo) 8 - Notification (notice) 9 - Information (info) """ #if not access(path.dirname(self.log_file_name), W_OK): # self.stderr.write("logfile path is not writeable\n") # sys.exit(0) #if access(self.log_file_name, F_OK) and not access(self.log_file_name, W_OK): # self.stderr.write("logfile is not writeable\n") # sys.exit(0) if message_short is None: message_len = len(message) if message_len > 35: message_short = message[0:31] + '...' else: message_short = message #log_ob = Log(message=message, level=level, message_short=message_short, timestamp=time()) #if user: # log_ob.user = user #log_ob.save() stdout = open(log_file_name, "a+") stdout.write("%s (%s,%d):%s\n" % (datetime.now().isoformat(' '),'none',level,message)) stdout.flush()
0.011657
def add_to_public_members(self, public_member):
    """
    :calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
    :param public_member: :class:`github.NamedUser.NamedUser`
    :rtype: None
    """
    assert isinstance(public_member, github.NamedUser.NamedUser), public_member
    headers, data = self._requester.requestJsonAndCheck(
        "PUT",
        self.url + "/public_members/" + public_member._identity
    )
0.007984
def staged_predict_proba(self, test_data):
    """
    Predict class probabilities at each stage of an H2O Model (only GBM models).

    The output structure is analogous to the output of function
    predict_leaf_node_assignment. For each tree t and class c there will be a
    column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the
    corresponding predicted probability of this class by combining the raw
    contributions of trees T1.Cc,..,TtCc. Binomial models build the trees just
    for the first class and values in columns Tx.C1 thus correspond to the
    probability p0.

    :param H2OFrame test_data: Data on which to make predictions.

    :returns: A new H2OFrame of staged predictions.
    """
    if not isinstance(test_data, h2o.H2OFrame):
        raise ValueError("test_data must be an instance of H2OFrame")
    j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                data={"predict_staged_proba": True})
    return h2o.get_frame(j["predictions_frame"]["name"])
0.0092
def _update_physical_disk_details(raid_config, server):
    """Adds the physical disk details to the RAID configuration passed."""
    raid_config['physical_disks'] = []
    physical_drives = server.get_physical_drives()
    for physical_drive in physical_drives:
        physical_drive_dict = physical_drive.get_physical_drive_dict()
        raid_config['physical_disks'].append(physical_drive_dict)
0.0025
def _runAuction(self, gteeOfferPrice, gteeBidPrice, haveQ):
    """ Clears an auction to determine the quantity and price for each
    offer/bid.
    """
    pOffers = [offer for offer in self.offers if not offer.reactive]
    pBids = [bid for bid in self.bids if not bid.reactive]

    # Clear offer/bid quantities and prices.
    auction = Auction(self.case, pOffers, pBids, self.auctionType,
                      gteeOfferPrice, gteeBidPrice, self.limits)
    auction.run()

    # Separate auction for reactive power.
    if haveQ:
        qOffers = [offer for offer in self.offers if offer.reactive]
        qBids = [bid for bid in self.bids if bid.reactive]

        # Too complicated to scale with mixed bids/offers (only
        # auction_types LAO and FIRST_PRICE allowed)
        qAuction = Auction(self.case, qOffers, qBids, self.auctionType,
                           gteeOfferPrice, gteeBidPrice, self.limits)
        qAuction.run()
0.002933
def find_django_migrations_module(module_name):
    """ Tries to locate <module_name>.migrations_django (without actually
    importing it). Appends either ".migrations_django" or ".migrations"
    to module_name.

    For details why:
    https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
    """
    import imp
    try:
        module_info = imp.find_module(module_name)
        module = imp.load_module(module_name, *module_info)
        imp.find_module('migrations_django', module.__path__)
        return module_name + '.migrations_django'
    except ImportError:
        return module_name + '.migrations'
0.003115
def get_content(self, force_download=False):
    """ Retrieve the content for this Document from the server

    :type force_download: Boolean
    :param force_download: True to download from the server
        regardless of the cache's contents

    :rtype: String
    :returns: the content data

    :raises: APIError if the API request is not successful
    """
    return self.client.get_document(self.url(), force_download)
0.004292
def unzip_from_uri(uri, layer_zip_path, unzip_output_dir, progressbar_label): """ Download the LayerVersion Zip to the Layer Pkg Cache Parameters ---------- uri str Uri to download from layer_zip_path str Path to where the content from the uri should be downloaded to unzip_output_dir str Path to unzip the zip to progressbar_label str Label to use in the Progressbar """ try: get_request = requests.get(uri, stream=True, verify=os.environ.get('AWS_CA_BUNDLE', True)) with open(layer_zip_path, 'wb') as local_layer_file: file_length = int(get_request.headers['Content-length']) with progressbar(file_length, progressbar_label) as p_bar: # Set the chunk size to None. Since we are streaming the request, None will allow the data to be # read as it arrives in whatever size the chunks are received. for data in get_request.iter_content(chunk_size=None): local_layer_file.write(data) p_bar.update(len(data)) # Forcefully set the permissions to 700 on files and directories. This is to ensure the owner # of the files is the only one that can read, write, or execute the files. unzip(layer_zip_path, unzip_output_dir, permission=0o700) finally: # Remove the downloaded zip file path_to_layer = Path(layer_zip_path) if path_to_layer.exists(): path_to_layer.unlink()
0.00327
def run_main(): """run_main Get Splunk Service Token """ parser = argparse.ArgumentParser( description=( 'AntiNex - ' 'Get Token from Splunk')) parser.add_argument( '-u', help='username', required=False, dest='user') parser.add_argument( '-p', help='user password', required=False, dest='password') parser.add_argument( '-f', help='splunk-ready request in a json file', required=False, dest='datafile') parser.add_argument( '-a', help='host address: <fqdn:port>', required=False, dest='address') parser.add_argument( '-s', help='silent', required=False, dest='silent', action='store_true') args = parser.parse_args() user = ev( 'API_USER', 'user-not-set') password = ev( 'API_PASSWORD', 'password-not-set') address = ev( 'API_ADDRESS', 'localhost:8088') admin_address = ev( 'API_ADDRESS', 'localhost:8089') host = ev( 'API_HOST', 'localhost') port = int(ev( 'API_PORT', '8088')) verbose = bool(str(ev( 'API_VERBOSE', 'true')).lower() == 'true') datafile = None if args.user: user = args.user if args.password: password = args.password if args.address: address = args.address if args.datafile: datafile = args.datafile if args.silent: verbose = False usage = ('Please run with -u <username> ' '-p <password> ' '-a <host address as: fqdn:port> ') valid = True if not user or user == 'user-not-set': log.error('missing user') valid = False if not password or password == 'password-not-set': log.error('missing password') valid = False if not valid: log.error(usage) sys.exit(1) if verbose: log.info(( 'creating client user={} address={}').format( user, address)) last_msg = '' host = '' port = -1 try: last_msg = ( 'Invalid address={}').format( address) address_split = address.split(':') last_msg = ( 'Failed finding host in address={} ' '- please use: -a <fqdn:port>').format( address) host = address_split[0] last_msg = ( 'Failed finding integer port in address={} ' '- please use: -a <fqdn:port>').format( address) port = int(address_split[1]) except Exception as e: log.error(( 'Failed to parse -a {} for the ' 'splunk host address: {} which threw an ' 'ex={}').format( address, last_msg, e)) sys.exit(1) # end of try ex if verbose: log.info(( 'connecting {}@{}:{}').format( user, host, port)) req_body = None if datafile: if verbose: log.info(( 'loading request in datafile={}').format( datafile)) with open(datafile, 'r') as f: req_body = json.loads(f.read()) servercontent = requests.post( 'https://{}/services/auth/login'.format( admin_address), headers={}, verify=False, data={ 'username': user, 'password': password }) token = parseString( servercontent.text).getElementsByTagName( 'sessionKey')[0].childNodes[0].nodeValue log.info(( 'user={} has token={}').format( user, token)) if verbose: if req_body: log.info('starting with req_body') else: log.info('starting') if verbose: log.info('done')
0.000242
async def _fair_get_in_peer(self): """ Get the first available available inbound peer in a fair manner. :returns: A `Peer` inbox, whose inbox is guaranteed not to be empty (and thus can be read from without blocking). """ peer = None while not peer: await self._wait_peers() # This rotates the list, implementing fair-queuing. peers = list(self._in_peers) tasks = [asyncio.ensure_future(self._in_peers.wait_change())] tasks.extend([ asyncio.ensure_future( p.inbox.wait_not_empty(), loop=self.loop, ) for p in peers ]) try: done, pending = await asyncio.wait( tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop, ) finally: for task in tasks: task.cancel() tasks.pop(0) # pop the wait_change task. peer = next( ( p for task, p in zip(tasks, peers) if task in done and not task.cancelled() ), None, ) return peer
0.001483
def _get_macd(df):
    """ Moving Average Convergence Divergence

    This function will initialize all following columns.

    MACD Line (macd): (12-day EMA - 26-day EMA)
    Signal Line (macds): 9-day EMA of MACD Line
    MACD Histogram (macdh): MACD Line - Signal Line

    :param df: data
    :return: None
    """
    fast = df['close_12_ema']
    slow = df['close_26_ema']
    df['macd'] = fast - slow
    df['macds'] = df['macd_9_ema']
    df['macdh'] = (df['macd'] - df['macds'])
    log.critical("NOTE: Behavior of MACDH calculation has changed as of "
                 "July 2017 - it is now 1/2 of previous calculated values")
    del df['macd_9_ema']
    del fast
    del slow
0.002567
def get_objective_admin_session_for_objective_bank(self, objective_bank_id): """Gets the ``OsidSession`` associated with the objective admin service for the given objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` of the objective bank return: (osid.learning.ObjectiveAdminSession) - ``an _objective_admin_session`` raise: NotFound - ``objective_bank_id`` not found raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_objective_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_objective_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.ObjectiveAdminSession(objective_bank_id, runtime=self._runtime)
0.004108
def pd_plot_data(self): """ Plot data for phase diagram. 2-comp - Full hull with energies 3/4-comp - Projection into 2D or 3D Gibbs triangle. Returns: (lines, stable_entries, unstable_entries): - lines is a list of list of coordinates for lines in the PD. - stable_entries is a {coordinate : entry} for each stable node in the phase diagram. (Each coordinate can only have one stable phase) - unstable_entries is a {entry: coordinates} for all unstable nodes in the phase diagram. """ pd = self._pd entries = pd.qhull_entries data = np.array(pd.qhull_data) lines = [] stable_entries = {} for line in self.lines: entry1 = entries[line[0]] entry2 = entries[line[1]] if self._dim < 3: x = [data[line[0]][0], data[line[1]][0]] y = [pd.get_form_energy_per_atom(entry1), pd.get_form_energy_per_atom(entry2)] coord = [x, y] elif self._dim == 3: coord = triangular_coord(data[line, 0:2]) else: coord = tet_coord(data[line, 0:3]) lines.append(coord) labelcoord = list(zip(*coord)) stable_entries[labelcoord[0]] = entry1 stable_entries[labelcoord[1]] = entry2 all_entries = pd.all_entries all_data = np.array(pd.all_entries_hulldata) unstable_entries = dict() stable = pd.stable_entries for i in range(0, len(all_entries)): entry = all_entries[i] if entry not in stable: if self._dim < 3: x = [all_data[i][0], all_data[i][0]] y = [pd.get_form_energy_per_atom(entry), pd.get_form_energy_per_atom(entry)] coord = [x, y] elif self._dim == 3: coord = triangular_coord([all_data[i, 0:2], all_data[i, 0:2]]) else: coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3], all_data[i, 0:3]]) labelcoord = list(zip(*coord)) unstable_entries[entry] = labelcoord[0] return lines, stable_entries, unstable_entries
0.000819
def _read_interleaved(self, f, data_objects):
    """Read interleaved data that doesn't have a numpy type"""
    log.debug("Reading interleaved data point by point")
    object_data = {}
    points_added = {}
    for obj in data_objects:
        object_data[obj.path] = obj._new_segment_data()
        points_added[obj.path] = 0
    while any([points_added[o.path] < o.number_values
               for o in data_objects]):
        for obj in data_objects:
            if points_added[obj.path] < obj.number_values:
                object_data[obj.path][points_added[obj.path]] = (
                    obj._read_value(f))
                points_added[obj.path] += 1
    for obj in data_objects:
        obj.tdms_object._update_data(object_data[obj.path])
0.002448
async def listWorkers(self, *args, **kwargs):
    """
    Get a list of all active workers of a workerType

    Get a list of all active workers of a workerType.

    `listWorkers` allows a response to be filtered by quarantined and
    non quarantined workers. To filter the query, you should call the
    end-point with `quarantined` as a query-string option with a true
    or false value.

    The response is paged. If this end-point returns a `continuationToken`,
    you should call the end-point again with the `continuationToken` as a
    query-string option. By default this end-point will list up to 1000
    workers in a single page. You may limit this with the query-string
    parameter `limit`.

    This method gives output: ``v1/list-workers-response.json#``

    This method is ``experimental``
    """

    return await self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
0.008412
def _get_json(self,
              path,
              params=None,
              base=JIRA_BASE_URL,
              ):
    """Get the json for a given path and params.

    :param path: The subpath required
    :type path: str
    :param params: Parameters to filter the json query.
    :type params: Optional[Dict[str, Any]]
    :param base: The Base JIRA URL, defaults to the instance base.
    :type base: Optional[str]

    :rtype: Union[Dict[str, Any], List[Dict[str, str]]]
    """
    url = self._get_url(path, base)
    r = self._session.get(url, params=params)
    try:
        r_json = json_loads(r)
    except ValueError as e:
        logging.error("%s\n%s" % (e, r.text))
        raise e
    return r_json
0.007491
def stream(self, opNames=[], *args, **kwargs):
    """ Yield specific operations (e.g. comments) only

    :param array opNames: List of operations to filter for
    :param int start: Start at this block
    :param int stop: Stop at this block
    :param str mode: We here have the choice between
         * "head": the last block
         * "irreversible": the block that is confirmed by 2/3 of all
           block producers and is thus irreversible!

    The dict output is formatted such that ``type`` carries the
    operation type, timestamp and block_num are taken from the
    block the operation was stored in and the other keys depend on
    the actual operation.
    """
    for op in self.ops(**kwargs):
        if not opNames or op["op"][0] in opNames:
            r = {
                "type": op["op"][0],
                "timestamp": op.get("timestamp"),
                "block_num": op.get("block_num"),
            }
            r.update(op["op"][1])
            yield r
0.001787
def periodic_corr1d(sp_reference, sp_offset,
                    fminmax=None, naround_zero=None,
                    norm_spectra=False,
                    plottitle=None,
                    pdf=None,
                    debugplot=0):
    """Periodic correlation between two spectra, implemented using FFT.

    Parameters
    ----------
    sp_reference : numpy array
        Reference spectrum.
    sp_offset : numpy array
        Spectrum which offset is going to be measured relative to the
        reference spectrum.
    fminmax : tuple of floats or None
        Minimum and maximum frequencies to be used. If None, no
        frequency filtering is employed.
    naround_zero : int
        Half width of the window (around zero offset) to look for
        the correlation peak. If None, the whole correlation
        spectrum is employed. Otherwise, the peak will be sought
        in the interval [-naround_zero, +naround_zero].
    norm_spectra : bool
        If True, the filtered spectra are normalized before computing
        the correlation function. This can be important when comparing
        the peak value of this function using different spectra.
    plottitle : str
        Optional plot title.
    pdf : PdfFile object or None
        If not None, output is sent to PDF file.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    offset : float
        Offset between the two input spectra.
    fpeak : float
        Maximum of the cross-correlation function.

    """

    # protections
    if sp_reference.ndim != 1 or sp_offset.ndim != 1:
        raise ValueError("Invalid array dimensions")
    if sp_reference.shape != sp_offset.shape:
        raise ValueError("x and y shapes are different")

    if plottitle is None:
        plottitle = ' '

    naxis1 = len(sp_reference)

    xcorr = np.arange(naxis1)
    naxis1_half = int(naxis1 / 2)
    for i in range(naxis1_half):
        xcorr[i + naxis1_half] -= naxis1
    isort = xcorr.argsort()
    xcorr = xcorr[isort]

    if fminmax is not None:
        fmin, fmax = fminmax
        sp_reference_filtmask = filtmask(sp_reference, fmin=fmin, fmax=fmax,
                                         debugplot=debugplot)
        sp_offset_filtmask = filtmask(sp_offset, fmin=fmin, fmax=fmax,
                                      debugplot=debugplot)
        if abs(debugplot) in (21, 22):
            from numina.array.display.matplotlib_qt import plt
            xdum = np.arange(naxis1) + 1
            # reference spectrum
            ax = ximplotxy(xdum, sp_reference, show=False,
                           title='reference spectrum',
                           label='original spectrum')
            ax.plot(xdum, sp_reference_filtmask,
                    label='filtered and masked spectrum')
            ax.legend()
            plt.show()
            # offset spectrum
            ax = ximplotxy(xdum, sp_offset, show=False,
                           title='offset spectrum',
                           label='original spectrum')
            ax.plot(xdum, sp_offset_filtmask,
                    label='filtered and masked spectrum')
            ax.legend()
            plt.show()
    else:
        sp_reference_filtmask = sp_reference
        sp_offset_filtmask = sp_offset

    if (abs(debugplot) in (21, 22)) or (pdf is not None):
        xdum = np.arange(naxis1) + 1
        ax = ximplotxy(xdum, sp_reference_filtmask, show=False,
                       title=plottitle,
                       label='reference spectrum')
        ax.plot(xdum, sp_offset_filtmask, label='offset spectrum')
        ax.legend()
        if pdf is not None:
            pdf.savefig()
        else:
            pause_debugplot(debugplot=debugplot, pltshow=True)

    # normalize spectra if required
    if norm_spectra:
        sp_reference_norm = np.copy(sp_reference_filtmask)
        sp_offset_norm = np.copy(sp_offset_filtmask)
        sp_dum = np.concatenate((sp_reference_norm, sp_offset_norm))
        spmin = min(sp_dum)
        spmax = max(sp_dum)
        idum = np.where(sp_reference_norm > 0)
        sp_reference_norm[idum] /= spmax
        idum = np.where(sp_reference_norm < 0)
        sp_reference_norm[idum] /= -spmin
        idum = np.where(sp_offset_norm > 0)
        sp_offset_norm[idum] /= spmax
        idum = np.where(sp_offset_norm < 0)
        sp_offset_norm[idum] /= -spmin
        if (abs(debugplot) in (21, 22)) or (pdf is not None):
            xdum = np.arange(naxis1) + 1
            ax = ximplotxy(xdum, sp_reference_norm, show=False,
                           title=plottitle + ' [normalized]',
                           label='reference spectrum')
            ax.plot(xdum, sp_offset_norm, label='offset spectrum')
            ax.legend()
            if pdf is not None:
                pdf.savefig()
            else:
                pause_debugplot(debugplot=debugplot, pltshow=True)
    else:
        sp_reference_norm = sp_reference_filtmask
        sp_offset_norm = sp_offset_filtmask

    corr = np.fft.ifft(np.fft.fft(sp_offset_norm) *
                       np.fft.fft(sp_reference_norm).conj()).real
    corr = corr[isort]

    # determine correlation peak
    if naround_zero is None:
        iminpeak = 0
        imaxpeak = naxis1
    else:
        iminpeak = max(int(naxis1 / 2 - naround_zero), 0)
        imaxpeak = min(int(naxis1 / 2 + naround_zero), naxis1)
    ixpeak = corr[iminpeak:imaxpeak].argmax() + iminpeak

    # fit correlation peak with 2nd order polynomial
    nfit = 7
    nmed = nfit // 2
    imin = ixpeak - nmed
    imax = ixpeak + nmed
    lpeak_ok = True
    if imin < 0 or imax > len(corr):
        x_refined_peak = 0
        y_refined_peak = 0
        lpeak_ok = False
        poly_peak = Polynomial([0.0])
    else:
        x_fit = np.arange(-nmed, nmed + 1, dtype=np.float)
        y_fit = corr[imin:(imax+1)]
        poly_peak = Polynomial.fit(x_fit, y_fit, 2)
        poly_peak = Polynomial.cast(poly_peak)
        coef = poly_peak.coef
        if coef[2] != 0:
            x_refined_peak = -coef[1] / (2.0 * coef[2])
        else:
            x_refined_peak = 0.0
        y_refined_peak = poly_peak(x_refined_peak)
    x_refined_peak += ixpeak

    offset = x_refined_peak - naxis1_half
    fpeak = y_refined_peak

    if (abs(debugplot) % 10 != 0) or (pdf is not None):
        ax = ximplotxy(xcorr, corr,
                       xlabel='offset (pixels)',
                       ylabel='cross-correlation function',
                       title=plottitle,
                       xlim=(-naxis1/2, naxis1/2),
                       show=False)
        ax.axvline(offset, color='grey', linestyle='dashed')
        coffset = "(offset:{0:6.2f} pixels)".format(offset)
        ax.text(0.01, 0.99, coffset,
                horizontalalignment='left',
                verticalalignment='top',
                transform=ax.transAxes)
        if naround_zero is not None:
            cwindow = "(peak region: [{},{}] pixels)".format(-naround_zero,
                                                             naround_zero)
            ax.text(0.01, 0.93, cwindow,
                    horizontalalignment='left',
                    verticalalignment='top',
                    transform=ax.transAxes)
        # inset plot
        inset_ax = inset_axes(
            ax,
            width="40%",
            height="40%",
            loc=1
        )
        inset_ax.plot(xcorr, corr)
        if naround_zero is not None:
            inset_ax.set_xlim([-naround_zero, naround_zero])
        else:
            inset_ax.set_xlim([-50, 50])
        if lpeak_ok:
            xplot = np.arange(-nmed, nmed, 0.5)
            yplot = poly_peak(xplot)
            xplot += ixpeak - naxis1_half
            inset_ax.plot(xplot, yplot, '-')
            inset_ax.plot([x_refined_peak - naxis1_half], [y_refined_peak], 'o')
        inset_ax.axvline(offset, color='grey', linestyle='dashed')
        if pdf is not None:
            pdf.savefig()
        else:
            pause_debugplot(debugplot=debugplot,
                            tight_layout=False,
                            pltshow=True)

    return offset, fpeak
0.00012
def ed25519_public_key_from_string(string):
    """Create an ed25519 public key from ``string``, which is a seed.

    Args:
        string (str): the string to use as a seed.

    Returns:
        Ed25519PublicKey: the public key

    """
    try:
        return Ed25519PublicKey.from_public_bytes(
            base64.b64decode(string)
        )
    except (UnsupportedAlgorithm, Base64Error) as exc:
        raise ScriptWorkerEd25519Error("Can't create Ed25519PublicKey: {}!".format(str(exc)))
0.00404
def apply(query, collection=None):
    """Enhance the query restricting not permitted collections.

    Get the permitted restricted collection for the current user from the
    user_info object and all the restriced collections from the
    restricted_collection_cache.
    """
    if not collection:
        return query

    result_tree = create_collection_query(collection)
    return AndOp(query, result_tree)
0.002421
def get(*dataset, **kwargs): ''' Displays properties for the given datasets. dataset : string name of snapshot(s), filesystem(s), or volume(s) properties : string comma-separated list of properties to list, defaults to all recursive : boolean recursively list children depth : int recursively list children to depth fields : string comma-separated list of fields to include, the name and property field will always be added type : string comma-separated list of types to display, where type is one of filesystem, snapshot, volume, bookmark, or all. source : string comma-separated list of sources to display. Must be one of the following: local, default, inherited, temporary, and none. The default value is all sources. parsable : boolean display numbers in parsable (exact) values (default = True) .. versionadded:: 2018.3.0 .. note:: If no datasets are specified, then the command displays properties for all datasets on the system. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.get salt '*' zfs.get myzpool/mydataset [recursive=True|False] salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False] salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1 ''' ## Configure command # NOTE: initialize the defaults flags = ['-H'] opts = {} # NOTE: set extra config from kwargs if kwargs.get('depth', False): opts['-d'] = kwargs.get('depth') elif kwargs.get('recursive', False): flags.append('-r') fields = kwargs.get('fields', 'value,source').split(',') if 'name' in fields: # ensure name is first fields.remove('name') if 'property' in fields: # ensure property is second fields.remove('property') fields.insert(0, 'name') fields.insert(1, 'property') opts['-o'] = ",".join(fields) if kwargs.get('type', False): opts['-t'] = kwargs.get('type') if kwargs.get('source', False): opts['-s'] = kwargs.get('source') # NOTE: set property_name property_name = kwargs.get('properties', 'all') ## Get properties res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='get', flags=flags, opts=opts, property_name=property_name, target=list(dataset), ), python_shell=False, ) ret = __utils__['zfs.parse_command_result'](res) if res['retcode'] == 0: for ds in res['stdout'].splitlines(): ds_data = OrderedDict(list(zip( fields, ds.split("\t") ))) if 'value' in ds_data: if kwargs.get('parsable', True): ds_data['value'] = __utils__['zfs.from_auto']( ds_data['property'], ds_data['value'], ) else: ds_data['value'] = __utils__['zfs.to_auto']( ds_data['property'], ds_data['value'], convert_to_human=True, ) if ds_data['name'] not in ret: ret[ds_data['name']] = OrderedDict() ret[ds_data['name']][ds_data['property']] = ds_data del ds_data['name'] del ds_data['property'] return ret
0.002222
def save_results(self, output_dir='.', prefix='', prefix_sep='_',
                 image_list=None):
    """ Write out any images generated by the meta-analysis.

    Args:
        output_dir (str): folder to write images to
        prefix (str): all image files will be prepended with this string
        prefix_sep (str): glue between the prefix and rest of filename
        image_list (list): optional list of images to save--e.g.,
            ['pFgA_z', 'pAgF']. If image_list is None (default), will
            save all images.
    """
    if prefix == '':
        prefix_sep = ''

    if not exists(output_dir):
        makedirs(output_dir)

    logger.debug("Saving results...")
    if image_list is None:
        image_list = self.images.keys()
    for suffix, img in self.images.items():
        if suffix in image_list:
            filename = prefix + prefix_sep + suffix + '.nii.gz'
            outpath = join(output_dir, filename)
            imageutils.save_img(img, outpath, self.dataset.masker)
0.002732
def symorth(S):
    "Symmetric orthogonalization"
    E, U = np.linalg.eigh(S)
    n = len(E)
    Shalf = np.identity(n, 'd')
    for i in range(n):
        Shalf[i, i] /= np.sqrt(E[i])
    return simx(Shalf, U, True)
0.028302
def from_native(cls, t):
    """ Convert from a native Python `datetime.time` value.
    """
    second = (1000000 * t.second + t.microsecond) / 1000000
    return Time(t.hour, t.minute, second, t.tzinfo)
0.009091
def power_off(self, si, logger, session, vcenter_data_model, vm_uuid, resource_fullname): """ Power off of a vm :param vcenter_data_model: vcenter model :param si: Service Instance :param logger: :param session: :param vcenter_data_model: vcenter_data_model :param vm_uuid: the uuid of the vm :param resource_fullname: the full name of the deployed app resource :return: """ logger.info('retrieving vm by uuid: {0}'.format(vm_uuid)) vm = self.pv_service.find_by_uuid(si, vm_uuid) if vm.summary.runtime.powerState == 'poweredOff': logger.info('vm already powered off') task_result = 'Already powered off' else: # hard power off logger.info('{0} powering of vm'.format(vcenter_data_model.shutdown_method)) if vcenter_data_model.shutdown_method.lower() != 'soft': task = vm.PowerOff() task_result = self.synchronous_task_waiter.wait_for_task(task=task, logger=logger, action_name='Power Off') else: if vm.guest.toolsStatus == 'toolsNotInstalled': logger.warning('VMWare Tools status on virtual machine \'{0}\' are not installed'.format(vm.name)) raise ValueError('Cannot power off the vm softly because VMWare Tools are not installed') if vm.guest.toolsStatus == 'toolsNotRunning': logger.warning('VMWare Tools status on virtual machine \'{0}\' are not running'.format(vm.name)) raise ValueError('Cannot power off the vm softly because VMWare Tools are not running') vm.ShutdownGuest() task_result = 'vm powered off' return task_result
0.006101
def signal_optimiser(self, analytes, min_points=5, threshold_mode='kde_first_max', threshold_mult=1., x_bias=0, weights=None, filt=True, mode='minimise'): """ Optimise data selection based on specified analytes. Identifies the longest possible contiguous data region in the signal where the relative standard deviation (std) and concentration of all analytes is minimised. Optimisation is performed via a grid search of all possible contiguous data regions. For each region, the mean std and mean scaled analyte concentration ('amplitude') are calculated. The size and position of the optimal data region are identified using threshold std and amplitude values. Thresholds are derived from all calculated stds and amplitudes using the method specified by `threshold_mode`. For example, using the 'kde_max' method, a probability density function (PDF) is calculated for std and amplitude values, and the threshold is set as the maximum of the PDF. These thresholds are then used to identify the size and position of the longest contiguous region where the std is below the threshold, and the amplitude is either below the threshold. All possible regions of the data that have at least `min_points` are considered. For a graphical demonstration of the action of signal_optimiser, use `optimisation_plot`. Parameters ---------- d : latools.D object An latools data object. analytes : str or array-like Which analytes to consider. min_points : int The minimum number of contiguous points to consider. threshold_mode : str The method used to calculate the optimisation thresholds. Can be 'mean', 'median', 'kde_max' or 'bayes_mvs', or a custom function. If a function, must take a 1D array, and return a single, real number. weights : array-like of length len(analytes) An array of numbers specifying the importance of each analyte considered. Larger number makes the analyte have a greater effect on the optimisation. Default is None. """ params = locals() del(params['self']) setn = self.filt.maxset + 1 if isinstance(analytes, str): analytes = [analytes] # get filter if filt is not False: ind = (self.filt.grab_filt(filt, analytes)) else: ind = np.full(self.Time.shape, True) errmsg = [] ofilt = [] self.opt = {} for i in range(self.n): nind = ind & (self.ns == i + 1) self.opt[i + 1], err = signal_optimiser(self, analytes=analytes, min_points=min_points, threshold_mode=threshold_mode, threshold_mult=threshold_mult, weights=weights, ind=nind, x_bias=x_bias, mode=mode) if err == '': ofilt.append(self.opt[i + 1].filt) else: errmsg.append(self.sample + '_{:.0f}: '.format(i + 1) + err) if len(ofilt) > 0: ofilt = np.apply_along_axis(any, 0, ofilt) name = 'optimise_' + '_'.join(analytes) self.filt.add(name=name, filt=ofilt, info="Optimisation filter to minimise " + ', '.join(analytes), params=params, setn=setn) if len(errmsg) > 0: return '\n'.join(errmsg) else: return ''
0.005644
def users(self, params):
    """
    This is a private API and requires whitelisting from Twitter.

    This endpoint will allow partners to add, update and remove users from a given
    tailored_audience_id. The endpoint will also accept multiple user identifier types
    per user as well.
    """
    resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id)
    headers = {'Content-Type': 'application/json'}
    response = Request(self.account.client,
                       'post',
                       resource,
                       headers=headers,
                       body=json.dumps(params)).perform()
    success_count = response.body['data']['success_count']
    total_count = response.body['data']['total_count']
    return (success_count, total_count)
0.005821
def convert(self, element):
    """Convert an element to a chainlink"""
    if isinstance(element, self.base_link_type):
        return element
    for converter in self.converters:
        link = converter(element)
        if link is not NotImplemented:
            return link
    raise TypeError('%r cannot be converted to a chainlink' % element)
0.005249
def counter(self, position=None, **kwargs): """ Args: position(int): Line number counting from the bottom of the screen kwargs(dict): Any additional :py:term:`keyword arguments<keyword argument>` are passed to :py:class:`Counter` Returns: :py:class:`Counter`: Instance of counter class Get a new progress bar instance If ``position`` is specified, the counter's position can change dynamically if additional counters are called without a ``position`` argument. """ for key, val in self.defaults.items(): if key not in kwargs: kwargs[key] = val kwargs['manager'] = self counter = self.counter_class(**kwargs) if position is None: toRefresh = [] if self.counters: pos = 2 for cter in reversed(self.counters): if self.counters[cter] < pos: toRefresh.append(cter) cter.clear(flush=False) self.counters[cter] = pos pos += 1 self.counters[counter] = 1 self._set_scroll_area() for cter in reversed(toRefresh): cter.refresh(flush=False) self.stream.flush() elif position in self.counters.values(): raise ValueError('Counter position %d is already occupied.' % position) elif position > self.height: raise ValueError('Counter position %d is greater than terminal height.' % position) else: self.counters[counter] = position return counter
0.003507
def get_distance( self, l_motor: float, r_motor: float, tm_diff: float ) -> typing.Tuple[float, float]: """ Given motor values and the amount of time elapsed since this was last called, retrieves the x,y,angle that the robot has moved. Pass these values to :meth:`PhysicsInterface.distance_drive`. To update your encoders, use the ``l_position`` and ``r_position`` attributes of this object. :param l_motor: Left motor value (-1 to 1); -1 is forward :param r_motor: Right motor value (-1 to 1); 1 is forward :param tm_diff: Elapsed time since last call to this function :returns: x travel, y travel, angle turned (radians) .. note:: If you are using more than 2 motors, it is assumed that all motors on each side are set to the same speed. Only pass in one of the values from each side """ # This isn't quite right, the right way is to use matrix math. However, # this is Good Enough for now... x = 0 y = 0 angle = 0 # split the time difference into timestep_ms steps total_time = int(tm_diff * 100000) steps = total_time // self._timestep remainder = total_time % self._timestep step = self._timestep / 100000.0 if remainder: last_step = remainder / 100000.0 steps += 1 else: last_step = step while steps != 0: if steps == 1: tm_diff = last_step else: tm_diff = step steps -= 1 l = self._lmotor.compute(-l_motor, tm_diff) r = self._rmotor.compute(r_motor, tm_diff) # Tank drive motion equations velocity = (l + r) * 0.5 # Thanks to Tyler Veness for fixing the rotation equation, via conservation # of angular momentum equations # -> omega = b * m * (l - r) / J rotation = self._bm * (l - r) / self._inertia distance = velocity * tm_diff turn = rotation * tm_diff x += distance * math.cos(angle) y += distance * math.sin(angle) angle += turn return x, y, angle
0.004203
def generate_local_url(self, js_name):
    """
    Generate the local url for a js file.

    :param js_name:
    :return:
    """
    host = self._settings['local_host'].format(**self._host_context).rstrip('/')
    return '{}/{}.js'.format(host, js_name)
0.010638
def copyTargets(self, arr):
    """
    Copies the targets of the argument array into the self.target attribute.
    """
    array = Numeric.array(arr)
    if not len(array) == self.size:
        raise LayerError('Mismatched target size and layer size in call to copyTargets()', \
                         (len(array), self.size))
    # Removed this because both propagate and backprop (via compute_error) set targets
    #if self.verify and not self.targetSet == 0:
    #    if not self.warningIssued:
    #        print 'Warning! Targets have already been set and no intervening backprop() was called.', \
    #              (self.name, self.targetSet)
    #        print "(Warning will not be issued again)"
    #        self.warningIssued = 1
    if Numeric.add.reduce(array < self.minTarget) or Numeric.add.reduce(array > self.maxTarget):
        print(self.name, self.minTarget, self.maxTarget)
        raise LayerError('Targets for this layer are out of range.', (self.name, array))
    self.target = array
    self.targetSet = 1
0.009982
def users_profile_get(self, **kwargs) -> SlackResponse: """Retrieves a user's profile information.""" self._validate_xoxp_token() return self.api_call("users.profile.get", http_verb="GET", params=kwargs)
0.013216
def rgb_to_vector(image): """ Convert an RGB ANTsImage to a Vector ANTsImage Arguments --------- image : ANTsImage RGB image to be converted Returns ------- ANTsImage Example ------- >>> import ants >>> mni = ants.image_read(ants.get_data('mni')) >>> mni_rgb = mni.scalar_to_rgb() >>> mni_vector = mni.rgb_to_vector() >>> mni_rgb2 = mni.vector_to_rgb() """ if image.pixeltype != 'unsigned char': image = image.clone('unsigned char') idim = image.dimension libfn = utils.get_lib_fn('RgbToVector%i' % idim) new_ptr = libfn(image.pointer) new_img = iio.ANTsImage(pixeltype=image.pixeltype, dimension=image.dimension, components=3, pointer=new_ptr, is_rgb=False) return new_img
0.003708
def insert(self, val): """\ Inserts a value and returns a :class:`Pair <Pair>`. If the generated key exists or memcache cannot store it, a :class:`KeyInsertError <shorten.KeyInsertError>` is raised (or a :class:`TokenInsertError <shorten.TokenInsertError>` if a token exists or cannot be stored). """ key, token, formatted_key, formatted_token = self.next_formatted_pair() if self.has_key(key): raise KeyInsertError(key) if self.has_token(token): raise TokenInsertError(token) # Memcache is down or read-only if not self._mc.add(formatted_key, (val, token)): raise KeyInsertError(key, 'key could not be stored') if not self._mc.add(formatted_token, key): raise TokenInsertError(token, 'token could not be stored') return Pair(key, token)
0.020525
def combine_hex(data): ''' Combine list of integer values to one big integer ''' output = 0x00 for i, value in enumerate(reversed(data)): output |= (value << i * 8) return output
0.00495
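A quick self-contained check of the byte-combining above: the list is treated as big-endian bytes, so on Python 3 the result matches int.from_bytes (the function is copied here only to make the check runnable).

def combine_hex(data):
    output = 0x00
    for i, value in enumerate(reversed(data)):
        output |= (value << i * 8)
    return output

data = [0x12, 0x34, 0x56]
assert combine_hex(data) == 0x123456
assert combine_hex(data) == int.from_bytes(bytes(data), 'big')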
def distribute_javaclasses(self, javaclass_dir, dest_dir="src"): '''Copy existing javaclasses from build dir to current dist dir.''' info('Copying java files') ensure_dir(dest_dir) for filename in glob.glob(javaclass_dir): shprint(sh.cp, '-a', filename, dest_dir)
0.006515
def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist)
0.003413
def remove(self, fieldspec):
        """
        Removes fields or subfields according to `fieldspec`.

        If a non-control field subfield removal leaves no other subfields,
        delete the field entirely.
        """
        pattern = r'(?P<field>[^.]+)(\.(?P<subfield>[^.]+))?'
        match = re.match(pattern, fieldspec)

        if not match:
            return None

        grp = match.groupdict()
        for field in self.get_fields(grp['field']):
            if grp['subfield']:
                updated = []
                for code, value in pairwise(field.subfields):
                    if code != grp['subfield']:
                        updated += [code, value]

                # if we removed the last subfield entry,
                # remove the whole field, too
                if not updated:
                    self.remove_field(field)
                else:
                    field.subfields = updated
            else:
                # no subfield was specified (e.g. a control field):
                # remove the whole field
                self.remove_field(field)
0.001934
def mask_xdata_with_shape(self, shape: DataAndMetadata.ShapeType) -> DataAndMetadata.DataAndMetadata: """Return the mask created by this graphic as extended data. .. versionadded:: 1.0 Scriptable: Yes """ mask = self._graphic.get_mask(shape) return DataAndMetadata.DataAndMetadata.from_data(mask)
0.008671
def traverse_dict_and_list(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    '''
    Traverse a dict or list using a colon-delimited (or otherwise delimited,
    using the 'delimiter' param) target string. The target 'foo:bar:0' will
    return data['foo']['bar'][0] if this value exists, and will otherwise
    return the value given in the default argument.

    The target type is determined automatically from the data: for data like
    {'foo': {'bar': ['baz']}} the target 'foo:bar:0' returns
    data['foo']['bar'][0], while for data like {'foo': {'bar': {'0': 'baz'}}}
    it returns data['foo']['bar']['0'].
    '''
    ptr = data
    for each in key.split(delimiter):
        if isinstance(ptr, list):
            try:
                idx = int(each)
            except ValueError:
                embed_match = False
                # Index was not numeric, let's look at any embedded dicts
                for embedded in (x for x in ptr if isinstance(x, dict)):
                    try:
                        ptr = embedded[each]
                        embed_match = True
                        break
                    except KeyError:
                        pass
                if not embed_match:
                    # No embedded dicts matched, return the default
                    return default
            else:
                try:
                    ptr = ptr[idx]
                except IndexError:
                    return default
        else:
            try:
                ptr = ptr[each]
            except (KeyError, TypeError):
                return default
    return ptr
0.001244
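A trimmed-down sketch of the same colon-delimited traversal, without the embedded-dict fallback, with a few expected results; the helper name is illustrative.

def traverse(data, key, default=None, delimiter=':'):
    ptr = data
    for part in key.split(delimiter):
        if isinstance(ptr, list):
            try:
                ptr = ptr[int(part)]
            except (ValueError, IndexError):
                return default
        else:
            try:
                ptr = ptr[part]
            except (KeyError, TypeError):
                return default
    return ptr

data = {'foo': {'bar': ['baz', {'qux': 42}]}}
assert traverse(data, 'foo:bar:0') == 'baz'
assert traverse(data, 'foo:bar:1:qux') == 42
assert traverse(data, 'foo:missing', default='n/a') == 'n/a'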
def filter_query(key, expression): """Filter documents with a key that satisfies an expression.""" if (isinstance(expression, dict) and len(expression) == 1 and list(expression.keys())[0].startswith('$')): compiled_expression = compile_query(expression) elif callable(expression): def _filter(index, expression=expression): result = [store_key for value, store_keys in index.get_index().items() if expression(value) for store_key in store_keys] return result compiled_expression = _filter else: compiled_expression = expression def _get(query_function, key=key, expression=compiled_expression): """Get document key and check against expression.""" return query_function(key, expression) return _get
0.001129
def show_inventory(self, pretty=False): """ Satisfies the ``--list`` portion of ansible's external inventory API. Allows ``bang`` to be used as an external inventory script, for example when running ad-hoc ops tasks. For more details, see: http://ansible.cc/docs/api.html#external-inventory-scripts """ inv_lists = copy.deepcopy(self.groups_and_vars.lists) # sort the host lists to help consumers of the inventory (e.g. ansible # playbooks) for l in inv_lists.values(): l.sort() # new in ansible 1.3: add hostvars directly into ``--list`` output inv_lists['_meta'] = { 'hostvars': self.groups_and_vars.dicts.copy() } if pretty: kwargs = { 'sort_keys': True, 'indent': 2, 'separators': (',', ': '), } else: kwargs = {} print json.dumps(inv_lists, **kwargs)
0.002918
def filter(self, f: Callable[[A], bool]) -> 'List[A]':
        """doufo.List.filter: filter this `List` with the predicate `f`.

        Args:
            f (`Callable[[A], bool]`): predicate returning `True` for elements to keep

        Returns:
            `List[A]`: a new `List` containing only the elements for which `f` is truthy
        """
        return List([x for x in self.unbox() if f(x)])
0.01995
def get_default_classes(self):
        """Returns a list of the default classes for the tab. Defaults to an
        empty list (``[]``); however, additional classes may be added depending
        on the state of the tab as follows:

        If the tab is the active tab for the tab group, the class ``"active"``
        will be added.

        If the tab is not enabled, the class ``"disabled"`` will be added.
        """
        default_classes = super(Tab, self).get_default_classes()
        if self.is_active():
            default_classes.extend(CSS_ACTIVE_TAB_CLASSES)
        if not self._enabled:
            default_classes.extend(CSS_DISABLED_TAB_CLASSES)
        return default_classes
0.002751
def _native_size(self): """ A (width, height) 2-tuple representing the native dimensions of the image in EMU, calculated based on the image DPI value, if present, assuming 72 dpi as a default. """ EMU_PER_INCH = 914400 horz_dpi, vert_dpi = self._dpi width_px, height_px = self._px_size width = EMU_PER_INCH * width_px / horz_dpi height = EMU_PER_INCH * height_px / vert_dpi return width, height
0.004132
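The same EMU arithmetic as a standalone check: a 300 x 200 px image at 96 dpi is 3.125 x 2.083 inches, i.e. 2,857,500 x 1,905,000 EMU.

EMU_PER_INCH = 914400

def native_size(px_size, dpi):
    (width_px, height_px), (horz_dpi, vert_dpi) = px_size, dpi
    return EMU_PER_INCH * width_px / horz_dpi, EMU_PER_INCH * height_px / vert_dpi

print(native_size((300, 200), (96, 96)))  # (2857500.0, 1905000.0)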
def vgremove(vgname): ''' Remove an LVM volume group CLI Examples: .. code-block:: bash salt mymachine lvm.vgremove vgname salt mymachine lvm.vgremove vgname force=True ''' cmd = ['vgremove', '-f', vgname] out = __salt__['cmd.run'](cmd, python_shell=False) return out.strip()
0.003067
def _reg_sighandlers(self): """ Registers signal handlers to this class. """ # SIGCHLD, so we shutdown when any of the child processes exit _handler = lambda signo, frame: self.shutdown() signal.signal(signal.SIGCHLD, _handler) signal.signal(signal.SIGTERM, _handler)
0.009375
def bulk_update(cls, files, api=None): """ This call updates the details for multiple specified files. Use this call to set new information for the files, thus replacing all existing information and erasing omitted parameters. For each of the specified files, the call sets a new name, new tags and metadata. :param files: List of file instances. :param api: Api instance. :return: List of FileBulkRecord objects. """ if not files: raise SbgError('Files are required.') api = api or cls._API data = { 'items': [ { 'id': file_.id, 'name': file_.name, 'tags': file_.tags, 'metadata': file_.metadata, } for file_ in files ] } logger.info('Updating files in bulk.') response = api.post(url=cls._URL['bulk_update'], data=data) return FileBulkRecord.parse_records(response=response, api=api)
0.00184
def get_unique_constraints(self, connection, table_name, schema=None, **kw): """ Return information about unique constraints in `table_name`. Overrides interface :meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`. """ constraints = self._get_redshift_constraints(connection, table_name, schema, **kw) constraints = [c for c in constraints if c.contype == 'u'] uniques = defaultdict(lambda: defaultdict(dict)) for con in constraints: uniques[con.conname]["key"] = con.conkey uniques[con.conname]["cols"][con.attnum] = con.attname return [ {'name': None, 'column_names': [uc["cols"][i] for i in uc["key"]]} for name, uc in uniques.items() ]
0.003367
def longest_run(da, dim='time'):
    """Return the length of the longest consecutive run of True values.

    Parameters
    ----------
    da : N-dimensional array (boolean)
      Input array
    dim : Xarray dimension (default = 'time')
      Dimension along which to calculate consecutive run

    Returns
    -------
    N-dimensional array (int)
      Length of longest run of True values along dimension
    """
    d = rle(da, dim=dim)
    rl_long = d.max(dim=dim)

    return rl_long
0.001862
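For a 1-D case, the same statistic can be sketched with plain NumPy and itertools; this is not the xarray run-length implementation above, just an illustration of what it computes.

from itertools import groupby

import numpy as np

def longest_true_run(mask):
    # Length of the longest consecutive run of True values in a 1-D sequence.
    return max((sum(1 for _ in grp) for val, grp in groupby(mask) if val), default=0)

hot = np.array([30, 31, 29, 32, 33, 34, 28]) > 29.5
print(longest_true_run(hot))  # 3 -- the 32, 33, 34 stretch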
def split_page_artid(page_artid): """Split page_artid into page_start/end and artid.""" page_start = None page_end = None artid = None if not page_artid: return None, None, None # normalize unicode dashes page_artid = unidecode(six.text_type(page_artid)) if '-' in page_artid: # if it has a dash it's a page range page_range = page_artid.replace('--', '-').split('-') if len(page_range) == 2: page_start, page_end = page_range else: artid = page_artid elif _RE_2_CHARS.search(page_artid): # if it has 2 or more letters it's an article ID artid = page_artid elif len(_RE_CHAR.sub('', page_artid)) >= 5: # if there are more than 5 digits it's an article ID artid = page_artid else: if artid is None: artid = page_artid if page_start is None: page_start = page_artid return page_start, page_end, artid
0.001014
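A reduced sketch of just the page-range branch above (the article-ID heuristics are omitted), with sample outputs; the helper name is made up for illustration.

def split_page_range(page_artid):
    # Return (page_start, page_end) for 'N-M' style input, else (page_artid, None).
    if page_artid and '-' in page_artid:
        parts = page_artid.replace('--', '-').split('-')
        if len(parts) == 2:
            return parts[0], parts[1]
    return page_artid, None

assert split_page_range('513-516') == ('513', '516')
assert split_page_range('513--516') == ('513', '516')
assert split_page_range('042') == ('042', None)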
def _results(self, scheduler_instance_id): """Get the results of the executed actions for the scheduler which instance id is provided Calling this method for daemons that are not configured as passive do not make sense. Indeed, this service should only be exposed on poller and reactionner daemons. :param scheduler_instance_id: instance id of the scheduler :type scheduler_instance_id: string :return: serialized list :rtype: str """ with self.app.lock: res = self.app.get_results_from_passive(scheduler_instance_id) return serialize(res, True)
0.007825
def init_app(state): """ Prepare the Flask application for Flask-Split. :param state: :class:`BlueprintSetupState` instance """ app = state.app app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False) app.config.setdefault('SPLIT_DB_FAILOVER', False) app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', []) app.config.setdefault('SPLIT_ROBOT_REGEX', r""" (?i)\b( Baidu| Gigabot| Googlebot| libwww-perl| lwp-trivial| msnbot| SiteUptime| Slurp| WordPress| ZIBB| ZyBorg )\b """) app.jinja_env.globals.update({ 'ab_test': ab_test, 'finished': finished }) @app.template_filter() def percentage(number): number *= 100 if abs(number) < 10: return "%.1f%%" % round(number, 1) else: return "%d%%" % round(number)
0.001018
def filter_for_probability(key: str, population: Union[pd.DataFrame, pd.Series, Index], probability: Array, index_map: IndexMap=None) -> Union[pd.DataFrame, pd.Series, Index]: """Decide an event outcome for each individual in a population from probabilities. Given a population or its index and an array of associated probabilities for some event to happen, we create and return the sub-population for whom the event occurred. Parameters ---------- key : A string used to create a seed for the random number generation. population : A view on the simulants for which we are determining the outcome of an event. probability : A 1d list of probabilities of the event under consideration occurring which corresponds (i.e. `len(population) == len(probability)`) to the population array passed in. index_map : A mapping between the provided index (which may contain ints, floats, datetimes or any arbitrary combination of them) and an integer index into the random number array. Returns ------- pd.core.generic.PandasObject The sub-population of the simulants for whom the event occurred. The return type will be the same as type(population) """ if population.empty: return population index = population if isinstance(population, pd.Index) else population.index draw = random(key, index, index_map) mask = np.array(draw < probability) return population[mask]
0.005141
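The core masking step, sketched with a plain NumPy generator standing in for the framework's seeded random(key, index, index_map) stream.

import numpy as np
import pandas as pd

population = pd.DataFrame({'age': [10, 25, 40, 60]}, index=[101, 102, 103, 104])
probability = np.array([0.1, 0.5, 0.5, 0.9])

rng = np.random.default_rng(0)
draw = rng.uniform(size=len(population))   # stand-in for random(key, index, index_map)
affected = population[draw < probability]  # sub-population for whom the event occurred
print(affected.index.tolist())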
def resolve_extensions(bot: commands.Bot, name: str) -> list: """ Tries to resolve extension queries into a list of extension names. """ if name.endswith('.*'): module_parts = name[:-2].split('.') path = pathlib.Path(module_parts.pop(0)) for part in module_parts: path = path / part return find_extensions_in(path) if name == '~': return list(bot.extensions.keys()) return [name]
0.002174
def get_plugin_client_settings(self): settings = {} user_path = self.get_plugin_settings_path("User") def_path = self.get_plugin_settings_path("MavensMate") ''' if the default path for settings is none, we're either dealing with a bad client setup or a new client like Atom.io. Let's load the settings from the default cache and optionally allow them to pipe settings in via STDIN ''' if def_path == None: if 'ATOM' in self.plugin_client: file_name = 'atom' elif 'SUBLIME_TEXT' in self.plugin_client: file_name = 'st3' elif 'BRACKETS' in self.plugin_client: file_name = 'brackets' settings['default'] = util.parse_json_from_file(config.base_path + "/"+config.support_dir+"/config/"+file_name+".json") if config.plugin_client_settings != None: settings['user'] = config.plugin_client_settings else: workspace = self.params.get('workspace', None) if self.project_name != None and workspace != None: try: settings['project'] = util.parse_json_from_file(os.path.join(workspace,self.project_name,self.project_name+'.sublime-settings')) except: debug('Project settings could not be loaded') if not user_path == None: try: settings['user'] = util.parse_json_from_file(user_path) except: debug('User settings could not be loaded') if not def_path == None: try: settings['default'] = util.parse_json_from_file(def_path) except: raise MMException('Could not load default MavensMate settings.') if settings == {}: raise MMException('Could not load MavensMate settings. Please ensure they contain valid JSON') return settings
0.009881
def validate(self, value): """ Validate the I{value} is of the correct class. @param value: The value to validate. @type value: any @raise AttributeError: When I{value} is invalid. """ if value is None: return if len(self.classes) and not isinstance(value, self.classes): msg = '"%s" must be: %s' % (self.name, self.classes) raise AttributeError(msg)
0.004444
def random_markov_chain(n, k=None, sparse=False, random_state=None): """ Return a randomly sampled MarkovChain instance with n states, where each state has k states with positive transition probability. Parameters ---------- n : scalar(int) Number of states. k : scalar(int), optional(default=None) Number of states that may be reached from each state with positive probability. Set to n if not specified. sparse : bool, optional(default=False) Whether to store the transition probability matrix in sparse matrix form. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- mc : MarkovChain Examples -------- >>> mc = qe.markov.random_markov_chain(3, random_state=1234) >>> mc.P array([[ 0.19151945, 0.43058932, 0.37789123], [ 0.43772774, 0.34763084, 0.21464142], [ 0.27259261, 0.5073832 , 0.22002419]]) >>> mc = qe.markov.random_markov_chain(3, k=2, random_state=1234) >>> mc.P array([[ 0.19151945, 0.80848055, 0. ], [ 0. , 0.62210877, 0.37789123], [ 0.56227226, 0. , 0.43772774]]) """ P = random_stochastic_matrix(n, k, sparse, format='csr', random_state=random_state) mc = MarkovChain(P) return mc
0.000631
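A bare-NumPy sketch of drawing a dense row-stochastic matrix, which is what random_stochastic_matrix supplies here; the k and sparse handling are not reproduced.

import numpy as np

def random_stochastic(n, random_state=None):
    rng = np.random.default_rng(random_state)
    P = rng.random((n, n))
    return P / P.sum(axis=1, keepdims=True)  # each row sums to 1

P = random_stochastic(3, random_state=1234)
assert np.allclose(P.sum(axis=1), 1.0)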
def _validate_params(self): """ method to sanitize model parameters Parameters --------- None Returns ------- None """ self.distribution = NormalDist(scale=self.scale) super(LinearGAM, self)._validate_params()
0.006689
def build_overviews(source_file, factors=None, minsize=256, external=False, blocksize=256, interleave='pixel', compress='lzw', resampling=Resampling.gauss, **kwargs): """Build overviews at one or more decimation factors for all bands of the dataset. Parameters ------------ source_file : str, file object or pathlib.Path object Source file. factors : list, optional A list of integral overview levels to build. minsize : int, optional Maximum width or height of the smallest overview level. Only taken into account if explicit factors are not specified. Defaults to `256`. external : bool, optional Can be set to `True` to force external overviews in the GeoTIFF (.ovr) format. Default is False. blocksize : int, optional The block size (tile width and height) used for overviews. Should be a power-of-two value between 64 and 4096. Default value is `256`. interleave : str, optional Interleaving. Default value is `pixel`. compress : str, optional Set the compression to use. Default is `lzw`. resampling : rasterio.enums.Resampling Resampling method. Default is `gauss`. kwargs : optional Additional arguments passed to rasterio.Env. Returns --------- out: None Original file is altered or external .ovr can be created. """ with rasterio.open(source_file, 'r+') as dst: if factors is None: factors = _calc_overviews_factors( SimpleNamespace(width=dst.width, height=dst.height), minsize) with rasterio.Env( GDAL_TIFF_OVR_BLOCKSIZE=blocksize, INTERLEAVE_OVERVIEW=interleave, COMPRESS_OVERVIEW=compress, TIFF_USE_OVR=external, **kwargs ): dst.build_overviews(factors, resampling)
0.001561
def add_layer2image(grid2d, x_pos, y_pos, kernel, order=1): """ adds a kernel on the grid2d image at position x_pos, y_pos with an interpolated subgrid pixel shift of order=order :param grid2d: 2d pixel grid (i.e. image) :param x_pos: x-position center (pixel coordinate) of the layer to be added :param y_pos: y-position center (pixel coordinate) of the layer to be added :param kernel: the layer to be added to the image :param order: interpolation order for sub-pixel shift of the kernel to be added :return: image with added layer, cut to original size """ x_int = int(round(x_pos)) y_int = int(round(y_pos)) shift_x = x_int - x_pos shift_y = y_int - y_pos kernel_shifted = interp.shift(kernel, [-shift_y, -shift_x], order=order) return add_layer2image_int(grid2d, x_int, y_int, kernel_shifted)
0.003492
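The sub-pixel decomposition on its own, using scipy.ndimage.shift; the final paste-with-clipping step (add_layer2image_int) is not reproduced here.

import numpy as np
from scipy import ndimage

kernel = np.zeros((5, 5))
kernel[2, 2] = 1.0                 # a delta kernel

x_pos, y_pos = 10.3, 7.8           # desired sub-pixel centre
x_int, y_int = round(x_pos), round(y_pos)
shift_x, shift_y = x_int - x_pos, y_int - y_pos

# Shift the kernel by the fractional remainder; the integer part would be
# handled by pasting at (y_int, x_int) afterwards.
kernel_shifted = ndimage.shift(kernel, [-shift_y, -shift_x], order=1)
print(round(kernel_shifted.sum(), 6))  # ~1.0, flux preserved by linear interpolation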
def blueprint(self): """The name of the current blueprint""" if self.url_rule and '.' in self.url_rule.endpoint: return self.url_rule.endpoint.rsplit('.', 1)[0]
0.010638
def to_dict(self): """Create a dict representation of this exception. :return: The dictionary representation. :rtype: dict """ rv = dict(self.payload or ()) rv["message"] = self.message return rv
0.007905
def _parse_east_asian(fname, properties=(u'W', u'F',)): """Parse unicode east-asian width tables.""" version, date, values = None, None, [] print("parsing {} ..".format(fname)) for line in open(fname, 'rb'): uline = line.decode('utf-8') if version is None: version = uline.split(None, 1)[1].rstrip() continue elif date is None: date = uline.split(':', 1)[1].rstrip() continue if uline.startswith('#') or not uline.lstrip(): continue addrs, details = uline.split(';', 1) if any(details.startswith(property) for property in properties): start, stop = addrs, addrs if '..' in addrs: start, stop = addrs.split('..') values.extend(range(int(start, 16), int(stop, 16) + 1)) return version, date, sorted(values)
0.002035
def is_address_in_network(ip, net): """Is an address in a network""" # http://stackoverflow.com/questions/819355/how-can-i-check-if-an-ip-is-in-a-network-in-python import socket import struct ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] netaddr, bits = net.split('/') if int(bits) == 0: return True net = struct.unpack('=L', socket.inet_aton(netaddr))[0] mask = ((2L << int(bits) - 1) - 1) return (ipaddr & mask) == (net & mask)
0.002053
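On Python 3 the standard-library ipaddress module performs the same membership test without manual bit masking; shown here as an alternative, not as the snippet's own code.

import ipaddress

def in_network(ip, net):
    return ipaddress.ip_address(ip) in ipaddress.ip_network(net, strict=False)

assert in_network('192.168.1.42', '192.168.1.0/24')
assert not in_network('10.0.0.1', '192.168.1.0/24')
assert in_network('10.0.0.1', '0.0.0.0/0')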
def register_socket(self, socket): """Registers the given socket(s) for further use. :param Socket|list[Socket] socket: Socket type object. See ``.sockets``. """ sockets = self._sockets for socket in listify(socket): uses_shared = isinstance(socket.address, SocketShared) if uses_shared: # Handling shared sockets involves socket index resolution. shared_socket = socket.address # type: SocketShared if shared_socket not in sockets: self.register_socket(shared_socket) socket.address = self._get_shared_socket_idx(shared_socket) socket.address = self._section.replace_placeholders(socket.address) self._set(socket.name, socket, multi=True) socket._contribute_to_opts(self) bound_workers = socket.bound_workers if bound_workers: self._set( 'map-socket', '%s:%s' % (len(sockets), ','.join(map(str, bound_workers))), multi=True) if not uses_shared: sockets.append(socket) return self._section
0.003322
def _drain(writer, ion_event): """Drain the writer of its pending write events. Args: writer (Coroutine): A writer co-routine. ion_event (amazon.ion.core.IonEvent): The first event to apply to the writer. Yields: DataEvent: Yields each pending data event. """ result_event = _WRITE_EVENT_HAS_PENDING_EMPTY while result_event.type is WriteEventType.HAS_PENDING: result_event = writer.send(ion_event) ion_event = None yield result_event
0.003937
def mag_calibration(self): """Perform magnetometer calibration for current IMU.""" self.calibration_state = self.CAL_MAG self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self) if self.mag_dialog.exec_() == QDialog.Rejected: return self.calculate_mag_calibration(self.mag_dialog.samples)
0.00831
def parse_masked_phone_number(html, parser=None): """Get masked phone number from security check html :param html: str: raw html text :param parser: bs4.BeautifulSoup: html parser :return: tuple of phone prefix and suffix, for example: ('+1234', '89') :rtype : tuple """ if parser is None: parser = bs4.BeautifulSoup(html, 'html.parser') fields = parser.find_all('span', {'class': 'field_prefix'}) if not fields: raise VkParseError( 'No <span class="field_prefix">...</span> in the \n%s' % html) result = [] for f in fields: value = f.get_text().replace(six.u('\xa0'), '') result.append(value) return tuple(result)
0.00141
def split_string(x: str, n: int) -> List[str]: """ Split string into chunks of length n """ # https://stackoverflow.com/questions/9475241/split-string-every-nth-character # noqa return [x[i:i+n] for i in range(0, len(x), n)]
0.004082
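A quick usage check of the chunking idiom; the function is repeated without type hints so the snippet runs on its own.

def split_string(x, n):
    return [x[i:i + n] for i in range(0, len(x), n)]

assert split_string('abcdefgh', 3) == ['abc', 'def', 'gh']
assert split_string('', 3) == []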
def get_nodes_with_recipe(recipe_name, environment=None): """Get all nodes which include a given recipe, prefix-searches are also supported """ prefix_search = recipe_name.endswith("*") if prefix_search: recipe_name = recipe_name.rstrip("*") for n in get_nodes(environment): recipes = get_recipes_in_node(n) for role in get_roles_in_node(n, recursive=True): recipes.extend(get_recipes_in_role(role)) if prefix_search: if any(recipe.startswith(recipe_name) for recipe in recipes): yield n else: if recipe_name in recipes: yield n
0.001513
def append(self, child, *args, **kwargs): """See :meth:`AbstractElement.append`""" #if no set is associated with the layer yet, we learn it from span annotation elements that are added if self.set is False or self.set is None: if inspect.isclass(child): if issubclass(child,AbstractSpanAnnotation): if 'set' in kwargs: self.set = kwargs['set'] elif isinstance(child, AbstractSpanAnnotation): if child.set: self.set = child.set elif isinstance(child, Correction): #descend into corrections to find the proper set for this layer (derived from span annotation elements) for e in itertools.chain( child.new(), child.original(), child.suggestions() ): if isinstance(e, AbstractSpanAnnotation) and e.set: self.set = e.set break return super(AbstractAnnotationLayer, self).append(child, *args, **kwargs)
0.010368
def lambda_handler(event, context): '''A Python AWS Lambda function to process aggregated records sent to KinesisAnalytics.''' raw_kpl_records = event['records'] output = [process_kpl_record(kpl_record) for kpl_record in raw_kpl_records] # Print number of successful and failed records. success_count = sum(1 for record in output if record['result'] == 'Ok') failure_count = sum(1 for record in output if record['result'] == 'ProcessingFailed') print('Processing completed. Successful records: {0}, Failed records: {1}.'.format(success_count, failure_count)) return {'records': output}
0.006452
def parse_bookmark_data (data): """Parse data string. Return iterator for bookmarks of the form (url, name). Bookmarks are not sorted. """ for url, name in parse_bookmark_json(json.loads(data)): yield url, name
0.008403
def fixup_ins_del_tags(html): """ Given an html string, move any <ins> or <del> tags inside of any block-level elements, e.g. transform <ins><p>word</p></ins> to <p><ins>word</ins></p> """ doc = parse_html(html, cleanup=False) _fixup_ins_del_tags(doc) html = serialize_html_fragment(doc, skip_outer=True) return html
0.002907
def create_button_label(icon, font_size=constants.FONT_SIZE_NORMAL): """Create a button label with a chosen icon. :param icon: The icon :param font_size: The size of the icon :return: The created label """ label = Gtk.Label() set_label_markup(label, '&#x' + icon + ';', constants.ICON_FONT, font_size) label.show() return label
0.002747
def is_bbox_not_intersecting(self, other):
        """Return True iff the bounding boxes of self and other do not intersect."""
        self_x_min, self_x_max, self_y_min, self_y_max = self.get_bbox()
        other_x_min, other_x_max, other_y_min, other_y_max = other.get_bbox()
        return \
            self_x_min > other_x_max or \
            other_x_min > self_x_max or \
            self_y_min > other_y_max or \
            other_y_min > self_y_max
0.004444
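The separating-interval test in isolation, on plain (x_min, x_max, y_min, y_max) tuples; the helper below is illustrative, not part of the original class.

def bboxes_disjoint(a, b):
    ax0, ax1, ay0, ay1 = a
    bx0, bx1, by0, by1 = b
    return ax0 > bx1 or bx0 > ax1 or ay0 > by1 or by0 > ay1

assert bboxes_disjoint((0, 1, 0, 1), (2, 3, 2, 3))      # separated on both axes
assert not bboxes_disjoint((0, 2, 0, 2), (1, 3, 1, 3))  # overlapping boxes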