Columns: text (string, lengths 78–104k) · score (float64, range 0–0.18)
def alter_1(self, given_container_name, container_name, meta, val):
    """Get the container_name of the container if a container is specified"""
    meta.container = None
    if not isinstance(container_name, six.string_types):
        meta.container = container_name
        container_name = container_name.container_name
    return container_name
0.008043
def closer_than(self, mesh, radius):
    """
    Check for proximity of points in the ``mesh``.

    :param mesh:
        :class:`openquake.hazardlib.geo.mesh.Mesh` instance.
    :param radius:
        Proximity measure in km.
    :returns:
        Numpy array of boolean values in the same shape as the mesh
        coordinate arrays with ``True`` on indexes of points that are
        not further than ``radius`` km from this point. Function
        :func:`~openquake.hazardlib.geo.geodetic.distance` is used to
        calculate distances to points of the mesh. Points of the mesh
        that lie exactly ``radius`` km away from this point also have
        ``True`` in their indices.
    """
    dists = geodetic.distance(self.longitude, self.latitude, self.depth,
                              mesh.lons, mesh.lats,
                              0 if mesh.depths is None else mesh.depths)
    return dists <= radius
0.002006
def shift(func, *args, **kwargs):
    """This function is basically a beefed up lambda x: func(x, *args, **kwargs)

    :func:`shift` comes in handy when it is used in a pipeline with a
    function that needs the passed value as its first argument.

    :param func: a function
    :param args: objects
    :param kwargs: keywords

    >>> def div(x, y): return float(x) / y

    This is equivalent to div(42, 2)::

        >>> shift(div, 2)(42)
        21.0

    which is different from div(2, 42)::

        >>> from functools import partial
        >>> partial(div, 2)(42)
        0.047619047619047616
    """
    @wraps(func)
    def wrapped(x):
        return func(x, *args, **kwargs)
    return wrapped
0.004225
def _child(self, path):
    """
    Return a ConfigNode object representing a child node with the
    specified relative path.
    """
    if self._path:
        path = '{}.{}'.format(self._path, path)
    return ConfigNode(root=self._root, path=path)
0.007168
def listeners_iter(self):
    """Return an iterator over the mapping of event => listeners bound.

    The listener list(s) returned should **not** be mutated.

    NOTE(harlowja): Each listener in the yielded (event, listeners)
    tuple is an instance of the :py:class:`~.Listener` type, which
    itself wraps a provided callback (and its details filter
    callback, if any).
    """
    topics = set(six.iterkeys(self._topics))
    while topics:
        event_type = topics.pop()
        try:
            yield event_type, self._topics[event_type]
        except KeyError:
            pass
0.003072
def get_provider_token(self, provider_secret):
    """
    Obtain the service provider access token

    https://work.weixin.qq.com/api/doc#90001/90143/91200

    :param provider_secret: The provider's secret, visible in the provider admin console
    :return: The returned JSON data packet
    """
    return self._post(
        'service/get_provider_token',
        data={
            'corpid': self._client.corp_id,
            'provider_secret': provider_secret,
        }
    )
0.004494
def reduce_stack(array3D, z_function):
    """Return 2D array projection of the input 3D array.

    The input function is applied along the z axis of each (x, y) position.

    :param array3D: 3D numpy.array
    :param z_function: function to use for the projection (e.g. :func:`max`)
    """
    xmax, ymax, _ = array3D.shape
    projection = np.zeros((xmax, ymax), dtype=array3D.dtype)
    for x in range(xmax):
        for y in range(ymax):
            projection[x, y] = z_function(array3D[x, y, :])
    return projection
0.001919
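A quick usage sketch for the projection above, assuming numpy is imported as np and reduce_stack is in scope:

    import numpy as np

    stack = np.arange(12).reshape(2, 2, 3)   # a tiny 2x2x3 image stack
    print(reduce_stack(stack, max))          # maximum-intensity projection
    # [[ 2  5]
    #  [ 8 11]]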
def padded_cross_entropy(logits,
                         labels,
                         label_smoothing,
                         weights_fn=weights_nonzero,
                         reduce_sum=True,
                         cutoff=0.0,
                         gaussian=False):
    """Compute cross-entropy assuming 0s are padding.

    Computes a loss numerator (the sum of losses), and loss denominator
    (the number of non-padding tokens).

    Args:
        logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
            optionally a FactoredTensor.
        labels: an integer `Tensor` with shape `[batch, timesteps]`.
        label_smoothing: a floating point `Scalar`.
        weights_fn: A function from labels to weights.
        reduce_sum: a Boolean, whether to sum at the end or not.
        cutoff: a float, at which point to have no loss.
        gaussian: If true, use a Gaussian distribution for label smoothing

    Returns:
        loss_numerator: a `Scalar`. Sum of losses.
        loss_denominator: a `Scalar`. The number of non-padding target tokens.

    Raises:
        ValueError: in case of unsupported argument types.
    """
    if isinstance(logits, FactoredTensor):
        if gaussian:
            raise ValueError("Factored padded cross entropy with Gaussian smoothing "
                             "is not implemented yet.")
        return padded_cross_entropy_factored(
            logits,
            labels,
            label_smoothing,
            weights_fn=weights_fn,
            reduce_sum=reduce_sum)
    confidence = 1.0 - label_smoothing
    logits_shape = shape_list(logits)
    vocab_size = logits_shape[-1]
    with tf.name_scope("padded_cross_entropy", values=[logits, labels]):
        if len(logits_shape) == 2:
            # Deal with the case where we did not insert extra dimensions due to
            # TPU issues. No pad-to-same-length happens in this case.
            # TODO(noam): remove this logic once TPU can handle extra dimensions.
            labels = tf.reshape(labels, [-1])
        else:
            logits, labels = pad_with_zeros(logits, labels)
        logits = tf.reshape(
            logits,
            shape_list(labels) + [vocab_size],
            name="padded_cross_entropy_size_check")
        logits = tf.cast(logits, tf.float32)
        xent = smoothing_cross_entropy(
            logits, labels, vocab_size, confidence, gaussian=gaussian)
        weights = weights_fn(labels)
        if cutoff > 0.0:
            xent = tf.nn.relu(xent - cutoff)
        if not reduce_sum:
            return xent * weights, weights
        return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
0.006048
def cpu_percent(interval=0.1, percpu=False):
    """Return a float representing the current system-wide CPU
    utilization as a percentage.

    When interval is > 0.0 compares system CPU times elapsed before
    and after the interval (blocking).

    When interval is 0.0 or None compares system CPU times elapsed
    since last call or module import, returning immediately. In this
    case it is recommended for accuracy that this function be called
    with at least 0.1 seconds between calls.

    When percpu is True returns a list of floats representing the
    utilization as a percentage for each CPU. First element of the
    list refers to first CPU, second element to second CPU and so on.
    The order of the list is consistent across calls.
    """
    global _last_cpu_times
    global _last_per_cpu_times
    blocking = interval is not None and interval > 0.0

    def calculate(t1, t2):
        t1_all = sum(t1)
        t1_busy = t1_all - t1.idle
        t2_all = sum(t2)
        t2_busy = t2_all - t2.idle
        # this usually indicates a float precision issue
        if t2_busy <= t1_busy:
            return 0.0
        busy_delta = t2_busy - t1_busy
        all_delta = t2_all - t1_all
        busy_perc = (busy_delta / all_delta) * 100
        return round(busy_perc, 1)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            t1 = _last_cpu_times
        _last_cpu_times = cpu_times()
        return calculate(t1, _last_cpu_times)
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            tot1 = _last_per_cpu_times
        _last_per_cpu_times = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times):
            ret.append(calculate(t1, t2))
        return ret
0.00052
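A minimal usage sketch, assuming this module exposes the function the way psutil does:

    import time

    print(cpu_percent(interval=0.5))               # blocking, system-wide percentage
    print(cpu_percent(interval=None))              # non-blocking: delta since the last call
    time.sleep(0.1)                                # give the non-blocking mode a baseline
    print(cpu_percent(interval=None, percpu=True)) # one float per logical CPU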
def w(self):
    """Extract write lock (w) counter if available (lazy)."""
    if not self._counters_calculated:
        self._counters_calculated = True
        self._extract_counters()
    return self._w
0.00885
def create_qrcode(self, data):
    """
    Create a QR code

    For details, see
    http://mp.weixin.qq.com/wiki/18/28fc21e7ed87bec960651f0ce873ef8a.html

    :param data: The parameters to send, as a dict
    :return: The returned JSON data packet
    """
    data = self._transcoding_dict(data)
    return self.request.post(
        url='https://api.weixin.qq.com/cgi-bin/qrcode/create',
        data=data
    )
0.007634
def _zip_with_scalars(args):
    """Zips across args in order and replaces non-iterables with repeats."""
    zipped = []
    for arg in args:
        if isinstance(arg, prettytensor.PrettyTensor):
            zipped.append(arg if arg.is_sequence() else itertools.repeat(arg))
        elif (isinstance(arg, collections.Sequence) and
              not isinstance(arg, tf.compat.bytes_or_text_types)):
            zipped.append(arg)
        else:
            zipped.append(itertools.repeat(arg))
    assert len(args) == len(zipped)
    return zip(*zipped)
0.017682
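The scalar-broadcast idea above, sketched with plain Python so the repeat behaviour is visible (no PrettyTensor or TensorFlow needed):

    import itertools

    args = [[1, 2, 3], 10]  # one sequence, one scalar
    zipped = [a if isinstance(a, list) else itertools.repeat(a) for a in args]
    print(list(zip(*zipped)))  # [(1, 10), (2, 10), (3, 10)] -- the scalar repeats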
def add_fast(self, filepath, hashfn=None, force=False):
    """
    Bespoke function to add filepaths but set shortcircuit to True,
    which means only the first calculable hash will be stored. In this
    way only one "fast" hashing function need be called for each
    filepath.
    """
    if hashfn is None:
        hashfn = fast_hashes
    self.add(filepath, hashfn, force, shortcircuit=True)
0.004695
def Run(self, unused_arg):
    """Run the kill."""
    # Send a message back to the service to say that we are about to shut down.
    reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
    # Queue up the response message, jump the queue.
    self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS)

    # Give the http thread some time to send the reply.
    self.grr_worker.Sleep(10)

    # Die ourselves.
    logging.info("Dying on request.")
    os._exit(242)
0.002008
def p_network_sentence(self, t):
    """network_sentence : NETWORK VAR
                        | NETWORK VAR LPAREN features RPAREN"""
    if len(t) == 3:
        t[0] = network(t[2], reference=True, line=t.lineno(1))
    else:
        t[0] = network(t[2], t[4], line=t.lineno(1))
0.006557
def attach(self, host=None, source=None, sourcetype=None):
    """Opens a stream (a writable socket) for writing events to the index.

    :param host: The host value for events written to the stream.
    :type host: ``string``
    :param source: The source value for events written to the stream.
    :type source: ``string``
    :param sourcetype: The sourcetype value for events written to the
        stream.
    :type sourcetype: ``string``

    :return: A writable socket.
    """
    args = {'index': self.name}
    if host is not None: args['host'] = host
    if source is not None: args['source'] = source
    if sourcetype is not None: args['sourcetype'] = sourcetype
    path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.parse.urlencode(args), skip_encode=True)

    cookie_or_auth_header = "Authorization: Splunk %s\r\n" % \
        (self.service.token if self.service.token is _NoAuthenticationToken
         else self.service.token.replace("Splunk ", ""))

    # If we have cookie(s), use them instead of "Authorization: ..."
    if self.service.has_cookies():
        cookie_or_auth_header = "Cookie: %s\r\n" % _make_cookie_header(self.service.get_cookies().items())

    # Since we need to stream to the index connection, we have to keep
    # the connection open and use the Splunk extension headers to note
    # the input mode
    sock = self.service.connect()
    headers = [("POST %s HTTP/1.1\r\n" % str(self.service._abspath(path))).encode('utf-8'),
               ("Host: %s:%s\r\n" % (self.service.host, int(self.service.port))).encode('utf-8'),
               b"Accept-Encoding: identity\r\n",
               cookie_or_auth_header.encode('utf-8'),
               b"X-Splunk-Input-Mode: Streaming\r\n",
               b"\r\n"]

    for h in headers:
        sock.write(h)
    return sock
0.00652
def loop(self):
    """
    Enter loop, read user input then run command. Repeat
    """
    while True:
        text = compat.input('ctl > ')
        command, args = self.parse_input(text)
        if not command:
            continue
        response = self.call(command, *args)
        response.show()
0.006098
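A hypothetical parse_input matching the call above — a sketch only, since the real helper is not shown:

    def parse_input(text):
        # split "cmd arg1 arg2" into the command and its argument list
        parts = text.strip().split()
        if not parts:
            return None, []
        return parts[0], parts[1:]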
def extract(input, output):
    """Extract public key from private key.

    Given INPUT a private paillier key file as generated by generate,
    extract the public key portion to OUTPUT.

    Use "-" to output to stdout.
    """
    log("Loading paillier keypair")
    priv = json.load(input)
    error_msg = "Invalid private key"
    assert 'pub' in priv, error_msg
    assert priv['kty'] == 'DAJ', error_msg
    json.dump(priv['pub'], output)
    output.write('\n')
    log("Public key written to {}".format(output.name))
0.00381
def settings():
    """
    Fetch the middleware settings.

    :return dict: settings
    """
    # Get the user-provided settings
    user_settings = dict(getattr(django_settings, _settings_key, {}))
    user_settings_keys = set(user_settings.keys())

    # Check for required but missing settings
    missing = _required_settings_keys - user_settings_keys
    if missing:
        raise AuthzConfigurationError(
            'Missing required {} config: {}'.format(_settings_key, missing))

    # Check for unknown settings
    unknown = user_settings_keys - _available_settings_keys
    if unknown:
        raise AuthzConfigurationError(
            'Unknown {} config params: {}'.format(_settings_key, unknown))

    # Merge defaults with provided settings
    defaults = _available_settings_keys - user_settings_keys
    user_settings.update({key: _available_settings[key] for key in defaults})

    _rectify(user_settings)
    return types.MappingProxyType(user_settings)
0.001032
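The MappingProxyType return value makes the merged settings read-only; a small sketch of that behaviour:

    import types

    merged = types.MappingProxyType({'ALWAYS_OK': False})
    print(merged['ALWAYS_OK'])   # reads work as usual
    # merged['ALWAYS_OK'] = True # would raise TypeError: item assignment not supported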
def generate_batches(sequence, batch_len=1, allow_partial=True, ignore_errors=True, verbosity=1):
    """Iterate through a sequence (or generator) in batches of length `batch_len`

    http://stackoverflow.com/a/761125/623735

    >>> [batch for batch in generate_batches(range(7), 3)]
    [[0, 1, 2], [3, 4, 5], [6]]
    """
    it = iter(sequence)
    last_value = False
    # `next(it)` raises StopIteration at the end of the sequence; it is caught
    # below and the generator is finished cleanly.
    while not last_value:
        batch = []
        for n in range(batch_len):
            try:
                batch += (next(it),)
            except StopIteration:
                last_value = True
                if batch:
                    break
                else:
                    # Under PEP 479 a generator must return rather than raise
                    # StopIteration (the original `raise StopIteration` here
                    # is an error on Python 3.7+).
                    return
            except Exception:
                # 'Error: new-line character seen in unquoted field -
                # do you need to open the file in universal-newline mode?'
                if verbosity > 0:
                    print_exc()
                if not ignore_errors:
                    raise
        yield batch
0.003562
def validate(self, proxy_ip, client_ip):
    """
    Looks up the proxy identified by its IP, then verifies that the
    given client IP may be introduced by that proxy.

    :param proxy_ip: The IP address of the proxy.
    :param client_ip: The IP address of the supposed client.

    :returns: True if the proxy is permitted to introduce the client;
              False if the proxy doesn't exist or isn't permitted to
              introduce the client.
    """
    # First, look up the proxy
    if self.pseudo_proxy:
        proxy = self.pseudo_proxy
    elif proxy_ip not in self.proxies:
        return False
    else:
        proxy = self.proxies[proxy_ip]

    # Now, verify that the client is valid
    return client_ip in proxy
0.002463
def writerow(self, row):
    """
    Writes a row to the CSV file
    """
    self.writer.writerow(row)
    # Fetch UTF-8 output from the queue ...
    data = self.queue.getvalue()
    # write to the target stream
    self.stream.write(data)
    # empty queue
    self.queue.truncate(0)
    self.queue.seek(0)
0.005935
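A minimal sketch of the wrapper class this method belongs to, with attribute names assumed from the body above (writer, queue, stream):

    import csv
    import io

    class QueuedCsvWriter:
        def __init__(self, stream):
            self.stream = stream          # final destination
            self.queue = io.StringIO()    # staging buffer the csv module writes into
            self.writer = csv.writer(self.queue)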
def _get_stddev_rock(self, mag, imt):
    """
    Calculate and return total standard deviation for rock sites.

    Implements formulae from table 3.
    """
    C = self.COEFFS_ROCK_STDDERR[imt]
    if mag > C['maxmag']:
        return C['maxsigma']
    else:
        return C['sigma0'] + C['magfactor'] * mag
0.005764
def update_channels(self):
    """Update the GUI to reflect channels and image listing.
    """
    if not self.gui_up:
        return
    self.logger.debug("channel configuration has changed--updating gui")
    try:
        channel = self.fv.get_channel(self.chname)
    except KeyError:
        channel = self.fv.get_channel_info()
        if channel is None:
            raise ValueError('No channel available')
        self.chname = channel.name

    w = self.w.channel_name
    w.clear()

    self.chnames = list(self.fv.get_channel_names())
    #self.chnames.sort()
    for chname in self.chnames:
        w.append_text(chname)

    # select the channel that is the current one
    try:
        i = self.chnames.index(channel.name)
    except ValueError:
        # list.index() raises ValueError, not IndexError, on a missing name
        i = 0
    self.w.channel_name.set_index(i)

    # update the image listing
    self.redo()
0.003141
def published(self, for_user=None, include_login_required=False):
    """
    Override ``DisplayableManager.published`` to exclude pages with
    ``login_required`` set to ``True`` if the user is unauthenticated
    and the setting ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED`` is
    ``False``.

    The extra ``include_login_required`` arg allows callers to
    override the ``PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED`` behaviour
    in special cases where they want to deal with the
    ``login_required`` field manually, such as the case in
    ``PageMiddleware``.
    """
    published = super(PageManager, self).published(for_user=for_user)
    unauthenticated = for_user and not for_user.is_authenticated()
    if (unauthenticated and not include_login_required and
            not settings.PAGES_PUBLISHED_INCLUDE_LOGIN_REQUIRED):
        published = published.exclude(login_required=True)
    return published
0.002037
def plot(self, grid=None, size=256, limits=None, square=False, center=None,
         weight=None, weight_stat="mean", figsize=None, aspect="auto",
         f="identity", axes=None, xlabel=None, ylabel=None,
         group_by=None, group_limits=None, group_colors='jet',
         group_labels=None, group_count=None,
         vmin=None, vmax=None, cmap="afmhot", **kwargs):
    """Plot the subspace using sane defaults to get a quick look at the data.

    :param grid: A 2d numpy array with the counts, if None it will be
        calculated using limits provided and Subspace.histogram
    :param size: Passed to Subspace.histogram
    :param limits: Limits for the subspace in the form [[xmin, xmax],
        [ymin, ymax]], if None it will be calculated using
        Subspace.limits_sigma
    :param square: argument passed to Subspace.limits_sigma
    :param Executor executor: responsible for executing the tasks
    :param figsize: (x, y) tuple passed to pylab.figure for setting the
        figure size
    :param aspect: Passed to matplotlib's axes.set_aspect
    :param xlabel: String for label on x axis (may contain latex)
    :param ylabel: Same for y axis
    :param kwargs: extra argument passed to axes.imshow, useful for setting
        the colormap for instance, e.g. cmap='afmhot'
    :return: matplotlib.image.AxesImage
    """
    import pylab
    f = _parse_f(f)
    limits = self.limits(limits)
    if limits is None:
        limits = self.limits_sigma()
    # if grid is None:
    if group_limits is None and group_by:
        group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)
    # grid = self.histogram(limits=limits, size=size, weight=weight,
    #                       group_limits=group_limits, group_by=group_by)
    if figsize is not None:
        pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
    if axes is None:
        axes = pylab.gca()
    fig = pylab.gcf()
    # if xlabel:
    pylab.xlabel(xlabel or self.expressions[0])
    # if ylabel:
    pylab.ylabel(ylabel or self.expressions[1])
    # axes.set_aspect(aspect)
    rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square,
                            center=center, weight=weight, weight_stat=weight_stat,
                            f=f, axes=axes,
                            group_by=group_by, group_limits=group_limits,
                            group_colors=group_colors, group_count=group_count,
                            vmin=vmin, vmax=vmax, cmap=cmap)
    import matplotlib
    if group_by:
        if isinstance(group_colors, six.string_types):
            group_colors = matplotlib.cm.get_cmap(group_colors)
        if isinstance(group_colors, matplotlib.colors.Colormap):
            group_count = group_limits[2]
            colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]
        else:
            colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]
        colormap = matplotlib.colors.ListedColormap(colors)
        gmin, gmax, group_count = group_limits  # [:2]
        delta = (gmax - gmin) / (group_count - 1.)
        norm = matplotlib.colors.Normalize(gmin - delta / 2, gmax + delta / 2)
        sm = matplotlib.cm.ScalarMappable(norm, colormap)
        sm.set_array(1)  # make matplotlib happy (strange behaviour)
        colorbar = fig.colorbar(sm)
        if group_labels:
            colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
            colorbar.set_ticklabels(group_labels)
        else:
            colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
            colorbar.set_ticklabels(map(lambda x: "%f" % x, np.arange(gmin, gmax + delta / 2, delta)))
        colorbar.ax.set_ylabel(group_by)
        # matplotlib.colorbar.ColorbarBase(axes, norm=norm, cmap=colormap)
        im = axes.imshow(rgba8, extent=np.array(limits).flatten(),
                         origin="lower", aspect=aspect, **kwargs)
    else:
        norm = matplotlib.colors.Normalize(0, 23)
        sm = matplotlib.cm.ScalarMappable(norm, cmap)
        sm.set_array(1)  # make matplotlib happy (strange behaviour)
        colorbar = fig.colorbar(sm)
        im = axes.imshow(rgba8, extent=np.array(limits).flatten(),
                         origin="lower", aspect=aspect, **kwargs)
        colorbar = None
    return im, colorbar
0.005509
def dump_to_console(pylint_data):
    """
    Displays pylint data to the console.
    :param pylint_data:
    :return:
    """
    for key, value in list(pylint_data.items()):
        if key not in ('errors', 'total', 'scores', 'average') and len(value) > 0:
            print("\n*********** {}".format(key))
            for line in value:
                print(line.strip('\n'))
            f_score = [score[1] for score in pylint_data['scores'] if score[0] == key][0]
            print("Score: {}".format(f_score))
0.005792
async def fetch_invite(self, url, *, with_counts=True):
    """|coro|

    Gets an :class:`.Invite` from a discord.gg URL or ID.

    .. note::

        If the invite is for a guild you have not joined, the guild and
        channel attributes of the returned :class:`.Invite` will be
        :class:`.PartialInviteGuild` and :class:`PartialInviteChannel`
        respectively.

    Parameters
    -----------
    url: :class:`str`
        The discord invite ID or URL (must be a discord.gg URL).
    with_counts: :class:`bool`
        Whether to include count information in the invite. This fills the
        :attr:`.Invite.approximate_member_count` and
        :attr:`.Invite.approximate_presence_count` fields.

    Raises
    -------
    NotFound
        The invite has expired or is invalid.
    HTTPException
        Getting the invite failed.

    Returns
    --------
    :class:`.Invite`
        The invite from the URL/ID.
    """
    invite_id = utils.resolve_invite(url)
    data = await self.http.get_invite(invite_id, with_counts=with_counts)
    return Invite.from_incomplete(state=self._connection, data=data)
0.004062
def _spectrum(self, photon_energy):
    """
    Compute differential spectrum from pp interactions using Eq. 71 and
    Eq. 58 of Kelner, S.R., Aharonian, F.A., and Bugayov, V.V., 2006
    PhysRevD 74, 034018 (`arXiv:astro-ph/0606058
    <http://www.arxiv.org/abs/astro-ph/0606058>`_).

    Parameters
    ----------
    photon_energy : :class:`~astropy.units.Quantity` instance
        Photon energy array.
    """
    outspecene = _validate_ene(photon_energy)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self.nhat = 1.0  # initial value, works for index~2.1
        if np.any(outspecene < self.Etrans) and np.any(outspecene >= self.Etrans):
            # compute value of nhat so that delta functional matches
            # accurate calculation at 0.1TeV
            full = self._calc_specpp_hiE(self.Etrans)
            delta = self._calc_specpp_loE(self.Etrans)
            self.nhat *= (full / delta).decompose().value

        self.specpp = np.zeros(len(outspecene)) * u.Unit("1/(s TeV)")

        for i, Egamma in enumerate(outspecene):
            if Egamma >= self.Etrans:
                self.specpp[i] = self._calc_specpp_hiE(Egamma)
            else:
                self.specpp[i] = self._calc_specpp_loE(Egamma)

    density_factor = (self.nh / (1 * u.Unit("1/cm3"))).decompose().value

    return density_factor * self.specpp.to("1/(s eV)")
0.001301
def post_build(self, packet, payload):
    """Compute the 'records_number' field when needed"""
    if self.records_number is None:
        recnum = struct.pack("!H", len(self.records))
        packet = packet[:6] + recnum + packet[8:]
    return _ICMPv6.post_build(self, packet, payload)
0.006494
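The same patch pattern in isolation: once the final record count is known, a big-endian u16 at byte offset 6 is overwritten in the already-built packet bytes:

    import struct

    packet = bytearray(12)              # stand-in for the built packet
    packet[6:8] = struct.pack("!H", 3)  # splice the 2-byte count at offset 6
    print(packet.hex())                 # '000000000000000300000000'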
def _set_current_subscript(self, active):
    """
    Sets the current subscript and keeps a counter of how often a
    particular subscript has been executed. This information is useful
    when implementing a status update or plotting functions that depend
    on which subscript is being executed.

    Keeps track of the following dictionary:
        self._current_subscript_stage = {
            'current_subscript': reference to the current subscript,
            'subscript_exec_count': dictionary where key is the subscript
                name and value how often it has been executed,
            'subscript_exec_duration': dictionary where key is the
                subscript name and value the average duration of executing
                the subscript
        }

    Args:
        active: True if the current subscript just started, False if it
            just finished
    """
    current_subscript = self.sender()

    if active:
        for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()):
            if subscript_name == current_subscript.name:
                self._current_subscript_stage['subscript_exec_count'][subscript_name] += 1
                self._current_subscript_stage['current_subscript'] = current_subscript
    else:
        self._current_subscript_stage['current_subscript'] = current_subscript
        for subscript_name in list(self._current_subscript_stage['subscript_exec_count'].keys()):
            # calculate the average duration to execute the subscript
            if subscript_name == current_subscript.name:
                duration = current_subscript.end_time - current_subscript.start_time
                if subscript_name in self._current_subscript_stage['subscript_exec_duration']:
                    duration_old = self._current_subscript_stage['subscript_exec_duration'][subscript_name]
                else:
                    duration_old = datetime.timedelta(0)
                exec_count = self._current_subscript_stage['subscript_exec_count'][subscript_name]
                # running average of the execution duration
                self._current_subscript_stage['subscript_exec_duration'][subscript_name] = \
                    (duration_old * (exec_count - 1) + duration) / exec_count
0.008547
def get_param(self):
    """Method to get current optimizer's parameter value
    """
    cycle_progress = self.event_index / self.cycle_size
    return self.start_value + \
        ((self.end_value - self.start_value) / 2) * (1 - math.cos(math.pi * cycle_progress))
0.011029
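A standalone worked example of the cosine schedule above (a hypothetical free-function version of the same formula):

    import math

    def cosine_value(start, end, step, cycle_size):
        progress = step / cycle_size
        return start + ((end - start) / 2) * (1 - math.cos(math.pi * progress))

    print(cosine_value(0.1, 0.01, 0, 10))   # 0.1   -- cycle start
    print(cosine_value(0.1, 0.01, 5, 10))   # 0.055 -- halfway
    print(cosine_value(0.1, 0.01, 10, 10))  # 0.01  -- cycle end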
def set_section(self, section):
    """Set a section. If section already exists, overwrite the old one.
    """
    if not isinstance(section, Section):
        # the original message was truncated to "You"; a plausible completion:
        raise Exception("You must provide a Section instance")
    try:
        self.remove_section(section.name)
    except Exception:
        pass
    self._sections[section.name] = copy.deepcopy(section)
0.008451
def register_rml_def(self,
                     location_type,
                     location,
                     filename=None,
                     **kwargs):
    """
    Registers the rml file locations for easy access

    Args:
    -----
        location_type: ['package_all', 'package_file', 'directory', 'filepath']
        location: The correlated location string based on the location_type
        filename: Optional, associated with 'package_file' location_type

    kwargs:
    -------
        include_subfolders: Boolean
    """
    if location_type == 'directory':
        self.register_directory(location, **kwargs)
    elif location_type == 'filepath':
        if not os.path.exists(location):
            raise OSError("File not found", location)
        if os.path.isfile(location):
            self.register_rml(location)
        elif filename:
            new_loc = os.path.join(location, filename)
            if not os.path.exists(new_loc):
                raise OSError("File not found", new_loc)
            elif os.path.isfile(new_loc):
                self.register_rml(new_loc)
        else:
            raise OSError("File not found", location)
    elif location_type.startswith('package'):
        pkg_path = \
            importlib.util.find_spec(location).submodule_search_locations[0]
        if location_type.endswith('_all'):
            self.register_directory(pkg_path, **kwargs)
        elif location_type.endswith('_file'):
            filepath = os.path.join(pkg_path, filename)
            self.register_rml(filepath, **kwargs)
        else:
            raise NotImplementedError
0.00369
def get_tree_from_branch(self, ref):
    '''
    Return a pygit2.Tree object matching a head ref fetched into
    refs/remotes/origin/
    '''
    try:
        return self.peel(self.repo.lookup_reference(
            'refs/remotes/origin/{0}'.format(ref))).tree
    except KeyError:
        return None
0.005917
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
    """Return None if cache entry does not exist."""
    key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
    # TODO: Django docs state that cache.get() should return None on unknown key.
    try:
        last_ts_tup = django.core.cache.cache.get(key_str)
    except KeyError:
        last_ts_tup = None
    logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
    return last_ts_tup
0.007576
def template_instance(self):
    '''
    parse the template instance node.
    this is used to compute the location of the template definition
    structure.

    Returns:
        TemplateInstanceNode: the template instance.
    '''
    ofs = self.offset()
    if self.unpack_byte(0x0) & 0x0F == 0xF:
        ofs += 4
    return TemplateInstanceNode(self._buf, ofs, self._chunk, self)
0.007177
def submit_tar(cl_args, unknown_args, tmp_dir):
    '''
    Extract and execute the java files inside the tar and then add topology
    definition file created by running submitTopology

    We use the packer to make a package for the tar and dump it
    to a well-known location. We then run the main method of class
    with the specified arguments. We pass arguments as an environment
    variable HERON_OPTIONS.

    This will run the jar file with the topology class name.

    The submitter inside will write out the topology defn file to a location
    that we specify. Then we write the topology defn file to a well known
    packer location. We then write to appropriate places in zookeeper
    and launch the aurora jobs
    :param cl_args:
    :param unknown_args:
    :param tmp_dir:
    :return:
    '''
    # execute main of the topology to create the topology definition
    topology_file = cl_args['topology-file-name']
    java_defines = cl_args['topology_main_jvm_property']
    main_class = cl_args['topology-class-name']
    res = execute.heron_tar(
        main_class,
        topology_file,
        tuple(unknown_args),
        tmp_dir,
        java_defines)
    result.render(res)

    if not result.is_successful(res):
        err_context = ("Failed to create topology definition "
                       "file when executing class '%s' of file '%s'") % (main_class, topology_file)
        res.add_context(err_context)
        return res

    return launch_topologies(cl_args, topology_file, tmp_dir)
0.009756
def get_stp_mst_detail_output_cist_port_rx_bpdu_count(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    cist = ET.SubElement(output, "cist")
    port = ET.SubElement(cist, "port")
    rx_bpdu_count = ET.SubElement(port, "rx-bpdu-count")
    rx_bpdu_count.text = kwargs.pop('rx_bpdu_count')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003295
def paste(client, event, channel, nick, rest):
    "Drop a link to your latest paste"
    path = '/last/{nick}'.format(**locals())
    paste_root = pmxbot.config.get('librarypaste', 'http://paste.jaraco.com')
    url = urllib.parse.urljoin(paste_root, path)
    auth = pmxbot.config.get('librarypaste auth')
    resp = requests.head(url, auth=_request_friendly(auth))
    if not resp.ok:
        return "I couldn't resolve a recent paste of yours. Maybe try " + url
    return resp.headers['location']
0.021231
def lonlat2xyz(lon, lat):
    """Convert lon lat to cartesian."""
    lat = xu.deg2rad(lat)
    lon = xu.deg2rad(lon)
    x = xu.cos(lat) * xu.cos(lon)
    y = xu.cos(lat) * xu.sin(lon)
    z = xu.sin(lat)
    return x, y, z
0.004464
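The same conversion with plain numpy (xu above is assumed to be a numpy-compatible module), plus a check that results land on the unit sphere:

    import numpy as np

    def lonlat2xyz_np(lon, lat):
        lat, lon = np.deg2rad(lat), np.deg2rad(lon)
        return np.cos(lat) * np.cos(lon), np.cos(lat) * np.sin(lon), np.sin(lat)

    x, y, z = lonlat2xyz_np(45.0, 30.0)
    assert abs(x**2 + y**2 + z**2 - 1.0) < 1e-12  # points lie on the unit sphere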
def format_number(col, d):
    """
    Formats the number X to a format like '#,--#,--#.--', rounded to d
    decimal places with HALF_EVEN round mode, and returns the result as
    a string.

    :param col: the column name of the numeric value to be formatted
    :param d: the N decimal places

    >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
    [Row(v=u'5.0000')]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
0.005556
def zfill(self, width):
    """
    Pad strings in the Series/Index by prepending '0' characters.

    Strings in the Series/Index are padded with '0' characters on the
    left of the string to reach a total string length `width`. Strings
    in the Series/Index with length greater or equal to `width` are
    unchanged.

    Parameters
    ----------
    width : int
        Minimum length of resulting string; strings with length less
        than `width` will be prepended with '0' characters.

    Returns
    -------
    Series/Index of objects

    See Also
    --------
    Series.str.rjust : Fills the left side of strings with an arbitrary
        character.
    Series.str.ljust : Fills the right side of strings with an arbitrary
        character.
    Series.str.pad : Fills the specified sides of strings with an arbitrary
        character.
    Series.str.center : Fills both sides of strings with an arbitrary
        character.

    Notes
    -----
    Differs from :meth:`str.zfill` which has special handling
    for '+'/'-' in the string.

    Examples
    --------
    >>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
    >>> s
    0      -1
    1       1
    2    1000
    3      10
    4     NaN
    dtype: object

    Note that ``10`` and ``NaN`` are not strings, therefore they are
    converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
    regular character and the zero is added to the left of it
    (:meth:`str.zfill` would have moved it to the left). ``1000``
    remains unchanged as it is longer than `width`.

    >>> s.str.zfill(3)
    0     0-1
    1     001
    2    1000
    3     NaN
    4     NaN
    dtype: object
    """
    result = str_pad(self._parent, width, side='left', fillchar='0')
    return self._wrap_result(result)
0.001004
def circuit_to_latex(circ: Circuit,
                     qubits: Qubits = None,
                     document: bool = True) -> str:
    """
    Create an image of a quantum circuit in LaTeX.

    Can currently draw X, Y, Z, H, T, S, T_H, S_H, RX, RY, RZ, TX, TY, TZ,
    TH, CNOT, CZ, SWAP, ISWAP, CCNOT, CSWAP, XX, YY, ZZ, CAN, P0 and P1
    gates, and the RESET operation.

    Args:
        circ:       A quantum Circuit
        qubits:     Optional qubit list to specify qubit order
        document:   If false, just the qcircuit latex is returned. Else the
                    circuit image is wrapped in a standalone LaTeX document
                    ready for typesetting.

    Returns:
        A LaTeX string representation of the circuit.

    Raises:
        NotImplementedError: For unsupported gates.

    Refs:
        LaTeX Qcircuit package (https://arxiv.org/pdf/quant-ph/0406003).
    """
    if qubits is None:
        qubits = circ.qubits
    N = len(qubits)
    qubit_idx = dict(zip(qubits, range(N)))
    layers = _display_layers(circ, qubits)

    layer_code = []
    code = [r'\lstick{' + str(q) + r'}' for q in qubits]
    layer_code.append(code)

    def _two_qubit_gate(top, bot, label):
        if bot - top == 1:
            code_top = r'\multigate{1}{%s}' % label
            code_bot = r'\ghost{%s}' % label
        else:
            code_top = r'\sgate{%s}{%s}' % (label, str(bot - top))
            code_bot = r'\gate{%s}' % (label)
        return code_top, code_bot

    for layer in layers.elements:
        code = [r'\qw'] * N
        assert isinstance(layer, Circuit)
        for gate in layer:
            idx = [qubit_idx[q] for q in gate.qubits]
            name = gate.name

            if isinstance(gate, I):
                pass
            elif (len(idx) == 1) and name in ['X', 'Y', 'Z', 'H', 'T', 'S']:
                code[idx[0]] = r'\gate{' + gate.name + '}'
            elif isinstance(gate, S_H):
                code[idx[0]] = r'\gate{S^\dag}'
            elif isinstance(gate, T_H):
                code[idx[0]] = r'\gate{T^\dag}'
            elif isinstance(gate, RX):
                theta = _latex_format(gate.params['theta'])
                code[idx[0]] = r'\gate{R_x(%s)}' % theta
            elif isinstance(gate, RY):
                theta = _latex_format(gate.params['theta'])
                code[idx[0]] = r'\gate{R_y(%s)}' % theta
            elif isinstance(gate, RZ):
                theta = _latex_format(gate.params['theta'])
                code[idx[0]] = r'\gate{R_z(%s)}' % theta
            elif isinstance(gate, TX):
                t = _latex_format(gate.params['t'])
                code[idx[0]] = r'\gate{X^{%s}}' % t
            elif isinstance(gate, TY):
                t = _latex_format(gate.params['t'])
                code[idx[0]] = r'\gate{Y^{%s}}' % t
            elif isinstance(gate, TZ):
                t = _latex_format(gate.params['t'])
                code[idx[0]] = r'\gate{Z^{%s}}' % t
            elif isinstance(gate, TH):
                t = _latex_format(gate.params['t'])
                code[idx[0]] = r'\gate{H^{%s}}' % t
            elif isinstance(gate, CNOT):
                code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
                code[idx[1]] = r'\targ'
            elif isinstance(gate, XX):
                label = r'X\!X^{%s}' % _latex_format(gate.params['t'])
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, YY):
                label = r'Y\!Y^{%s}' % _latex_format(gate.params['t'])
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, ZZ):
                label = r'Z\!Z^{%s}' % _latex_format(gate.params['t'])
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, CPHASE):
                theta = _latex_format(gate.params['theta'])
                label = r'\text{CPHASE}({%s})' % theta
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, PSWAP):
                theta = _latex_format(gate.params['theta'])
                label = r'\text{PSWAP}({%s})' % theta
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, CZ):
                code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
                code[idx[1]] = r'\ctrl{' + str(idx[0] - idx[1]) + '}'
            elif isinstance(gate, SWAP):
                code[idx[0]] = r'\qswap \qwx[' + str(idx[1] - idx[0]) + ']'
                code[idx[1]] = r'\qswap'
            elif isinstance(gate, CAN):
                tx = _latex_format(gate.params['tx'])
                ty = _latex_format(gate.params['ty'])
                tz = _latex_format(gate.params['tz'])
                label = r'{\text{CAN}(%s, %s, %s)}' % (tx, ty, tz)
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, ISWAP):
                label = r'{ \text{iSWAP}}'
                top = min(idx)
                bot = max(idx)
                code[top], code[bot] = _two_qubit_gate(top, bot, label)
            elif isinstance(gate, CCNOT):
                code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
                code[idx[1]] = r'\ctrl{' + str(idx[2] - idx[1]) + '}'
                code[idx[2]] = r'\targ'
            elif isinstance(gate, CSWAP):
                code[idx[0]] = r'\ctrl{' + str(idx[1] - idx[0]) + '}'
                code[idx[1]] = r'\qswap \qwx[' + str(idx[2] - idx[1]) + ']'
                code[idx[2]] = r'\qswap'
            elif isinstance(gate, P0):
                code[idx[0]] = r'\push{\ket{0}\!\!\bra{0}} \qw'
            elif isinstance(gate, P1):
                code[idx[0]] = r'\push{\ket{1}\!\!\bra{1}} \qw'
            elif isinstance(gate, Reset):
                for i in idx:
                    code[i] = r'\push{\rule{0.1em}{0.5em}\, \ket{0}\,} \qw'
            elif isinstance(gate, Measure):
                code[idx[0]] = r'\meter'
            else:
                raise NotImplementedError(str(gate))

        layer_code.append(code)

    code = [r'\qw'] * N
    layer_code.append(code)

    latex_lines = [''] * N

    for line, wire in enumerate(zip(*layer_code)):
        latex = '& ' + ' & '.join(wire)
        if line < N - 1:  # Not last line
            latex += r' \\'
        latex_lines[line] = latex

    latex_code = _QCIRCUIT % '\n'.join(latex_lines)

    if document:
        latex_code = _DOCUMENT_HEADER + latex_code + _DOCUMENT_FOOTER

    return latex_code
0.000143
def till(self):
    """ Queries the current shop till and returns the amount

    Returns
        str -- Amount of NPs in shop till

    Raises
        parseException
    """
    pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till")
    try:
        return pg.find_all(text="Shop Till")[1].parent.next_sibling.b.text.replace(" NP", "").replace(",", "")
    except Exception:
        logging.getLogger("neolib.shop").exception("Could not grab shop till.", {'pg': pg})
        raise parseException
0.015228
def _updateEndpoints(self, *args, **kwargs):
    """
    Updates all endpoints except the one from which this slot was called.

    Note: this method is probably not completely threadsafe. A lock may
    be needed around the setting of self.ignoreEvents.
    """
    sender = self.sender()
    if not self.ignoreEvents:
        self.ignoreEvents = True
        for binding in self.bindings.values():
            if binding.instanceId == id(sender):
                continue
            if args:
                binding.setter(*args, **kwargs)
            else:
                binding.setter(self.bindings[id(sender)].getter())
        self.ignoreEvents = False
0.01105
def EQ105(T, A, B, C, D):
    r'''DIPPR Equation #105. Often used in calculating liquid molar
    density. All 4 parameters are required. C is sometimes the fluid's
    critical temperature.

    .. math::
        Y = \frac{A}{B^{1 + (1-\frac{T}{C})^D}}

    Parameters
    ----------
    T : float
        Temperature, [K]
    A-D : float
        Parameter for the equation; chemical and property specific [-]

    Returns
    -------
    Y : float
        Property [constant-specific]

    Notes
    -----
    This expression can be integrated in terms of the incomplete gamma
    function for dT, but for Y/T dT no integral could be found.

    Examples
    --------
    Hexane molar density; DIPPR coefficients normally in kmol/m^3.

    >>> EQ105(300., 0.70824, 0.26411, 507.6, 0.27537)
    7.593170096339236

    References
    ----------
    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801
       DIPPR/AIChE
    '''
    return A/B**(1. + (1. - T/C)**D)
0.002014
def getkey(self, path, filename=None):
    """ Get single matching key for a path """
    scheme, keys = self.getkeys(path, filename=filename)
    try:
        key = next(keys)
    except StopIteration:
        raise FileNotFoundError("Could not find object for: '%s'" % path)

    # we expect to only have a single key returned
    nextKey = None
    try:
        nextKey = next(keys)
    except StopIteration:
        pass
    if nextKey:
        raise ValueError("Found multiple keys for: '%s'" % path)

    return scheme, key
0.003311
def run(command, show=True, *args, **kwargs):
    """
    Runs a shell command on the remote server.
    """
    if show:
        print_command(command)
    with hide("running"):
        return _run(command, *args, **kwargs)
0.004484
def add_firewalld_service(service, permanent=True):
    """ adds a firewall rule """
    yum_install(packages=['firewalld'])
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        p = ''
        if permanent:
            p = '--permanent'
        sudo('firewall-cmd --add-service %s %s' % (service, p))
        sudo('systemctl reload firewalld')
0.00241
def read_lsm_eventlist(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    while count > 0:
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        etext = bytes2str(stripnull(fh.read(esize - 16)))
        events.append((etime, etype, etext))
        count -= 1
    return events
0.002532
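A tiny round-trip sketch of the record layout the reader expects (bytes2str and stripnull are helpers assumed from the surrounding module):

    import io
    import struct

    payload = b"laser on\x00"
    record = struct.pack('<IdI', 16 + len(payload), 12.5, 2) + payload
    fh = io.BytesIO(struct.pack('<II', 0, 1) + record)  # header says: one event
    # read_lsm_eventlist(fh) would yield [(12.5, 2, 'laser on')]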
def compile_fetch(self, raw, doi_id):
    """
    Loop over Raw and add selected items to Fetch with proper formatting

    :param dict raw: JSON data from doi.org
    :param str doi_id:
    :return dict:
    """
    fetch_dict = OrderedDict()
    order = {'author': 'author', 'type': 'type', 'identifier': '',
             'title': 'title', 'journal': 'container-title',
             'pubYear': '', 'volume': 'volume', 'publisher': 'publisher',
             'page': 'page', 'issue': 'issue'}

    for k, v in order.items():
        try:
            if k == 'identifier':
                fetch_dict[k] = [{"type": "doi", "id": doi_id,
                                  "url": "http://dx.doi.org/" + doi_id}]
            elif k == 'author':
                fetch_dict[k] = self.compile_authors(raw[v])
            elif k == 'pubYear':
                fetch_dict[k] = self.compile_date(raw['issued']['date-parts'])
            else:
                fetch_dict[k] = raw[v]
        except KeyError as e:
            # If we try to add a key that doesn't exist in the raw dict, then just keep going.
            logger_doi_resolver.warn("compile_fetch: KeyError: key not in raw: {}, {}".format(v, e))
    return fetch_dict
0.008026
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def blacklist(app):
        return BlacklistFilter(app, conf)
    return blacklist
0.003922
def calculate_md5(fileobject, size=2**16):
    """Utility function to calculate md5 hashes while being light on
    memory usage.

    By reading the fileobject piece by piece, we are able to process
    content that is larger than available memory"""
    fileobject.seek(0)
    md5 = hashlib.md5()
    for data in iter(lambda: fileobject.read(size), b''):
        if not data:
            break
        if isinstance(data, six.text_type):
            data = data.encode('utf-8')  # md5 needs a byte string
        md5.update(data)
    fileobject.seek(0)  # rewind read head
    return md5.hexdigest()
0.010292
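Minimal usage sketch: hashing an in-memory file-like object.

    import io

    buf = io.BytesIO(b"hello world")
    print(calculate_md5(buf))  # '5eb63bbbe01eeed093cb22bb8f5acdc3'
    print(buf.tell())          # 0 -- the read head is rewound afterwards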
def is_diagonal_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
    """Test if an array is a diagonal matrix"""
    if atol is None:
        atol = ATOL_DEFAULT
    if rtol is None:
        rtol = RTOL_DEFAULT
    mat = np.array(mat)
    if mat.ndim != 2:
        return False
    return np.allclose(mat, np.diag(np.diagonal(mat)), rtol=rtol, atol=atol)
0.002809
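Quick usage sketch (assumes the module-level RTOL_DEFAULT/ATOL_DEFAULT constants are defined):

    import numpy as np

    print(is_diagonal_matrix(np.diag([1.0, 2.0, 3.0])))            # True
    print(is_diagonal_matrix(np.array([[1.0, 0.1], [0.0, 2.0]])))  # False -- off-diagonal 0.1
    print(is_diagonal_matrix(np.ones(3)))                          # False -- not 2-D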
def _load_url(url):
    """ Loads a URL resource from a remote server """
    try:
        response = requests.get(url)
        return BytesIO(response.content)
    except IOError as ex:
        parser.error("{url} could not be loaded remotely! ({ex})".format(url=url, ex=ex))
0.007042
def dragdrop(self, chviewer, uris):
    """Called when a drop operation is performed on a channel viewer.
    We are called back with a URL and we attempt to (down)load it if it
    names a file.
    """
    # find out our channel
    chname = self.get_channel_name(chviewer)
    self.open_uris(uris, chname=chname)
    return True
0.00551
def get_tree_type(tree):
    """
    returns the type of the (sub)tree: Root, Nucleus or Satellite

    Parameters
    ----------
    tree : nltk.tree.ParentedTree
        a tree representing a rhetorical structure (or a part of it)
    """
    tree_type = tree.label()
    assert tree_type in SUBTREE_TYPES, "tree_type: {}".format(tree_type)
    return tree_type
0.002755
def transmute(df, *keep_columns, **kwargs):
    """
    Creates columns and then returns those new columns and optionally
    specified original columns from the DataFrame.

    This works like `mutate`, but designed to discard the original columns
    used to create the new ones.

    Args:
        *keep_columns: Column labels to keep. Can be string, symbolic, or
            integer position.

    Kwargs:
        **kwargs: keys are the names of the new columns, values indicate
            what the new column values will be.

    Example:
        diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)

            y_div_z  x_plus_y
        0  1.637860      7.93
        1  1.662338      7.73
        2  1.761905      8.12
    """
    keep_cols = []
    for col in flatten(keep_columns):
        try:
            keep_cols.append(col.name)
        except AttributeError:
            if isinstance(col, str):
                keep_cols.append(col)
            elif isinstance(col, int):
                keep_cols.append(df.columns[col])
    df = df.assign(**kwargs)
    columns = [k for k in kwargs.keys()] + list(keep_cols)
    return df[columns]
0.002593
def _base_repr_(self, html=False, show_name=True, **kwargs):
    """
    Override the method in the astropy.Table class to avoid displaying
    the description, and the format of the columns
    """
    table_id = 'table{id}'.format(id=id(self))

    data_lines, outs = self.formatter._pformat_table(
        self, tableid=table_id, html=html, max_width=(-1 if html else None),
        show_name=show_name, show_unit=None, show_dtype=False)

    out = '\n'.join(data_lines)
    # if astropy.table.six.PY2 and isinstance(out, astropy.table.six.text_type):
    #     out = out.encode('utf-8')

    return out
0.006649
def create_object(self, filename, img_properties=None):
    """Create an image object on local disk from the given file. The file
    is copied to a new local directory that is created for the image
    object. The optional list of image properties will be associated with
    the new object together with the set of default properties for images.

    Parameters
    ----------
    filename : string
        Path to file on disk
    img_properties : Dictionary, optional
        Set of image properties.

    Returns
    -------
    ImageHandle
        Handle for created image object
    """
    # Get the file name, i.e., last component of the given absolute path
    prop_name = os.path.basename(os.path.normpath(filename))
    # Ensure that the image file has a valid suffix. Currently we do not
    # check whether the file actually is an image. If the suffix is valid
    # get the associated Mime type from the dictionary.
    prop_mime = None
    pos = prop_name.rfind('.')
    if pos >= 0:
        suffix = prop_name[pos:].lower()
        if suffix in VALID_IMGFILE_SUFFIXES:
            prop_mime = VALID_IMGFILE_SUFFIXES[suffix]
    if not prop_mime:
        raise ValueError('unsupported image type: ' + prop_name)
    # Create a new object identifier.
    identifier = str(uuid.uuid4()).replace('-', '')
    # The sub-folder to store the image is given by the first two
    # characters of the identifier.
    image_dir = self.get_directory(identifier)
    # Create the directory if it doesn't exists
    if not os.access(image_dir, os.F_OK):
        os.makedirs(image_dir)
    # Create the initial set of properties for the new image object.
    properties = {
        datastore.PROPERTY_NAME: prop_name,
        datastore.PROPERTY_FILENAME: prop_name,
        datastore.PROPERTY_FILESIZE: os.path.getsize(filename),
        datastore.PROPERTY_MIMETYPE: prop_mime
    }
    # Add additional image properties (if given). Note that this will not
    # override the default image properties.
    if not img_properties is None:
        for prop in img_properties:
            if not prop in properties:
                properties[prop] = img_properties[prop]
    # Copy original file to new object's directory
    shutil.copyfile(filename, os.path.join(image_dir, prop_name))
    # Create object handle and store it in database before returning it
    obj = ImageHandle(identifier, properties, image_dir)
    self.insert_object(obj)
    return obj
0.003358
def applyEdits(self,
               addFeatures=None,
               updateFeatures=None,
               deleteFeatures=None,
               gdbVersion=None,
               useGlobalIds=False,
               rollbackOnFailure=True,
               attachments=None):
    """
    This operation adds, updates, and deletes features to the
    associated feature layer or table in a single call.

    Inputs:
        addFeatures - The array of features to be added. These features
            should be common.Feature objects, or they should be a list of
            dictionary features.
        updateFeatures - The array of features to be updated. These
            features should be common.Feature objects or a list of
            dictionary formed features.
        deleteFeatures - string of OIDs to remove from service or a list
            of values.
        gdbVersion - Geodatabase version to apply the edits.
        useGlobalIds - instead of referencing the default Object ID field,
            the service will look at a GUID field to track changes. This
            means the GUIDs will be passed instead of OIDs for delete,
            update or add features.
        rollbackOnFailure - Optional parameter to specify if the edits
            should be applied only if all submitted edits succeed. If
            false, the server will apply the edits that succeed even if
            some of the submitted edits fail. If true, the server will
            apply the edits only if all edits succeed. The default value
            is true.
        attachments - Optional parameter which requires the layer's
            supportsApplyEditsWithGlobalIds property to be true.
            Use the attachments parameter to add, update or delete
            attachments. Applies only when the useGlobalIds parameter is
            set to true. For adds, the globalIds of the attachments
            provided by the client are preserved. When useGlobalIds is
            true, updates and deletes are identified by each feature or
            attachment globalId rather than their objectId or
            attachmentId.
            Dictionary Format:
                {
                 "adds": [<attachment1>, <attachment2>],
                 "updates": [<attachment1>, <attachment2>],
                 "deletes": ["<attachmentGlobalId1>", "<attachmentGlobalId2>"]
                }
    Output:
        dictionary of messages
    """
    editURL = self._url + "/applyEdits"
    params = {"f": "json",
              "useGlobalIds": useGlobalIds,
              "rollbackOnFailure": rollbackOnFailure}
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if addFeatures is None:
        addFeatures = []
    if updateFeatures is None:
        updateFeatures = []
    if len(addFeatures) > 0 and \
       isinstance(addFeatures[0], Feature):
        params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
                                    default=_date_handler)
    elif len(addFeatures) > 0 and \
         isinstance(addFeatures[0], dict):
        params['adds'] = json.dumps(addFeatures, default=_date_handler)
    elif len(addFeatures) == 0:
        params['adds'] = json.dumps(addFeatures)
    if len(updateFeatures) > 0 and \
       isinstance(updateFeatures[0], Feature):
        params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
                                       default=_date_handler)
    elif len(updateFeatures) > 0 and \
         isinstance(updateFeatures[0], dict):
        params['updates'] = json.dumps(updateFeatures, default=_date_handler)
    elif updateFeatures is None or \
         len(updateFeatures) == 0:
        updateFeatures = json.dumps([])
    if deleteFeatures is not None and \
       isinstance(deleteFeatures, str):
        params['deletes'] = deleteFeatures
    elif deleteFeatures is not None and \
         isinstance(deleteFeatures, list):
        params['deletes'] = ",".join([str(f) for f in deleteFeatures])
    else:
        params['deletes'] = ""
    if attachments is None:
        params['attachments'] = ""
    else:
        params['attachments'] = attachments
    res = self._post(url=editURL,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    return res
0.00366
def run_miner_if_free(self):
    """TODO: docstring"""
    (address, username, password, device, tstart, tend) = read_config()
    if self.dtype == 0:
        self.run_miner_cmd = [
            cpu_miner_path, '-o', address, '-O',
            '{}:{}'.format(username, password)
        ]
    elif self.dtype == 1:
        # parse address -> scheme + netloc
        r = urlparse(address)
        # scheme://user[:password]@hostname:port
        url = '{}://{}:{}@{}'.format(r.scheme, username, password, r.netloc)
        # Cuda
        self.run_miner_cmd = [gpu_miner_path, '-P', url, '-U']

    if (len(self.run_miner_cmd) != 0):
        logger.info(' '.join(self.run_miner_cmd))
        # start if resource (cpu or gpu) is free
        if (self.is_device_free()):
            logger.info('start miner in another thread')
            self.run_cmd(self.run_miner_cmd)
0.002008
def field_types(self):
    """
    Access the field_types

    :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    """
    if self._field_types is None:
        self._field_types = FieldTypeList(self._version, assistant_sid=self._solution['sid'], )
    return self._field_types
0.007444
def _pack3(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes.

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom
                             type to a callable that packs an instance of
                             the type into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
        >>> f = open('test.bin', 'wb')
        >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
        >>>
    """
    global compatibility

    ext_handlers = options.get("ext_handlers")

    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, str):
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for superclass
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException(
            "unsupported type: %s" % str(type(obj)))
0.000401
def listsdm(sdm, file=None): """Generate a standard "listsdm" listing of(A)SDM dataset contents. sdm (str) The path to the (A)SDM dataset to parse file (stream-like object, such as an opened file) Where to print the human-readable listing. If unspecified, results go to :data:`sys.stdout`. Returns A dictionary of information about the dataset. Contents not yet documented. Example:: from pwkit.environments.casa import tasks tasks.listsdm('myalmaa.asdm') This code based on CASA's `task_listsdm.py`, with this version info:: # v1.0: 2010.12.07, M. Krauss # v1.1: 2011.02.23, M. Krauss: added functionality for ALMA data # # Original code based on readscans.py, courtesy S. Meyers """ from xml.dom import minidom import string def printf(fmt, *args): if len(args): s = fmt % args else: s = str(fmt) print(s, file=file) qa = util.tools.quanta() me = util.tools.measures() list_scans = True list_antennas = False list_fields = True list_spws = False # read Scan.xml xmlscans = minidom.parse(sdm+'/Scan.xml') scandict = {} startTimeShort = [] endTimeShort = [] rowlist = xmlscans.getElementsByTagName('row') for rownode in rowlist: rowfid = rownode.getElementsByTagName('scanNumber') fid = int(rowfid[0].childNodes[0].nodeValue) scandict[fid] = {} # number of subscans rowsubs = rownode.getElementsByTagName('numSubscan') if len(rowsubs) == 0: # EVLA and old ALMA data rowsubs = rownode.getElementsByTagName('numSubScan') nsubs = int(rowsubs[0].childNodes[0].nodeValue) # intents rownint = rownode.getElementsByTagName('numIntent') nint = int(rownint[0].childNodes[0].nodeValue) rowintents = rownode.getElementsByTagName('scanIntent') sint = str(rowintents[0].childNodes[0].nodeValue) sints = sint.split() rint = '' for r in range(nint): intent = sints[2+r] if rint=='': rint = intent else: rint += ' '+intent # start and end times in mjd ns rowstart = rownode.getElementsByTagName('startTime') start = int(rowstart[0].childNodes[0].nodeValue) startmjd = float(start)*1.0E-9/86400.0 t = b(qa.quantity(startmjd,b'd')) starttime = qa.time(t,form=b'ymd',prec=8)[0] startTimeShort.append(qa.time(t,prec=8)[0]) rowend = rownode.getElementsByTagName('endTime') end = int(rowend[0].childNodes[0].nodeValue) endmjd = float(end)*1.0E-9/86400.0 t = b(qa.quantity(endmjd,b'd')) endtime = qa.time(t,form=b'ymd',prec=8)[0] endTimeShort.append(qa.time(t,prec=8)[0]) # source name rowsrc = rownode.getElementsByTagName('sourceName') try: src = str(rowsrc[0].childNodes[0].nodeValue) except: src = '???' 
# PKGW scandict[fid]['start'] = starttime scandict[fid]['end'] = endtime timestr = starttime+'~'+endtime scandict[fid]['timerange'] = timestr scandict[fid]['source'] = src scandict[fid]['intent'] = rint scandict[fid]['nsubs'] = nsubs # read Main.xml xmlmain = minidom.parse(sdm+'/Main.xml') rowlist = xmlmain.getElementsByTagName('row') mainScanList = [] mainConfigList = [] fieldIdList = [] for rownode in rowlist: # get the scan numbers rowfid = rownode.getElementsByTagName('scanNumber') fid = int(rowfid[0].childNodes[0].nodeValue) mainScanList.append(fid) # get the configuration description rowconfig = rownode.getElementsByTagName('configDescriptionId') config = str(rowconfig[0].childNodes[0].nodeValue) mainConfigList.append(config) # get the field ID rowfieldid = rownode.getElementsByTagName('fieldId') fieldid = string.split(str(rowfieldid[0].childNodes[0].nodeValue), '_')[1] fieldIdList.append(fieldid) # read ConfigDescription.xml to relate the configuration # description to a(set) of data description IDs xmlconfig = minidom.parse(sdm+'/ConfigDescription.xml') rowlist = xmlconfig.getElementsByTagName('row') configDescList = [] dataDescList = [] for rownode in rowlist: # get the configuration description rowConfigDesc = rownode.getElementsByTagName('configDescriptionId') configDesc = str(rowConfigDesc[0].childNodes[0].nodeValue) configDescList.append(configDesc) # make a list of the data description IDs: rowNumDataDesc = rownode.getElementsByTagName('numDataDescription') numDataDesc = int(rowNumDataDesc[0].childNodes[0].nodeValue) rowDataDesc = rownode.getElementsByTagName('dataDescriptionId') dataDescStr = str(rowDataDesc[0].childNodes[0].nodeValue) dataDescSplit = dataDescStr.split() dataDesc = [] for i in range(numDataDesc): dataDesc.append(dataDescSplit[i+2]) dataDescList.append(dataDesc) # read DataDescription.xml to relate the data description IDs to # spectral window IDs xmlDataDesc = minidom.parse(sdm+'/DataDescription.xml') rowlist = xmlDataDesc.getElementsByTagName('row') dataDescElList = [] spwIdDataDescList = [] for rownode in rowlist: # get the data description ID, make another list: rowDataDescEl = rownode.getElementsByTagName('dataDescriptionId') dataDescEl = str(rowDataDescEl[0].childNodes[0].nodeValue) dataDescElList.append(dataDescEl) # get the related spectral window ID: rowSpwIdDataDesc = rownode.getElementsByTagName('spectralWindowId') spwIdDataDesc = str(rowSpwIdDataDesc[0].childNodes[0].nodeValue) spwIdDataDescList.append(spwIdDataDesc) # read SpectralWindow.xml, get information about number of # channels, reference frequency, baseband name, channel width. # Interesting that there seem to be multiple fields that give the # same information: chanFreqStart=reFreq, # chanFreqStep=chanWidth=resolution. Why?(Note: all units are Hz) # Note: this is where the script breaks for ALMA data, since there # are different tags in SpectraWindow.xml(for varying channel widths). 
xmlSpecWin = minidom.parse(sdm+'/SpectralWindow.xml') rowlist = xmlSpecWin.getElementsByTagName('row') spwIdList = [] nChanList = [] refFreqList = [] chanWidthList = [] basebandList = [] for rownode in rowlist: # get the various row values: rowSpwId = rownode.getElementsByTagName('spectralWindowId') rowNChan = rownode.getElementsByTagName('numChan') rowRefFreq = rownode.getElementsByTagName('refFreq') # For EVLA rowChanWidth = rownode.getElementsByTagName('chanWidth') # For ALMA rowChanWidthArr = rownode.getElementsByTagName('chanWidthArray') rowBaseband = rownode.getElementsByTagName('basebandName') # convert to values or strings and append to the relevant lists: spwId = str(rowSpwId[0].childNodes[0].nodeValue) spwIdList.append(spwId) nChan = int(rowNChan[0].childNodes[0].nodeValue) nChanList.append(nChan) refFreq = float(rowRefFreq[0].childNodes[0].nodeValue) refFreqList.append(refFreq) if rowChanWidth: chanWidth = float(rowChanWidth[0].childNodes[0].nodeValue) chanWidthList.append(chanWidth) if rowChanWidthArr: tmpArr = str(rowChanWidthArr[0].childNodes[0].nodeValue).split(' ') tmpWidth = [] for cw in range(2, len(tmpArr)): thisWidth = float(tmpArr[cw]) tmpWidth.append(thisWidth) chanWidthList.append(tmpWidth) baseband = str(rowBaseband[0].childNodes[0].nodeValue) basebandList.append(baseband) # read Field.xml xmlField = minidom.parse(sdm+'/Field.xml') rowlist = xmlField.getElementsByTagName('row') fieldList = [] fieldNameList = [] fieldCodeList = [] fieldRAList = [] fieldDecList = [] fieldSrcIDList = [] for rownode in rowlist: rowField = rownode.getElementsByTagName('fieldId') rowName = rownode.getElementsByTagName('fieldName') rowCode = rownode.getElementsByTagName('code') rowCoords = rownode.getElementsByTagName('referenceDir') rowSrcId = rownode.getElementsByTagName('sourceId') # convert to values or strings and append to the relevant lists: fieldList.append(int(str(rowField[0].childNodes[0].nodeValue).split('_')[1])) fieldNameList.append(str(rowName[0].childNodes[0].nodeValue)) fieldCodeList.append(str(rowCode[0].childNodes[0].nodeValue)) coordInfo = rowCoords[0].childNodes[0].nodeValue.split() RADeg = float(coordInfo[3]) * (180.0/np.pi) DecDeg = float(coordInfo[4]) * (180.0/np.pi) RAInp = {'unit': 'deg', 'value': RADeg} DecInp = {'unit': 'deg', 'value': DecDeg} RAHMS = b(qa.formxxx(b(RAInp), format=b'hms')) DecDMS = b(qa.formxxx(b(DecInp), format=b'dms')) fieldRAList.append(RAHMS) fieldDecList.append(DecDMS) fieldSrcIDList.append(int(rowSrcId[0].childNodes[0].nodeValue)) # read Antenna.xml xmlAnt = minidom.parse(sdm+'/Antenna.xml') rowlist = xmlAnt.getElementsByTagName('row') antList = [] antNameList = [] dishDiamList = [] stationList = [] for rownode in rowlist: rowAnt = rownode.getElementsByTagName('antennaId') rowAntName = rownode.getElementsByTagName('name') rowDishDiam = rownode.getElementsByTagName('dishDiameter') rowStation = rownode.getElementsByTagName('stationId') # convert and append antList.append(int(str(rowAnt[0].childNodes[0].nodeValue).split('_')[1])) antNameList.append(str(rowAntName[0].childNodes[0].nodeValue)) dishDiamList.append(float(rowDishDiam[0].childNodes[0].nodeValue)) stationList.append(str(rowStation[0].childNodes[0].nodeValue)) # read Station.xml xmlStation = minidom.parse(sdm+'/Station.xml') rowlist = xmlStation.getElementsByTagName('row') statIdList = [] statNameList = [] statLatList = [] statLonList = [] for rownode in rowlist: rowStatId = rownode.getElementsByTagName('stationId') rowStatName = rownode.getElementsByTagName('name') 
rowStatPos = rownode.getElementsByTagName('position') # convert and append statIdList.append(str(rowStatId[0].childNodes[0].nodeValue)) statNameList.append(str(rowStatName[0].childNodes[0].nodeValue)) posInfo = str(rowStatPos[0].childNodes[0].nodeValue).split() x = b(qa.quantity([float(posInfo[2])], b'm')) y = b(qa.quantity([float(posInfo[3])], b'm')) z = b(qa.quantity([float(posInfo[4])], b'm')) pos = b(me.position(b'ITRF', x, y, z)) qLon = pos['m0'] qLat = pos['m1'] statLatList.append(qa.formxxx(qLat, b'dms', prec=0)) statLonList.append(qa.formxxx(qLon, b'dms', prec=0)) # associate antennas with stations: assocStatList = [] for station in stationList: i = np.where(np.array(statIdList) == station)[0][0] assocStatList.append(statNameList[i]) # read ExecBlock.xml xmlExecBlock = minidom.parse(sdm+'/ExecBlock.xml') rowlist = xmlExecBlock.getElementsByTagName('row') sTime = float(rowlist[0].getElementsByTagName('startTime')[0].childNodes[0].nodeValue)*1.0E-9 eTime = float(rowlist[0].getElementsByTagName('endTime')[0].childNodes[0].nodeValue)*1.0E-9 # integration time in seconds, start and end times: intTime = eTime - sTime t = b(qa.quantity(sTime/86400.0, b'd')) obsStart = qa.time(t, form=b'ymd', prec=8)[0] t = b(qa.quantity(eTime/86400.0, b'd')) obsEnd = qa.time(t, form=b'ymd', prec=8)[0] # observer name and obs. info: observerName = str(rowlist[0].getElementsByTagName('observerName')[0].childNodes[0].nodeValue) configName = str(rowlist[0].getElementsByTagName('configName')[0].childNodes[0].nodeValue) telescopeName = str(rowlist[0].getElementsByTagName('telescopeName')[0].childNodes[0].nodeValue) numAntenna = int(rowlist[0].getElementsByTagName('numAntenna')[0].childNodes[0].nodeValue) # make lists like the dataDescList for spectral windows & related info: spwOrd = [] nChanOrd = [] rFreqOrd = [] cWidthOrd = [] bbandOrd = [] for i in range(0, len(configDescList)): spwTempList = [] nChanTempList = [] rFreqTempList = [] cWidthTempList = [] bbandTempList = [] for dDesc in dataDescList[i]: el = np.where(np.array(dataDescElList) == dDesc)[0][0] spwIdN = spwIdDataDescList[el] spwEl = np.where(np.array(spwIdList) == spwIdN)[0][0] spwTempList.append(int(spwIdList[spwEl].split('_')[1])) nChanTempList.append(nChanList[spwEl]) rFreqTempList.append(refFreqList[spwEl]) cWidthTempList.append(chanWidthList[spwEl]) bbandTempList.append(basebandList[spwEl]) spwOrd.append(spwTempList) nChanOrd.append(nChanTempList) rFreqOrd.append(rFreqTempList) cWidthOrd.append(cWidthTempList) bbandOrd.append(bbandTempList) # add this info to the scan dictionary: for scanNum in scandict: spwOrdList = [] nChanOrdList = [] rFreqOrdList = [] cWidthOrdList = [] bbandOrdList = [] # scanEl could have multiple elements if subscans are present, # or for ALMA data: scanEl = np.where(np.array(mainScanList) == scanNum)[0] for thisEl in scanEl: configEl = mainConfigList[thisEl] listEl = np.where(np.array(configDescList) == configEl)[0][0] spwOrdList.append(spwOrd[listEl]) nChanOrdList.append(nChanOrd[listEl]) rFreqOrdList.append(rFreqOrd[listEl]) cWidthOrdList.append(cWidthOrd[listEl]) bbandOrdList.append(bbandOrd[listEl]) try: scandict[scanNum]['field'] = int(fieldIdList[scanEl[0]]) except Exception: scandict[scanNum]['field'] = -1 # PKGW scandict[scanNum]['spws'] = spwOrdList scandict[scanNum]['nchan'] = nChanOrdList scandict[scanNum]['reffreq'] = rFreqOrdList scandict[scanNum]['chanwidth'] = cWidthOrdList scandict[scanNum]['baseband'] = bbandOrdList # report information to the logger 
printf('================================================================================') printf(' SDM File: %s', sdm) printf('================================================================================') printf(' Observer: %s', observerName) printf(' Facility: %s, %s-configuration', telescopeName, configName) printf(' Observed from %s to %s (UTC)', obsStart, obsEnd) printf(' Total integration time = %.2f seconds (%.2f hours)', intTime, intTime / 3600) if list_scans: printf(' ') printf('Scan listing:') maxspwlen = 0 for scaninfo in scandict.values(): SPWs = [] for spw in scaninfo['spws']: SPWs += spw scaninfo['spwstr'] = str(list(set(SPWs))) maxspwlen = max(maxspwlen, len(scaninfo['spwstr'])) fmt = ' %-25s %-4s %-5s %-15s %-*s %s' printf(fmt, 'Timerange (UTC)', 'Scan', 'FldID', 'FieldName', maxspwlen, 'SpwIDs', 'Intent(s)') for i, (scanid, scaninfo) in enumerate(scandict.items()): printf(fmt, startTimeShort[i] + ' - ' + endTimeShort[i], scanid, scaninfo['field'], scaninfo['source'], maxspwlen, scaninfo['spwstr'], scaninfo['intent']) if list_spws: printf(' ') printf('Spectral window information:') printf(' SpwID #Chans Ch0(MHz) ChWidth(kHz) TotBW(MHz) Baseband') for i in range(0, len(spwIdList)): printf(' %s %s %s %s %s %s', spwIdList[i].split('_')[1].ljust(4), str(nChanList[i]).ljust(4), str(refFreqList[i]/1e6).ljust(8), str(np.array(chanWidthList[i])/1e3).ljust(8), str(np.array(chanWidthList[i])*nChanList[i]/1e6).ljust(8), basebandList[i].ljust(8)) if list_fields: printf(' ') printf('Field information:') printf(' FldID Code Name RA Dec SrcID') for i in range(0, len(fieldList)): printf(' %-6d %-6s %-15s %-13s %-15s %-5d', fieldList[i], fieldCodeList[i], fieldNameList[i], fieldRAList[i], fieldDecList[i], fieldSrcIDList[i]) if list_antennas: printf(' ') printf('Antennas (%i):' % len(antList)) printf(' ID Name Station Diam. (m) Lat. Long.') for i in range(0, len(antList)): printf(' %s %s %s %s %s %s ', str(antList[i]).ljust(5), antNameList[i].ljust(6), assocStatList[i].ljust(5), str(dishDiamList[i]).ljust(5), statLatList[i].ljust(12), statLonList[i].ljust(12)) # return the scan dictionary return scandict
0.002604
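All the SDM tables above are parsed with one minidom idiom: collect every 'row' element, then read the first text node of each named child. A minimal standalone sketch of that pattern, using a made-up two-row table in place of a real Scan.xml:

from xml.dom import minidom

# Hypothetical stand-in for an SDM table such as Scan.xml.
xml_text = """<ScanTable>
  <row><scanNumber>1</scanNumber><sourceName>3C286</sourceName></row>
  <row><scanNumber>2</scanNumber><sourceName>J1331+305</sourceName></row>
</ScanTable>"""

doc = minidom.parseString(xml_text)
for rownode in doc.getElementsByTagName('row'):
    fid = int(rownode.getElementsByTagName('scanNumber')[0].childNodes[0].nodeValue)
    src = rownode.getElementsByTagName('sourceName')[0].childNodes[0].nodeValue
    print(fid, src)  # -> "1 3C286" then "2 J1331+305"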
def ReportConfiguration(self, file): """ Report configuration for logging purposes. :param file: Destination for report details :return: None """ print(BuildReportLine("PED FILE", self.datasource), file=file) print(BuildReportLine("MAP FILE", self.mapfile), file=file)
0.006431
def exclusive(via=threading.Lock): """ Mark a callable as exclusive :param via: factory for a Lock to guard the callable Guards the callable against being entered again before completion. Explicitly raises a :py:exc:`RuntimeError` on violation. :note: If applied to a method, it is exclusive across all instances. """ def make_exclusive(fnc): fnc_guard = via() @functools.wraps(fnc) def exclusive_call(*args, **kwargs): if fnc_guard.acquire(blocking=False): try: return fnc(*args, **kwargs) finally: fnc_guard.release() else: raise RuntimeError('exclusive call to %s violated' % fnc) return exclusive_call return make_exclusive
0.001242
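A usage sketch for the exclusive() decorator above: a second call made while the guard is still held raises RuntimeError rather than re-entering. The worker function here is hypothetical.

import threading
import time

@exclusive(via=threading.Lock)
def refresh_cache():
    time.sleep(0.5)  # stand-in for slow, non-reentrant work
    return 'done'

worker = threading.Thread(target=refresh_cache)
worker.start()
time.sleep(0.1)
try:
    refresh_cache()  # the lock is still held by the worker thread
except RuntimeError as exc:
    print(exc)
worker.join()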
def evict(self, urls): """Remove items from cache matching URLs. Return the number of items removed. """ if isinstance(urls, six.text_type): urls = [urls] urls = set(normalize_url(url) for url in urls) retval = 0 for key in list(self.cache): if key[0] in urls: retval += 1 del self.cache[key] del self.timeouts[key] return retval
0.004292
def FromDict(cls, obj): """Create an IOTileEvent from the result of a previous call to asdict(). Args: obj (dict): A dictionary produced by a call to IOTileEvent.asdict() Returns: IOTileEvent: The converted IOTileEvent object. """ timestamp = obj.get('timestamp') if timestamp is not None: import dateutil.parser timestamp = dateutil.parser.parse(timestamp) return IOTileEvent(obj.get('device_timestamp'), obj.get('stream'), obj.get('extra_data'), obj.get('data'), reading_id=obj.get('streamer_local_id'), reading_time=timestamp)
0.00753
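A round-trip sketch for FromDict(): the keys mirror the dictionary reads in the method above, and the ISO-8601 timestamp is parsed back into a datetime via dateutil. All field values are invented for illustration.

event_dict = {
    'device_timestamp': 120,
    'stream': 0x5001,           # hypothetical stream id
    'extra_data': {'count': 3},
    'data': [1.0, 2.0, 3.0],
    'streamer_local_id': 42,
    'timestamp': '2018-01-01T12:00:00Z',
}
event = IOTileEvent.FromDict(event_dict)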
def get_list(self, list_name, options=None): """ Get detailed metadata information about a list. """ options = options or {} data = {'list': list_name} data.update(options) return self.api_get('list', data)
0.007634
def uniq_by_id(self, records): """Only the first record for each id""" uniq = [] keys = set() for rec in records: rec_id = rec[self._id_field] if rec_id not in keys: uniq.append(rec) keys.add(rec_id) return uniq
0.006515
def rgb_to_hsl(r, g, b): """ Converts an RGB color value to HSL. :param r: The red color value :param g: The green color value :param b: The blue color value :return: The HSL representation """ r = float(r) / 255.0 g = float(g) / 255.0 b = float(b) / 255.0 max_value = max(r, g, b) min_value = min(r, g, b) h = None s = None l = (max_value + min_value) / 2 d = max_value - min_value if d == 0: # achromatic h = 0 s = 0 else: s = d / (1 - abs(2 * l - 1)) if r == max_value: # the % 6 wraps negative differences, so no extra +360 step is needed h = 60 * (((g - b) / d) % 6) if g == max_value: h = 60 * ((b - r) / d + 2) if b == max_value: h = 60 * ((r - g) / d + 4) return round(h, 2), round(s, 2), round(l, 2)
0.002347
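A worked example for rgb_to_hsl() above, using the saturated blue #0080FF: max = 1.0 (blue) and min = 0.0, so l = 0.5, d = 1.0 and s = 1.0; the hue comes from the blue branch, h = 60 * ((r - g) / d + 4) = 60 * (4 - 128/255) ≈ 209.88 degrees.

print(rgb_to_hsl(0, 128, 255))  # -> (209.88, 1.0, 0.5)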
def retention_period(self, value): """Set the retention period for items in the bucket. :type value: int :param value: number of seconds to retain items after upload or release from event-based lock. :raises ValueError: if the bucket's retention policy is locked. """ policy = self._properties.setdefault("retentionPolicy", {}) if value is not None: policy["retentionPeriod"] = str(value) else: policy = None self._patch_property("retentionPolicy", policy)
0.00346
def add(envelope): """ Take a dict-like fedmsg envelope and store the headers and message in the table. """ message = envelope['body'] timestamp = message.get('timestamp', None) try: if timestamp: timestamp = datetime.datetime.utcfromtimestamp(timestamp) else: timestamp = datetime.datetime.utcnow() except Exception: pass headers = envelope.get('headers', None) msg_id = message.get('msg_id', None) if not msg_id and headers: msg_id = headers.get('message-id', None) if not msg_id: msg_id = six.text_type(timestamp.year) + six.u('-') + six.text_type(uuid.uuid4()) obj = Message( i=message.get('i', 0), msg_id=msg_id, topic=message['topic'], timestamp=timestamp, username=message.get('username', None), crypto=message.get('crypto', None), certificate=message.get('certificate', None), signature=message.get('signature', None), ) obj.msg = message['msg'] obj.headers = headers try: session.add(obj) session.flush() except IntegrityError: log.warning('Skipping message from %s with duplicate id: %s', message['topic'], msg_id) session.rollback() return usernames = fedmsg.meta.msg2usernames(message) packages = fedmsg.meta.msg2packages(message) # Do a little sanity checking on fedmsg.meta results if None in usernames: # Notify developers so they can fix msg2usernames log.error('NoneType found in usernames of %r' % msg_id) # And prune out the bad value usernames = [name for name in usernames if name is not None] if None in packages: # Notify developers so they can fix msg2packages log.error('NoneType found in packages of %r' % msg_id) # And prune out the bad value packages = [pkg for pkg in packages if pkg is not None] # If we've never seen one of these users before, then: # 1) make sure they exist in the db (create them if necessary) # 2) mark an in memory cache so we can remember that they exist without # having to hit the db. for username in usernames: if username not in _users_seen: # Create the user in the DB if necessary User.get_or_create(username) # Then just mark an in memory cache noting that we've seen them. _users_seen.add(username) for package in packages: if package not in _packages_seen: Package.get_or_create(package) _packages_seen.add(package) session.flush() # These two blocks would normally be a simple "obj.users.append(user)" kind # of statement, but here we drop down out of sqlalchemy's ORM and into the # sql abstraction in order to gain a little performance boost. values = [{'username': username, 'msg': obj.id} for username in usernames] if values: session.execute(user_assoc_table.insert(), values) values = [{'package': package, 'msg': obj.id} for package in packages] if values: session.execute(pack_assoc_table.insert(), values) # TODO -- can we avoid committing every time? session.flush() session.commit()
0.000608
def p_Revisions(self, p): """Revisions : Revisions Revision | Revision""" n = len(p) if n == 3: p[0] = ('Revisions', p[1][1] + [p[2]]) elif n == 2: p[0] = ('Revisions', [p[1]])
0.007905
def insert(self, x1, x2, name='', referedObject=None): """Insert the segment in its right place and return it. If there's already a segment S such that S.x1 == x1 and S.x2 == x2, S.name will be changed to 'S.name U name' and the referedObject will be appended to the already existing list""" if referedObject is None: referedObject = [] if x1 > x2: xx1, xx2 = x2, x1 else: xx1, xx2 = x1, x2 rt = None insertId = None childrenToRemove = [] for i in range(len(self.children)): if self.children[i].x1 == xx1 and xx2 == self.children[i].x2: self.children[i].name = self.children[i].name + ' U ' + name self.children[i].referedObject.append(referedObject) return self.children[i] if self.children[i].x1 <= xx1 and xx2 <= self.children[i].x2: return self.children[i].insert(x1, x2, name, referedObject) elif xx1 <= self.children[i].x1 and self.children[i].x2 <= xx2: if rt is None: if isinstance(referedObject, list): rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level+1) else: rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level+1) insertId = i rt.__addChild(self.children[i]) self.children[i].father = rt childrenToRemove.append(self.children[i]) elif xx1 <= self.children[i].x1 and xx2 <= self.children[i].x2: insertId = i break if rt is not None: self.__addChild(rt, insertId) for c in childrenToRemove: self.children.remove(c) else: if isinstance(referedObject, list): rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level+1) else: rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level+1) if insertId is not None: self.__addChild(rt, insertId) else: self.__addChild(rt) return rt
0.051897
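A hedged usage sketch for SegmentTree.insert(); the constructor signature (x1, x2, name, referedObject, father, level) is inferred from the recursive calls inside the method and is not confirmed by the snippet.

root = SegmentTree(0, 100, 'root', [], None, 0)
exon = root.insert(10, 40, 'exon1', referedObject='gene-A')
utr = root.insert(15, 20, 'utr', referedObject='gene-A')  # nested inside exon1
assert utr.father is exon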
def are_equivalent(*args, **kwargs): """Indicate if arguments passed to this function are equivalent. .. hint:: This checker operates recursively on the members contained within iterables and :class:`dict <python:dict>` objects. .. caution:: If you only pass one argument to this checker - even if it is an iterable - the checker will *always* return ``True``. To evaluate members of an iterable for equivalence, you should instead unpack the iterable into the function like so: .. code-block:: python obj = [1, 1, 1, 2] result = are_equivalent(*obj) # Will return ``False`` by unpacking and evaluating the iterable's members result = are_equivalent(obj) # Will always return True :param args: One or more values, passed as positional arguments. :returns: ``True`` if ``args`` are equivalent, and ``False`` if not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ if len(args) == 1: return True first_item = args[0] for item in args[1:]: if type(item) != type(first_item): # pylint: disable=C0123 return False if isinstance(item, dict): if not are_dicts_equivalent(item, first_item): return False elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)): if len(item) != len(first_item): return False for value in item: if value not in first_item: return False for value in first_item: if value not in item: return False else: if item != first_item: return False return True
0.003597
def readString(self): """ Reads and returns a string from the stream. """ length, is_reference = self._readLength() if is_reference: result = self.context.getString(length) return self.context.getStringForBytes(result) if length == 0: return '' result = self.stream.read(length) self.context.addString(result) return self.context.getStringForBytes(result)
0.004274
def getOverlayErrorNameFromEnum(self, error): """ returns a string that corresponds with the specified overlay error. The string will be the name of the error enum value for all valid error codes """ fn = self.function_table.getOverlayErrorNameFromEnum result = fn(error) return result
0.011662
def mahalanobis(self): """ Mahalanobis distance of measurement. E.g. 3 means the measurement was 3 standard deviations away from the predicted value. Returns ------- mahalanobis : float """ if self._mahalanobis is None: self._mahalanobis = sqrt(float(dot(dot(self.y.T, self.SI), self.y))) return self._mahalanobis
0.007595
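A numeric sketch of the quantity computed above, sqrt(y' S^-1 y) for residual y and innovation covariance S, written out with plain numpy:

import numpy as np

y = np.array([[2.0], [0.0]])             # residual (column vector)
S = np.array([[4.0, 0.0], [0.0, 1.0]])   # innovation covariance
SI = np.linalg.inv(S)
d = np.sqrt(float(y.T @ SI @ y))
print(d)  # -> 1.0, i.e. one standard deviation from the prediction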
def set_name_filters(self, name_filters): """Set name filters""" self.name_filters = name_filters self.fsmodel.setNameFilters(name_filters)
0.012048
def has_tokens(self, phrase): """ Checks if a phrase or sub-phrase exists in the tree. If the set of phrases contains phrases such as "state", "of the" and "state of the art", then looking up "state" returns true, "of" returns null, and "of the art" returns false. :param phrase: Phrase or sub-phrase to look up. :type phrase: list of str :return: Returns true if the phrase in its entirety is in the tree, null if the phrase matches the beginning of a longer token sequence, and false if the phrase neither matches another phrase entirely nor is part of any longer phrase. """ if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]): return True tree = self.root for token in phrase: if not tree.has_child(token): return False tree = tree.get_child(token) return True if tree.is_end_of_phrase() else None
0.005252
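A minimal trie sketch matching the node interface that has_tokens() assumes (has_child / get_child / is_end_of_phrase); the real tree class is not shown in the snippet.

class Node:
    def __init__(self):
        self.children = {}
        self.end = False

    def has_child(self, tok):
        return tok in self.children

    def get_child(self, tok):
        return self.children[tok]

    def is_end_of_phrase(self):
        return self.end

def add_phrase(root, phrase):
    node = root
    for tok in phrase:
        node = node.children.setdefault(tok, Node())
    node.end = True

root = Node()
for p in (['state'], ['of', 'the'], ['state', 'of', 'the', 'art']):
    add_phrase(root, p)
# 'of' matches only a prefix of 'of the' -> None from has_tokens();
# 'of the art' fails on 'art' (it only exists under 'state of the') -> False.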
def get_hyperparams_dict(self, id, display=True): """ Derive and return the model parameters used to train the particular grid search model. :param str id: The model id of the model with hyperparameters of interest. :param bool display: Flag to indicate whether to display the hyperparameter names. :returns: A dict of model parameters derived from the hyper-parameters used to train this particular model. """ idx = id if is_type(id, int) else self.model_ids.index(id) model = self[idx] model_params = dict() # if cross-validation is turned on, it is one of the fold models that actually contains the # max_runtime_secs parameter, not the main model that is returned. if model._is_xvalidated: model = h2o.get_model(model._xval_keys[0]) for param_name in self.hyper_names: model_params[param_name] = model.params[param_name]['actual'][0] if \ isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual'] if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']') return model_params
0.009031
def display_event(div, attributes=[]): """ Function to build a suitable CustomJS to display the current event in the div model. """ style = 'float: left; clear: left; font-size: 10pt' return CustomJS(args=dict(div=div), code=""" var attrs = %s; var args = []; for (var i = 0; i<attrs.length; i++ ) { var val = JSON.stringify(cb_obj[attrs[i]], function(key, val) { return val.toFixed ? Number(val.toFixed(2)) : val; }) args.push(attrs[i] + '=' + val) } var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n"; var text = div.text.concat(line); var lines = text.split("\\n") if (lines.length > 35) lines.shift(); div.text = lines.join("\\n"); """ % (attributes, style))
0.002296
def stop(self): """Stops read loop and closes socket if it has been created. """ self._running = False if self._socket is None: return try: self._socket.shutdown(socket.SHUT_RDWR) self._socket.close() except socket.error: pass self._socket = None
0.005698
def resolve_peer(self, peer_id: Union[int, str]): """Use this method to get the InputPeer of a known peer_id. This is a utility method intended to be used **only** when working with Raw Functions (i.e., a Telegram API method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an InputPeer type is required. Args: peer_id (``int`` | ``str``): The peer id you want to extract the InputPeer from. Can be a direct id (int), a username (str) or a phone number (str). Returns: On success, the resolved peer id is returned in the form of an InputPeer object. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``KeyError`` in case the peer doesn't exist in the internal database. """ try: return self.peers_by_id[peer_id] except KeyError: if type(peer_id) is str: if peer_id in ("self", "me"): return types.InputPeerSelf() peer_id = re.sub(r"[@+\s]", "", peer_id.lower()) try: int(peer_id) except ValueError: if peer_id not in self.peers_by_username: self.send( functions.contacts.ResolveUsername( username=peer_id ) ) return self.peers_by_username[peer_id] else: try: return self.peers_by_phone[peer_id] except KeyError: raise PeerIdInvalid if peer_id > 0: self.fetch_peers( self.send( functions.users.GetUsers( id=[types.InputUser(user_id=peer_id, access_hash=0)] ) ) ) else: if str(peer_id).startswith("-100"): self.send( functions.channels.GetChannels( id=[types.InputChannel(channel_id=int(str(peer_id)[4:]), access_hash=0)] ) ) else: self.send( functions.messages.GetChats( id=[-peer_id] ) ) try: return self.peers_by_id[peer_id] except KeyError: raise PeerIdInvalid
0.004035
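A small sketch of the peer-id arithmetic used above: channel ids carry a "-100" prefix, while basic group chats are plain negated ids.

peer_id = -1001234567890
if str(peer_id).startswith("-100"):
    channel_id = int(str(peer_id)[4:])  # -> 1234567890
else:
    chat_id = -peer_id                  # basic groups are negated ids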
def tag(self, name=None): """Create and list tag objects running git-tag command""" command = ["git", "tag"] if not name: command.extend( [ "-l", "--sort=creatordate", "--format=%(creatordate:short)%09%(refname:strip=2)", ] ) command_output = _run_command(command).strip() if command_output == "": return [] tag_text_list = command_output.split("\n") tag_list = [Tag(text) for text in tag_text_list] return list(reversed(tag_list)) command.extend(["-a", name, "-m", '""']) return _run_command(command)
0.002703
def potential_from_grid(self, grid): """ Calculate the potential at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the potential is computed on. """ eta = self.grid_to_elliptical_radii(grid) return 2.0 * self.einstein_radius_rescaled * eta
0.009685
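A hedged numeric sketch of the potential returned above, phi = 2 * einstein_radius_rescaled * eta; the rescaled Einstein radius and the elliptical radii here are made-up values.

import numpy as np

einstein_radius_rescaled = 0.8
eta = np.array([0.5, 1.0, 2.0])  # elliptical radii of the grid points
phi = 2.0 * einstein_radius_rescaled * eta
print(phi)  # -> [0.8 1.6 3.2]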
def _load_model(self): """ Loads the arena and the objects of interest. """ super()._load_model() self.mujoco_robot.set_base_xpos([0, 0, 0]) # load model for table top workspace self.mujoco_arena = TableArena( table_full_size=self.table_full_size, table_friction=self.table_friction ) if self.use_indicator_object: self.mujoco_arena.add_pos_indicator() # The sawyer robot has a pedestal, we want to align it with the table self.mujoco_arena.set_origin([0.45 + self.table_full_size[0] / 2, 0, 0]) # task includes arena, robot, and objects of interest self.model = TableTopTask( self.mujoco_arena, self.mujoco_robot, self.mujoco_objects, self.object_initializer, ) self.model.place_objects()
0.004592
def export_urdf(mesh, directory, scale=1.0, color=[0.75, 0.75, 0.75], **kwargs): """ Convert a Trimesh object into a URDF package for physics simulation. This breaks the mesh into convex pieces and writes them to the same directory as the .urdf file. Parameters --------- mesh : Trimesh object directory : str The directory path for the URDF package Returns --------- mesh : Trimesh object Multi-body mesh containing convex decomposition """ import lxml.etree as et # TODO: fix circular import from .export import export_mesh # Extract the save directory and the file name fullpath = os.path.abspath(directory) name = os.path.basename(fullpath) _, ext = os.path.splitext(name) if ext != '': raise ValueError('URDF path must be a directory!') # Create directory if needed if not os.path.exists(fullpath): os.mkdir(fullpath) elif not os.path.isdir(fullpath): raise ValueError('URDF path must be a directory!') # Perform a convex decomposition try: convex_pieces = convex_decomposition(mesh, **kwargs) if not isinstance(convex_pieces, list): convex_pieces = [convex_pieces] except BaseException: log.error('problem with convex decomposition, using hull', exc_info=True) convex_pieces = [mesh.convex_hull] # Get the effective density of the mesh effective_density = mesh.volume / sum([ m.volume for m in convex_pieces]) # open an XML tree root = et.Element('robot', name='root') # Loop through all pieces, adding each as a link prev_link_name = None for i, piece in enumerate(convex_pieces): # Save each nearly convex mesh out to a file piece_name = '{}_convex_piece_{}'.format(name, i) piece_filename = '{}.obj'.format(piece_name) piece_filepath = os.path.join(fullpath, piece_filename) export_mesh(piece, piece_filepath) # Set the mass properties of the piece piece.center_mass = mesh.center_mass piece.density = effective_density * mesh.density link_name = 'link_{}'.format(piece_name) geom_name = '{}'.format(piece_filename) I = [['{:.2E}'.format(y) for y in x] # NOQA for x in piece.moment_inertia] # Write the link out to the XML Tree link = et.SubElement(root, 'link', name=link_name) # Inertial information inertial = et.SubElement(link, 'inertial') et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0") et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass)) et.SubElement( inertial, 'inertia', ixx=I[0][0], ixy=I[0][1], ixz=I[0][2], iyy=I[1][1], iyz=I[1][2], izz=I[2][2]) # Visual Information visual = et.SubElement(link, 'visual') et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(visual, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, scale="{:.4E} {:.4E} {:.4E}".format(scale, scale, scale)) material = et.SubElement(visual, 'material', name='') et.SubElement(material, 'color', rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0], color[1], color[2])) # Collision Information collision = et.SubElement(link, 'collision') et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(collision, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, scale="{:.4E} {:.4E} {:.4E}".format(scale, scale, scale)) # Create rigid joint to previous link if prev_link_name is not None: joint_name = '{}_joint'.format(link_name) joint = et.SubElement(root, 'joint', name=joint_name, type='fixed') et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0") et.SubElement(joint, 'parent', link=prev_link_name) et.SubElement(joint, 'child', link=link_name) prev_link_name = link_name # Write URDF file tree = et.ElementTree(root) urdf_filename = 
'{}.urdf'.format(name) tree.write(os.path.join(fullpath, urdf_filename), pretty_print=True) # Write Gazebo config file root = et.Element('model') model = et.SubElement(root, 'name') model.text = name version = et.SubElement(root, 'version') version.text = '1.0' sdf = et.SubElement(root, 'sdf', version='1.4') sdf.text = '{}.urdf'.format(name) author = et.SubElement(root, 'author') et.SubElement(author, 'name').text = 'trimesh {}'.format(trimesh_version) et.SubElement(author, 'email').text = '[email protected]' description = et.SubElement(root, 'description') description.text = name tree = et.ElementTree(root) tree.write(os.path.join(fullpath, 'model.config')) return np.sum(convex_pieces)
0.000354
def get_contract_factory(self, name: ContractName) -> Contract: """ Return the contract factory for a given contract type, generated from the data available in ``Package.manifest``. Contract factories are accessible from the package class. .. code:: python Owned = OwnedPackage.get_contract_factory('owned') In cases where a contract uses a library, the contract factory will have unlinked bytecode. The ``ethpm`` package ships with its own subclass of ``web3.contract.Contract``, ``ethpm.contract.LinkableContract`` with a few extra methods and properties related to bytecode linking. .. code:: python >>> math = owned_package.contract_factories.math >>> math.needs_bytecode_linking True >>> linked_math = math.link_bytecode({'MathLib': '0x1234...'}) >>> linked_math.needs_bytecode_linking False """ validate_contract_name(name) if "contract_types" not in self.manifest: raise InsufficientAssetsError( "This package does not contain any contract type data." ) try: contract_data = self.manifest["contract_types"][name] except KeyError: raise InsufficientAssetsError( "This package does not contain any package data to generate " f"a contract factory for contract type: {name}. Available contract types include: " f"{ list(self.manifest['contract_types'].keys()) }." ) validate_minimal_contract_factory_data(contract_data) contract_kwargs = generate_contract_factory_kwargs(contract_data) contract_factory = self.w3.eth.contract(**contract_kwargs) return contract_factory
0.00384
def p_NsContentNameAsId(p): ''' NsContentNameAsId : DefOrConstModifier NsContentName | DefOrConstModifier NsContentName AS INDENTIFIER ''' if len(p) <= 3: p[0] = NsContentNameAsId(p[1], p[2], None) else: p[0] = NsContentNameAsId(p[1], p[2], p[4])
0.003289
def set_images(self, text, parse_html=True): """ set_images: Replace image strings with downloaded image checksums Args: text (str): text to parse for image strings Returns: string with checksums in place of image strings and a list of files that were downloaded from the string """ # Set up return values and regex file_list = [] if parse_html: processed_string = self.parse_html(text) else: processed_string = text reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE) matches = reg.findall(processed_string) # Parse all matches for match in matches: file_result = self.set_image(match[1]) if file_result[0] != "": replacement, new_files = file_result processed_string = processed_string.replace(match[1], replacement) file_list += new_files return processed_string, file_list
0.002947
def load_csv(self): """ Load old benchmark results from CSV. """ if path.exists(self.csv_filepath): self.results = self.results.append( pandas.read_csv(self.csv_filepath))
0.009302
def get_segments_intersections(segment1, segment2): """Return at least one point in a list where segments intersect if an intersection exists. Otherwise, return an empty list. >>> get_segments_intersections(LineSegment(Point(0,0), Point(1,0)), \ LineSegment(Point(0,0), Point(1,0))) [Point(0,0)] """ dx1 = segment1.p2.x - segment1.p1.x dy1 = segment1.p2.y - segment1.p1.y dx2 = segment2.p2.x - segment2.p1.x dy2 = segment2.p2.y - segment2.p1.y delta = dx2 * dy1 - dy2 * dx1 if delta == 0: # parallel segments # Line segments could be (partially) identical. # In that case this function should return True. if dx1 == 0 and dy1 == 0: # segment1 is a point point = segment1.p1 if segment2.p1.x == point.x and segment2.p1.y == point.y: return [Point(point.x, point.y)] elif segment2.p2.x == point.x and segment2.p2.y == point.y: return [Point(point.x, point.y)] else: return [] elif dx2 == 0 and dy2 == 0: # segment2 is a point point = segment2.p1 if segment1.p1.x == point.x and segment1.p1.y == point.y: return [Point(point.x, point.y)] elif segment1.p2.x == point.x and segment1.p2.y == point.y: return [Point(point.x, point.y)] else: return [] elif dx1 == 0: # Lines segments are vertical if segment1.p1.x == segment2.p1.x: if segment1.p1.y > segment1.p2.y: segment1.p1, segment1.p2 = segment1.p2, segment1.p1 if segment2.p1.y > segment2.p2.y: segment2.p1, segment2.p2 = segment2.p2, segment2.p1 # Lines segments are on the same line if segment1.p1.y <= segment2.p1.y <= segment1.p2.y: return [Point(segment1.p1.x, segment2.p1.y)] if segment2.p1.y <= segment1.p1.y <= segment2.p2.y: return [Point(segment1.p1.x, segment1.p1.y)] else: # The equation f(x) = m*x + t defines any non-vertical line t1 = segment1.get_offset() t2 = segment2.get_offset() if t1 == t2: # line segments are on the same line if segment1.p1.x <= segment2.p1.x <= segment1.p2.x: return [Point(segment2.p1.x, segment2.get_slope()*segment2.p1.x+t2)] if segment2.p1.x <= segment1.p1.x <= segment2.p2.x: return [Point(segment1.p1.x, segment1.get_slope()*segment1.p1.x+t1)] return [] if dx2 == 0: # Line 2 is a vertical line, but line 1 isn't segment1, segment2 = segment2, segment1 dx1, dx2 = dx2, dx1 if dx1 == 0: # Line 1 is a vertical line, but line 2 isn't if segment2.p1.x > segment2.p2.x: segment2.p1, segment2.p2 = segment2.p2, segment2.p1 if segment2.p1.x <= segment1.p1.x <= segment2.p2.x: # The x-values overlap m2 = segment2.get_slope() t2 = segment2.get_offset() y = m2*segment1.p1.x + t2 if segment1.p1.y > segment1.p2.y: segment1.p1, segment1.p2 = segment1.p2, segment1.p1 if segment1.p1.y <= y <= segment1.p2.y: return [Point(segment1.p1.x, y)] else: return [] else: return [] m1, t1 = segment1.get_slope(), segment1.get_offset() m2, t2 = segment2.get_slope(), segment2.get_offset() try: x = (t2-t1)/(m1-m2) except Exception as inst: logging.debug(inst) logging.debug("m1=%s", repr(m1)) logging.debug("m2=%s", repr(m2)) return [] if segment1.p1.x > segment1.p2.x: segment1.p1, segment1.p2 = segment1.p2, segment1.p1 if segment2.p1.x > segment2.p2.x: segment2.p1, segment2.p2 = segment2.p2, segment2.p1 if (segment1.p1.x <= x <= segment1.p2.x) and \ (segment2.p1.x <= x <= segment2.p2.x): # The intersection is on both line segments - not only on the lines return [Point(x, m1*x+t1)] else: return []
0.000232
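A usage sketch for get_segments_intersections() with two crossing diagonals, assuming the Point and LineSegment helpers referenced in the doctest above: the lines have slopes 1 and -1 with offsets 0 and 2, so x = (2 - 0) / (1 - (-1)) = 1 and the crossing point is (1.0, 1.0).

s1 = LineSegment(Point(0, 0), Point(2, 2))
s2 = LineSegment(Point(0, 2), Point(2, 0))
print(get_segments_intersections(s1, s2))  # -> one point at (1.0, 1.0)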
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indices of the segment boundaries in frame indices. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = False my_bounds, my_labels, _ = main.scluster_segment(F, self.config, self.in_bound_idxs) # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # We're done! return est_idxs, est_labels
0.003601
def _step1func(self, force, ipyclient): """ hidden wrapped function to start step 1 """ ## check input data files sfiles = self.paramsdict["sorted_fastq_path"] rfiles = self.paramsdict["raw_fastq_path"] ## do not allow both a sorted_fastq_path and a raw_fastq if sfiles and rfiles: raise IPyradWarningExit(NOT_TWO_PATHS) ## but also require that at least one exists if not (sfiles or rfiles): raise IPyradWarningExit(NO_SEQ_PATH_FOUND) ## print headers if self._headers: if sfiles: print("\n{}Step 1: Loading sorted fastq data to Samples"\ .format(self._spacer)) else: print("\n{}Step 1: Demultiplexing fastq data to Samples"\ .format(self._spacer)) ## if Samples already exist then no demultiplexing if self.samples: if not force: print(SAMPLES_EXIST.format(len(self.samples), self.name)) else: ## overwrite existing data else do demux if glob.glob(sfiles): self._link_fastqs(ipyclient=ipyclient, force=force) else: assemble.demultiplex.run2(self, ipyclient, force) ## Creating new Samples else: ## first check if demultiplexed files exist in sorted path if glob.glob(sfiles): self._link_fastqs(ipyclient=ipyclient) ## otherwise do the demultiplexing else: assemble.demultiplex.run2(self, ipyclient, force)
0.008454
def max(self): """ :returns: the maximum of the column """ res = self._qexec("max(%s)" % self._name) if len(res) > 0: self._max = res[0][0] return self._max
0.009302
def add_options(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: """ Shortcut for add_route with method OPTIONS """ return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
0.011719