Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def b2u(string):
    """ bytes to unicode """
    if (isinstance(string, bytes) or
            (PY2 and isinstance(string, str))):
        return string.decode('utf-8')
    return string
0.010989
def _set_addpath_select(self, v, load=False): """ Setter method for addpath_select, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/af_additional_paths/addpath_select (container) If this variable is read-only (config: false) in the source YANG file, then _set_addpath_select is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_addpath_select() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=addpath_select.addpath_select, is_container='container', presence=False, yang_name="addpath-select", rest_name="select", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify which routes should be selected as candidates for additioanal paths', u'cli-compact-syntax': None, u'alt-name': u'select', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """addpath_select must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=addpath_select.addpath_select, is_container='container', presence=False, yang_name="addpath-select", rest_name="select", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify which routes should be selected as candidates for additioanal paths', u'cli-compact-syntax': None, u'alt-name': u'select', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__addpath_select = t if hasattr(self, '_set'): self._set()
0.004983
def convert_embedding(builder, layer, input_names, output_names, keras_layer):
    """Convert an embedding layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])

    # Get the weights from keras
    W = keras_layer.get_weights()[0].T

    # assuming keras embedding layers don't have biases
    builder.add_embedding(name=layer,
                          W=W,
                          b=None,
                          input_dim=keras_layer.input_dim,
                          output_channels=keras_layer.output_dim,
                          has_bias=False,
                          input_name=input_name,
                          output_name=output_name)
0.019759
def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, max_attempts=100): """Start, end, and threshold that clips the value of a time series the most, given a limitted "capacity" and "rate" Assumes that signal can be linearly interpolated between points (trapezoidal integration) Arguments: ts (TimeSeries): Time series to attempt to clip to as low a max value as possible capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series) method (str): scipy optimization algorithm name, one of: 'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound Constrained Optimization" 'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each variable may be constrained with upper and lower bounds 'COBYLA': Constrained Optimization by Linear Approximation. Fortran implementation. 'SLSQP': Kraft, 1988, Sequential Least Squares Programming or Quadratic Programming, infinite bounds converted to large floats TODO: Bisection search for the optimal threshold. Returns: 2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE >>> clipping_params(ts, capacity=60000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 218.13... >>> clipping_params(ts, capacity=30000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 224.15358... """ VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA'] # print('in clipping params for ts.index={0} and method={1}'.format(ts.index[0], method)) ts.index = ts.index.astype(np.int64) costs = [] def cost_fun(x, *args): thresh = x[0] ts, capacity, bounds = args integral = clipped_area(ts, thresh=thresh) terms = np.array([(10. * (integral - capacity) / capacity) ** 2, 2. / 0.1**((bounds[0] - thresh) * capacity / bounds[0]), 2. / 0.1**((thresh - bounds[1]) * capacity / bounds[1]), 1.2 ** (integral / capacity)]) return sum(terms) bounds = (ts.min(), ts.max()) done, attempts = 0, 0 thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0]) if not method or not method in VALID_METHODS: while attempts < max_attempts and not done: for optimizer_method in VALID_METHODS: optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=optimizer_method) if optimum.success: done = True break if done: break attempts += 1 thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0]) else: optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=method) thresh = optimum.x[0] integral = clipped_area(ts, thresh=thresh) params = dict(optimum) params.update({'costs': costs, 'threshold': thresh, 'initial_guess': thresh0, 'attempts': attempts, 'integral': integral, 'method': method}) return params
0.007104
def secure(view_func):
    """Handles SSL redirect on the view level."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        if not request.is_secure():
            redirect = _redirect(request, True)
            if redirect:
                # Redirect might be None if SSL is not enabled
                return redirect
        return view_func(request, *args, **kwargs)
    return _wrapped_view
0.002169
async def _resolve_params(self,
                          params: Dict[Text, Any],
                          request: Optional['Request']):
    """
    If any StringToTranslate was passed as parameter then it is rendered
    at this moment.
    """
    out = {}

    for k, v in params.items():
        if isinstance(v, StringToTranslate):
            out[k] = await render(v, request)
        else:
            out[k] = v

    return out
0.008147
def mean_by_window(self, indices, window):
    """
    Average series across multiple windows specified by their centers.

    Parameters
    ----------
    indices : array-like
        List of times specifying window centers

    window : int
        Window size
    """
    masks = self._makewindows(indices, window)
    newindex = arange(0, len(masks[0]))
    return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex)
0.006135
def cohesion(self, d=100):
    """ Boids move towards the flock's centre of mass.
        The centre of mass is the average position of all boids,
        not including itself (the "perceived centre").
    """
    vx = vy = vz = 0
    for b in self.boids:
        if b != self:
            vx, vy, vz = vx+b.x, vy+b.y, vz+b.z
    n = len(self.boids)-1
    vx, vy, vz = vx/n, vy/n, vz/n
    return (vx-self.x)/d, (vy-self.y)/d, (vz-self.z)/d
0.01487
def _filter(self, query, **kwargs):
    """ Filter a query with user-supplied arguments. """
    query = self._auto_filter(query, **kwargs)
    return query
0.010811
def wrap(self, alias=None):
    """
    Wraps the query by selecting all fields from itself

    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: The wrapped query
    """
    field_names = self.get_field_names()
    query = Query(self.connection).from_table(deepcopy(self), alias=alias)
    self.__dict__.update(query.__dict__)

    # set explicit field names
    self.tables[0].set_fields(field_names)
    field_names = self.get_field_names()

    return self
0.003824
def get_x_inds(self, *dynac_type):
    """
    Return the indices into the lattice list attribute of elements whose
    Dynac type matches the input string.  Multiple input strings can be
    given, either as a comma-separated list or as a genuine Python list.
    """
    return [i for i, x in enumerate(self.lattice)
            for y in dynac_type if dynac_from_ele(x) == y]
0.012853
def _ncc_c(x, y):
    """
    >>> _ncc_c([1,2,3,4], [1,2,3,4])
    array([ 0.13333333,  0.36666667,  0.66666667,  1.        ,  0.66666667,
            0.36666667,  0.13333333])
    >>> _ncc_c([1,1,1], [1,1,1])
    array([ 0.33333333,  0.66666667,  1.        ,  0.66666667,  0.33333333])
    >>> _ncc_c([1,2,3], [-1,-1,-1])
    array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
    """
    den = np.array(norm(x) * norm(y))
    den[den == 0] = np.Inf

    x_len = len(x)
    fft_size = 1 << (2*x_len-1).bit_length()
    cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
    cc = np.concatenate((cc[-(x_len-1):], cc[:x_len]))
    return np.real(cc) / den
0.001468
def _bfd_tx(self, **kwargs):
    """Return the BFD minimum transmit interval XML.

    You should not use this method.
    You probably want `BGP.bfd`.

    Args:
        min_tx (str): BFD transmit interval in milliseconds (300, 500, etc)
        delete (bool): Remove the configuration if ``True``.

    Returns:
        XML to be passed to the switch.

    Raises:
        None
    """
    int_type = kwargs['int_type']
    method_name = 'interface_%s_bfd_interval_min_tx' % int_type
    bfd_tx = getattr(self._interface, method_name)
    config = bfd_tx(**kwargs)
    if kwargs['delete']:
        tag = 'min-tx'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config
0.002594
def export_public_key(vk, label):
    """
    Export public key to text format.

    The resulting string can be written into a .pub file or
    appended to the ~/.ssh/authorized_keys file.
    """
    key_type, blob = serialize_verifying_key(vk)
    log.debug('fingerprint: %s', fingerprint(blob))
    b64 = base64.b64encode(blob).decode('ascii')
    return u'{} {} {}\n'.format(key_type.decode('ascii'), b64, label)
0.002398
def default_loc_scale_fn( is_singular=False, loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1), untransformed_scale_initializer=tf.compat.v1.initializers.random_normal( mean=-3., stddev=0.1), loc_regularizer=None, untransformed_scale_regularizer=None, loc_constraint=None, untransformed_scale_constraint=None): """Makes closure which creates `loc`, `scale` params from `tf.get_variable`. This function produces a closure which produces `loc`, `scale` using `tf.get_variable`. The closure accepts the following arguments: dtype: Type of parameter's event. shape: Python `list`-like representing the parameter's event shape. name: Python `str` name prepended to any created (or existing) `tf.Variable`s. trainable: Python `bool` indicating all created `tf.Variable`s should be added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. add_variable_fn: `tf.get_variable`-like `callable` used to create (or access existing) `tf.Variable`s. Args: is_singular: Python `bool` indicating if `scale is None`. Default: `False`. loc_initializer: Initializer function for the `loc` parameters. The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`. untransformed_scale_initializer: Initializer function for the `scale` parameters. Default value: `tf.random_normal_initializer(mean=-3., stddev=0.1)`. This implies the softplus transformed result is initialized near `0`. It allows a `Normal` distribution with `scale` parameter set to this value to approximately act like a point mass. loc_regularizer: Regularizer function for the `loc` parameters. The default (`None`) is to use the `tf.get_variable` default. untransformed_scale_regularizer: Regularizer function for the `scale` parameters. The default (`None`) is to use the `tf.get_variable` default. loc_constraint: An optional projection function to be applied to the loc after being updated by an `Optimizer`. The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. The default (`None`) is to use the `tf.get_variable` default. untransformed_scale_constraint: An optional projection function to be applied to the `scale` parameters after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. The default (`None`) is to use the `tf.get_variable` default. Returns: default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale` parameters from args: `dtype, shape, name, trainable, add_variable_fn`. """ def _fn(dtype, shape, name, trainable, add_variable_fn): """Creates `loc`, `scale` parameters.""" loc = add_variable_fn( name=name + '_loc', shape=shape, initializer=loc_initializer, regularizer=loc_regularizer, constraint=loc_constraint, dtype=dtype, trainable=trainable) if is_singular: return loc, None untransformed_scale = add_variable_fn( name=name + '_untransformed_scale', shape=shape, initializer=untransformed_scale_initializer, regularizer=untransformed_scale_regularizer, constraint=untransformed_scale_constraint, dtype=dtype, trainable=trainable) scale = (np.finfo(dtype.as_numpy_dtype).eps + tf.nn.softplus(untransformed_scale)) return loc, scale return _fn
0.001813
def dataframe(self):
    """
    Returns a pandas DataFrame where each row is a representation of the
    Game class. Rows are indexed by the boxscore string.
    """
    frames = []
    for game in self.__iter__():
        df = game.dataframe
        if df is not None:
            frames.append(df)
    if frames == []:
        return None
    return pd.concat(frames)
0.004785
def assignrepr(self, prefix):
    """Return a |repr| string with a prefixed assignment."""
    caller = 'Timegrids('
    blanks = ' ' * (len(prefix) + len(caller))
    prefix = f'{prefix}{caller}'
    lines = [f'{self.init.assignrepr(prefix)},']
    if self.sim != self.init:
        lines.append(f'{self.sim.assignrepr(blanks)},')
    lines[-1] = lines[-1][:-1] + ')'
    return '\n'.join(lines)
0.00463
def message(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """
    Display a simple message

    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    """
    return _simple_dialog(Gtk.MessageType.INFO, text, title, width, height, timeout)
0.001786
def schema_org(builder): # pylint: disable=line-too-long """Builds schema.org microdata for DatasetSearch from DatasetBuilder. Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset Testing tool: https://search.google.com/structured-data/testing-tool For Google Dataset Search: https://toolbox.google.com/datasetsearch Microdata format was chosen over JSON-LD due to the fact that Markdown rendering engines remove all <script> tags. Args: builder: `tfds.core.DatasetBuilder` Returns: HTML string with microdata """ # pylint: enable=line-too-long properties = [ (lambda x: x.name, SCHEMA_ORG_NAME), (lambda x: x.description, SCHEMA_ORG_DESC), (lambda x: x.name, SCHEMA_ORG_URL), (lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS) ] info = builder.info out_str = SCHEMA_ORG_PRE for extractor, template in properties: val = extractor(info) if val: # We are using cgi module instead of html due to Python 2 compatibility out_str += template.format(val=cgi.escape(val, quote=True).strip()) out_str += SCHEMA_ORG_POST return out_str
0.011226
def _collect_classes( self, package_paths: Sequence[str], recurse_subpackages: bool = True ) -> Sequence[type]: """ Collect all classes defined in/under ``package_paths``. """ import uqbar.apis classes = [] initial_source_paths: Set[str] = set() # Graph source paths and classes for path in package_paths: try: module = importlib.import_module(path) if hasattr(module, "__path__"): initial_source_paths.update(getattr(module, "__path__")) else: initial_source_paths.add(module.__file__) except ModuleNotFoundError: path, _, class_name = path.rpartition(".") module = importlib.import_module(path) classes.append(getattr(module, class_name)) # Iterate source paths for source_path in uqbar.apis.collect_source_paths( initial_source_paths, recurse_subpackages=recurse_subpackages ): package_path = uqbar.apis.source_path_to_package_path(source_path) module = importlib.import_module(package_path) # Grab any defined classes for name in dir(module): if name.startswith("_"): continue object_ = getattr(module, name) if isinstance(object_, type) and object_.__module__ == module.__name__: classes.append(object_) return sorted(classes, key=lambda x: (x.__module__, x.__name__))
0.002516
def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple. """ if mech != 'MONGODB-X509' and user is None: raise ConfigurationError("%s requires a username." % (mech,)) if mech == 'GSSAPI': if source is not None and source != '$external': raise ValueError( "authentication source must be $external or None for GSSAPI") properties = extra.get('authmechanismproperties', {}) service_name = properties.get('SERVICE_NAME', 'mongodb') canonicalize = properties.get('CANONICALIZE_HOST_NAME', False) service_realm = properties.get('SERVICE_REALM') props = GSSAPIProperties(service_name=service_name, canonicalize_host_name=canonicalize, service_realm=service_realm) # Source is always $external. return MongoCredential(mech, '$external', user, passwd, props, None) elif mech == 'MONGODB-X509': if passwd is not None: raise ConfigurationError( "Passwords are not supported by MONGODB-X509") if source is not None and source != '$external': raise ValueError( "authentication source must be " "$external or None for MONGODB-X509") # user can be None. return MongoCredential(mech, '$external', user, None, None, None) elif mech == 'PLAIN': source_database = source or database or '$external' return MongoCredential(mech, source_database, user, passwd, None, None) else: source_database = source or database or 'admin' if passwd is None: raise ConfigurationError("A password is required.") return MongoCredential( mech, source_database, user, passwd, None, _Cache())
0.000529
def default(cls):
    "Make the current foreground color the default."
    wAttributes = cls._get_text_attributes()
    wAttributes &= ~win32.FOREGROUND_MASK
    wAttributes |= win32.FOREGROUND_GREY
    wAttributes &= ~win32.FOREGROUND_INTENSITY
    cls._set_text_attributes(wAttributes)
0.009615
def find_in_data_path(filename):
    """Searches for a file within Fuel's data path.

    This function loops over all paths defined in Fuel's data path and
    returns the first path in which the file is found.

    Parameters
    ----------
    filename : str
        Name of the file to find.

    Returns
    -------
    file_path : str
        Path to the first file matching `filename` found in Fuel's
        data path.

    Raises
    ------
    IOError
        If the file doesn't appear in Fuel's data path.
    """
    for path in config.data_path:
        path = os.path.expanduser(os.path.expandvars(path))
        file_path = os.path.join(path, filename)
        if os.path.isfile(file_path):
            return file_path
    raise IOError("{} not found in Fuel's data path".format(filename))
0.001239
def _add_gene_to_graph(self, gene, variant_bnode, gene_id, relation):
    """
    :param gene:
    :param variant_bnode:
    :return:
    """
    model = Model(self.graph)
    if gene_id:
        self.graph.addTriple(variant_bnode, relation, gene_id)
    elif gene:
        LOG.info("gene %s not mapped to NCBI gene, making blank node", gene)
        gene_bnode = self.make_id("{0}".format(gene), "_")
        model.addIndividualToGraph(gene_bnode, gene)
        self.graph.addTriple(variant_bnode, relation, gene_bnode)
0.005245
def makefile(self, sock, mode='r', bufsize=-1):
    """Return socket file object."""
    cls = (
        SSLFileobjectStreamReader
        if 'r' in mode else
        SSLFileobjectStreamWriter
    )
    if SSL and isinstance(sock, ssl_conn_type):
        wrapped_socket = cls(sock, mode, bufsize)
        wrapped_socket.ssl_timeout = sock.gettimeout()
        return wrapped_socket
    # This is from past:
    # TODO: figure out what it's meant for
    else:
        return cheroot_server.CP_fileobject(sock, mode, bufsize)
0.003448
def create_alignment(self, x_align=0, y_align=0, x_scale=0, y_scale=0):
    """
    Function creates an alignment
    """
    align = Gtk.Alignment()
    align.set(x_align, y_align, x_scale, y_scale)
    return align
0.008333
def get_dsub_version():
    """Get the dsub version out of the _dsub_version.py source file.

    Setup.py should not import dsub version from dsub directly since
    ambiguity in import order could lead to an old version of dsub setting
    the version number. Parsing the file directly is simpler than using
    import tools (whose interface varies between python 2.7, 3.4, and 3.5).

    Returns:
        string of dsub version.

    Raises:
        ValueError: if the version is not found.
    """
    filename = os.path.join(os.path.dirname(__file__), 'dsub/_dsub_version.py')
    with open(filename, 'r') as versionfile:
        for line in versionfile:
            if line.startswith('DSUB_VERSION ='):
                # Get the version then strip whitespace and quote characters.
                version = line.partition('=')[2]
                return version.strip().strip('\'"')
    raise ValueError('Could not find version.')
0.006881
def plot_voight_painting(painting, palette='colorblind', flank='right', ax=None, height_factor=0.01): """Plot a painting of shared haplotype prefixes. Parameters ---------- painting : array_like, int, shape (n_variants, n_haplotypes) Painting array. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. palette : string, optional A Seaborn palette name. flank : {'right', 'left'}, optional If left, painting will be reversed along first axis. height_factor : float, optional If no axes provided, determine height of figure by multiplying height of painting array by this number. Returns ------- ax : axes """ import seaborn as sns from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt if flank == 'left': painting = painting[::-1] n_colors = painting.max() palette = sns.color_palette(palette, n_colors) # use white for singleton haplotypes cmap = ListedColormap(['white'] + palette) # setup axes if ax is None: w = plt.rcParams['figure.figsize'][0] h = height_factor*painting.shape[1] fig, ax = plt.subplots(figsize=(w, h)) sns.despine(ax=ax, bottom=True, left=True) ax.pcolormesh(painting.T, cmap=cmap) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim(0, painting.shape[0]) ax.set_ylim(0, painting.shape[1]) return ax
0.000656
def setBuildProperty(self, bid, name, value, source): """ A kind of create_or_update, that's between one or two queries per call """ def thd(conn): bp_tbl = self.db.model.build_properties self.checkLength(bp_tbl.c.name, name) self.checkLength(bp_tbl.c.source, source) whereclause = sa.and_(bp_tbl.c.buildid == bid, bp_tbl.c.name == name) q = sa.select( [bp_tbl.c.value, bp_tbl.c.source], whereclause=whereclause) prop = conn.execute(q).fetchone() value_js = json.dumps(value) if prop is None: conn.execute(bp_tbl.insert(), dict(buildid=bid, name=name, value=value_js, source=source)) elif (prop.value != value_js) or (prop.source != source): conn.execute(bp_tbl.update(whereclause=whereclause), dict(value=value_js, source=source)) yield self.db.pool.do(thd)
0.001838
def _get_parameters_from_request(self, request, exception=False): """Get parameters to log in OPERATION_LOG.""" user = request.user referer_url = None try: referer_dic = urlparse.urlsplit( urlparse.unquote(request.META.get('HTTP_REFERER'))) referer_url = referer_dic[2] if referer_dic[3]: referer_url += "?" + referer_dic[3] if isinstance(referer_url, str): referer_url = referer_url.decode('utf-8') except Exception: pass request_url = urlparse.unquote(request.path) if request.META['QUERY_STRING']: request_url += '?' + request.META['QUERY_STRING'] return { 'client_ip': request.META.get('REMOTE_ADDR', None), 'domain_name': getattr(user, 'domain_name', None), 'domain_id': getattr(user, 'domain_id', None), 'project_name': getattr(user, 'project_name', None), 'project_id': getattr(user, 'project_id', None), 'user_name': getattr(user, 'username', None), 'user_id': request.session.get('user_id', None), 'request_scheme': request.scheme, 'referer_url': referer_url, 'request_url': request_url, 'method': request.method if not exception else None, 'param': self._get_request_param(request), }
0.001399
def on_execute__set_video_config(self, request): ''' .. versionchanged:: 0.12 Accept empty video configuration as either `None` or an empty `pandas.Series`. ''' data = decode_content_data(request) compare_fields = ['device_name', 'width', 'height', 'name', 'fourcc', 'framerate'] if data['video_config'] is None or not data['video_config'].shape[0]: i = None else: for i, row in self.parent.video_mode_slave.configs.iterrows(): if (row[compare_fields] == data['video_config'][compare_fields]).all(): break else: i = None if i is None: logger.error('Unsupported video config:\n%s', data['video_config']) logger.error('Video configs:\n%s', self.parent.video_mode_slave.configs) self.parent.video_mode_slave.config_combo.set_active(0) else: logger.error('Set video config (%d):\n%s', i + 1, data['video_config']) self.parent.video_mode_slave.config_combo.set_active(i + 1)
0.001649
def list_templates(call=None):
    '''
    Lists all templates available to the user and the user's groups.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_templates opennebula
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_templates function must be called with -f or --function.'
        )

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    template_pool = server.one.templatepool.info(auth, -2, -1, -1)[1]

    templates = {}
    for template in _get_xml(template_pool):
        templates[template.find('NAME').text] = _xml_to_dict(template)

    return templates
0.001427
def mass_3d_lens(self, r, theta_E, gamma, e1, e2):
    """
    computes the spherical power-law mass enclosed (with SPP routine)

    :param r:
    :param theta_E:
    :param gamma:
    :param q:
    :param phi_G:
    :return:
    """
    return self.spp.mass_3d_lens(r, theta_E, gamma)
0.006135
def build_suite(args):
    """Build a test suite by loading TAP files or a TAP stream."""
    loader = Loader()
    if len(args.files) == 0 or args.files[0] == "-":
        suite = loader.load_suite_from_stdin()
    else:
        suite = loader.load(args.files)
    return suite
0.003597
def find_group_consistencies(groups1, groups2):
    r"""
    Returns a measure of group consistency

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_alg import *  # NOQA
        >>> groups1 = [[1, 2, 3], [4], [5, 6]]
        >>> groups2 = [[1, 2], [4], [5, 6]]
        >>> common_groups = find_group_consistencies(groups1, groups2)
        >>> result = ('common_groups = %r' % (common_groups,))
        >>> print(result)
        common_groups = [(5, 6), (4,)]
    """
    group1_list = {tuple(sorted(_group)) for _group in groups1}
    group2_list = {tuple(sorted(_group)) for _group in groups2}
    common_groups = list(group1_list.intersection(group2_list))
    return common_groups
0.001416
def list_records(zone_id, profile, type=None):
    '''
    List records for the given zone_id on the given profile

    :param zone_id: Zone to export.
    :type  zone_id: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param type: The record type, e.g. A, NS
    :type  type: ``str``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_dns.list_records google.com profile1
    '''
    conn = _get_driver(profile=profile)
    zone = conn.get_zone(zone_id)
    if type is not None:
        return [_simple_record(record) for record in conn.list_records(zone)
                if record.type == type]
    else:
        return [_simple_record(record) for record in conn.list_records(zone)]
0.002762
def init_widget(self):
    """ Initialize the underlying widget. """
    super(AndroidTextView, self).init_widget()
    d = self.declaration
    w = self.widget
    if d.input_type:
        self.set_input_type(d.input_type)
    w.addTextChangedListener(w.getId())
    w.onTextChanged.connect(self.on_text_changed)
0.005525
def as_sql(self, qn, connection):
    """
    Create the proper SQL fragment. This inserts something like
    "(T0.flags & value) != 0".

    This will be called by Where.as_sql()
    """
    engine = connection.settings_dict['ENGINE'].rsplit('.', -1)[-1]
    if engine.startswith('postgres'):
        XOR_OPERATOR = '#'
    elif engine.startswith('sqlite'):
        raise NotImplementedError
    else:
        XOR_OPERATOR = '^'

    if self.bit:
        return ("%s.%s | %d" % (qn(self.table_alias), qn(self.column), self.bit.mask), [])

    return ("%s.%s %s %d" % (qn(self.table_alias), qn(self.column), XOR_OPERATOR, self.bit.mask), [])
0.005435
def _tmpfile(self, cache_key, use):
    """Allocate tempfile on same device as cache with a suffix chosen to prevent collisions"""
    with temporary_file(suffix=cache_key.id + use, root_dir=self._cache_root,
                        permissions=self._permissions) as tmpfile:
        yield tmpfile
0.010169
def get_size(self, value=None):
    """Return the action length including the padding (multiple of 8)."""
    if isinstance(value, ActionHeader):
        return value.get_size()
    elif value is None:
        current_size = super().get_size()
        return ceil(current_size / 8) * 8
    raise ValueError(f'Invalid value "{value}" for Action*.get_size()')
0.005195
def convert(model, image_input_names=[], is_bgr=False, red_bias=0.0, blue_bias=0.0, green_bias=0.0, gray_bias=0.0, image_scale=1.0, class_labels=None, predicted_feature_name=None, model_precision=_MLMODEL_FULL_PRECISION): """ Convert a Caffe model to Core ML format. Parameters ---------- model: str | (str, str) | (str, str, str) | (str, str, dict) A trained Caffe neural network model which can be represented as: - Path on disk to a trained Caffe model (.caffemodel) - A tuple of two paths, where the first path is the path to the .caffemodel file while the second is the path to the deploy.prototxt. - A tuple of three paths, where the first path is the path to the trained .caffemodel file, the second is the path to the deploy.prototxt while the third is a path to the mean image binary, data in which is subtracted from the input image as a preprocessing step. - A tuple of two paths to .caffemodel and .prototxt and a dict with image input names as keys and paths to mean image binaryprotos as values. The keys should be same as the input names provided via the argument 'image_input_name'. image_input_names: [str] | str The name(s) of the input blob(s) in the Caffe model that can be treated as images by Core ML. All other inputs are treated as MultiArrays (N-D Arrays) by Core ML. is_bgr: bool | dict() Flag indicating the channel order the model internally uses to represent color images. Set to True if the internal channel order is BGR, otherwise it will be assumed RGB. This flag is applicable only if image_input_names is specified. To specify a different value for each image input, provide a dictionary with input names as keys. Note that this flag is about the models internal channel order. An input image can be passed to the model in any color pixel layout containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag determines how those pixel values get mapped to the internal multiarray representation. red_bias: float | dict() Bias value to be added to the red channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. blue_bias: float | dict() Bias value to be added to the the blue channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. green_bias: float | dict() Bias value to be added to the green channel of the input image. Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. gray_bias: float | dict() Bias value to be added to the input image (in grayscale). Defaults to 0.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. image_scale: float | dict() Value by which the input images will be scaled before bias is added and Core ML model makes a prediction. Defaults to 1.0. Applicable only if image_input_names is specified. To specify different values for each image input provide a dictionary with input names as keys. class_labels: str Filepath where classes are parsed as a list of newline separated strings. Class labels map the index of the output of a neural network to labels in a classifier. Provide this argument to get a model of type classifier. 
predicted_feature_name: str Name of the output feature for the class labels exposed in the Core ML model (applies to classifiers only). Defaults to 'classLabel' model_precision: str Precision at which model will be saved. Currently full precision (float) and half precision (float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision). Returns ------- model: MLModel Model in Core ML format. Examples -------- .. sourcecode:: python # Convert it with default input and output names >>> import coremltools >>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel') # Saving the Core ML model to a file. >>> coreml_model.save('my_model.mlmodel') Sometimes, critical information in the Caffe converter is missing from the .caffemodel file. This information is present in the deploy.prototxt file. You can provide us with both files in the conversion process. .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt')) Some models (like Resnet-50) also require a mean image file which is subtracted from the input image before passing through the network. This file can also be provided during conversion: .. sourcecode:: python >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input') # Multiple mean images for preprocessing >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', ... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}), ... image_input_names = ['image1', 'image2']) # Multiple image inputs and bias/scale values >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'), ... red_bias = {'image1': -100, 'image2': -110}, ... green_bias = {'image1': -90, 'image2': -125}, ... blue_bias = {'image1': -105, 'image2': -120}, ... image_input_names = ['image1', 'image2']) Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file, which contains a description of the network architecture. Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray. Argument "image_input_names" can be used to assign image type to specific inputs. All the blobs that are "dangling", i.e. which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify custom input and output names. The converted Core ML model is of type classifier when the argument "class_labels" is specified. Advanced usage with custom classifiers, and images: .. sourcecode:: python # Mark some inputs as Images >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input') # Export as a classifier with classes from a file >>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'), ... image_input_names = 'my_image_input', class_labels = 'labels.txt') Sometimes the converter might return a message about not able to infer input data dimensions. This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing the .prototxt in a text editor. Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model: .. 
code-block:: bash input: "my_image_input" input_dim: 1 input_dim: 3 input_dim: 227 input_dim: 227 Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width). Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt. """ from ...models import MLModel from ...models.utils import convert_neural_network_weights_to_fp16 as convert_neural_network_weights_to_fp16 if model_precision not in _VALID_MLMODEL_PRECISION_TYPES: raise RuntimeError('Model precision {} is not valid'.format(model_precision)) import tempfile model_path = tempfile.mktemp() _export(model_path, model, image_input_names, is_bgr, red_bias, blue_bias, green_bias, gray_bias, image_scale, class_labels, predicted_feature_name) model = MLModel(model_path) if model_precision == _MLMODEL_HALF_PRECISION and model is not None: model = convert_neural_network_weights_to_fp16(model) return model
0.006453
def get_model_class(name):
    """
    This is being implemented to help with the Email Module, where we want
    to use a model for the email context without needing to import the model
    (which in most cases creates a circular dependency, anyway)

    Beware that the current implementation returns the first match, so if a
    model with the same name exists in two different applications this will
    not work

    http://stackoverflow.com/a/13242421
    """
    LOGGER.warning('Beware, function returns first match in the model registry.')

    # iterate all registered models
    for model in apps.get_models():
        # return the app_label for first match
        if name == model._meta.object_name:
            app_label = model._meta.app_label
            return apps.get_model(app_label, name)
0.002469
def lookupFunction(self, proto, name, namespace):
    """Return a callable to invoke when executing the named command.
    """
    # Try to find a method to be invoked in a transaction first
    # Otherwise fallback to a "regular" method
    fName = self.autoDispatchPrefix + name
    fObj = getattr(self, fName, None)
    if fObj is not None:
        # pass the namespace along
        return self._auto(fObj, proto, namespace)

    assert namespace is None, 'Old-style parsing'
    # Fall back to simplistic command dispatching - we probably want to get
    # rid of this eventually, there's no reason to do extra work and write
    # fewer docs all the time.
    fName = self.baseDispatchPrefix + name
    return getattr(self, fName, None)
0.002494
def ghmean(nums): """Return geometric-harmonic mean. Iterates between geometric & harmonic means until they converge to a single value (rounded to 12 digits). Cf. https://en.wikipedia.org/wiki/Geometric-harmonic_mean Parameters ---------- nums : list A series of numbers Returns ------- float The geometric-harmonic mean of nums Examples -------- >>> ghmean([1, 2, 3, 4]) 2.058868154613003 >>> ghmean([1, 2]) 1.3728805006183502 >>> ghmean([0, 5, 1000]) 0.0 >>> ghmean([0, 0]) 0.0 >>> ghmean([0, 0, 5]) nan """ m_g = gmean(nums) m_h = hmean(nums) if math.isnan(m_g) or math.isnan(m_h): return float('nan') while round(m_h, 12) != round(m_g, 12): m_g, m_h = (m_g * m_h) ** (1 / 2), (2 * m_g * m_h) / (m_g + m_h) return m_g
0.001148
def parse_uk_postcode(postcode, strict=True, incode_mandatory=True): '''Split UK postcode into outcode and incode portions. Arguments: postcode The postcode to be split. strict If true, the postcode will be validated according to the rules as specified at the Universal Postal Union[1] and The UK Government Data Standards Catalogue[2]. If the supplied postcode doesn't adhere to these rules a ValueError will be thrown. incode_mandatory If true, and only an outcode has been supplied, the function will throw a ValueError. Returns: outcode, incode Raises: ValueError, if postcode is longer than seven characters, or if 'strict' or 'incode_mandatory' conditions are broken - see above. Usage example: >>> from postcode import parse_uk_postcode >>> parse_uk_postcode('cr0 2yr') ('CR0', '2YR') >>> parse_uk_postcode('cr0') Traceback (most recent call last): File "<interactive input>", line 1, in ? File "postcode.py", line 101, in parse_uk_postcode raise ValueError('Incode mandatory') ValueError: Incode mandatory >>> parse_uk_postcode('cr0', False, False) ('CR0', '') [1] http://www.upu.int/fileadmin/documentsFiles/activities/addressingUnit/gbrEn.pdf [2] http://web.archive.org/web/20090930140939/http://www.govtalk.gov.uk/gdsc/html/noframes/PostCode-2-1-Release.htm ''' postcode = postcode.replace(' ', '').upper() # Normalize if len(postcode) > 7: raise exceptions.MaxLengthExceededError() # Validate postcode if strict: # Try for full postcode match postcode_match = POSTCODE_REGEX.match(postcode) if postcode_match: return postcode_match.group(1, 2) # Try for outcode only match outcode_match = STANDALONE_OUTCODE_REGEX.match(postcode) if outcode_match: if incode_mandatory: raise exceptions.IncodeNotFoundError('Incode mandatory') else: return outcode_match.group(1), '' # Try Girobank special case if postcode == 'GIR0AA': return 'GIR', '0AA' elif postcode == 'GIR': if incode_mandatory: raise exceptions.IncodeNotFoundError('Incode mandatory') else: return 'GIR', '' # None of the above raise exceptions.InvalidPostcodeError( 'Value provided does not align with UK postcode rules' ) # Just chop up whatever we've been given. else: # Outcode only if len(postcode) <= 4: if incode_mandatory: raise exceptions.IncodeNotFoundError('Incode mandatory') else: return postcode, '' # Full postcode else: return postcode[:-3], postcode[-3:]
0.000925
def grep_file(query, item):
    """This function performs the actual grep on a given file."""
    return ['%s: %s' % (item, line) for line in open(item)
            if re.search(query, line)]
0.005236
def do_loop_turn(self): # pylint: disable=too-many-branches, too-many-statements, too-many-locals """Loop turn for Arbiter If not a master daemon, wait for my master death... Else, run: * Check satellites are alive * Check and dispatch (if needed) the configuration * Get broks and external commands from the satellites * Push broks and external commands to the satellites :return: None """ # If I am a spare, I only wait for the master arbiter to die... if not self.is_master: logger.debug("Waiting for my master death...") self.wait_for_master_death() return if self.loop_count % self.alignak_monitor_period == 1: self.get_alignak_status(details=True) # Maybe an external process requested Alignak stop... if self.kill_request: logger.info("daemon stop mode ...") if not self.dispatcher.stop_request_sent: logger.info("entering daemon stop mode, time before exiting: %s", self.conf.daemons_stop_timeout) self.dispatcher.stop_request() if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout: logger.info("daemon stop mode delay reached, immediate stop") self.dispatcher.stop_request(stop_now=True) time.sleep(1) self.interrupted = True logger.info("exiting...") if not self.kill_request: # Main loop treatment # Try to see if one of my module is dead, and restart previously dead modules self.check_and_del_zombie_modules() # Call modules that manage a starting tick pass _t0 = time.time() self.hook_point('tick') statsmgr.timer('hook.tick', time.time() - _t0) # Look for logging timeperiods activation change (active/inactive) self.check_and_log_tp_activation_change() # Check that my daemons are alive if not self.daemons_check(): if self.conf.daemons_failure_kill: self.request_stop(message="Some Alignak daemons cannot be checked.", exit_code=4) else: logger.warning("Should have killed my children if " "'daemons_failure_kill' were set!") # Now the dispatcher job - check if all daemons are reachable and have a configuration if not self.daemons_reachability_check(): logger.warning("A new configuration dispatch is required!") # Prepare and dispatch the monitored configuration self.configuration_dispatch(self.dispatcher.not_configured) # Now get things from our module instances _t0 = time.time() self.get_objects_from_from_queues() statsmgr.timer('get-objects-from-queues', time.time() - _t0) # Maybe our satellites raised new broks. Reap them... _t0 = time.time() self.get_broks_from_satellites() statsmgr.timer('broks.got.time', time.time() - _t0) # One broker is responsible for our broks, we give him our broks _t0 = time.time() self.push_broks_to_broker() statsmgr.timer('broks.pushed.time', time.time() - _t0) # # We push our external commands to our schedulers... # _t0 = time.time() # self.push_external_commands_to_schedulers() # statsmgr.timer('external-commands.pushed.time', time.time() - _t0) if self.system_health and (self.loop_count % self.system_health_period == 1): perfdatas = [] cpu_count = psutil.cpu_count() perfdatas.append("'cpu_count'=%d" % cpu_count) logger.debug(" . cpu count: %d", cpu_count) cpu_percents = psutil.cpu_percent(percpu=True) cpu = 1 for percent in cpu_percents: perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent)) cpu += 1 cpu_times_percent = psutil.cpu_times_percent(percpu=True) cpu = 1 for cpu_times_percent in cpu_times_percent: logger.debug(" . 
cpu time percent: %s", cpu_times_percent) for key in cpu_times_percent._fields: perfdatas.append( "'cpu_%d_%s_percent'=%.2f%%" % (cpu, key, getattr(cpu_times_percent, key))) cpu += 1 logger.info("%s cpu|%s", self.name, " ".join(perfdatas)) perfdatas = [] disk_partitions = psutil.disk_partitions(all=False) for disk_partition in disk_partitions: logger.debug(" . disk partition: %s", disk_partition) disk = getattr(disk_partition, 'mountpoint') disk_usage = psutil.disk_usage(disk) logger.debug(" . disk usage: %s", disk_usage) for key in disk_usage._fields: if 'percent' in key: perfdatas.append("'disk_%s_percent_used'=%.2f%%" % (disk, getattr(disk_usage, key))) else: perfdatas.append("'disk_%s_%s'=%dB" % (disk, key, getattr(disk_usage, key))) logger.info("%s disks|%s", self.name, " ".join(perfdatas)) perfdatas = [] virtual_memory = psutil.virtual_memory() logger.debug(" . memory: %s", virtual_memory) for key in virtual_memory._fields: if 'percent' in key: perfdatas.append("'mem_percent_used_%s'=%.2f%%" % (key, getattr(virtual_memory, key))) else: perfdatas.append("'mem_%s'=%dB" % (key, getattr(virtual_memory, key))) swap_memory = psutil.swap_memory() logger.debug(" . memory: %s", swap_memory) for key in swap_memory._fields: if 'percent' in key: perfdatas.append("'swap_used_%s'=%.2f%%" % (key, getattr(swap_memory, key))) else: perfdatas.append("'swap_%s'=%dB" % (key, getattr(swap_memory, key))) logger.info("%s memory|%s", self.name, " ".join(perfdatas))
0.001933
def record(cls_def):
    """
    Namedtuple which could inherit from other types.

    >>> from Redy.Magic.Classic import record
    >>> class Interface: pass
    >>> @record
    >>> class S(Interface):
    >>>     name: str
    >>>     addr: str
    >>>     sex : int
    >>> s = S("sam", "I/O", 1)
    """
    annotations = getattr(cls_def, '__annotations__', {})
    typ: type = namedtuple(cls_def.__name__, list(annotations.keys()))
    return cls_def.__class__(cls_def.__name__, (typ, *cls_def.__bases__), dict(cls_def.__dict__))
0.003745
def read(self, size=-1):
    "Reads up to size bytes, but always completes the last line."
    buf = self.fin.read(size)
    if not buf:
        return ''
    lines = buf.splitlines()
    # Read the rest of the last line if necessary
    if not buf.endswith('\n'):
        last = lines.pop()
        partial = self.fin.readline()
        lines.append(last + partial)
    # Process the lines, concatenate them
    lines = [self.process_line(line.rstrip('\n')) for line in lines]
    return ''.join(lines)
0.003604
def get_descriptors_in_module(mdl, submodule=True):
    r"""Get descriptors in module.

    Parameters:
        mdl(module): module to search
        submodule(bool): search recursively

    Returns:
        Iterator[Descriptor]
    """
    __all__ = getattr(mdl, "__all__", None)
    if __all__ is None:
        __all__ = dir(mdl)

    all_values = (getattr(mdl, name) for name in __all__ if name[:1] != "_")

    if submodule:
        for v in all_values:
            if is_descriptor_class(v):
                yield v
            if isinstance(v, ModuleType):
                for v in get_descriptors_in_module(v, submodule=True):
                    yield v
    else:
        for v in all_values:
            if is_descriptor_class(v):
                yield v
0.001307
def winner(self):
    'The winner of this board if one exists.'
    for potential_win in self._potential_wins():
        if potential_win == tuple('XXX'):
            return Outcome.win_for_crosses
        elif potential_win == tuple('OOO'):
            return Outcome.win_for_naughts
    if self._count(' ') == 0:
        return Outcome.draw
    return Outcome.ongoing
0.004938
def update(x, P, z, R, H=None, return_all=False): """ Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. This can handle either the multidimensional or unidimensional case. If all parameters are floats instead of arrays the filter will still work, and return floats for x, P as the result. update(1, 2, 1, 1, 1) # univariate update(x, P, 1 Parameters ---------- x : numpy.array(dim_x, 1), or float State estimate vector P : numpy.array(dim_x, dim_x), or float Covariance matrix z : (dim_z, 1): array_like measurement for this update. z can be a scalar if dim_z is 1, otherwise it must be convertible to a column vector. R : numpy.array(dim_z, dim_z), or float Measurement noise matrix H : numpy.array(dim_x, dim_x), or float, optional Measurement function. If not provided, a value of 1 is assumed. return_all : bool, default False If true, y, K, S, and log_likelihood are returned, otherwise only x and P are returned. Returns ------- x : numpy.array Posterior state estimate vector P : numpy.array Posterior covariance matrix y : numpy.array or scalar Residua. Difference between measurement and state in measurement space K : numpy.array Kalman gain S : numpy.array System uncertainty in measurement space log_likelihood : float log likelihood of the measurement """ #pylint: disable=bare-except if z is None: if return_all: return x, P, None, None, None, None return x, P if H is None: H = np.array([1]) if np.isscalar(H): H = np.array([H]) Hx = np.atleast_1d(dot(H, x)) z = reshape_z(z, Hx.shape[0], x.ndim) # error (residual) between measurement and prediction y = z - Hx # project system uncertainty into measurement space S = dot(dot(H, P), H.T) + R # map system uncertainty into kalman gain try: K = dot(dot(P, H.T), linalg.inv(S)) except: # can't invert a 1D array, annoyingly K = dot(dot(P, H.T), 1./S) # predict new x with residual scaled by the kalman gain x = x + dot(K, y) # P = (I-KH)P(I-KH)' + KRK' KH = dot(K, H) try: I_KH = np.eye(KH.shape[0]) - KH except: I_KH = np.array([1 - KH]) P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T) if return_all: # compute log likelihood log_likelihood = logpdf(z, dot(H, x), S) return x, P, y, K, S, log_likelihood return x, P
0.002636
def generateIdentityKeyPair():
    """
    Generate an identity key pair.  Clients should only do this once,
    at install time.
    @return the generated IdentityKeyPair.
    """
    keyPair = Curve.generateKeyPair()
    publicKey = IdentityKey(keyPair.getPublicKey())
    serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \
                 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \
                 'edfbcd82129b14a88791ac81365c'
    serialized = binascii.unhexlify(serialized.encode())
    identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey())
    return identityKeyPair
0.005865
def to_html(self, index=False, escape=False, header=True, collapse_table=True, class_outer="table_outer", **kargs): """Return HTML version of the table This is a wrapper of the to_html method of the pandas dataframe. :param bool index: do not include the index :param bool escape: do not escape special characters :param bool header: include header :param bool collapse_table: long tables are shorten with a scroll bar :param kargs: any parameter accepted by :meth:`pandas.DataFrame.to_html` """ _buffer = {} for k, v in self.pd_options.items(): # save the current option _buffer[k] = pd.get_option(k) # set with user value pd.set_option(k, v) # class sortable is to use the sorttable javascript # note that the class has one t and the javascript library has 2 # as in the original version of sorttable.js table = self.df.to_html(escape=escape, header=header, index=index, classes='sortable', **kargs) # get back to default options for k, v in _buffer.items(): pd.set_option(k, v) # We wrap the table in a dedicated class/div nammed table_scroller # that users must define. return '<div class="%s">' % class_outer + table+"</div>"
0.002161
def getContent(self):
    """
    Returns:
        str: Content of tag (everything between `opener` and `endtag`).
    """
    if not self.isTag() and self._element:
        return self._element

    if not self.childs:
        return ""

    output = ""
    for c in self.childs:
        if not c.isEndTag():
            output += c.toString()

    if output.endswith("\n"):
        return output.rstrip()

    return output
0.004124
def do_help(self, arg):
    """Help command.

    Usage: help [command]

    Parameters:
        command: Optional - command name to display detailed help
    """
    cmds = arg.split()
    if cmds:
        func = getattr(self, 'do_{}'.format(cmds[0]))
        if func:
            _LOGGING.info(func.__doc__)
        else:
            _LOGGING.error('Command %s not found', cmds[0])
    else:
        _LOGGING.info("Available command list: ")
        for curr_cmd in dir(self.__class__):
            if curr_cmd.startswith("do_") and not curr_cmd == 'do_test':
                print(" - ", curr_cmd[3:])
        _LOGGING.info("For help with a command type `help command`")
0.002639
def run(command, exit, silent, check):
    """
    Runs given command on all repos and checks status

    $ maintain repo run -- git checkout master
    """
    status = 0

    for (repo, path) in gather_repositories():
        if check and not check_repo(repo, path):
            status = 1
            if exit:
                break
            continue

        with chdir(path):
            result = subprocess.run(command, shell=True, capture_output=silent)
            if result.returncode != 0:
                status = result.returncode
                print('Command failed: {}'.format(repo))
                if exit:
                    break

    sys.exit(status)
0.001464
def update_installed_files(self, installed_files):
    """
    Track the files installed by a package so pip knows how to remove the package.

    This method is used by :func:`install_binary_dist()` (which collects the
    list of installed files for :func:`update_installed_files()`).

    :param installed_files: A list of absolute pathnames (strings) with the
                            files that were just installed.
    """
    # Find the *.egg-info directory where installed-files.txt should be created.
    pkg_info_files = [fn for fn in installed_files if fnmatch.fnmatch(fn, '*.egg-info/PKG-INFO')]
    # I'm not (yet) sure how reliable the above logic is, so for now
    # I'll err on the side of caution and only act when the results
    # seem to be reliable.
    if len(pkg_info_files) != 1:
        logger.warning("Not tracking installed files (couldn't reliably determine *.egg-info directory)")
    else:
        egg_info_directory = os.path.dirname(pkg_info_files[0])
        installed_files_path = os.path.join(egg_info_directory, 'installed-files.txt')
        logger.debug("Tracking installed files in %s ..", installed_files_path)
        with open(installed_files_path, 'w') as handle:
            for pathname in installed_files:
                handle.write('%s\n' % os.path.relpath(pathname, egg_info_directory))
0.00632
def convert_notebook(self, fname):
    """Convert an IPython notebook to a Python script in editor"""
    try:
        script = nbexporter().from_filename(fname)[0]
    except Exception as e:
        QMessageBox.critical(self, _('Conversion error'),
                             _("It was not possible to convert this "
                               "notebook. The error is:\n\n") + \
                             to_text_string(e))
        return
    self.sig_new_file.emit(script)
0.009381
def typechecked_func(func, force=False, argType=None, resType=None, prop_getter=False):
    """Works like typechecked, but is only applicable to functions,
    methods and properties.
    """
    if not pytypes.checking_enabled and not pytypes.do_logging_in_typechecked:
        return func
    assert(_check_as_func(func))
    if not force and is_no_type_check(func):
        return func
    if hasattr(func, 'do_typecheck'):
        func.do_typecheck = True
        return func
    elif hasattr(func, 'do_logging'):
        # actually shouldn't happen
        return _typeinspect_func(func, True, func.do_logging, argType, resType, prop_getter)
    else:
        return _typeinspect_func(func, True, False, argType, resType, prop_getter)
0.017497
def apply_to(self, launchable): """ Apply this ISCM configuration into a launchable resource, such as an EC2 instance or an AutoScalingGroup LaunchConfig. """ # Update user data if launchable.get_property("UserData") is not None: raise NotImplementedError("It's not yet supported to append SCM to existing userdata") user_data = { "Fn::Base64" : { "Fn::Join" : ["", [ "\n".join([ r'#!/bin/bash', r'FATAL() { code=$1; shift; echo "[FATAL] $*" >&2; exit $code; }', r'ERROR() { echo "[ERROR] $*" >&2 ; }', r'WARN() { echo "[WARNING] $*" >&2 ; }', r'INFO() { echo "[INFO] $*" >&2 ; }', "", ]) ] + (self.wc_handle is not None and [ cfnjoin("", r'ISCM_WCHANDLE_URL="', self.wc_handle, '"\n' ) ] or []) + [ "\n".join([ r'{', r'INFO "CloudCast ISCM booting on $(date)"', "\n\n" ]) ] + self.userdata_elems + [ "\n".join([ "", r'iscm_result=$?', r'[ -n "$ISCM_WCHANDLE_URL" ] && [ -n "$(which cfn-signal)" ] && cfn-signal -e $iscm_result $ISCM_WCHANDLE_URL', '\nINFO "CloudCast ISCM successfully completed on $(date)"', '} 2>&1 | tee -a /iscm.log\n' ]) ] ]} } launchable.add_property("UserData", user_data) # Set meta data keys for k in self.metadata: if launchable.get_metadata_key(k) is not None: raise NotImplementedError("It's not yet supported to append to existing metadata keys") launchable.add_metadata_key(k, self.metadata[k])
0.009291
def positions(self): """Initial position for each particle. Shape (N, 3, 1).""" return np.vstack([p.r0 for p in self]).reshape(len(self), 3, 1)
0.012579
def get_text(self):
        '''
        :returns: a rendered string representation of the given row
        '''
        row_lines = []
        for line in zip_longest(*[column.get_cell_lines() for column in self.columns], fillvalue=' '):
            row_lines.append(' '.join(line))
        return '\n'.join(row_lines)
0.008929
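For reference, a minimal sketch of the zip_longest column-joining idiom used by get_text above, with the Column objects replaced by plain lists of pre-rendered cell lines (hypothetical data):

from itertools import zip_longest

# Hypothetical pre-rendered columns: each one is just a list of cell lines.
left = ["alpha", "beta"]
right = ["one", "two", "three"]

# Pair the columns row by row, padding the shorter one with a space,
# then join the cells of each row -- the same idiom as get_text() above.
rows = [' '.join(cells) for cells in zip_longest(left, right, fillvalue=' ')]
print('\n'.join(rows))
# alpha one
# beta two
#   three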
def grayspec(k): """ List of gray-scale colors in HSV space as web hex triplets. For integer argument k, returns list of `k` gray-scale colors, increasingly light, linearly in the HSV color space, as web hex triplets. Technical dependency of :func:`tabular.spreadsheet.aggregate_in`. **Parameters** **k** : positive integer Number of gray-scale colors to return. **Returns** **glist** : list of strings List of `k` gray-scale colors. """ ll = .5 ul = .8 delta = (ul - ll) / k return [GrayScale(t) for t in np.arange(ll, ul, delta)]
0.003053
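The GrayScale helper referenced above is not included here; as a rough, hypothetical stand-in, the same linear sampling can be rendered straight to web hex triplets:

import numpy as np

def gray_hex(t):
    # Hypothetical stand-in for GrayScale: map t in [0, 1] to a gray '#rrggbb'.
    level = int(round(t * 255))
    return '#{0:02x}{0:02x}{0:02x}'.format(level)

k = 4
ll, ul = .5, .8
delta = (ul - ll) / k
print([gray_hex(t) for t in np.arange(ll, ul, delta)])
# roughly ['#808080', '#939393', '#a6a6a6', '#b9b9b9']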
def list_distinfo_files(self): """ Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths """ base = os.path.dirname(self.path) for path, checksum, size in self._get_records(): # XXX add separator or use real relpath algo if not os.path.isabs(path): path = os.path.join(base, path) if path.startswith(self.path): yield path
0.003322
def get_top_priority(self): """Pops the element that has the top (smallest) priority. :returns: element with the top (smallest) priority. :raises: IndexError -- Priority queue is empty. """ if self.is_empty(): raise IndexError("Priority queue is empty.") _, _, element = heapq.heappop(self.pq) if element in self.element_finder: del self.element_finder[element] return element
0.004292
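For context, a minimal sketch of the heapq pattern that get_top_priority pops from; the push side and the element_finder bookkeeping shown here are assumptions, since only the pop is given above:

import heapq
import itertools

pq, element_finder, counter = [], {}, itertools.count()

def add_element(element, priority=0):
    # Entries are (priority, insertion order, element); the counter breaks ties.
    entry = (priority, next(counter), element)
    element_finder[element] = entry
    heapq.heappush(pq, entry)

add_element('write code', priority=2)
add_element('fix bug', priority=1)

_, _, element = heapq.heappop(pq)   # same pop as in get_top_priority()
element_finder.pop(element, None)
print(element)                      # 'fix bug' -- the smallest priority wins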
def ls(args): """ lexibank ls [COLS]+ column specification: - license - lexemes - macroareas """ db = Database(args.db) db.create(exists_ok=True) in_db = {r[0]: r[1] for r in db.fetchall('select id, version from dataset')} # FIXME: how to smartly choose columns? table = Table('ID', 'Title') cols = OrderedDict([ (col, {}) for col in args.args if col in [ 'version', 'location', 'changes', 'license', 'all_lexemes', 'lexemes', 'concepts', 'languages', 'families', 'varieties', 'macroareas', ]]) tl = 40 if cols: tl = 25 table.columns.extend(col.capitalize() for col in cols) for col, sql in [ ('languages', 'glottocodes_by_dataset'), ('concepts', 'conceptsets_by_dataset'), ('lexemes', 'mapped_lexemes_by_dataset'), ('all_lexemes', 'lexemes_by_dataset'), ('macroareas', 'macroareas_by_dataset'), ('families', 'families_by_dataset'), ]: if col in cols: cols[col] = {r[0]: r[1] for r in db.fetchall(sql)} for ds in args.cfg.datasets: row = [ colored(ds.id, 'green' if ds.id in in_db else 'red'), truncate_with_ellipsis(ds.metadata.title or '', width=tl), ] for col in cols: if col == 'version': row.append(git_hash(ds.dir)) elif col == 'location': row.append(colored(str(ds.dir), 'green')) elif col == 'changes': row.append(ds.git_repo.is_dirty()) elif col == 'license': lic = licenses.find(ds.metadata.license or '') row.append(lic.id if lic else ds.metadata.license) elif col in ['languages', 'concepts', 'lexemes', 'all_lexemes', 'families']: row.append(float(cols[col].get(ds.id, 0))) elif col == 'macroareas': row.append(', '.join(sorted((cols[col].get(ds.id) or '').split(',')))) else: row.append('') table.append(row) totals = ['zztotal', len(args.cfg.datasets)] for i, col in enumerate(cols): if col in ['lexemes', 'all_lexemes']: totals.append(sum([r[i + 2] for r in table])) elif col == 'languages': totals.append(float(db.fetchone( "SELECT count(distinct glottocode) FROM languagetable")[0])) elif col == 'concepts': totals.append(float(db.fetchone( "SELECT count(distinct concepticon_id) FROM parametertable")[0])) elif col == 'families': totals.append(float(db.fetchone( "SELECT count(distinct family) FROM languagetable")[0])) else: totals.append('') table.append(totals) print(table.render( tablefmt='simple', sortkey=lambda r: r[0], condensed=False, floatfmt=',.0f'))
0.001982
def calc_wtd_exp(skydir, ltc, event_class, event_types, egy_bins, cth_bins, fn, nbin=16): """Calculate the effective exposure. Parameters ---------- skydir : `~astropy.coordinates.SkyCoord` ltc : `~fermipy.irfs.LTCube` nbin : int Number of points per decade with which to sample true energy. """ cnts = calc_counts_edisp(skydir, ltc, event_class, event_types, egy_bins, cth_bins, fn, nbin=nbin) flux = fn.flux(egy_bins[:-1], egy_bins[1:]) return cnts / flux[:, None]
0.003521
def snapshots(self, xml_bytes): """Parse the XML returned by the C{DescribeSnapshots} function. @param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root element. @return: A list of L{Snapshot} instances. TODO: ownersSet, restorableBySet, ownerId, volumeSize, description, ownerAlias. """ root = XML(xml_bytes) result = [] for snapshot_data in root.find("snapshotSet"): snapshot_id = snapshot_data.findtext("snapshotId") volume_id = snapshot_data.findtext("volumeId") status = snapshot_data.findtext("status") start_time = snapshot_data.findtext("startTime") start_time = datetime.strptime( start_time[:19], "%Y-%m-%dT%H:%M:%S") progress = snapshot_data.findtext("progress")[:-1] progress = float(progress or "0") / 100. snapshot = model.Snapshot( snapshot_id, volume_id, status, start_time, progress) result.append(snapshot) return result
0.001835
def write_newick(rootnode, features=None, format=1, format_root_node=True, is_leaf_fn=None, dist_formatter=None, support_formatter=None, name_formatter=None): """ Iteratively export a tree structure and returns its NHX representation. """ newick = [] leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children) for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn): if postorder: newick.append(")") if node.up is not None or format_root_node: newick.append(format_node(node, "internal", format, dist_formatter=dist_formatter, support_formatter=support_formatter, name_formatter=name_formatter)) newick.append(_get_features_string(node, features)) else: if node is not rootnode and node != node.up.children[0]: newick.append(",") if leaf(node): safe_name = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \ str(getattr(node, "name"))) newick.append(format_node(node, "leaf", format, dist_formatter=dist_formatter, support_formatter=support_formatter, name_formatter=name_formatter)) newick.append(_get_features_string(node, features)) else: newick.append("(") newick.append(";") return ''.join(newick)
0.012971
def check_output_command(file_path, head=None, tail=None): '''call check_output command to read content from a file''' if os.path.exists(file_path): if sys.platform == 'win32': cmds = ['powershell.exe', 'type', file_path] if head: cmds += ['|', 'select', '-first', str(head)] elif tail: cmds += ['|', 'select', '-last', str(tail)] return check_output(cmds, shell=True).decode('utf-8') else: cmds = ['cat', file_path] if head: cmds = ['head', '-' + str(head), file_path] elif tail: cmds = ['tail', '-' + str(tail), file_path] return check_output(cmds, shell=False).decode('utf-8') else: print_error('{0} does not exist!'.format(file_path)) exit(1)
0.001176
def get_handle(self, filepath): """Get the `FileHandle` object associated to a particular file """ localpath = self._get_localpath(filepath) return self._cache[localpath]
0.010309
def EnumerateQualifiers(self, namespace=None, **extra):
        # pylint: disable=invalid-name
        """
        Enumerate the qualifier types (= qualifier declarations) in a
        namespace.

        This method performs the EnumerateQualifiers operation
        (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
        methods performing such operations.

        If the operation succeeds, this method returns.
        Otherwise, this method raises an exception.

        Parameters:

          namespace (:term:`string`):
            Name of the namespace in which the qualifier declarations are to
            be enumerated (case independent).

            Leading and trailing slash characters will be stripped. The
            lexical case will be preserved.

            If `None`, the default namespace of the connection object will be
            used.

          **extra :
            Additional keyword arguments are passed as additional operation
            parameters to the WBEM server.
            Note that :term:`DSP0200` does not define any additional
            parameters for this operation.

        Returns:

            A list of :class:`~pywbem.CIMQualifierDeclaration` objects that
            are representations of the enumerated qualifier declarations.

        Raises:

            Exceptions described in :class:`~pywbem.WBEMConnection`.
        """
        exc = None
        qualifiers = None
        method_name = 'EnumerateQualifiers'

        if self._operation_recorders:
            self.operation_recorder_reset()
            self.operation_recorder_stage_pywbem_args(
                method=method_name,
                namespace=namespace,
                **extra)

        try:
            stats = self.statistics.start_timer(method_name)
            namespace = self._iparam_namespace_from_namespace(namespace)
            result = self._imethodcall(
                method_name,
                namespace,
                **extra)

            if result is None:
                qualifiers = []
            else:
                qualifiers = result[0][2]

            for qualifierdecl in qualifiers:
                if not isinstance(qualifierdecl, CIMQualifierDeclaration):
                    raise CIMXMLParseError(
                        _format("Expecting CIMQualifierDeclaration object in "
                                "result list, got {0} object",
                                qualifierdecl.__class__.__name__),
                        conn_id=self.conn_id)

            return qualifiers

        except (CIMXMLParseError, XMLParseError) as exce:
            exce.request_data = self.last_raw_request
            exce.response_data = self.last_raw_reply
            exc = exce
            raise
        except Exception as exce:
            exc = exce
            raise
        finally:
            self._last_operation_time = stats.stop_timer(
                self.last_request_len, self.last_reply_len,
                self.last_server_response_time, exc)
            if self._operation_recorders:
                # Stage the actual result list (or the exception) with the recorder.
                self.operation_recorder_stage_result(qualifiers, exc)
0.000945
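A hedged usage sketch for the method above; the server URL, credentials, and namespace are placeholders, and it assumes pywbem's usual WBEMConnection(url, creds) constructor:

import pywbem

conn = pywbem.WBEMConnection('https://cimserver.example.com',
                             ('user', 'password'))
# Enumerate the qualifier declarations in a namespace and print name/type.
for qualdecl in conn.EnumerateQualifiers(namespace='root/cimv2'):
    print(qualdecl.name, qualdecl.type)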
def transact(self, f, *a, **k): """ Execute C{f(*a, **k)} in the context of a database transaction. Any changes made to this L{Store} by C{f} will be committed when C{f} returns. If C{f} raises an exception, those changes will be reverted instead. If a transaction is already in progress (in this thread - ie, if a frame executing L{Store.transact} is already on the call stack), this will B{not} start a nested transaction. Changes will not be committed until the existing transaction completes, and an exception raised by C{f} will not revert changes made by C{f}. You probably don't want to ever call this if another transaction is in progress. @return: Whatever C{f(*a, **kw)} returns. @raise: Whatever C{f(*a, **kw)} raises, or a database exception. """ if self.transaction is not None: return f(*a, **k) if self.attachedToParent: return self.parent.transact(f, *a, **k) try: self._begin() try: result = f(*a, **k) self.checkpoint() except: exc = Failure() try: self.revert() except: log.err(exc) raise raise else: self._commit() return result finally: self._cleanupTxnState()
0.002668
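A sketch of how a caller might use transact to make a multi-step update atomic; store and item (and their attributes) are hypothetical:

def rename_and_bump(item, new_name):
    # Both changes commit together, or neither does if an exception escapes.
    item.name = new_name
    item.revision += 1
    return item.revision

new_revision = store.transact(rename_and_bump, item, u'renamed')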
def write(self, data):
        ''' Write data to the wrapped stream, surrounding each line with this
            object's start/default escape sequences.
            (This could be a bit less clumsy.)
        '''
        if data == '\n':    # print does this
            return self.stream.write(data)
        else:
            bytes_ = 0
            for line in data.splitlines(True):
                nl = ''
                if line.endswith('\n'):  # move the newline outside the sequences
                    line = line[:-1]
                    nl = '\n'
                bytes_ += self.stream.write(
                    f'{self.start}{line}{self.default}{nl}'
                ) or 0  # in case None returned (on Windows)
            return bytes_
0.003311
def get_tmpfile(requested_tmpdir=None, prefix=""): '''get a temporary file with an optional prefix. By default will be created in /tmp unless SREGISTRY_TMPDIR is set. By default, the file is closed (and just a name returned). Parameters ========== requested_tmpdir: an optional requested temporary directory, first priority as is coming from calling function. prefix: Given a need for a sandbox (or similar), prefix the file with this string. ''' # First priority for the base goes to the user requested. tmpdir = get_tmpdir(requested_tmpdir) # If tmpdir is set, add to prefix if tmpdir is not None: prefix = os.path.join(tmpdir, os.path.basename(prefix)) fd, tmp_file = tempfile.mkstemp(prefix=prefix) os.close(fd) return tmp_file
0.002389
def from_config(cls, config, name, section_key="segmenters"): """ Constructs a segmenter from a configuration doc. """ section = config[section_key][name] segmenter_class_path = section['class'] Segmenter = yamlconf.import_module(segmenter_class_path) return Segmenter.from_config(config, name, section_key=section_key)
0.005333
def insert(self, meter_db): """ Insert to :class:`~ekmmeters.MeterDB` subclass. Please note MeterDB subclassing is only for simplest-case. Args: meter_db (MeterDB): Instance of subclass of MeterDB. """ if meter_db: meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b) else: ekm_log("Attempt to insert when no MeterDB assigned.") pass
0.004515
def compare_networks(self, other): """Compare two IP objects. This is only concerned about the comparison of the integer representation of the network addresses. This means that the host bits aren't considered at all in this method. If you want to compare host bits, you can easily enough do a 'HostA._ip < HostB._ip' Args: other: An IP object. Returns: If the IP versions of self and other are the same, returns: -1 if self < other: eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25') IPv6Network('2001:db8::1000/124') < IPv6Network('2001:db8::2000/124') 0 if self == other eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24') IPv6Network('2001:db8::1000/124') == IPv6Network('2001:db8::1000/124') 1 if self > other eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25') IPv6Network('2001:db8::2000/124') > IPv6Network('2001:db8::1000/124') Raises: TypeError if the IP versions are different. """ # does this need to raise a ValueError? if self._version != other._version: raise TypeError('%s and %s are not of the same type' % ( self, other)) # self._version == other._version below here: if self.network_address < other.network_address: return -1 if self.network_address > other.network_address: return 1 # self.network_address == other.network_address below here: if self.netmask < other.netmask: return -1 if self.netmask > other.netmask: return 1 return 0
0.001074
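The same method is available on the stdlib ipaddress network types, so the docstring's examples can be checked directly:

import ipaddress

a = ipaddress.ip_network('192.0.2.0/25')
b = ipaddress.ip_network('192.0.2.128/25')

print(a.compare_networks(b))   # -1: a's network address sorts first
print(a.compare_networks(a))   #  0: identical networks
print(b.compare_networks(a))   #  1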
def get_app_env():
    """
    Read the app and env when passed on the command line as 'app=$app:$env'.

    :return: tuple (app, env)
    """
    app, env = None, get_env()
    if "app" in os.environ:
        app = os.environ["app"].lower()
        if ":" in app:
            # split only on the first ':' so extra colons don't break unpacking
            app, env = app.split(":", 1)
            set_env(env)
    return app, env
0.002762
def encode_message(self): """Encode message to AMQP wire-encoded bytearray. :rtype: bytearray """ if not self._message: raise ValueError("No message data to encode.") cloned_data = self._message.clone() self._populate_message_attributes(cloned_data) encoded_data = [] c_uamqp.get_encoded_message_size(cloned_data, encoded_data) return b"".join(encoded_data)
0.004525
def create(iterations=1000, distance=1.0, layout=LAYOUT_SPRING, depth=True):
    """ Returns a new graph with predefined styling.
    """
    #global _ctx
    _ctx.colormode(_ctx.RGB)
    g = graph(iterations, distance, layout)

    # Styles for different types of nodes.
    s = style.style
    g.styles.append(s(style.LIGHT    , _ctx, fill = _ctx.color(0.0, 0.0, 0.0, 0.20)))
    g.styles.append(s(style.DARK     , _ctx, fill = _ctx.color(0.3, 0.5, 0.7, 0.75)))
    g.styles.append(s(style.BACK     , _ctx, fill = _ctx.color(0.5, 0.8, 0.0, 0.50)))
    g.styles.append(s(style.IMPORTANT, _ctx, fill = _ctx.color(0.3, 0.6, 0.8, 0.75)))
    g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(1.0, 0.0, 0.5), strokewidth=1.5))
    g.styles.append(s(style.MARKED   , _ctx))
    g.styles.append(s(style.ROOT     , _ctx, text = _ctx.color(1.0, 0.0, 0.4, 1.00),
                                             stroke = _ctx.color(0.8, 0.8, 0.8, 0.60),
                                             strokewidth = 1.5,
                                             fontsize = 16,
                                             textwidth = 150))

    # Important nodes get a double stroke.
    def important_node(s, node, alpha=1.0):
        style.style(None, _ctx).node(s, node, alpha)
        r = node.r * 1.4
        _ctx.nofill()
        _ctx.oval(node.x-r, node.y-r, r*2, r*2)

    # Marked nodes have an inner dot.
    def marked_node(s, node, alpha=1.0):
        style.style(None, _ctx).node(s, node, alpha)
        r = node.r * 0.3
        _ctx.fill(s.stroke)
        _ctx.oval(node.x-r, node.y-r, r*2, r*2)

    g.styles.important.node = important_node
    g.styles.marked.node = marked_node
    g.styles.depth = depth

    # Styling guidelines. All nodes have the default style, except:
    # 1) a node directly connected to the root gets the LIGHT style.
    # 2) a node with more than 4 edges gets the DARK style.
    # 3) a node with a weight of 0.75-1.0 gets the IMPORTANT style.
    # 4) the graph.root node gets the ROOT style.
    # 5) the node last clicked gets the BACK style.
    g.styles.guide.append(style.LIGHT    , lambda graph, node: graph.root in node.links)
    g.styles.guide.append(style.DARK     , lambda graph, node: len(node.links) > 4)
    g.styles.guide.append(style.IMPORTANT, lambda graph, node: node.weight > 0.75)
    g.styles.guide.append(style.ROOT     , lambda graph, node: node == graph.root)
    g.styles.guide.append(style.BACK     , lambda graph, node: node == graph.events.clicked)

    # An additional rule applies every node's weight to its radius.
    def balance(graph, node):
        node.r = node.r*0.75 + node.r*node.weight*0.75
    g.styles.guide.append("balance", balance)

    # An additional rule that keeps leaf nodes closely clustered.
    def cluster(graph, node):
        if len(node.links) == 1:
            node.links.edge(node.links[0]).length *= 0.5
    g.styles.guide.append("cluster", cluster)

    # Apply the rules in this order; "balance" and "cluster" are the custom
    # rules defined above.
    g.styles.guide.order = [
        style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, style.BACK,
        "balance", "cluster"
    ]

    return g
0.023922
def _convert_value_to_native(value): """ Converts pysnmp objects into native Python objects. """ if isinstance(value, Counter32): return int(value.prettyPrint()) if isinstance(value, Counter64): return int(value.prettyPrint()) if isinstance(value, Gauge32): return int(value.prettyPrint()) if isinstance(value, Integer): return int(value.prettyPrint()) if isinstance(value, Integer32): return int(value.prettyPrint()) if isinstance(value, Unsigned32): return int(value.prettyPrint()) if isinstance(value, IpAddress): return str(value.prettyPrint()) if isinstance(value, OctetString): try: return value.asOctets().decode(value.encoding) except UnicodeDecodeError: return value.asOctets() if isinstance(value, TimeTicks): return timedelta(seconds=int(value.prettyPrint()) / 100.0) return value
0.001056
def parse_comments(document, xmlcontent): """Parse comments document. Comments are defined in file 'comments.xml' """ comments = etree.fromstring(xmlcontent) document.comments = {} for comment in comments.xpath('.//w:comment', namespaces=NAMESPACES): # w:author # w:id # w: date comment_id = comment.attrib[_name('{{{w}}}id')] comm = doc.CommentContent(comment_id) comm.author = comment.attrib.get(_name('{{{w}}}author'), None) comm.date = comment.attrib.get(_name('{{{w}}}date'), None) comm.elements = [parse_paragraph(document, para) for para in comment.xpath('.//w:p', namespaces=NAMESPACES)] document.comments[comment_id] = comm
0.002725
def variational_lower_bound(params, t, logprob, sampler, log_density, num_samples, rs): """Provides a stochastic estimate of the variational lower bound, for any variational family and model density.""" samples = sampler(params, num_samples, rs) log_qs = log_density(params, samples) log_ps = logprob(samples, t) log_ps = np.reshape(log_ps, (num_samples, -1)) log_qs = np.reshape(log_qs, (num_samples, -1)) return np.mean(log_ps - log_qs)
0.001996
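As a rough illustration of how the three callbacks fit together, here is a toy diagonal-Gaussian variational family against a standard-normal target; everything below is made up for the example and is not part of the code above:

import numpy as np

D = 2  # latent dimension

def sampler(params, num_samples, rs):
    # Reparameterised draws from q: mean + sigma * eps.
    mean, log_std = params
    return rs.randn(num_samples, D) * np.exp(log_std) + mean

def log_density(params, samples):
    # log q(z) for a diagonal Gaussian.
    mean, log_std = params
    return np.sum(-0.5 * ((samples - mean) / np.exp(log_std)) ** 2
                  - log_std - 0.5 * np.log(2 * np.pi), axis=1)

def logprob(samples, t):
    # Toy target density: standard normal, ignoring the iteration counter t.
    return np.sum(-0.5 * samples ** 2 - 0.5 * np.log(2 * np.pi), axis=1)

params = (np.zeros(D), np.zeros(D))
rs = np.random.RandomState(0)
print(variational_lower_bound(params, 0, logprob, sampler, log_density, 200, rs))
# close to 0.0 (up to Monte Carlo noise), since q already matches the target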
def setLaneChangeMode(self, vehID, lcm): """setLaneChangeMode(string, integer) -> None Sets the vehicle's lane change mode as a bitset. """ self._connection._sendIntCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_LANECHANGE_MODE, vehID, lcm)
0.007168
def create_rackservers(self): """Get an instance of rackservers services facade.""" return RackServers( self.networkapi_url, self.user, self.password, self.user_ldap)
0.008696
def operation_recorder_enabled(self, value): """Setter method; for a description see the getter method.""" for recorder in self._operation_recorders: if value: recorder.enable() else: recorder.disable()
0.007299
def Freqs(self,jr,jphi,jz,**kwargs): """ NAME: Freqs PURPOSE: return the frequencies corresponding to a torus INPUT: jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (object-wide value) goal for |dJ|/|J| along the torus OUTPUT: (OmegaR,Omegaphi,Omegaz) HISTORY: 2015-08-07 - Written - Bovy (UofT) """ out= actionAngleTorus_c.actionAngleTorus_Freqs_c(\ self._pot, jr,jphi,jz, tol=kwargs.get('tol',self._tol)) if out[3] != 0: warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[3],_autofit_errvals[out[3]]), galpyWarning) return out
0.014925
def _update_cinder_config(cls): """Parse in-memory file to update OSLO configuration used by Cinder.""" cls._config_string_io.seek(0) cls._parser.write(cls._config_string_io) # Check if we have any multiopt cls._config_string_io.seek(0) current_cfg = cls._config_string_io.read() if '\n\t' in current_cfg: cls._config_string_io.seek(0) cls._config_string_io.write(current_cfg.replace('\n\t', '\n')) cls._config_string_io.seek(0) cfg.CONF.reload_config_files()
0.003591
def compute_tf(self, sentences): """ Computes the normalized term frequency as explained in http://www.tfidf.com/ :type sentences: [sumy.models.dom.Sentence] """ content_words = self._get_all_content_words_in_doc(sentences) content_words_count = len(content_words) content_words_freq = self._compute_word_freq(content_words) content_word_tf = dict((w, f / content_words_count) for w, f in content_words_freq.items()) return content_word_tf
0.007813
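The normalization itself is just count divided by the number of content words; a standalone sketch without the sumy document model:

from collections import Counter

# Hypothetical content words extracted from a document (stop words removed).
content_words = ['cat', 'sat', 'mat', 'cat']

counts = Counter(content_words)
total = len(content_words)
tf = {word: freq / total for word, freq in counts.items()}
print(tf)   # {'cat': 0.5, 'sat': 0.25, 'mat': 0.25}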
def update_edge_keys(G): """ Update the keys of edges that share a u, v with another edge but differ in geometry. For example, two one-way streets from u to v that bow away from each other as separate streets, rather than opposite direction edges of a single street. Parameters ---------- G : networkx multidigraph Returns ------- networkx multigraph """ # identify all the edges that are duplicates based on a sorted combination # of their origin, destination, and key. that is, edge uv will match edge vu # as a duplicate, but only if they have the same key edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=False) edges['uvk'] = edges.apply(lambda row: '_'.join(sorted([str(row['u']), str(row['v'])]) + [str(row['key'])]), axis=1) edges['dupe'] = edges['uvk'].duplicated(keep=False) dupes = edges[edges['dupe']==True].dropna(subset=['geometry']) different_streets = [] groups = dupes[['geometry', 'uvk', 'u', 'v', 'key', 'dupe']].groupby('uvk') # for each set of duplicate edges for label, group in groups: # if there are more than 2 edges here, make sure to compare all if len(group['geometry']) > 2: l = group['geometry'].tolist() l.append(l[0]) geom_pairs = list(zip(l[:-1], l[1:])) # otherwise, just compare the first edge to the second edge else: geom_pairs = [(group['geometry'].iloc[0], group['geometry'].iloc[1])] # for each pair of edges to compare for geom1, geom2 in geom_pairs: # if they don't have the same geometry, flag them as different streets if not is_same_geometry(geom1, geom2): # add edge uvk, but not edge vuk, otherwise we'll iterate both their keys # and they'll still duplicate each other at the end of this process different_streets.append((group['u'].iloc[0], group['v'].iloc[0], group['key'].iloc[0])) # for each unique different street, iterate its key + 1 so it's unique for u, v, k in set(different_streets): # filter out key if it appears in data dict as we'll pass it explicitly attributes = {k:v for k, v in G[u][v][k].items() if k != 'key'} G.add_edge(u, v, key=k+1, **attributes) G.remove_edge(u, v, key=k) return G
0.007083
def _delete_sbo_tar_gz(self): """Delete slackbuild tar.gz file after untar """ if not self.auto and os.path.isfile(self.meta.build_path + self.script): os.remove(self.meta.build_path + self.script)
0.012876
def createStateText(self): '''Creates the mode and arm state text.''' self.modeText = self.axes.text(self.leftPos+(self.vertSize/10.0),0.97,'UNKNOWN',color='grey',size=1.5*self.fontSize,ha='left',va='top') self.modeText.set_path_effects([PathEffects.withStroke(linewidth=self.fontSize/10.0,foreground='black')])
0.032836
def partition_expiration(self): """Union[int, None]: Expiration time in milliseconds for a partition. If :attr:`partition_expiration` is set and :attr:`type_` is not set, :attr:`type_` will default to :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`. """ warnings.warn( "This method will be deprecated in future versions. Please use " "Table.time_partitioning.expiration_ms instead.", PendingDeprecationWarning, stacklevel=2, ) if self.time_partitioning is not None: return self.time_partitioning.expiration_ms
0.003086
def getproject_cmd(argv): """Print a virtualenv's project directory, if set. If called without providing a virtualenv name as argument, print the current virtualenv's project directory. """ # Parse command line arguments parser = argparse.ArgumentParser( description="Print an environment's project directory.", ) parser.add_argument( 'envname', nargs='?', default=os.environ.get('VIRTUAL_ENV'), help=( 'The name of the environment to return the project directory ' 'for. If omitted, will use the currently active environment.' ), ) args = parser.parse_args(argv) # Now, do the actual work if not args.envname: sys.exit('ERROR: no virtualenv active') if not (workon_home / args.envname).exists(): sys.exit("ERROR: Environment '{0}' does not exist." .format(args.envname)) project_dir = get_project_dir(args.envname) if project_dir is None: sys.exit("ERROR: no project directory set for Environment '{0}'" .format(args.envname)) print(project_dir)
0.000876
def host_validator(value, **kwargs): """ From: http://stackoverflow.com/questions/2532053/validate-a-hostname-string According to: http://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names """ scheme, hostname, port, path = decompose_hostname(value) if len(hostname) > 255: return False if hostname[-1] == ".": hostname = hostname[:-1] # strip exactly one dot from the right, if present allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) with ErrorDict() as errors: if not all(allowed.match(x) for x in hostname.split(".")): errors.add_error( 'invalid', MESSAGES['host']['invalid'].format(value), ) if path: errors.add_error( 'path', MESSAGES['host']['may_not_include_path'].format(value), ) if scheme: errors.add_error( 'scheme', MESSAGES['host']['may_not_include_scheme'].format(value), )
0.003749
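The per-label check above can be exercised on its own; a small sketch using the same regex, without the ErrorDict plumbing:

import re

allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)

def labels_ok(hostname):
    # Strip exactly one trailing dot, then validate every dot-separated label.
    if hostname.endswith("."):
        hostname = hostname[:-1]
    return all(allowed.match(label) for label in hostname.split("."))

print(labels_ok("api.example.com"))    # True
print(labels_ok("-bad-.example.com"))  # False: labels may not start or end with '-'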
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True, errors='replace', separator='&', cls=None): """Parse a querystring and return it as :class:`MultiDict`. Per default only values are decoded into unicode strings. If `decode_keys` is set to `True` the same will happen for keys. Per default a missing value for a key will default to an empty key. If you don't want that behavior you can set `include_empty` to `False`. Per default encoding errors are ignored. If you want a different behavior you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. .. versionchanged:: 0.5 In previous versions ";" and "&" could be used for url decoding. This changed in 0.5 where only "&" is supported. If you want to use ";" instead a different `separator` can be provided. The `cls` parameter was added. :param s: a string with the query string to decode. :param charset: the charset of the query string. :param decode_keys: set to `True` if you want the keys to be decoded as well. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used. """ if cls is None: cls = MultiDict return cls(_url_decode_impl(str(s).split(separator), charset, decode_keys, include_empty, errors))
0.000576
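Typical usage might look like the following, assuming the returned MultiDict exposes the usual getlist() accessor:

d = url_decode('name=alice&tag=a&tag=b')
print(d['name'])          # 'alice'
print(d.getlist('tag'))   # ['a', 'b']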
def asobject(self):
        """
        Return object Series which contains boxed values.

        .. deprecated:: 0.23.0

           Use ``astype(object)`` instead.

        *this is an internal non-public method*
        """
        warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
                      " instead", FutureWarning, stacklevel=2)
        return self.astype(object).values
0.004963
def build(self, id, **kwargs): """ Builds the Configurations for the Specified Set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.build(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build Configuration Set id (required) :param str callback_url: Optional Callback URL :param bool temporary_build: Is it a temporary build or a standard build? :param bool force_rebuild: DEPRECATED: Use RebuildMode. :param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds. :param str rebuild_mode: Rebuild Modes: FORCE: always rebuild all the configurations in the set; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated; :return: BuildConfigSetRecordSingleton If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.build_with_http_info(id, **kwargs) else: (data) = self.build_with_http_info(id, **kwargs) return data
0.003706