text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def save_module(self, obj): """ Save a module as an import """ self.modules.add(obj) if _is_dynamic(obj): self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj) else: self.save_reduce(subimport, (obj.__name__,), obj=obj)
0.005917
def new_build(): """Page for creating or editing a build.""" form = forms.BuildForm() if form.validate_on_submit(): build = models.Build() form.populate_obj(build) build.owners.append(current_user) db.session.add(build) db.session.flush() auth.save_admin_log(build, created_build=True, message=build.name) db.session.commit() operations.UserOps(current_user.get_id()).evict() logging.info('Created build via UI: build_id=%r, name=%r', build.id, build.name) return redirect(url_for('view_build', id=build.id)) return render_template( 'new_build.html', build_form=form)
0.001414
def get_default_ENV(env): """ A fiddlin' little function that has an 'import SCons.Environment' which can't be moved to the top level without creating an import loop. Since this import creates a local variable named 'SCons', it blocks access to the global variable, so we move it here to prevent complaints about local variables being used uninitialized. """ global default_ENV try: return env['ENV'] except KeyError: if not default_ENV: import SCons.Environment # This is a hideously expensive way to get a default shell # environment. What it really should do is run the platform # setup to get the default ENV. Fortunately, it's incredibly # rare for an Environment not to have a shell environment, so # we're not going to worry about it overmuch. default_ENV = SCons.Environment.Environment()['ENV'] return default_ENV
0.001028
def rename_window(self, new_name): """ Return :class:`Window` object ``$ tmux rename-window <new_name>``. Parameters ---------- new_name : str name of the window """ import shlex lex = shlex.shlex(new_name) lex.escape = ' ' lex.whitespace_split = False try: self.cmd('rename-window', new_name) self['window_name'] = new_name except Exception as e: logger.error(e) self.server._update_windows() return self
0.003503
def update(self, E=None, **F): """ Update ContextDict from dict/iterable E and F :return: Nothing :rtype: None """ if E is not None: if hasattr(E, 'keys'): for K in E: self.replace(K, E[K]) elif hasattr(E, 'items'): for K, V in E.items(): self.replace(K, V) else: for K, V in E: self.replace(K, V) for K in F: self.replace(K, F[K])
0.003683
def is_extension_type(arr): """ Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
0.000669
def _find_cellid(self, code): """Determines the most similar cell (if any) to the specified code. It must have at least 50% overlap ratio and have been a loop-intercepted cell previously. Args: code (str): contents of the code cell that were executed. """ from difflib import SequenceMatcher maxvalue = 0. maxid = None for cellid, c in self.cellids.items(): matcher = SequenceMatcher(a=c, b=code) ratio = matcher.quick_ratio() if ratio > maxvalue and ratio > 0.5: maxid, maxvalue = cellid, ratio return maxid
0.004525
def lock(self, lock=True, changelist=0): """Locks or unlocks the file :param lock: Lock or unlock the file :type lock: bool :param changelist: Optional changelist to checkout the file into :type changelist: :class:`.Changelist` """ cmd = 'lock' if lock else 'unlock' if changelist: self._connection.run([cmd, '-c', changelist, self.depotFile]) else: self._connection.run([cmd, self.depotFile]) self.query()
0.003899
def r_division(onarray, offarray, rarray, mode='mean'): """Apply R division. Args: onarray (decode.array): Decode array of on-point observations. offarray (decode.array): Decode array of off-point observations. rarray (decode.array): Decode array of R observations. mode (str): Method for the selection of nominal R value. 'mean': Mean. 'median': Median. Returns: onarray_cal (decode.array): Calibrated array of on-point observations. offarray_cal (decode.array): Calibrated array of off-point observations. """ logger = getLogger('decode.models.r_division') logger.info('mode') logger.info('{}'.format(mode)) offid = np.unique(offarray.scanid) onid = np.unique(onarray.scanid) rid = np.unique(rarray.scanid) onarray = onarray.copy() # Xarray onvalues = onarray.values onscanid = onarray.scanid.values offarray = offarray.copy() # Xarray offvalues = offarray.values offscanid = offarray.scanid.values rarray = rarray.copy() # Xarray rvalues = rarray.values rscanid = rarray.scanid.values for i in onid: rleftid = np.searchsorted(rid, i) - 1 rrightid = np.searchsorted(rid, i) if rleftid == -1: Xr = rvalues[rscanid == rid[rrightid]] Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0) elif rrightid == len(rid): Xr = rvalues[rscanid == rid[rleftid]] Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0) else: Xr_l = rvalues[rscanid == rid[rleftid]] Xr_r = rvalues[rscanid == rid[rrightid]] Xr_m = getattr(np, 'nan'+mode)(np.vstack([Xr_l, Xr_r]), axis=0) onvalues[onscanid == i] /= Xr_m for j in offid: rleftid = np.searchsorted(rid, j) - 1 rrightid = np.searchsorted(rid, j) Xoff_m = getattr(np, 'nan'+mode)(offvalues[offscanid == j], axis=0) if rleftid == -1: Xr = rvalues[rscanid == rid[rrightid]] Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0) elif rrightid == len(rid): Xr = rvalues[rscanid == rid[rleftid]] Xr_m = getattr(np, 'nan'+mode)(Xr, axis=0) else: Xr_l = rvalues[rscanid == rid[rleftid]] Xr_r = rvalues[rscanid == rid[rrightid]] Xr_m = getattr(np, 'nan'+mode)(np.vstack([Xr_l, Xr_r]), axis=0) offvalues[offscanid == j] /= Xr_m Xon_rdiv = dc.full_like(onarray, onarray) Xoff_rdiv = dc.full_like(offarray, offarray) Xonoff_rdiv = dc.concat([Xon_rdiv, Xoff_rdiv], dim='t') Xonoff_rdiv_sorted = Xonoff_rdiv[np.argsort(Xonoff_rdiv.time.values)] scantype = Xonoff_rdiv_sorted.scantype.values newscanid = np.cumsum(np.hstack([False, scantype[1:] != scantype[:-1]])) onmask = np.in1d(Xonoff_rdiv_sorted.scanid, onid) offmask = np.in1d(Xonoff_rdiv_sorted.scanid, offid) Xon_rdiv = Xonoff_rdiv_sorted[onmask] Xoff_rdiv = Xonoff_rdiv_sorted[offmask] Xon_rdiv.coords.update({'scanid': ('t', newscanid[onmask])}) Xoff_rdiv.coords.update({'scanid': ('t', newscanid[offmask])}) return Xon_rdiv, Xoff_rdiv
0.010546
def get_image_descriptor(self, im, xy=None): """ get_image_descriptor(im, xy=None) Used for the local color table properties per image. Otherwise the global color table applies to all frames, irrespective of whether additional colors come into play that require a redefined palette. Still a maximum of 256 colors per frame, obviously. Written by Ant1 on 2010-08-22 Modified by Alex Robinson in January 2011 to implement subrectangles. """ # Default: use full image and place at upper left if xy is None: xy = (0, 0) # Image separator, bb = '\x2C' # Image position and size bb += int_to_bin(xy[0]) # Left position bb += int_to_bin(xy[1]) # Top position bb += int_to_bin(im.size[0]) # image width bb += int_to_bin(im.size[1]) # image height # packed field: local color table flag1, interlace0, sorted table0, # reserved00, lct size111=7=2^(7+1)=256. bb += '\x87' # LZW minimum size code now comes later, beginning of [image data] blocks return bb
0.002641
def select_lamb(self, lamb=None, out=bool): """ Return a wavelength index array Return a boolean or integer index array, hereafter called 'ind' The array refers to the reference wavelength vector self.ddataRef['lamb'] Parameters ---------- lamb : None / float / np.ndarray / list / tuple The wavelength values to be selected: - None : ind matches all wavelength values - float : ind is True only for the wavelength closest to lamb - np.ndarray : ind True only for the wavelengths closest to lamb - list (len()==2): ind True for wavelengths in [lamb[0],lamb[1]] - tuple (len()==2): ind True for wavelengths outside ]lamb[0];lamb[1][ out : type Specifies the type of the output index array: - bool : return a boolean array of shape (self.ddataRef['nlamb'],) - int : return the array as integer indices Returns ------- ind : np.ndarray The array of indices, of dtype specified by keyword arg out """ if not self._isSpectral(): msg = "" raise Exception(msg) assert out in [bool,int] ind = _select_ind(lamb, self._ddataRef['lamb'], self._ddataRef['nlamb']) if out is int: ind = ind.nonzero()[0] return ind
0.003569
def qs_field( model_class, field, filters=None, formatter=queryset_formatter, manager_name='objects', ): """ Show computed fields based on QuerySets. This is a workaround since sometimes some filtering is involved to see if a user owns an object, is a student, etc. Example ------- class MyModel(ModelView): details_extra_columns = [ ('courses_owned', 'Courses (Owner of)'), ] column_formatters_detail = { 'courses_owned': qs_field(model.Course, 'owner'), } """ if filters is None: filters = {} def _(view, context, _model, name): filters[field] = _model # e.g. students: user # e.g. User.objects, User.deleted_objects manager = getattr(model_class, manager_name) return formatter(manager(**filters)) return _
0.002281
def deltas(errors, epsilon, mean, std): """Compute mean and std deltas. delta_mean = mean(errors) - mean(all errors below epsilon) delta_std = std(errors) - std(all errors below epsilon) """ below = errors[errors <= epsilon] if not len(below): return 0, 0 return mean - below.mean(), std - below.std()
0.00295
def _wait_threads(self): """ Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. """ # Note that you need two loops, since you can't say which # thread will get each sentinel for t in self._threads: self._to_fetch.put(None) # sentinel for t in self._threads: t.join() self._threads = []
0.004706
def authenticate(self, request): """ Authenticate a user from a token form field Errors thrown here will be swallowed by django-rest-framework, and it expects us to return None if authentication fails. """ try: key = request.data['token'] except KeyError: return try: token = AuthToken.objects.get(key=key) except AuthToken.DoesNotExist: return return (token.user, token)
0.003984
def periodogram_auto(self, oversampling=5, nyquist_factor=3, return_periods=True): """Compute the periodogram on an automatically-determined grid This function uses heuristic arguments to choose a suitable frequency grid for the data. Note that depending on the data window function, the model may be sensitive to periodicity at higher frequencies than this function returns! The final number of frequencies will be Nf = oversampling * nyquist_factor * len(t) / 2 Parameters ---------- oversampling : float the number of samples per approximate peak width nyquist_factor : float the highest frequency, in units of the nyquist frequency for points spread uniformly through the data range. Returns ------- period : ndarray the grid of periods power : ndarray the power at each frequency """ N = len(self.t) T = np.max(self.t) - np.min(self.t) df = 1. / T / oversampling f0 = df Nf = int(0.5 * oversampling * nyquist_factor * N) freq = f0 + df * np.arange(Nf) return 1. / freq, self._score_frequency_grid(f0, df, Nf)
0.002333
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None): ''' Attaches given usage plan to each of the apis provided in a list of apiId and stage values .. versionadded:: 2017.7.0 apis a list of dictionaries, where each dictionary contains the following: apiId a string, which is the id of the created API in AWS ApiGateway stage a string, which is the stage that the created API is deployed to. CLI Example: .. code-block:: bash salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]' ''' return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile)
0.006173
def peaks(samples): """ Find the minimum and maximum peak of the samples. Returns that pair in the order they were found. So if min was found first, it returns (min, max) else the other way around. """ max_index = numpy.argmax(samples) max_value = samples[max_index] min_index = numpy.argmin(samples) min_value = samples[min_index] if min_index < max_index: return (min_value, max_value) else: return (max_value, min_value)
0.004193
def dump(self, stream=None, encoding='utf8', encoding_errors='ignore'): # pylint: disable=arguments-differ """Writes a stream to a file. :param stream: An ``io.StringIO`` instance. A ``basestring`` is also possible and gets converted to ``io.StringIO``. :param encoding: (optional) The character encoding of the file. :rtype: TextFile """ if stream is None: stream = StringIO() if isinstance(stream, string_types): stream = StringIO(stream) stream = self.codec.compress( BytesIO(stream.read().encode(encoding, encoding_errors)) ) self.fs.dump(stream) return self
0.004121
def set_value(self, instance, value): '''Set the ``value`` for this :class:`Field` in a ``instance`` of a :class:`StdModel`.''' setattr(instance, self.attname, self.to_python(value))
0.010101
def upload_custom_service_account_avatar(self, account, avatar): """ Set the avatar for a custom service (customer support) account. :param account: username of the custom service account :param avatar: avatar file, must be in jpg format :return: the returned JSON data """ return self.post( url= "http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg", params={ "access_token": self.token, "kf_account": account }, files={"media": avatar} )
0.006061
def cli(context, verbose, api_key, base_url, workers): '''Planet API Client''' configure_logging(verbose) client_params.clear() client_params['api_key'] = api_key client_params['workers'] = workers if base_url: client_params['base_url'] = base_url
0.003559
def cut_levels(self, loval, hival, no_reset=False): """Apply cut levels on the image view. Parameters ---------- loval, hival : float Low and high values of the cut levels, respectively. no_reset : bool Do not reset ``autocuts`` setting. """ self.t_.set(cuts=(loval, hival)) # If user specified "override" or "once" for auto levels, # then turn off auto levels now that they have set the levels # manually if (not no_reset) and (self.t_['autocuts'] in ('once', 'override')): self.t_.set(autocuts='off')
0.003175
def _render_full_resource(self, instance, include, fields): """ Generate a representation of a full resource to match JSON API spec. :param instance: The instance to serialize :param include: Dictionary of relationships to include :param fields: Dictionary of fields to filter """ api_type = instance.__jsonapi_type__ orm_desc_keys = instance.__mapper__.all_orm_descriptors.keys() to_ret = { 'id': instance.id, 'type': api_type, 'attributes': {}, 'relationships': {}, 'included': {} } attrs_to_ignore = {'__mapper__', 'id'} if api_type in fields.keys(): local_fields = list(map(( lambda x: instance.__jsonapi_map_to_py__[x]), fields[api_type])) else: local_fields = orm_desc_keys for key, relationship in instance.__mapper__.relationships.items(): attrs_to_ignore |= set([c.name for c in relationship.local_columns ]) | {key} api_key = instance.__jsonapi_map_to_api__[key] try: desc = get_rel_desc(instance, key, RelationshipActions.GET) except PermissionDeniedError: continue if relationship.direction == MANYTOONE: if key in local_fields: to_ret['relationships'][api_key] = { 'links': self._lazy_relationship(api_type, instance.id, api_key) } if api_key in include.keys(): related = desc(instance) if related is not None: perm = get_permission_test( related, None, Permissions.VIEW) if (key in local_fields and (related is None or not perm(related))): to_ret['relationships'][api_key]['data'] = None continue if key in local_fields: to_ret['relationships'][api_key]['data'] = self._render_short_instance(related) # NOQA new_include = self._parse_include(include[api_key]) built = self._render_full_resource( related, new_include, fields) included = built.pop('included') to_ret['included'].update(included) to_ret['included'][(related.__jsonapi_type__, related.id)] = built # NOQA else: if key in local_fields: to_ret['relationships'][api_key] = { 'links': self._lazy_relationship( api_type, instance.id, api_key), } if api_key not in include.keys(): continue if key in local_fields: to_ret['relationships'][api_key]['data'] = [] related = desc(instance) for item in related: try: check_permission(item, None, Permissions.VIEW) except PermissionDeniedError: continue if key in local_fields: to_ret['relationships'][api_key]['data'].append( self._render_short_instance(item)) new_include = self._parse_include(include[api_key]) built = self._render_full_resource(item, new_include, fields) included = built.pop('included') to_ret['included'].update(included) to_ret['included'][(item.__jsonapi_type__, item.id)] = built # NOQA for key in set(orm_desc_keys) - attrs_to_ignore: try: desc = get_attr_desc(instance, key, AttributeActions.GET) if key in local_fields: to_ret['attributes'][instance.__jsonapi_map_to_api__[key]] = desc(instance) # NOQA except PermissionDeniedError: continue return to_ret
0.000462
def encoding_and_executable(notebook, metadata, ext): """Return encoding and executable lines for a notebook, if applicable""" lines = [] comment = _SCRIPT_EXTENSIONS.get(ext, {}).get('comment') jupytext_metadata = metadata.get('jupytext', {}) if ext not in ['.Rmd', '.md'] and 'executable' in jupytext_metadata: lines.append(comment + '!' + jupytext_metadata.pop('executable')) if 'encoding' in jupytext_metadata: lines.append(jupytext_metadata.pop('encoding')) elif ext not in ['.Rmd', '.md']: for cell in notebook.cells: try: cell.source.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): lines.append(comment + _UTF8_HEADER) break return lines
0.001263
def __write_columns(self, pc, table): """ Read numeric data from csv and write to the bottom section of the txt file. :param dict table: Paleodata dictionary :return none: """ logger_lpd_noaa.info("writing section: data, csv values from file") # get filename for this table's csv data # filename = self.__get_filename(table) # logger_lpd_noaa.info("processing csv file: {}".format(filename)) # # get missing value for this table # # mv = self.__get_mv(table) # # write template lines # # self.__write_template_paleo(mv) if pc == "paleo": self.__write_template_paleo() elif pc == "chron": self.__write_template_chron() # continue if csv exists if self._values_exist(table): # logger_lpd_noaa.info("_write_columns: csv data exists: {}".format(filename)) # sort the dictionary so the year column is first _csv_data_by_name = self.__put_year_col_first(table["columns"]) # now split the sorted dictionary back into two lists (easier format to write to file) _names, _data = self.__rm_names_on_csv_cols(_csv_data_by_name) # write column variableNames self.__write_data_col_header(_names, pc) # write data columns index by index self.__write_data_col_vals(_data, pc) return
0.003453
def _as_document(self, dataset): """ Converts dataset to document indexed by to FTS index. Args: dataset (orm.Dataset): dataset to convert. Returns: dict with structure matches to BaseDatasetIndex._schema. """ # find tables. assert isinstance(dataset, Dataset) execute = object_session(dataset).connection().execute query = text(""" SELECT t_name, c_name, c_description FROM columns JOIN tables ON c_t_vid = t_vid WHERE t_d_vid = :dataset_vid;""") columns = u('\n').join( [u(' ').join(list(text_type(e) for e in t)) for t in execute(query, dataset_vid=str(dataset.identity.vid))]) doc = '\n'.join([u('{}').format(x) for x in [dataset.config.metadata.about.title, dataset.config.metadata.about.summary, dataset.identity.id_, dataset.identity.vid, dataset.identity.source, dataset.identity.name, dataset.identity.vname, columns]]) # From the source, make a variety of combinations for keywords: # foo.bar.com -> "foo foo.bar foo.bar.com bar.com" parts = u('{}').format(dataset.identity.source).split('.') sources = (['.'.join(g) for g in [parts[-i:] for i in range(2, len(parts) + 1)]] + ['.'.join(g) for g in [parts[:i] for i in range(0, len(parts))]]) # Re-calculate the summarization of grains, since the geoid 0.0.7 package had a bug where state level # summaries had the same value as state-level allvals def resum(g): try: return str(GVid.parse(g).summarize()) except (KeyError, ValueError): return g def as_list(value): """ Converts value to the list. """ if not value: return [] if isinstance(value, string_types): lst = [value] else: try: lst = list(value) except TypeError: lst = [value] return lst about_time = as_list(dataset.config.metadata.about.time) about_grain = as_list(dataset.config.metadata.about.grain) keywords = ( list(dataset.config.metadata.about.groups) + list(dataset.config.metadata.about.tags) + about_time + [resum(g) for g in about_grain] + sources) document = dict( vid=u('{}').format(dataset.identity.vid), title=u('{} {}').format(dataset.identity.name, dataset.config.metadata.about.title), doc=u('{}').format(doc), keywords=' '.join(u('{}').format(x) for x in keywords) ) return document
0.002886
def main(): """Parse the command line and run :func:`migrate`.""" parser = get_args_parser() args = parser.parse_args() config = Config.from_parse_args(args) migrate(config)
0.005181
def entry_for_view(self, view, perm_name): """Get registry entry for permission if ``view`` requires it. In other words, if ``view`` requires the permission specified by ``perm_name``, return the :class:`Entry` associated with the permission. If ``view`` doesn't require the permission, return ``None`` instead. """ view_name = self._get_view_name(view) entry = self._get_entry(perm_name) if view_name in entry.views: return entry return None
0.003731
def set_dns(name, dnsservers=None, searchdomains=None, path=None): ''' .. versionchanged:: 2015.5.0 The ``dnsservers`` and ``searchdomains`` parameters can now be passed as a comma-separated list. Update /etc/resolv.confo path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion lxc.set_dns ubuntu "['8.8.8.8', '4.4.4.4']" ''' if dnsservers is None: dnsservers = ['8.8.8.8', '4.4.4.4'] elif not isinstance(dnsservers, list): try: dnsservers = dnsservers.split(',') except AttributeError: raise SaltInvocationError( 'Invalid input for \'dnsservers\' parameter' ) if searchdomains is None: searchdomains = [] elif not isinstance(searchdomains, list): try: searchdomains = searchdomains.split(',') except AttributeError: raise SaltInvocationError( 'Invalid input for \'searchdomains\' parameter' ) dns = ['nameserver {0}'.format(x) for x in dnsservers] dns.extend(['search {0}'.format(x) for x in searchdomains]) dns = '\n'.join(dns) + '\n' # we may be using resolvconf in the container # We need to handle that case with care: # - we create the resolv.conf runtime directory (the # linked directory) as anyway it will be shadowed when the real # runned tmpfs mountpoint will be mounted. # ( /etc/resolv.conf -> ../run/resolvconf/resolv.conf) # Indeed, it can save us in any other case (running, eg, in a # bare chroot when repairing or preparing the container for # operation. # - We also teach resolvconf to use the aforementioned dns. # - We finally also set /etc/resolv.conf in all cases rstr = __salt__['test.random_hash']() # no tmp here, apparmor won't let us execute ! script = '/sbin/{0}_dns.sh'.format(rstr) DNS_SCRIPT = "\n".join([ # 'set -x', '#!/usr/bin/env bash', 'if [ -h /etc/resolv.conf ];then', ' if [ "x$(readlink /etc/resolv.conf)"' ' = "x../run/resolvconf/resolv.conf" ];then', ' if [ ! -d /run/resolvconf/ ];then', ' mkdir -p /run/resolvconf', ' fi', ' cat > /etc/resolvconf/resolv.conf.d/head <<EOF', dns, 'EOF', '', ' fi', 'fi', 'cat > /etc/resolv.conf <<EOF', dns, 'EOF', '']) result = run_all( name, 'tee {0}'.format(script), path=path, stdin=DNS_SCRIPT, python_shell=True) if result['retcode'] == 0: result = run_all( name, 'sh -c "chmod +x {0};{0}"'.format(script), path=path, python_shell=True) # blindly delete the setter file run_all(name, 'sh -c \'if [ -f "{0}" ];then rm -f "{0}";fi\''.format(script), path=path, python_shell=True) if result['retcode'] != 0: error = ('Unable to write to /etc/resolv.conf in container \'{0}\'' .format(name)) if result['stderr']: error += ': {0}'.format(result['stderr']) raise CommandExecutionError(error) return True
0.000302
def ls_remote(cwd=None, remote='origin', ref=None, opts='', git_opts='', user=None, password=None, identity=None, https_user=None, https_pass=None, ignore_retcode=False, output_encoding=None, saltenv='base'): ''' Interface to `git-ls-remote(1)`_. Returns the upstream hash for a remote reference. cwd The path to the git checkout. Optional (and ignored if present) when ``remote`` is set to a URL instead of a remote name. remote : origin The name of the remote to query. Can be the name of a git remote (which exists in the git checkout defined by the ``cwd`` parameter), or the URL of a remote repository. .. versionchanged:: 2015.8.0 Argument renamed from ``repository`` to ``remote`` ref The name of the ref to query. Optional, if not specified, all refs are returned. Can be a branch or tag name, or the full name of the reference (for example, to get the hash for a Github pull request number 1234, ``ref`` can be set to ``refs/pull/1234/head`` .. versionchanged:: 2015.8.0 Argument renamed from ``branch`` to ``ref`` .. versionchanged:: 2015.8.4 Defaults to returning all refs instead of master. opts Any additional options to add to the command line, in a single string .. versionadded:: 2015.8.0 git_opts Any additional options to add to git command itself (not the ``ls-remote`` subcommand), in a single string. This is useful for passing ``-c`` to run git with temporary changes to the git configuration. .. versionadded:: 2017.7.0 .. note:: This is only supported in git 1.7.2 and newer. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 identity Path to a private key to use for ssh URLs .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. Key can also be specified as a SaltStack file server URL, eg. salt://location/identity_file .. versionchanged:: 2016.3.0 https_user Set HTTP Basic Auth username. Only accepted for HTTPS URLs. .. versionadded:: 2015.5.0 https_pass Set HTTP Basic Auth password. Only accepted for HTTPS URLs. .. versionadded:: 2015.5.0 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 saltenv The default salt environment to pull sls files from .. versionadded:: 2016.3.1 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-ls-remote(1)`: http://git-scm.com/docs/git-ls-remote CLI Example: .. 
code-block:: bash salt myminion git.ls_remote /path/to/repo origin master salt myminion git.ls_remote remote=https://mydomain.tld/repo.git ref=mytag opts='--tags' ''' if cwd is not None: cwd = _expand_path(cwd, user) try: remote = salt.utils.url.add_http_basic_auth(remote, https_user, https_pass, https_only=True) except ValueError as exc: raise SaltInvocationError(exc.__str__()) command = ['git'] + _format_git_opts(git_opts) command.append('ls-remote') command.extend(_format_opts(opts)) command.append(remote) if ref: command.append(ref) output = _git_run(command, cwd=cwd, user=user, password=password, identity=identity, ignore_retcode=ignore_retcode, saltenv=saltenv, output_encoding=output_encoding)['stdout'] ret = {} for line in output.splitlines(): try: ref_sha1, ref_name = line.split(None, 1) except IndexError: continue ret[ref_name] = ref_sha1 return ret
0.000906
def get_parser(self, **kwargs): """This method will create and return a new parser with prog_name, description, and a config file argument. """ self.parser = argparse.ArgumentParser(prog=self.prog_name, description=self._desc, add_help=False, **kwargs) # help is removed because parser.parse_known_args() show help, # often partial help. help action will be added during # reloading step for parser.parse_args() if self.use_config_file: self.parser.add_argument('--config-file', action="store", help="Other configuration file.") return self.parser
0.002509
def init_raspbian_vm(self): """ Creates an image for running Raspbian in a QEMU virtual machine. Based on the guide at: https://github.com/dhruvvyas90/qemu-rpi-kernel/wiki/Emulating-Jessie-image-with-4.1.x-kernel """ r = self.local_renderer r.comment('Installing system packages.') r.sudo('add-apt-repository ppa:linaro-maintainers/tools') r.sudo('apt-get update') r.sudo('apt-get install libsdl-dev qemu-system') r.comment('Download image.') r.local('wget https://downloads.raspberrypi.org/raspbian_lite_latest') r.local('unzip raspbian_lite_latest.zip') #TODO:fix name? #TODO:resize image? r.comment('Find start of the Linux ext4 partition.') r.local( "parted -s 2016-03-18-raspbian-jessie-lite.img unit B print | " "awk '/^Number/{{p=1;next}}; p{{gsub(/[^[:digit:]]/, "", $2); print $2}}' | sed -n 2p", assign_to='START') r.local('mkdir -p {raspbian_mount_point}') r.sudo('mount -v -o offset=$START -t ext4 {raspbian_image} $MNT') r.comment('Comment out everything in ld.so.preload') r.local("sed -i 's/^/#/g' {raspbian_mount_point}/etc/ld.so.preload") r.comment('Comment out entries containing /dev/mmcblk in fstab.') r.local("sed -i '/mmcblk/ s?^?#?' /etc/fstab") r.sudo('umount {raspbian_mount_point}') r.comment('Download kernel.') r.local('wget https://github.com/dhruvvyas90/qemu-rpi-kernel/blob/master/{raspbian_kernel}?raw=true') r.local('mv {raspbian_kernel} {libvirt_images_dir}') r.comment('Creating libvirt machine.') r.local('virsh define libvirt-raspbian.xml') r.comment('You should now be able to boot the VM by running:') r.comment('') r.comment(' qemu-system-arm -kernel {libvirt_boot_dir}/{raspbian_kernel} ' '-cpu arm1176 -m 256 -M versatilepb -serial stdio -append "root=/dev/sda2 rootfstype=ext4 rw" ' '-hda {libvirt_images_dir}/{raspbian_image}') r.comment('') r.comment('Or by running virt-manager.')
0.004617
def history(name, quiet=False): ''' Return the history for an image. Equivalent to running the ``docker history`` Docker CLI command. name Container name or ID quiet : False If ``True``, the return data will simply be a list of the commands run to build the container. .. code-block:: bash $ salt myminion docker.history nginx:latest quiet=True myminion: - FROM scratch - ADD file:ef063ed0ae9579362871b9f23d2bc0781ef7cd4de6ac822052cf6c9c5a12b1e2 in / - CMD [/bin/bash] - MAINTAINER NGINX Docker Maintainers "[email protected]" - apt-key adv --keyserver pgp.mit.edu --recv-keys 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62 - echo "deb http://nginx.org/packages/mainline/debian/ wheezy nginx" >> /etc/apt/sources.list - ENV NGINX_VERSION=1.7.10-1~wheezy - apt-get update && apt-get install -y ca-certificates nginx=${NGINX_VERSION} && rm -rf /var/lib/apt/lists/* - ln -sf /dev/stdout /var/log/nginx/access.log - ln -sf /dev/stderr /var/log/nginx/error.log - VOLUME [/var/cache/nginx] - EXPOSE map[80/tcp:{} 443/tcp:{}] - CMD [nginx -g daemon off;] https://github.com/saltstack/salt/pull/22421 **RETURN DATA** If ``quiet=False``, the return value will be a list of dictionaries containing information about each step taken to build the image. The keys in each step include the following: - ``Command`` - The command executed in this build step - ``Id`` - Layer ID - ``Size`` - Cumulative image size, in bytes - ``Size_Human`` - Cumulative image size, in human-readable units - ``Tags`` - Tag(s) assigned to this layer - ``Time_Created_Epoch`` - Time this build step was completed (Epoch time) - ``Time_Created_Local`` - Time this build step was completed (Minion's local timezone) CLI Example: .. code-block:: bash salt myminion docker.exists mycontainer ''' response = _client_wrapper('history', name) key_map = { 'CreatedBy': 'Command', 'Created': 'Time_Created_Epoch', } command_prefix = re.compile(r'^/bin/sh -c (?:#\(nop\) )?') ret = [] # history is most-recent first, reverse this so it is ordered top-down for item in reversed(response): step = {} for key, val in six.iteritems(item): step_key = key_map.get(key, key) if step_key == 'Command': if not val: # We assume that an empty build step is 'FROM scratch' val = 'FROM scratch' else: val = command_prefix.sub('', val) step[step_key] = val if 'Time_Created_Epoch' in step: step['Time_Created_Local'] = \ time.strftime( '%Y-%m-%d %H:%M:%S %Z', time.localtime(step['Time_Created_Epoch']) ) for param in ('Size',): if param in step: step['{0}_Human'.format(param)] = _size_fmt(step[param]) ret.append(copy.deepcopy(step)) if quiet: return [x.get('Command') for x in ret] return ret
0.001481
def register_json(self, obj): """Register Descriptors from json descriptor objects. Parameters: obj(list or dict): descriptors to register """ if not isinstance(obj, list): obj = [obj] self.register(Descriptor.from_json(j) for j in obj)
0.006601
def fromJson(struct, attributes=None): "Convert a JSON struct to a Geometry based on its structure" if isinstance(struct, basestring): struct = json.loads(struct) indicative_attributes = { 'x': Point, 'wkid': SpatialReference, 'paths': Polyline, 'rings': Polygon, 'points': Multipoint, 'xmin': Envelope } # bbox string if isinstance(struct, basestring) and len(struct.split(',')) == 4: return Envelope(*map(float, struct.split(','))) # Look for telltale attributes in the dict if isinstance(struct, dict): for key, cls in indicative_attributes.iteritems(): if key in struct: ret = cls.fromJson(dict((str(key), value) for (key, value) in struct.iteritems())) if attributes: ret.attributes = dict((str(key.lower()), val) for (key, val) in attributes.iteritems()) return ret raise ValueError("Unconvertible to geometry")
0.003549
def wrap_command(cmds, data_dirs, cls, strict=True): """Wrap a setup command Parameters ---------- cmds: list(str) The names of the other commands to run prior to the command. strict: boolean, optional Whether to raise errors when a pre-command fails. """ class WrappedCommand(cls): def run(self): if not getattr(self, 'uninstall', None): try: [self.run_command(cmd) for cmd in cmds] except Exception: if strict: raise else: pass result = cls.run(self) data_files = [] for dname in data_dirs: data_files.extend(get_data_files(dname)) # update data-files in case this created new files self.distribution.data_files = data_files # also update package data update_package_data(self.distribution) return result return WrappedCommand
0.000951
def multiplyC(self, alpha): """multiply C with a scalar and update all related internal variables (dC, D,...)""" self.C *= alpha if self.dC is not self.C: self.dC *= alpha self.D *= alpha**0.5
0.012712
def killCells(self, percent=0.05): """ Changes the percentage of cells that are now considered dead. The first time you call this method a permutation list is set up. Calls change the number of cells considered dead. """ numColumns = numpy.prod(self.getColumnDimensions()) if self.zombiePermutation is None: self.zombiePermutation = numpy.random.permutation(numColumns) self.numDead = int(round(percent * numColumns)) if self.numDead > 0: self.deadCols = self.zombiePermutation[0:self.numDead] else: self.deadCols = numpy.array([]) self.deadColumnInputSpan = self.getConnectedSpan(self.deadCols) self.removeDeadColumns()
0.005797
def values(self): """return a list of all state values""" values = [] for __, data in self.items(): values.append(data) return values
0.011299
def generate_data(self, data_dir, tmp_dir, task_id=-1): """Generates training/dev data. Args: data_dir: a string tmp_dir: a string task_id: an optional integer Returns: shard or shards for which data was generated. """ tf.logging.info("generate_data task_id=%s" % task_id) encoder = self.get_or_create_vocab(data_dir, tmp_dir) assert task_id >= 0 and task_id < self.num_generate_tasks if task_id < self.num_train_shards: out_file = self.training_filepaths( data_dir, self.num_train_shards, shuffled=False)[task_id] else: out_file = self.dev_filepaths( data_dir, self.num_dev_shards, shuffled=False)[task_id - self.num_train_shards] generator_utils.generate_files( self.example_generator(encoder, tmp_dir, task_id), [out_file]) generator_utils.shuffle_dataset([out_file])
0.003375
def handle_starting_instance(self): """Starting up PostgreSQL may take a long time. In case we are the leader we may want to fail over to.""" # Check if we are in startup, when paused defer to main loop for manual failovers. if not self.state_handler.check_for_startup() or self.is_paused(): self.set_start_timeout(None) if self.is_paused(): self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped') return None # state_handler.state == 'starting' here if self.has_lock(): if not self.update_lock(): logger.info("Lost lock while starting up. Demoting self.") self.demote('immediate-nolock') return 'stopped PostgreSQL while starting up because leader key was lost' timeout = self._start_timeout or self.patroni.config['master_start_timeout'] time_left = timeout - self.state_handler.time_in_state() if time_left <= 0: if self.is_failover_possible(self.cluster.members): logger.info("Demoting self because master startup is taking too long") self.demote('immediate') return 'stopped PostgreSQL because of startup timeout' else: return 'master start has timed out, but continuing to wait because failover is not possible' else: msg = self.process_manual_failover_from_leader() if msg is not None: return msg return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left) else: # Use normal processing for standbys logger.info("Still starting up as a standby.") return None
0.005362
def worksheet(self, title): """Returns a worksheet with specified `title`. :param title: A title of a worksheet. If there are multiple worksheets with the same title, the first one will be returned. :type title: str :returns: an instance of :class:`gspread.models.Worksheet`. Example. Getting worksheet named 'Annual bonuses' >>> sht = client.open('Sample one') >>> worksheet = sht.worksheet('Annual bonuses') """ sheet_data = self.fetch_sheet_metadata() try: item = finditem( lambda x: x['properties']['title'] == title, sheet_data['sheets'] ) return Worksheet(self, item['properties']) except (StopIteration, KeyError): raise WorksheetNotFound(title)
0.002315
def _create_tmpfile(cls, status): """Creates a new random-named tmpfile.""" # We can't put the tmpfile in the same directory as the output. There are # rare circumstances when we leave trash behind and we don't want this trash # to be loaded into bigquery and/or used for restore. # # We used mapreduce id, shard number and attempt and 128 random bits to make # collisions virtually impossible. tmpl = string.Template(cls._TMPFILE_PATTERN) filename = tmpl.substitute( id=status.mapreduce_id, shard=status.shard, random=random.getrandbits(cls._RAND_BITS)) return cls._open_file(status.writer_spec, filename, use_tmp_bucket=True)
0.00438
def _index_document(self, identifier, force=False): """ Adds identifier document to the index. """ query = text(""" INSERT INTO identifier_index(identifier, type, name) VALUES(:identifier, :type, :name); """) self.execute(query, **identifier)
0.006689
def get_user(self, user_id=None, user_name=None): """ Get a user object from the API. If no ``user_id`` or ``user_name`` is specified, it will return the User object for the currently authenticated user. Args: user_id (int): User ID of the user for whom you want to get information. [Optional] user_name(str): Username for the user for whom you want to get information. [Optional] Returns: A User object. """ if user_id: endpoint = '/api/user_id/{0}'.format(user_id) elif user_name: endpoint = '/api/user_name/{0}'.format(user_name) else: # Return currently authorized user endpoint = '/api/user' data = self._make_request(verb="GET", endpoint=endpoint) try: return User.NewFromJSON(data) except: return data
0.003161
def get_slot(handler_input, slot_name): # type: (HandlerInput, str) -> Optional[Slot] """Return the slot information from intent request. The method retrieves the slot information :py:class:`ask_sdk_model.slot.Slot` from the input intent request for the given ``slot_name``. More information on the slots can be found here : https://developer.amazon.com/docs/custom-skills/request-types-reference.html#slot-object If there is no such slot, then a ``None`` is returned. If the input request is not an :py:class:`ask_sdk_model.intent_request.IntentRequest`, a :py:class:`TypeError` is raised. :param handler_input: The handler input instance that is generally passed in the sdk's request and exception components :type handler_input: ask_sdk_core.handler_input.HandlerInput :param slot_name: Name of the slot that needs to be retrieved :type slot_name: str :return: Slot information for the provided slot name if it exists, or a `None` value :rtype: Optional[ask_sdk_model.slot.Slot] :raises: TypeError if the input is not an IntentRequest """ request = handler_input.request_envelope.request if isinstance(request, IntentRequest): if request.intent.slots is not None: return request.intent.slots.get(slot_name, None) else: return None raise TypeError("The provided request is not an IntentRequest")
0.000692
def _dbg_output(self): """ Returns a string representation of the segments that form this SegmentList :return: String representation of contents :rtype: str """ s = "[" lst = [] for segment in self._list: lst.append(repr(segment)) s += ", ".join(lst) s += "]" return s
0.00813
def html(self, unicode=False): """ Return HTML of element """ html = lxml.html.tostring(self.element, encoding=self.encoding) if unicode: html = html.decode(self.encoding) return html
0.008811
def extract_srcset(self, srcset): """ Handle ``srcset="image.png 1x, [email protected] 2x"`` """ urls = [] for item in srcset.split(','): if item: urls.append(unquote_utf8(item.rsplit(' ', 1)[0])) return urls
0.007168
def get_parent(self, update=False): """:returns: the parent node of the current node object.""" if self._meta.proxy_for_model: # the current node is a proxy model; the returned parent # should be the same proxy model, so we need to explicitly # fetch it as an instance of that model rather than simply # following the 'parent' relation if self.parent_id is None: return None else: return self.__class__.objects.get(pk=self.parent_id) else: return self.parent
0.003344
def build_extension(extensions: Sequence[ExtensionHeader]) -> str: """ Unparse a ``Sec-WebSocket-Extensions`` header. This is the reverse of :func:`parse_extension`. """ return ", ".join( build_extension_item(name, parameters) for name, parameters in extensions )
0.006734
def getPollFDList(self): """ Return file descriptors to be used to poll USB events. You should not have to call this method, unless you are integrating this class with a polling mechanism. """ pollfd_p_p = libusb1.libusb_get_pollfds(self.__context_p) if not pollfd_p_p: errno = get_errno() if errno: raise OSError(errno) else: # Assume not implemented raise NotImplementedError( 'Your libusb does not seem to implement pollable FDs') try: result = [] append = result.append fd_index = 0 while pollfd_p_p[fd_index]: append(( pollfd_p_p[fd_index].contents.fd, pollfd_p_p[fd_index].contents.events, )) fd_index += 1 finally: _free(pollfd_p_p) return result
0.00203
def get_interfaces_counters(self): """Return interfaces counters.""" query = junos_views.junos_iface_counter_table(self.device) query.get() interface_counters = {} for interface, counters in query.items(): interface_counters[interface] = { k: v if v is not None else -1 for k, v in counters } return interface_counters
0.004926
def helpful_error_list_get(lst, index): """ >>> helpful_error_list_get([1, 2, 3], 1) 2 >>> helpful_error_list_get([1, 2, 3], 4) Traceback (most recent call last): ... IndexError: Tried to access 4, length is only 3 """ try: return lst[index] except IndexError: raise IndexError('Tried to access %r, length is only %r' % (index, len(lst)))
0.005076
def load_module(module_name, file_path): """ Load a module by name and search path Returns None if Module could not be loaded. """ if sys.version_info >= (3,5,): import importlib.util spec = importlib.util.spec_from_file_location(module_name, file_path) if not spec: return module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module else: import imp mod = imp.load_source(module_name, file_path) return mod
0.003623
def _count_model(self, model): """ return model count """ try: res = model.objects.all().count() except Exception as e: self.err(e) return return res
0.008584
def vd(inc, sd): """ Calculate vertical distance. :param inc: (float) inclination angle in degrees :param sd: (float) slope distance in any units """ return abs(sd * math.sin(math.radians(inc)))
0.004545
def StringEscape(self, string, match, **unused_kwargs): """Escape backslashes found inside a string quote. Backslashes followed by anything other than ['"rnbt] will just be included in the string. Args: string: The string that matched. match: the match object (instance of re.MatchObject). Where match.group(1) contains the escaped code. """ if match.group(1) in '\'"rnbt': self.string += string.decode('unicode_escape') else: self.string += string
0.005825
def _from_dataframe(dataframe, default_type='STRING'): """ Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set the types of the columns in the dataframe, they may be of a type that forces coercion to STRING, so even though the fields in the dataframe themselves may be numeric, the type in the derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed correctly. Args: dataframe: The DataFrame. default_type : The default big query type in case the type of the column does not exist in the schema. Defaults to 'STRING'. Returns: A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a BigQuery Tables resource schema. """ type_mapping = { 'i': 'INTEGER', 'b': 'BOOLEAN', 'f': 'FLOAT', 'O': 'STRING', 'S': 'STRING', 'U': 'STRING', 'M': 'TIMESTAMP' } fields = [] for column_name, dtype in dataframe.dtypes.iteritems(): fields.append({'name': column_name, 'type': type_mapping.get(dtype.kind, default_type)}) return fields
0.006656
def SConscript_exception(file=sys.stderr): """Print an exception stack trace just for the SConscript file(s). This will show users who have Python errors where the problem is, without cluttering the output with all of the internal calls leading up to where we exec the SConscript.""" exc_type, exc_value, exc_tb = sys.exc_info() tb = exc_tb while tb and stack_bottom not in tb.tb_frame.f_locals: tb = tb.tb_next if not tb: # We did not find our exec statement, so this was actually a bug # in SCons itself. Show the whole stack. tb = exc_tb stack = traceback.extract_tb(tb) try: type = exc_type.__name__ except AttributeError: type = str(exc_type) if type[:11] == "exceptions.": type = type[11:] file.write('%s: %s:\n' % (type, exc_value)) for fname, line, func, text in stack: file.write(' File "%s", line %d:\n' % (fname, line)) file.write(' %s\n' % text)
0.001002
def parse(cls, text): """ Parse the given text. Returns a tuple: (list_of_parts, start_pos_of_the_last_part). """ OUTSIDE, IN_DOUBLE, IN_SINGLE = 0, 1, 2 iterator = enumerate(text) state = OUTSIDE parts = [] current_part = '' part_start_pos = 0 for i, c in iterator: # XXX: correctly handle empty strings. if state == OUTSIDE: if c.isspace(): # New part. if current_part: parts.append(current_part) part_start_pos = i + 1 current_part = '' elif c == '"': state = IN_DOUBLE elif c == "'": state = IN_SINGLE else: current_part += c elif state == IN_SINGLE: if c == "'": state = OUTSIDE elif c == "\\": next(iterator) current_part += c else: current_part += c elif state == IN_DOUBLE: if c == '"': state = OUTSIDE elif c == "\\": next(iterator) current_part += c else: current_part += c parts.append(current_part) return parts, part_start_pos
0.001361
def _write_bed_header(self): """Writes the BED first 3 bytes.""" # Writing the first three bytes final_byte = 1 if self._bed_format == "SNP-major" else 0 self._bed.write(bytearray((108, 27, final_byte)))
0.008511
def get_managed_configurations(self): """Get the configurations managed by this scheduler The configuration managed by a scheduler is the self configuration got by the scheduler during the dispatching. :return: a dict of scheduler links with instance_id as key and hash, push_flavor and configuration identifier as values :rtype: dict """ # for scheduler_link in list(self.schedulers.values()): # res[scheduler_link.instance_id] = { # 'hash': scheduler_link.hash, # 'push_flavor': scheduler_link.push_flavor, # 'managed_conf_id': scheduler_link.managed_conf_id # } res = {} if self.sched.pushed_conf and self.cur_conf and 'instance_id' in self.cur_conf: res[self.cur_conf['instance_id']] = { 'hash': self.cur_conf['hash'], 'push_flavor': self.cur_conf['push_flavor'], 'managed_conf_id': self.cur_conf['managed_conf_id'] } logger.debug("Get managed configuration: %s", res) return res
0.002664
def list_records(after=None, before=None): ''' Display fault management logs after : string filter events after time, see man fmdump for format before : string filter events before time, see man fmdump for format CLI Example: .. code-block:: bash salt '*' fmadm.list ''' ret = {} fmdump = _check_fmdump() cmd = '{cmd}{after}{before}'.format( cmd=fmdump, after=' -t {0}'.format(after) if after else '', before=' -T {0}'.format(before) if before else '' ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] result = {} if retcode != 0: result['Error'] = 'error executing fmdump' else: result = _parse_fmdump(res['stdout']) return result
0.00128
def get(self, uuid): """ Get one document stored in LinShare.""" #return self.core.get("documents/" + uuid) documents = (v for v in self.list() if v.get('uuid') == uuid) for i in documents: self.log.debug(i) return i return None
0.010239
def has_cache(self): """Intended to be called before any call that might access the cache. If the cache is not selected, then returns False, otherwise the cache is build if needed and returns True.""" if not self.cache_enabled: return False if self._cache is None: self.build_cache() return True
0.00545
def dicom_series_to_nifti(original_dicom_directory, output_file=None, reorient_nifti=True): """ Converts a single dicom series (see pydicom) to nifti, mimicking SPM Examples: See unit test Will return a dictionary containing - the NIFTI under key 'NIFTI' - the NIFTI file path under 'NII_FILE' - the BVAL file path under 'BVAL_FILE' (only for dti) - the BVEC file path under 'BVEC_FILE' (only for dti) IMPORTANT: If no specific sequence type can be found it will default to anatomical and try to convert. You should check that the data you are trying to convert is supported by this code Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py :param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented :param output_file: file path to write to if not set to None :param original_dicom_directory: directory with the dicom files for a single series/scan :return nibabel image """ # copy files so we can modify without altering the original temp_directory = tempfile.mkdtemp() try: dicom_directory = os.path.join(temp_directory, 'dicom') shutil.copytree(original_dicom_directory, dicom_directory) dicom_input = common.read_dicom_directory(dicom_directory) return dicom_array_to_nifti(dicom_input, output_file, reorient_nifti) except AttributeError as exception: reraise( tp=ConversionError, value=ConversionError(str(exception)), tb=sys.exc_info()[2]) finally: # remove the copied data shutil.rmtree(temp_directory)
0.003966
def close(self): """Flushes the pending events and closes the writer after it is done.""" self.flush() if self._recordio_writer is not None: self._recordio_writer.close() self._recordio_writer = None
0.012146
def add_next(self, requester: int, track: dict): """ Adds a track to the beginning of the queue """ self.queue.insert(0, AudioTrack().build(track, requester))
0.011628
def _expand_libs_in_apps(specs): """ Expands specs.apps.depends.libs to include any indirectly required libs """ for app_name, app_spec in specs['apps'].iteritems(): if 'depends' in app_spec and 'libs' in app_spec['depends']: app_spec['depends']['libs'] = _get_dependent('libs', app_name, specs, 'apps')
0.0059
def get_process_behavior(self, process_id, behavior_ref_name, expand=None): """GetProcessBehavior. [Preview API] Returns a behavior of the process. :param str process_id: The ID of the process :param str behavior_ref_name: The reference name of the behavior :param str expand: :rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>` """ route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if behavior_ref_name is not None: route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str') query_parameters = {} if expand is not None: query_parameters['$expand'] = self._serialize.query('expand', expand, 'str') response = self._send(http_method='GET', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ProcessBehavior', response)
0.005512
def scrub_dict(d): """ Recursively inspect a dictionary and remove all empty values, including empty strings, lists, and dictionaries. """ if type(d) is dict: return dict( (k, scrub_dict(v)) for k, v in d.iteritems() if v and scrub_dict(v) ) elif type(d) is list: return [ scrub_dict(v) for v in d if v and scrub_dict(v) ] else: return d
0.002331
def marketOhlcDF(token='', version=''): '''Returns the official open and close for whole market. https://iexcloud.io/docs/api/#news 9:30am-5pm ET Mon-Fri Args: token (string); Access token version (string); API version Returns: DataFrame: result ''' x = marketOhlc(token, version) data = [] for key in x: data.append(x[key]) data[-1]['symbol'] = key df = pd.io.json.json_normalize(data) _toDatetime(df) _reindex(df, 'symbol') return df
0.001887
def review_metadata_csv_single_user(filedir, metadata, csv_in, n_headers): """ Check validity of metadata for single user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field returns a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv. """ try: if not validate_metadata(filedir, metadata): return False for filename, file_metadata in metadata.items(): is_single_file_metadata_valid(file_metadata, None, filename) except ValueError as e: print_error(e) return False return True
0.001241
def encode_basestring(s, _PY3=PY3, _q=u('"')): """Return a JSON representation of a Python string """ if _PY3: if isinstance(s, binary_type): s = s.decode('utf-8') else: if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): return ESCAPE_DCT[match.group(0)] return _q + ESCAPE.sub(replace, s) + _q
0.004808
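A behavior sketch for encode_basestring, assuming the usual simplejson module-level tables (ESCAPE, ESCAPE_DCT, HAS_UTF8) that the function references.

# Control characters and quotes are escaped, and the result is wrapped in quotes.
print(encode_basestring('say "hi"\tthere'))
# -> "say \"hi\"\tthere"   (a JSON string literal, including the surrounding quotes)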
def _setMotorShutdown(self, value, device, message): """ Set the motor shutdown on error status stored on the hardware device. :Parameters: value : `int` An integer indicating the effect on the motors when an error occurs. device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. message : `bool` If set to `True` a text message will be returned, if set to `False` the integer stored in the Qik will be returned. :Returns: Text message indicating the status of the shutdown error. """ value = self._BOOL_TO_INT.get(value, 1) self._deviceConfig[device]['shutdown'] = value return self._setConfig(self.MOTOR_ERR_SHUTDOWN, value, device, message)
0.003448
def _calculateError(self, recordNum, bucketIdxList): """ Calculate error signal :param bucketIdxList: list of encoder buckets :return: dict containing error. The key is the number of steps The value is a numpy array of error at the output layer """ error = dict() targetDist = numpy.zeros(self._maxBucketIdx + 1) numCategories = len(bucketIdxList) for bucketIdx in bucketIdxList: targetDist[bucketIdx] = 1.0/numCategories for (learnRecordNum, learnPatternNZ) in self._patternNZHistory: nSteps = recordNum - learnRecordNum if nSteps in self.steps: predictDist = self.inferSingleStep(learnPatternNZ, self._weightMatrix[nSteps]) error[nSteps] = targetDist - predictDist return error
0.004896
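A small standalone sketch of the target distribution built inside _calculateError: probability mass is spread uniformly over the active encoder buckets (the values here are purely illustrative).

import numpy
maxBucketIdx = 4
bucketIdxList = [1, 3]
targetDist = numpy.zeros(maxBucketIdx + 1)
for bucketIdx in bucketIdxList:
    targetDist[bucketIdx] = 1.0 / len(bucketIdxList)
print(targetDist)   # -> [0.  0.5 0.  0.5 0. ]
# the per-step error is then targetDist minus the predicted distribution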
def get_closest_station(latitude, longitude, minumum_recent_data=20140000, match_max=100):
    '''Query function to find the nearest weather station to a particular
    set of coordinates. Optionally allows for a recent date by which the
    station is required to be still active at.

    Parameters
    ----------
    latitude : float
        Latitude to search for nearby weather stations at, [degrees]
    longitude : float
        Longitude to search for nearby weather stations at, [degrees]
    minumum_recent_data : int, optional
        Date that the weather station is required to have more recent weather
        data than; format YYYYMMDD; set this to 0 to not restrict data by
        date.
    match_max : int, optional
        The maximum number of results in the KDTree to search for before
        applying the filtering criteria; an internal parameter which is
        increased automatically if the default value is insufficient [-]

    Returns
    -------
    station : IntegratedSurfaceDatabaseStation
        Instance of IntegratedSurfaceDatabaseStation which was nearest to the
        requested coordinates and with sufficiently recent data available [-]

    Notes
    -----
    Searching for 100 stations is a reasonable choice as it takes, ~70
    microseconds vs 50 microsecond to find only 1 station. The search does
    get slower as more points are requested. Bad data is returned from a
    KDTree search if more points are requested than are available.

    Examples
    --------
    >>> get_closest_station(51.02532675, -114.049868485806, 20150000)
    <Weather station registered in the Integrated Surface Database, name CALGARY INTL CS, country CA, USAF 713930, WBAN None, coords (51.1, -114.0) Weather data from 2004 to 2017>
    '''
    # Both station strings may be important
    # Searching for 100 stations is fine, 70 microseconds vs 50 microsecond for 1
    # but there's little point for more points, it gets slower.
    # bad data is returned if k > station_count
    distances, indexes = kd_tree.query([latitude, longitude], k=min(match_max, station_count))
    for i in indexes:
        latlon = _latlongs[i]
        enddate = stations[i].END
        # Iterate for all indexes until one is found whose date is current
        if enddate > minumum_recent_data:
            return stations[i]
    if match_max < station_count:
        return get_closest_station(latitude, longitude,
                                   minumum_recent_data=minumum_recent_data,
                                   match_max=match_max*10)
    raise Exception('Could not find a station with more recent data than '
                    'specified near the specified coordinates.')
0.005922
def oriented_bounds(obj, angle_digits=1, ordered=True):
    """
    Find the oriented bounding box for a Trimesh

    Parameters
    ----------
    obj : trimesh.Trimesh, (n, 2) float, or (n, 3) float
        Mesh object or points in 2D or 3D space
    angle_digits : int
        How much angular precision do we want on our result.
        Even with less precision the returned extents will cover
        the mesh albeit with larger than minimal volume, and may
        experience substantial speedups.

    Returns
    ----------
    to_origin : (4,4) float
        Transformation matrix which will move the center of the
        bounding box of the input mesh to the origin.
    extents: (3,) float
        The extents of the mesh once transformed with to_origin
    """
    # extract a set of convex hull vertices and normals from the input
    # we bother to do this to avoid recomputing the full convex hull if
    # possible
    if hasattr(obj, 'convex_hull'):
        # if we have been passed a mesh, use its existing convex hull to pull from
        # cache rather than recomputing. This version of the cached convex hull has
        # normals pointing in arbitrary directions (straight from qhull)
        # using this avoids having to compute the expensive corrected normals
        # that mesh.convex_hull uses since normal directions don't matter here
        vertices = obj.convex_hull.vertices
        hull_normals = obj.convex_hull.face_normals
    elif util.is_sequence(obj):
        # we've been passed a list of points
        points = np.asanyarray(obj)
        if util.is_shape(points, (-1, 2)):
            return oriented_bounds_2D(points)
        elif util.is_shape(points, (-1, 3)):
            hull_obj = spatial.ConvexHull(points)
            vertices = hull_obj.points[hull_obj.vertices]
            hull_normals, valid = triangles.normals(
                hull_obj.points[hull_obj.simplices])
        else:
            raise ValueError('Points are not (n,3) or (n,2)!')
    else:
        raise ValueError(
            'Oriented bounds must be passed a mesh or a set of points!')

    # convert face normals to spherical coordinates on the upper hemisphere
    # the vector_hemisphere call effectively merges negative but otherwise
    # identical vectors
    spherical_coords = util.vector_to_spherical(
        util.vector_hemisphere(hull_normals))
    # the unique_rows call on merge angles gets unique spherical directions to check
    # we get a substantial speedup in the transformation matrix creation
    # inside the loop by converting to angles ahead of time
    spherical_unique = grouping.unique_rows(spherical_coords,
                                            digits=angle_digits)[0]

    min_volume = np.inf
    tic = time.time()
    for spherical in spherical_coords[spherical_unique]:
        # a matrix which will rotate each hull normal to [0,0,1]
        to_2D = np.linalg.inv(transformations.spherical_matrix(*spherical))
        # apply the transform here
        projected = np.dot(to_2D,
                           np.column_stack((vertices,
                                            np.ones(len(vertices)))).T).T[:, :3]
        height = projected[:, 2].ptp()
        rotation_2D, box = oriented_bounds_2D(projected[:, :2])
        volume = np.product(box) * height
        if volume < min_volume:
            min_volume = volume
            min_extents = np.append(box, height)
            min_2D = to_2D.copy()
            rotation_2D[:2, 2] = 0.0
            rotation_Z = transformations.planar_matrix_to_3D(rotation_2D)

    # combine the 2D OBB transformation with the 2D projection transform
    to_origin = np.dot(rotation_Z, min_2D)

    # transform points using our matrix to find the translation for the
    # transform
    transformed = transformations.transform_points(vertices, to_origin)
    box_center = (transformed.min(axis=0) + transformed.ptp(axis=0) * .5)
    to_origin[:3, 3] = -box_center

    # return ordered 3D extents
    if ordered:
        # sort the three extents
        order = min_extents.argsort()
        # generate a matrix which will flip transform
        # to match the new ordering
        flip = np.eye(4)
        flip[:3, :3] = -np.eye(3)[order]

        # make sure transform isn't mangling triangles
        # by reversing windings on triangles
        if np.isclose(np.trace(flip[:3, :3]), 0.0):
            flip[:3, :3] = np.dot(flip[:3, :3], -np.eye(3))

        # apply the flip to the OBB transform
        to_origin = np.dot(flip, to_origin)
        # apply the order to the extents
        min_extents = min_extents[order]

    log.debug('oriented_bounds checked %d vectors in %0.4fs',
              len(spherical_unique),
              time.time() - tic)
    return to_origin, min_extents
0.000837
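A usage sketch for oriented_bounds on a raw point cloud; this mirrors trimesh's bounds module, and the random points below are purely illustrative.

import numpy as np
points = np.random.random((100, 3))
to_origin, extents = oriented_bounds(points)
# applying to_origin to the points centres their minimal bounding box at the
# origin; extents holds the box side lengths, sorted ascending when ordered=True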
def get_region(self, x1, y1, x2, y2): '''Get an image that refers to the given rectangle within this image. The image data is not actually copied; if the image region is rendered into, it will affect this image. :param int x1: left edge of the image region to return :param int y1: top edge of the image region to return :param int x2: right edge of the image region to return :param int y2: bottom edge of the image region to return :return: :class:`Image` ''' handle = c_int() lib.GetImageRegion(byref(handle), self._handle, x1, y1, x2, y2) return Image(width = x2 - x1, height = y2 - y1, content_scale = self._content_scale, handle = handle)
0.017711
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2): """Read a file by chrom name into a bitset""" bitset = BinnedBitSet( MAX ) for line in f: if line.startswith("#"): continue fields = line.split() if fields[chrom_col] == chrom: start, end = int( fields[start_col] ), int( fields[end_col] ) bitset.set_range( start, end-start ) return bitset
0.025581
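A small usage sketch (Python 3 shown); BinnedBitSet and MAX are assumed to come from bx-python's bitset module, as in the original helper, and the input just needs whitespace-separated chrom/start/end columns.

from io import StringIO

bed = StringIO("chr1\t10\t20\n# comment\nchr2\t5\t15\nchr1\t30\t40\n")
bits = binned_bitsets_by_chrom(bed, 'chr1')   # only the chr1 intervals are set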
def data(self): """Fetch latest data from PyPI, and cache for 30s.""" key = cache_key(self.name) data = cache.get(key) if data is None: logger.debug("Updating package info for %s from PyPI.", self.name) data = requests.get(self.url).json() cache.set(key, data, PYPI_CACHE_EXPIRY) return data
0.00545
def add_base_str(self, base_str, pattern='.+', pattern_base=None,
                 append=True):
    """
    Add further base strings to this instance

    Parameters
    ----------
    base_str: str or list of str
        Strings that are used to look for keys to get and set keys in the
        :attr:`base` dictionary. If a string does not contain ``'%(key)s'``,
        it will be appended at the end. ``'%(key)s'`` will be replaced by the
        specific key for getting and setting an item.
    pattern: str
        Default: ``'.+'``. This is the pattern that is inserted for
        ``%(key)s`` in a base string to look for matches (using the
        :mod:`re` module) in the `base` dictionary. The default `pattern`
        matches everything without white spaces.
    pattern_base: str or list of str
        If None, whatever is given in `base_str` is used. Those strings will
        be used for generating the final search patterns. You can specify
        this parameter yourself to avoid misinterpretation of patterns. For
        example, for a `base_str` like ``'my.str'`` it is recommended to
        additionally provide the `pattern_base` keyword with ``'my\.str'``.
        Like for `base_str`, the ``%(key)s`` is appended if not already in
        the string.
    append: bool
        If True, the given `base_str` are appended (i.e. they are looked for
        first in the :attr:`base` dictionary), otherwise they are put at the
        beginning"""
    base_str = safe_list(base_str)
    pattern_base = safe_list(pattern_base or [])
    for i, s in enumerate(base_str):
        if '%(key)s' not in s:
            base_str[i] += '%(key)s'
    if pattern_base:
        for i, s in enumerate(pattern_base):
            if '%(key)s' not in s:
                pattern_base[i] += '%(key)s'
    else:
        pattern_base = base_str
    self.base_str = base_str + self.base_str
    self.patterns = list(map(lambda s: re.compile(s.replace(
        '%(key)s', '(?P<key>%s)' % pattern)), pattern_base)) + \
        self.patterns
0.001785
def _dfs_preorder(node, visited): """Iterate through nodes in DFS pre-order.""" if node not in visited: visited.add(node) yield node if node.lo is not None: yield from _dfs_preorder(node.lo, visited) if node.hi is not None: yield from _dfs_preorder(node.hi, visited)
0.003185
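A self-contained sketch: any object exposing lo/hi attributes works, so a tiny stub node class is used here for illustration (the real callers presumably pass BDD-style nodes).

class _Node(object):
    def __init__(self, name, lo=None, hi=None):
        self.name, self.lo, self.hi = name, lo, hi

root = _Node('a', lo=_Node('b'), hi=_Node('c', lo=_Node('d')))
print([n.name for n in _dfs_preorder(root, set())])
# -> ['a', 'b', 'c', 'd']   (node first, then lo subtree, then hi subtree)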
def Convert(self, metadata, yara_match, token=None): """Convert a single YaraProcessScanMatch.""" conv = ProcessToExportedProcessConverter(options=self.options) process = list( conv.Convert(ExportedMetadata(), yara_match.process, token=token))[0] seen_rules = set() for m in yara_match.match: if m.rule_name in seen_rules: continue seen_rules.add(m.rule_name) yield ExportedYaraProcessScanMatch( metadata=metadata, process=process, rule_name=m.rule_name, scan_time_us=yara_match.scan_time_us)
0.006791
def start(self): """ Initialize websockets, say hello, and start listening for events """ self.connect() if not self.isAlive(): super(WAMPClient,self).start() self.hello() return self
0.012346
def _set_session_ldp_stats(self, v, load=False): """ Setter method for session_ldp_stats, mapped from YANG variable /mpls_state/ldp/ldp_session/session_ldp_stats (container) If this variable is read-only (config: false) in the source YANG file, then _set_session_ldp_stats is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_session_ldp_stats() directly. YANG Description: Session LDP stats """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=session_ldp_stats.session_ldp_stats, is_container='container', presence=False, yang_name="session-ldp-stats", rest_name="session-ldp-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-session-ldp-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """session_ldp_stats must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=session_ldp_stats.session_ldp_stats, is_container='container', presence=False, yang_name="session-ldp-stats", rest_name="session-ldp-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-session-ldp-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", }) self.__session_ldp_stats = t if hasattr(self, '_set'): self._set()
0.005291
def parse_node_descriptor(desc, model): """Parse a string node descriptor. The function creates an SGNode object without connecting its inputs and outputs and returns a 3-tuple: SGNode, [(input X, trigger X)], <processing function name> Args: desc (str): A description of the node to be created. model (str): A device model for the node to be created that sets any device specific limits on how the node is set up. """ try: data = graph_node.parseString(desc) except ParseException: raise # TODO: Fix this to properly encapsulate the parse error stream_desc = u' '.join(data['node']) stream = DataStream.FromString(stream_desc) node = SGNode(stream, model) inputs = [] if 'input_a' in data: input_a = data['input_a'] stream_a = DataStreamSelector.FromString(u' '.join(input_a['input_stream'])) trigger_a = None if 'type' in input_a: trigger_a = InputTrigger(input_a['type'], input_a['op'], int(input_a['reference'], 0)) inputs.append((stream_a, trigger_a)) if 'input_b' in data: input_a = data['input_b'] stream_a = DataStreamSelector.FromString(u' '.join(input_a['input_stream'])) trigger_a = None if 'type' in input_a: trigger_a = InputTrigger(input_a['type'], input_a['op'], int(input_a['reference'], 0)) inputs.append((stream_a, trigger_a)) if 'combiner' in data and str(data['combiner']) == u'||': node.trigger_combiner = SGNode.OrTriggerCombiner else: node.trigger_combiner = SGNode.AndTriggerCombiner processing = data['processor'] return node, inputs, processing
0.003484
def abook_file(vcard, bookfile): """Write a new Abook file with the given vcards""" book = ConfigParser(default_section='format') book['format'] = {} book['format']['program'] = 'abook' book['format']['version'] = '0.6.1' for (i, card) in enumerate(readComponents(vcard.read())): Abook.to_abook(card, str(i), book, bookfile) with open(bookfile, 'w') as fp: book.write(fp, False)
0.004348
def get_command_handlers(): ''' Create a map of command names and handlers ''' return { 'activate': activate, 'config': hconfig, 'deactivate': deactivate, 'help': cli_help, 'kill': kill, 'restart': restart, 'submit': submit, 'update': update, 'version': version }
0.009174
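A dispatch sketch showing how the map above is typically consumed; the command string is a placeholder and the handlers are the module-level functions the map references.

handlers = get_command_handlers()
command = 'version'
if command in handlers:
    handler = handlers[command]   # e.g. the module's version() function
else:
    handler = handlers['help']    # fall back to the help handler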
def root_frame(self): """ Returns the parsed results in the form of a tree of Frame objects """ if not hasattr(self, '_root_frame'): self._root_frame = Frame() # define a recursive function that builds the hierarchy of frames given the # stack of frame identifiers def frame_for_stack(stack): if len(stack) == 0: return self._root_frame parent = frame_for_stack(stack[:-1]) frame_name = stack[-1] if not frame_name in parent.children_dict: parent.add_child(Frame(frame_name, parent)) return parent.children_dict[frame_name] for stack, self_time in self.stack_self_time.items(): frame_for_stack(stack).self_time = self_time return self._root_frame
0.004505
def serialize_data(data, compression=False, encryption=False, public_key=None): """Serializes normal Python datatypes into plaintext using json. You may also choose to enable compression and encryption when serializing data to send over the network. Enabling one or both of these options will incur additional overhead. Args: data (dict): The data to convert into plain text using json. compression (boolean): True or False value on whether or not to compress the serialized data. encryption (rsa.encryption): An encryption instance used to encrypt the message if encryption is desired. public_key (str): The public key to use to encrypt if encryption is enabled. Returns: The string message serialized using json. """ message = json.dumps(data) if compression: message = zlib.compress(message) message = binascii.b2a_base64(message) if encryption and public_key: message = encryption.encrypt(message, public_key) encoded_message = str.encode(message) return encoded_message
0.000902
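A quick sketch of the plain-JSON path (no compression or encryption), which is the only branch that needs nothing beyond json.

payload = serialize_data({'cmd': 'ping', 'seq': 1})
print(payload)   # -> b'{"cmd": "ping", "seq": 1}' (key order may differ)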
def is_true(self, item=None): """ If you are filtering on object values, you need to pass that object here. """ if item: values = [item] else: values = [] self._get_item_and_att_names(*values) return self._passes_all
0.010135
def nn(self, x, k=1, radius=np.inf, eps=0.0, p=2):
    """Find the k nearest neighbors of x in the observed input data

    :arg x: center
    :arg k: the number of nearest neighbors to return (default: 1)
    :arg eps: approximate nearest neighbors.
              the k-th returned value is guaranteed to be no further than
              (1 + eps) times the distance to the real k-th nearest neighbor.
    :arg p: Which Minkowski p-norm to use. (default: 2, euclidean)
    :arg radius: the maximum radius (default: +inf)
    :return: distance and indexes of found nearest neighbors.
    """
    assert len(x) == self.dim, 'dimension of input {} does not match expected dimension {}.'.format(len(x), self.dim)
    k_x = min(k, self.size)
    # Because linear models require the x vector to be extended to [1.0]+x
    # to accommodate a constant, we store them that way.
    return self._nn(np.array(x), k_x, radius=radius, eps=eps, p=p)
0.018393
def cache_location(): '''Cross-platform placement of cached files''' plat = platform.platform() log.debug('Platform read as: {0}'.format(plat)) if plat.startswith('Windows'): log.debug('Windows platform detected') return os.path.join(os.environ['APPDATA'], 'OpenAccess_EPUB') elif plat.startswith('Darwin'): log.debug('Mac platform detected') elif plat.startswith('Linux'): log.debug('Linux platform detected') else: log.warning('Unhandled platform for cache_location') #This code is written for Linux and Mac, don't expect success for others path = os.path.expanduser('~') if path == '~': path = os.path.expanduser('~user') if path == '~user': log.critical('Could not resolve the correct cache location') sys.exit('Could not resolve the correct cache location') cache_loc = os.path.join(path, '.OpenAccess_EPUB') log.debug('Cache located: {0}'.format(cache_loc)) return cache_loc
0.001974
def get(self, service_name, **kwargs): """Retrieve data from AppNexus API""" return self._send(requests.get, service_name, **kwargs)
0.013514
def hisat2_general_stats_table(self): """ Take the parsed stats from the HISAT2 report and add it to the basic stats table at the top of the report """ headers = OrderedDict() headers['overall_alignment_rate'] = { 'title': '% Aligned', 'description': 'overall alignment rate', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } self.general_stats_addcols(self.hisat2_data, headers)
0.003953
def send_templated_mail(tpl, subject, context, to=getattr(settings, 'MIDNIGHT_MAIN_ADMIN_EMAIL', '[email protected]')):
    """
    Sends an email based on a template
    :param tpl: template
    :param subject: email subject
    :param context: context for rendering the template
    :param to: recipient of the email
    :return:
    """
    msg_html = render_to_string(tpl, {'context': context})
    send_mail(subject, '', getattr(settings, 'MIDNIGHT_MAIN_MAIL_FROM', '[email protected]'), [to], html_message=msg_html,)
0.005848
def confd_state_netconf_listen_tcp_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") netconf = ET.SubElement(confd_state, "netconf") listen = ET.SubElement(netconf, "listen") tcp = ET.SubElement(listen, "tcp") port = ET.SubElement(tcp, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
0.0053