text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def gcj02tobd09(lng, lat):
    """
    Convert Mars coordinates (GCJ-02) to Baidu coordinates (BD-09).
    Google, AMap --> Baidu
    :param lng: GCJ-02 longitude
    :param lat: GCJ-02 latitude
    :return:
    """
    z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi)
    theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi)
    bd_lng = z * math.cos(theta) + 0.0065
    bd_lat = z * math.sin(theta) + 0.006
    return [bd_lng, bd_lat]
0.002519
def defaultBuilder(value, nt):
    """Reasonably sensible default handling of put builder
    """
    if callable(value):
        def logbuilder(V):
            try:
                value(V)
            except:
                _log.exception("Error in Builder")
                raise  # will be logged again
        return logbuilder

    def builder(V):
        try:
            if isinstance(value, Value):
                V[None] = value
            elif isinstance(value, dict):
                for k, v in value.items():
                    V[k] = v
            else:
                nt.assign(V, value)
        except:
            _log.exception("Exception in Put builder")
            raise  # will be printed to stdout from extension code.
    return builder
0.003916
def resolve(self, s):
    """
    Resolve strings to objects using standard import and attribute syntax.
    """
    name = s.split('.')
    used = name.pop(0)
    try:
        found = self.importer(used)
        for frag in name:
            used += '.' + frag
            try:
                found = getattr(found, frag)
            except AttributeError:
                self.importer(used)
                found = getattr(found, frag)
        return found
    except ImportError:
        e, tb = sys.exc_info()[1:]
        v = ValueError('Cannot resolve %r: %s' % (s, e))
        v.__cause__, v.__traceback__ = e, tb
        raise v
0.002759
def hash(self):
    """Generate a hash value."""
    h = hash_pandas_object(self, index=True)
    return hashlib.md5(h.values.tobytes()).hexdigest()
0.0125
def _raw_weights(self):
    """Create a numpy array containing the raw sensor weights.
    """
    if self._debug:
        return np.array([[], [], [], []])
    if not self._running:
        raise ValueError('Weight sensor is not running!')
    if len(self._weight_buffers) == 0:
        time.sleep(0.3)
        if len(self._weight_buffers) == 0:
            raise ValueError('Weight sensor is not retrieving data!')
    weights = np.array(self._weight_buffers)
    return weights
0.009506
def _update_limits_from_api(self):
    """
    Call the service's API action to retrieve limit/quota information,
    and update AwsLimit objects in ``self.limits`` with this information.
    """
    try:
        self.connect()
        resp = self.conn.get_send_quota()
    except EndpointConnectionError as ex:
        logger.warning('Skipping SES: %s', str(ex))
        return
    except ClientError as ex:
        if ex.response['Error']['Code'] in ['AccessDenied', '503']:
            logger.warning('Skipping SES: %s', ex)
            return
        raise
    self.limits['Daily sending quota']._set_api_limit(resp['Max24HourSend'])
0.00428
def setSortedBy(self, sortedBy):
    """
    Sets the sorting information for this widget to the input sorting
    options.  This can be either a list of terms, or a comma delimited
    string.

    :param      sortedBy | <str> || [(<str> column, <str> direction), ..]
    """
    if type(sortedBy) in (list, tuple):
        sortedBy = ','.join(map(lambda x: '%s|%s' % x, sortedBy))
    self.uiSortingTXT.setText(sortedBy)
0.016162
def run(): """Command for reflection database objects""" parser = OptionParser( version=__version__, description=__doc__, ) parser.add_option( '-u', '--url', dest='url', help='Database URL (connection string)', ) parser.add_option( '-r', '--render', dest='render', default='dot', choices=['plantuml', 'dot'], help='Output format - plantuml or dot', ) parser.add_option( '-l', '--list', dest='list', action='store_true', help='Output database list of tables and exit', ) parser.add_option( '-i', '--include', dest='include', help='List of tables to include through ","', ) parser.add_option( '-e', '--exclude', dest='exclude', help='List of tables to exlude through ","', ) (options, args) = parser.parse_args() if not options.url: print('-u/--url option required') exit(1) engine = create_engine(options.url) meta = MetaData() meta.reflect(bind=engine) if options.list: print('Database tables:') tables = sorted(meta.tables.keys()) def _g(l, i): try: return tables[i] except IndexError: return '' for i in range(0, len(tables), 2): print(' {0}{1}{2}'.format( _g(tables, i), ' ' * (38 - len(_g(tables, i))), _g(tables, i + 1), )) exit(0) tables = set(meta.tables.keys()) if options.include: tables &= set(map(string.strip, options.include.split(','))) if options.exclude: tables -= set(map(string.strip, options.exclude.split(','))) desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables)) print(getattr(render, options.render)(desc))
0.001076
def append(self, fdata, offset, query='/content/uploads'):
    """
    append binary data to an upload

    `fdata` - binary data to send to pulp
    `offset` - the amount of previously-uploaded data
    """
    query = '%s/%s/%s/' % (query, self.uid, offset)
    _r = self.connector.put(query, fdata, log_data=False, auto_create_json_str=False)
    juicer.utils.Log.log_notice("Appending to: %s" % query)
    juicer.utils.Log.log_debug("Continuing upload with append. POST returned with data: %s" % str(_r.content))
    return _r.status_code
0.006861
def detect_build(snps): """ Detect build of SNPs. Use the coordinates of common SNPs to identify the build / assembly of a genotype file that is being loaded. Notes ----- rs3094315 : plus strand in 36, 37, and 38 rs11928389 : plus strand in 36, minus strand in 37 and 38 rs2500347 : plus strand in 36 and 37, minus strand in 38 rs964481 : plus strand in 36, 37, and 38 rs2341354 : plus strand in 36, 37, and 38 Parameters ---------- snps : pandas.DataFrame SNPs to add Returns ------- int detected build of SNPs, else None References ---------- ..[1] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[2] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11. ..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315, rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ def lookup_build_with_snp_pos(pos, s): try: return s.loc[s == pos].index[0] except: return None build = None rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"] df = pd.DataFrame( { 36: [742429, 50908372, 143649677, 27566744, 908436], 37: [752566, 50927009, 144938320, 27656823, 918573], 38: [817186, 50889578, 148946169, 27638706, 983193], }, index=rsids, ) for rsid in rsids: if rsid in snps.index: build = lookup_build_with_snp_pos(snps.loc[rsid].pos, df.loc[rsid]) if build is not None: break return build
0.004321
def set_argsx(self, arguments, *args):
    """
    Setup the command line arguments, the first item must be an (absolute)
    filename to run. Variadic function, must be NULL terminated.
    """
    return lib.zproc_set_argsx(self._as_parameter_, arguments, *args)
0.01087
def wasb_write(self, log, remote_log_location, append=True):
    """
    Writes the log to the remote_log_location. Fails silently if no hook
    was created.

    :param log: the log to write to the remote_log_location
    :type log: str
    :param remote_log_location: the log's location in remote storage
    :type remote_log_location: str (path)
    :param append: if False, any existing log file is overwritten. If True,
        the new log is appended to any existing logs.
    :type append: bool
    """
    if append and self.wasb_log_exists(remote_log_location):
        old_log = self.wasb_read(remote_log_location)
        log = '\n'.join([old_log, log]) if old_log else log

    try:
        self.hook.load_string(
            log,
            self.wasb_container,
            remote_log_location,
        )
    except AzureHttpError:
        self.log.exception('Could not write logs to %s', remote_log_location)
0.001919
def on(cls, hook):
    """Hook decorator."""
    def decorator(function_):
        cls._hooks[hook].append(function_)
        return function_
    return decorator
0.010929
def level_to_number(value):
    """
    Coerce a logging level name to a number.

    :param value: A logging level (integer or string).
    :returns: The number of the log level (an integer).

    This function translates log level names into their numeric values. The
    :mod:`logging` module does this for us on Python 2.7 and 3.4 but fails to
    do so on Python 2.6 which :mod:`coloredlogs` still supports.
    """
    if is_string(value):
        try:
            defined_levels = find_defined_levels()
            value = defined_levels[value.upper()]
        except KeyError:
            # Don't fail on unsupported log levels.
            value = DEFAULT_LOG_LEVEL
    return value
0.001447
def get_exit_code(self):
    """Executes the external command and get its exitcode, stdout and stderr
    :return: exit code of command
    """
    args = shlex.split(self.cmd)

    proc = Popen(args, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    out, err = out.decode("utf8"), err.decode("utf8")
    exitcode = proc.returncode
    return exitcode, out, err
0.009302
def blake_encode_ngrams(ngrams, # type: Iterable[str] keys, # type: Sequence[bytes] ks, # type: Sequence[int] l, # type: int encoding # type: str ): # type: (...) -> bitarray.bitarray """ Computes the encoding of the ngrams using the BLAKE2 hash function. We deliberately do not use the double hashing scheme as proposed in [ Schnell2011]_, because this would introduce an exploitable structure into the Bloom filter. For more details on the weakness, see [Kroll2015]_. In short, the double hashing scheme only allows for :math:`l^2` different encodings for any possible n-gram, whereas the use of :math:`k` different independent hash functions gives you :math:`\\sum_{j=1}^{k}{\\binom{l}{j}}` combinations. **Our construction** It is advantageous to construct Bloom filters using a family of hash functions with the property of `k-independence <https://en.wikipedia.org/wiki/K-independent_hashing>`_ to compute the indices for an entry. This approach minimises the change of collisions. An informal definition of *k-independence* of a family of hash functions is, that if selecting a function at random from the family, it guarantees that the hash codes of any designated k keys are independent random variables. Our construction utilises the fact that the output bits of a cryptographic hash function are uniformly distributed, independent, binary random variables (well, at least as close to as possible. See [Kaminsky2011]_ for an analysis). Thus, slicing the output of a cryptographic hash function into k different slices gives you k independent random variables. We chose Blake2 as the cryptographic hash function mainly for two reasons: * it is fast. * in keyed hashing mode, Blake2 provides MACs with just one hash function call instead of the two calls in the HMAC construction used in the double hashing scheme. .. warning:: Please be aware that, although this construction makes the attack of [Kroll2015]_ infeasible, it is most likely not enough to ensure security. Or in their own words: | However, we think that using independent hash functions alone will not be sufficient to ensure security, since in this case other approaches (maybe related to or at least inspired through work from the area of Frequent Itemset Mining) are promising to detect at least the most frequent atoms automatically. :param ngrams: list of n-grams to be encoded :param keys: secret key for blake2 as bytes :param ks: ks[i] is k value to use for ngram[i] :param l: length of the output bitarray (has to be a power of 2) :param encoding: the encoding to use when turning the ngrams to bytes :return: bitarray of length l with the bits set which correspond to the encoding of the ngrams """ key, = keys # Unpack. log_l = int(math.log(l, 2)) if not 2 ** log_l == l: raise ValueError( 'parameter "l" has to be a power of two for the BLAKE2 encoding, ' 'but was: {}'.format( l)) bf = bitarray(l) bf.setall(False) for m, k in zip(ngrams, ks): random_shorts = [] # type: List[int] num_macs = (k + 31) // 32 for i in range(num_macs): hash_bytes = blake2b(m.encode(encoding=encoding), key=key, salt=str(i).encode()).digest() random_shorts.extend(struct.unpack('32H', hash_bytes)) # interpret # hash bytes as 32 unsigned shorts. for i in range(k): idx = random_shorts[i] % l bf[idx] = 1 return bf
0.001686
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item._moId == obj_moid:
            return item
    return None
0.005272
def set_state(self, state):
    """
    Overriding the SimStatePlugin.set_state() method

    :param state: A SimState object
    :return: None
    """
    # Sanity check
    if REGION_MAPPING not in state.options:
        # add REGION_MAPPING into state.options
        l.warning('Option "REGION_MAPPING" must be enabled when using SimAbstractMemory as the memory model. '
                  'The option is added to state options as a courtesy.'
                  )
        state.options.add(REGION_MAPPING)

    SimMemory.set_state(self, state)

    for _, v in self._regions.items():
        v.set_state(state)

    # Delayed initialization of backer argument from __init__
    if self._temp_backer is not None:
        for region, backer_dict in self._temp_backer.items():
            self._regions[region] = MemoryRegion(region, self.state,
                                                 init_memory=True,
                                                 backer_dict=backer_dict,
                                                 endness=self.endness
                                                 )
        self._temp_backer = None
0.003239
def add_query(self, query, join_with=AND):
    """Join a new query to existing queries on the stack.

    Args:
        query (tuple or list or DomainCondition): The condition for
            the query. If a ``DomainCondition`` object is not provided,
            the input should conform to the interface defined in
            :func:`~.domain.DomainCondition.from_tuple`.
        join_with (str): The join string to apply, if other queries are
            already on the stack.
    """
    if not isinstance(query, DomainCondition):
        query = DomainCondition.from_tuple(query)
    if len(self.query):
        self.query.append(join_with)
    self.query.append(query)
0.00274
def build_absolute_uri(request, url):
    """
    Allow to override printing url, not necessarily on the same
    server instance.
    """
    if app_settings.get('CAPTURE_ROOT_URL'):
        return urljoin(app_settings.get('CAPTURE_ROOT_URL'), url)
    return request.build_absolute_uri(url)
0.003425
def _gotitem(self, key, ndim, subset=None):
    """
    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : string / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    # create a new object to prevent aliasing
    if subset is None:
        subset = self.obj

    # we need to make a shallow copy of ourselves
    # with the same groupby
    kwargs = {attr: getattr(self, attr) for attr in self._attributes}

    # Try to select from a DataFrame, falling back to a Series
    try:
        groupby = self._groupby[key]
    except IndexError:
        groupby = self._groupby

    self = self.__class__(subset,
                          groupby=groupby,
                          parent=self,
                          **kwargs)
    self._reset_cache()
    if subset.ndim == 2:
        if is_scalar(key) and key in subset or is_list_like(key):
            self._selection = key
    return self
0.001765
def save(self):
    """Save the index data back to the wily cache."""
    data = [i.asdict() for i in self._revisions.values()]
    logger.debug("Saving data")
    cache.store_archiver_index(self.config, self.archiver, data)
0.008333
def _construct_nested_stack(self):
    """Constructs a AWS::CloudFormation::Stack resource
    """
    nested_stack = NestedStack(self.logical_id, depends_on=self.depends_on,
                               attributes=self.get_passthrough_resource_attributes())
    nested_stack.Parameters = self.Parameters
    nested_stack.NotificationArns = self.NotificationArns
    application_tags = self._get_application_tags()
    nested_stack.Tags = self._construct_tag_list(self.Tags, application_tags)
    nested_stack.TimeoutInMinutes = self.TimeoutInMinutes
    nested_stack.TemplateURL = self.TemplateUrl if self.TemplateUrl else ""

    return nested_stack
0.005739
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    columns = set()
    for record in chunk:
        columns.update(record.keys())
    for record in chunk:
        for column in columns:
            record.setdefault(column, None)
    return chunk
0.002695
def central_likelihood(self, axis):
    """Returns new histogram with all values replaced by their central likelihoods along axis."""
    result = self.cumulative_density(axis)
    result.histogram = 1 - 2 * np.abs(result.histogram - 0.5)
    return result
0.011029
def prox_dca(x, f, g, niter, gamma, callback=None): r"""Proximal DCA of Sun, Sampaio and Candido. This algorithm solves a problem of the form :: min_x f(x) - g(x) where ``f`` and ``g`` are two proper, convex and lower semicontinuous functions. Parameters ---------- x : `LinearSpaceElement` Initial point, updated in-place. f : `Functional` Convex functional. Needs to implement ``f.proximal``. g : `Functional` Convex functional. Needs to implement ``g.gradient``. niter : int Number of iterations. gamma : positive float Stepsize in the primal updates. callback : callable, optional Function called with the current iterate after each iteration. Notes ----- The algorithm was proposed as Algorithm 2.3 in `[SSC2003] <http://www.global-sci.org/jcm/readabs.php?vol=21&no=4&page=451&year=2003&ppage=462>`_. It solves the problem .. math :: \min f(x) - g(x) by using subgradients of :math:`g` and proximal points of :math:`f`. The iteration is given by .. math :: y_n \in \partial g(x_n), \qquad x_{n+1} = \mathrm{Prox}_{\gamma f}(x_n + \gamma y_n). In contrast to `dca`, `prox_dca` uses proximal steps with respect to the convex part ``f``. Both algorithms use subgradients of the concave part ``g``. References ---------- [SSC2003] Sun, W, Sampaio R J B, and Candido M A B. *Proximal point algorithm for minimization of DC function*. Journal of Computational Mathematics, 21.4 (2003), pp 451--462. See also -------- dca : Solver with subgradinet steps for all the functionals. doubleprox_dc : Solver with proximal steps for all the nonsmooth convex functionals and a gradient step for a smooth functional. """ space = f.domain if g.domain != space: raise ValueError('`f.domain` and `g.domain` need to be equal, but ' '{} != {}'.format(space, g.domain)) for _ in range(niter): f.proximal(gamma)(x.lincomb(1, x, gamma, g.gradient(x)), out=x) if callback is not None: callback(x)
0.000453
def get_list(self, mutagen_file):
    """Get a list of all values for the field using this style.
    """
    return [self.deserialize(item) for item in self.fetch(mutagen_file)]
0.010526
def where(self, field, value=None, operator='='):
    """ Set conditions for the query, joined with AND """
    if field is None:
        return self
    conjunction = None
    if value is None and isinstance(field, dict):
        for field, value in field.items():
            operator, value = value if isinstance(value, tuple) else ('=', value)
            self.where(field, value, operator)
    else:
        if self.where_criteria.size() > 0:
            conjunction = 'AND'
        self.where_criteria.append(
            expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
    return self
0.012766
def parse(url, engine=None, conn_max_age=0, ssl_require=False): """Parses a database URL.""" if url == 'sqlite://:memory:': # this is a special case, because if we pass this URL into # urlparse, urlparse will choke trying to interpret "memory" # as a port number return { 'ENGINE': SCHEMES['sqlite'], 'NAME': ':memory:' } # note: no other settings are required for sqlite # otherwise parse the url as normal config = {} url = urlparse.urlparse(url) # Split query strings from path. path = url.path[1:] if '?' in path and not url.query: path, query = path.split('?', 2) else: path, query = path, url.query query = urlparse.parse_qs(query) # If we are using sqlite and we have no path, then assume we # want an in-memory database (this is the behaviour of sqlalchemy) if url.scheme == 'sqlite' and path == '': path = ':memory:' # Handle postgres percent-encoded paths. hostname = url.hostname or '' if '%2f' in hostname.lower(): # Switch to url.netloc to avoid lower cased paths hostname = url.netloc if "@" in hostname: hostname = hostname.rsplit("@", 1)[1] if ":" in hostname: hostname = hostname.split(":", 1)[0] hostname = hostname.replace('%2f', '/').replace('%2F', '/') # Lookup specified engine. engine = SCHEMES[url.scheme] if engine is None else engine port = (str(url.port) if url.port and engine in [SCHEMES['oracle'], SCHEMES['mssql']] else url.port) # Update with environment configuration. config.update({ 'NAME': urlparse.unquote(path or ''), 'USER': urlparse.unquote(url.username or ''), 'PASSWORD': urlparse.unquote(url.password or ''), 'HOST': hostname, 'PORT': port or '', 'CONN_MAX_AGE': conn_max_age, }) # Pass the query string into OPTIONS. options = {} for key, values in query.items(): if url.scheme == 'mysql' and key == 'ssl-ca': options['ssl'] = {'ca': values[-1]} continue options[key] = values[-1] if ssl_require: options['sslmode'] = 'require' # Support for Postgres Schema URLs if 'currentSchema' in options and engine in ( 'django.contrib.gis.db.backends.postgis', 'django.db.backends.postgresql_psycopg2', 'django.db.backends.postgresql', 'django_redshift_backend', ): options['options'] = '-c search_path={0}'.format(options.pop('currentSchema')) if options: config['OPTIONS'] = options if engine: config['ENGINE'] = engine return config
0.001096
def fillHSV(self, hsv, start=0, end=-1):
    """Fill the entire strip with HSV color tuple"""
    self.fill(conversions.hsv2rgb(hsv), start, end)
0.013072
def regex_last_key(regex):
    """Sort key function factory that puts items that match a
    regular expression last.

    >>> from nose.config import Config
    >>> from nose.pyversion import sort_list
    >>> c = Config()
    >>> regex = c.testMatch
    >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
    >>> sort_list(entries, regex_last_key(regex))
    >>> entries
    ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
    """
    def k(obj):
        if regex.search(obj):
            return (1, obj)
        return (0, obj)
    return k
0.001767
def async_run(self, keyword, *args, **kwargs):
    ''' Executes the provided Robot Framework keyword in a separate thread
    and immediately returns a handle to be used with async_get '''
    handle = self._last_thread_handle
    thread = self._threaded(keyword, *args, **kwargs)
    thread.start()
    self._thread_pool[handle] = thread
    self._last_thread_handle += 1
    return handle
0.007246
def imean(nums): r"""Return identric (exponential) mean. The identric mean of two numbers x and y is: x if x = y otherwise :math:`\frac{1}{e} \sqrt[x-y]{\frac{x^x}{y^y}}` Cf. https://en.wikipedia.org/wiki/Identric_mean Parameters ---------- nums : list A series of numbers Returns ------- float The identric mean of nums Raises ------ AttributeError imean supports no more than two values Examples -------- >>> imean([1, 2]) 1.4715177646857693 >>> imean([1, 0]) nan >>> imean([2, 4]) 2.9430355293715387 """ if len(nums) == 1: return nums[0] if len(nums) > 2: raise AttributeError('imean supports no more than two values') if nums[0] <= 0 or nums[1] <= 0: return float('NaN') elif nums[0] == nums[1]: return nums[0] return (1 / math.e) * (nums[0] ** nums[0] / nums[1] ** nums[1]) ** ( 1 / (nums[0] - nums[1]) )
0.001004
def pre_dissect(self, s):
    """
    We need to parse the padding and type as soon as possible,
    else we won't be able to parse the message list...
    """
    if len(s) < 1:
        raise Exception("Invalid InnerPlaintext (too short).")

    tmp_len = len(s) - 1
    if s[-1] != b"\x00":
        msg_len = tmp_len
    else:
        n = 1
        while s[-n] != b"\x00" and n < tmp_len:
            n += 1
        msg_len = tmp_len - n
    self.fields_desc[0].length_from = lambda pkt: msg_len

    self.type = struct.unpack("B", s[msg_len:msg_len + 1])[0]
    return s
0.003115
def _find_and_replace(text, start_string, end_string, replace_fn):
    """Remove everything found between instances of start_string and end_string.

    Replace each such instance with replace_fn(removed_text)

    e.g. _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
      = u"the fat cat sat"

    Args:
      text: a unicode string
      start_string: a unicode string
      end_string: a unicode string
      replace_fn: a unary function from unicode string to unicode string

    Returns:
      a string
    """
    ret = u""
    current_pos = 0
    while True:
        start_pos = text.find(start_string, current_pos)
        if start_pos == -1:
            ret += text[current_pos:]
            break
        ret += text[current_pos:start_pos]
        end_pos = text.find(end_string, start_pos + len(start_string))
        if end_pos == -1:
            break
        ret += replace_fn(text[start_pos + len(start_string):end_pos])
        current_pos = end_pos + len(end_string)
    return ret
0.009514
def dump(values):
    """
    Dump a ValueTree instance, returning its dict representation.

    :param values:
    :type values: ValueTree
    :return:
    :rtype: dict
    """
    root = {}

    def _dump(_values, container):
        for name, value in _values._values.items():
            if isinstance(value, ValueTree):
                container[name] = _dump(value, {})
            elif isinstance(value, list):
                items = []
                for item in value:
                    if not isinstance(item, str):
                        raise ValueError()
                    items.append(item)
                container[name] = items
            elif isinstance(value, str):
                container[name] = value
            else:
                raise ValueError()
        return container

    return _dump(values, root)
0.003576
def _parse_values(values, extra=None):
    """
    Utility function to flatten out args. For internal use only.

    :param values: list, tuple, or str
    :param extra: list or None
    :return: list
    """
    coerced = list(values)
    if coerced == values:
        values = coerced
    else:
        coerced = tuple(values)
        if coerced == values:
            values = list(values)
        else:
            values = [values]
    if extra:
        values.extend(extra)
    return values
0.001984
def _obj_display(obj, display=''):
    """Returns string representation of an object, either the default or
    based on the display template passed in.
    """
    result = ''
    if not display:
        result = str(obj)
    else:
        template = Template(display)
        context = Context({'obj': obj})
        result = template.render(context)

    return result
0.00542
def convert_field(self, name, field): """ Convert a single field from a Peewee model field to a validator field. :param name: Name of the field as defined on this validator. :param name: Peewee field instance. :return: Validator field. """ if PEEWEE3: field_type = field.field_type.lower() else: field_type = field.db_field pwv_field = ModelValidator.FIELD_MAP.get(field_type, StringField) print('pwv_field', field_type, pwv_field) validators = [] required = not bool(getattr(field, 'null', True)) choices = getattr(field, 'choices', ()) default = getattr(field, 'default', None) max_length = getattr(field, 'max_length', None) unique = getattr(field, 'unique', False) if required: validators.append(validate_required()) if choices: print('CHOICES', choices) validators.append(validate_one_of([c[0] for c in choices])) if max_length: validators.append(validate_length(high=max_length)) if unique: validators.append(validate_model_unique(field, self.instance.select(), self.pk_field, self.pk_value)) if isinstance(field, peewee.ForeignKeyField): if PEEWEE3: rel_field = field.rel_field else: rel_field = field.to_field return ModelChoiceField(field.rel_model, rel_field, default=default, validators=validators) if isinstance(field, ManyToManyField): return ManyModelChoiceField( field.rel_model, field.rel_model._meta.primary_key, default=default, validators=validators) return pwv_field(default=default, validators=validators)
0.002201
def handel_default(self) -> None: """ 处理设置到body上的数据默认 headers """ raw_body = self._body body = cast(Optional[bytes], None) default_type = 2 charset = self._charset or self._default_charset if raw_body is None: pass elif isinstance(raw_body, bytes): # body为bytes default_type = 2 body = raw_body elif isinstance(raw_body, str): # body 为字符串 default_type = 2 body = encode_str(raw_body, charset) elif isinstance(raw_body, (list, dict)): # body 为json default_type = 3 body = encode_str(json.dumps(raw_body, ensure_ascii=False), charset) elif isinstance(raw_body, RawIOBase): # body 为文件 default_type = 1 body = raw_body.read() raw_body.close() if "Content-Length" not in self._headers and \ "Transfer-Encoding" not in self._headers \ or self._headers["Transfer-Encoding"] != "chunked": if self.length is None: if body is not None: self.length = len(body) else: self.length = 0 # 设置默认 Content-Length self.set("Content-Length", str(self.length)) # print(body[0], body[1]) if body is not None and body.startswith(encode_str("<", charset)): default_type = 4 if "Content-Type" not in self._headers.keys(): type_str = self.type if type_str is None: temp = DEFAULT_TYPE.get(default_type) if temp is not None: if default_type != 1: temp += "; charset=%s" % charset type_str = temp if type_str is not None: # 设置默认 Content-Type self.set("Content-Type", type_str) self._body = body
0.001511
def validate(args): """ %prog validate input.vcf genome.fasta Fasta validation of vcf file. """ import pyfasta p = OptionParser(validate.__doc__) p.add_option("--prefix", help="Add prefix to seqid") opts, args = p.parse_args(args) vcffile, fastafile = args pf = opts.prefix genome = pyfasta.Fasta(fastafile, record_class=pyfasta.MemoryRecord) fp = must_open(vcffile) match_ref = match_alt = total = 0 for row in fp: if row[0] == '#': continue seqid, pos, id, ref, alt = row.split()[:5] total += 1 if pf: seqid = pf + seqid pos = int(pos) if seqid not in genome: continue true_ref = genome[seqid][pos - 1] if total % 100000 == 0: print(total, "sites parsed", file=sys.stderr) if ref == true_ref: match_ref += 1 elif alt == true_ref: match_alt += 1 logging.debug("Match REF: {}".format(percentage(match_ref, total))) logging.debug("Match ALT: {}".format(percentage(match_alt, total)))
0.000907
def getFailedItems(self):
    """
    Return an iterable of two-tuples of listeners which raised an
    exception from C{processItem} and the item which was passed as
    the argument to that method.
    """
    for failed in self.store.query(BatchProcessingError,
                                   BatchProcessingError.processor == self):
        yield (failed.listener, failed.item)
0.007937
def tanh_discrete_unbottleneck(x, hidden_size):
    """Simple un-discretization from tanh."""
    x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck")
    return x
0.022857
def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=0, utf8_encoding='standard'): """data [,options] -> marshalled data Convert an argument tuple or a Fault instance to an XML-RPC request (or response, if the methodresponse option is used). In addition to the data object, the following options can be given as keyword arguments: methodname: the method name for a methodCall packet methodresponse: true to create a methodResponse packet. If this option is used with a tuple, the tuple must be a singleton (i.e. it can contain only one element). encoding: the packet encoding (default is UTF-8) All 8-bit strings in the data structure are assumed to use the packet encoding. Unicode strings are automatically converted, where necessary. """ assert isinstance(params, TupleType) or isinstance(params, Fault),\ "argument must be tuple or Fault instance" if isinstance(params, Fault): methodresponse = 1 elif methodresponse and isinstance(params, TupleType): assert len(params) == 1, "response tuple must be a singleton" if not encoding: encoding = "utf-8" if FastMarshaller: m = FastMarshaller(encoding) else: m = Marshaller(encoding, allow_none) data = m.dumps(params) if encoding != "utf-8": xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding) else: xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default # standard XML-RPC wrappings if methodname: # a method call if not isinstance(methodname, StringType): methodname = methodname.encode(encoding) data = ( xmlheader, "<methodCall>\n" "<methodName>", methodname, "</methodName>\n", data, "</methodCall>\n" ) elif methodresponse: # a method response, or a fault structure data = ( xmlheader, "<methodResponse>\n", data, "</methodResponse>\n" ) else: return data # return as is return string.join(data, "")
0.001353
def parse(self, text, element, context='eqn'):
    """
    context : <string> 'eqn', 'defn'
        If context is set to equation, lone identifiers will be parsed as calls to elements
        If context is set to definition, lone identifiers will be cleaned and returned.
    """
    # Remove the inline comments from `text` before parsing the grammar
    # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039973
    text = re.sub(r"\{[^}]*\}", "", text)

    self.ast = self.grammar.parse(text)

    self.context = context
    self.element = element
    self.new_structure = []

    py_expr = self.visit(self.ast)

    return ({
        'py_expr': py_expr
    }, self.new_structure)
0.009697
def calculate_bidirectional_lstm_output_shapes(operator): ''' See bidirectional LSTM's conversion function for its output shapes. ''' check_input_and_output_numbers(operator, input_count_range=[1, 5], output_count_range=[1, 5]) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) input_shape = operator.inputs[0].type.shape # LSTM accepts [N, C] and [N, C, 1, 1] inputs if len(input_shape) not in [2, 4]: raise RuntimeError('Input must be a 2-D or 4-D tensor') params = operator.raw_operator.biDirectionalLSTM # The following line is more accurate but it may break some tests # output_shape = ['None', params.outputVectorSize] if params.params.sequenceOutput else [1, 2 *params.outputVectorSize] output_shape = ['None', 2 * params.outputVectorSize] state_shape = [1, params.outputVectorSize] # TODO: Changing input shapes of an operator is dangerous, this should be move to Topology's _fix_shapes function if len(operator.inputs) > 1: Y_h_in = operator.inputs[1] # The forward initial hidden state of a single sequence Y_h_in.type.shape = state_shape Y_h_rev_in = operator.inputs[3] # The backward initial hidden state of a single sequence Y_h_rev_in.type.shape = state_shape if len(operator.inputs) > 2: Y_c_in = operator.inputs[2] # The forward initial cell state of a single sequence Y_c_in.type.shape = state_shape Y_c_rev_in = operator.inputs[4] # The backward initial cell state of a single sequence Y_c_rev_in.type.shape = state_shape operator.outputs[0].type.shape = output_shape if len(operator.outputs) > 1: operator.outputs[1].type.shape = state_shape operator.outputs[3].type.shape = state_shape if len(operator.outputs) > 2: operator.outputs[2].type.shape = state_shape operator.outputs[4].type.shape = state_shape
0.004126
def dynamic_template_data(self, value):
    """Data for a transactional template

    :param value: Data for a transactional template
    :type value: DynamicTemplateData, a JSON-serializeable structure
    """
    if not isinstance(value, DynamicTemplateData):
        value = DynamicTemplateData(value)

    try:
        personalization = self._personalizations[value.personalization]
        has_internal_personalization = True
    except IndexError:
        personalization = Personalization()
        has_internal_personalization = False
    personalization.dynamic_template_data = value.dynamic_template_data

    if not has_internal_personalization:
        self.add_personalization(
            personalization, index=value.personalization)
0.002466
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None): """Blend a single model. You should rarely be using this method. Use `ModelsPipeline.blend` instead. Parameters ---------- proportion : float, default 0.2 Test size holdout. stratify : bool, default False seed : int, default 100 indices : list(np.ndarray,np.ndarray), default None Two numpy arrays that contain indices for train/test slicing. (train_index,test_index) Returns ------- `Dataset` """ if self.use_cache: pdict = {'proportion': proportion, 'stratify': stratify, 'seed': seed, 'indices': indices} if indices is not None: pdict['train_index'] = np_hash(indices[0]) pdict['test_index'] = np_hash(indices[1]) dhash = self._dhash(pdict) c = Cache(dhash, prefix='b') if c.available: logger.info('Loading %s\'s blend results from cache.' % self._name) train = c.retrieve('train') test = c.retrieve('test') y_train = c.retrieve('y_train') return Dataset(X_train=train, y_train=y_train, X_test=test) elif not self.dataset.loaded: self.dataset.load() X_train, y_train, X_test, y_test = self.dataset.split(test_size=proportion, stratify=stratify, seed=seed, indices=indices) xt_shape = X_test.shape[0] x_t = concat(X_test, self.dataset.X_test) prediction_concat = reshape_1d(self._predict(X_train, y_train, x_t)) new_train, new_test = tsplit(prediction_concat, xt_shape) if self.use_cache: c.store('train', new_train) c.store('test', new_test) c.store('y_train', y_test) return Dataset(new_train, y_test, new_test)
0.004047
def _apply_rate(self, max_rate, aggressive=False): """ Try to adjust the rate (characters/second) of the fragments of the list, so that it does not exceed the given ``max_rate``. This is done by testing whether some slack can be borrowed from the fragment before the faster current one. If ``aggressive`` is ``True``, the slack might be retrieved from the fragment after the faster current one, if the previous fragment could not contribute enough slack. """ self.log(u"Called _apply_rate") self.log([u" Aggressive: %s", aggressive]) self.log([u" Max rate: %.3f", max_rate]) regular_fragments = list(self.smflist.regular_fragments) if len(regular_fragments) <= 1: self.log(u" The list contains at most one regular fragment, returning") return faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))] if len(faster_fragments) == 0: self.log(u" No regular fragment faster than max rate, returning") return self.log_warn(u" Some fragments have rate faster than max rate:") self.log([u" %s", [i for i, f in faster_fragments]]) self.log(u"Fixing rate for faster fragments...") for frag_index, fragment in faster_fragments: self.smflist.fix_fragment_rate(frag_index, max_rate, aggressive=aggressive) self.log(u"Fixing rate for faster fragments... done") faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))] if len(faster_fragments) > 0: self.log_warn(u" Some fragments still have rate faster than max rate:") self.log([u" %s", [i for i, f in faster_fragments]])
0.003682
def log_x_cb(self, w, val):
    """Toggle linear/log scale for X-axis."""
    self.tab_plot.logx = val
    self.plot_two_columns()
0.014085
def delkey(ctx, pubkeys):
    """ Delete a private key from the wallet
    """
    if not pubkeys:
        pubkeys = click.prompt("Public Keys").split(" ")
    if click.confirm(
        "Are you sure you want to delete keys from your wallet?\n"
        "This step is IRREVERSIBLE! If you don't have a backup, "
        "You may lose access to your account!"
    ):
        for pub in pubkeys:
            ctx.bitshares.wallet.removePrivateKeyFromPublicKey(pub)
0.002174
def getBucketValues(self):
    """ See the function description in base.py """
    if self._bucketValues is None:
        numBuckets = len(self.encoder.getBucketValues())
        self._bucketValues = []
        for bucketIndex in range(numBuckets):
            self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)
    return self._bucketValues
0.011364
def _cookie_attrs(self, cookies): """Return a list of cookie-attributes to be returned to server. like ['foo="bar"; $Path="/"', ...] The $Version attribute is also added when appropriate (currently only once per request). """ # add cookies in order of most specific (ie. longest) path first cookies.sort(key=lambda a: len(a.path), reverse=True) version_set = False attrs = [] for cookie in cookies: # set version of Cookie header # XXX # What should it be if multiple matching Set-Cookie headers have # different versions themselves? # Answer: there is no answer; was supposed to be settled by # RFC 2965 errata, but that may never appear... version = cookie.version if not version_set: version_set = True if version > 0: attrs.append("$Version=%s" % version) # quote cookie value if necessary # (not for Netscape protocol, which already has any quotes # intact, due to the poorly-specified Netscape Cookie: syntax) if ((cookie.value is not None) and self.non_word_re.search(cookie.value) and version > 0): value = self.quote_re.sub(r"\\\1", cookie.value) else: value = cookie.value # add cookie-attributes to be returned in Cookie header if cookie.value is None: attrs.append(cookie.name) else: attrs.append("%s=%s" % (cookie.name, value)) if version > 0: if cookie.path_specified: attrs.append('$Path="%s"' % cookie.path) if cookie.domain.startswith("."): domain = cookie.domain if (not cookie.domain_initial_dot and domain.startswith(".")): domain = domain[1:] attrs.append('$Domain="%s"' % domain) if cookie.port is not None: p = "$Port" if cookie.port_specified: p = p + ('="%s"' % cookie.port) attrs.append(p) return attrs
0.001722
def _parse(self, string): """Parse a string and return its features. :param string: A one-symbol string in NFD Notes ----- Strategy is rather simple: we determine the base part of a string and then search left and right of this part for the additional features as expressed by the diacritics. Fails if a segment has more than one basic part. """ nstring = self._norm(string) # check whether sound is in self.sounds if nstring in self.sounds: sound = self.sounds[nstring] sound.normalized = nstring != string sound.source = string return sound match = list(self._regex.finditer(nstring)) # if the match has length 2, we assume that we have two sounds, so we split # the sound and pass it on for separate evaluation (recursive function) if len(match) == 2: sound1 = self._parse(nstring[:match[1].start()]) sound2 = self._parse(nstring[match[1].start():]) # if we have ANY unknown sound, we mark the whole sound as unknown, if # we have two known sounds of the same type (vowel or consonant), we # either construct a diphthong or a cluster if 'unknownsound' not in (sound1.type, sound2.type) and \ sound1.type == sound2.type: # diphthong creation if sound1.type == 'vowel': return Diphthong.from_sounds( # noqa: F405 string, sound1, sound2, self) elif sound1.type == 'consonant' and \ sound1.manner in ('stop', 'implosive', 'click', 'nasal') and \ sound2.manner in ('stop', 'implosive', 'affricate', 'fricative'): return Cluster.from_sounds( # noqa: F405 string, sound1, sound2, self) return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 if len(match) != 1: # Either no match or more than one; both is considered an error. return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 pre, mid, post = nstring.partition(nstring[match[0].start():match[0].end()]) base_sound = self.sounds[mid] if isinstance(base_sound, Marker): # noqa: F405 assert pre or post return UnknownSound(grapheme=nstring, source=string, ts=self) # noqa: F405 # A base sound with diacritics or a custom symbol. features = attr.asdict(base_sound) features.update( source=string, generated=True, normalized=nstring != string, base=base_sound.grapheme) # we construct two versions: the "normal" version and the version where # we search for aliases and normalize them (as our features system for # diacritics may well define aliases grapheme, sound = '', '' for dia in [p + EMPTY for p in pre]: feature = self.diacritics[base_sound.type].get(dia, {}) if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature # we add the unaliased version to the grapheme grapheme += dia[0] # we add the corrected version (if this is needed) to the sound sound += self.features[base_sound.type][feature][0] # add the base sound grapheme += base_sound.grapheme sound += base_sound.s for dia in [EMPTY + p for p in post]: feature = self.diacritics[base_sound.type].get(dia, {}) # we are strict: if we don't know the feature, it's an unknown # sound if not feature: return UnknownSound( # noqa: F405 grapheme=nstring, source=string, ts=self) features[self._feature_values[feature]] = feature grapheme += dia[1] sound += self.features[base_sound.type][feature][1] features['grapheme'] = sound new_sound = self.sound_classes[base_sound.type](**features) # check whether grapheme differs from re-generated sound if text_type(new_sound) != sound: new_sound.alias = True if grapheme != sound: new_sound.alias = True new_sound.grapheme = grapheme return new_sound
0.001757
def print_config(self, _):
    ''' Print configuration. '''
    for section in self.config.sections():
        print '[%s]' % section
        items = dict(self.config.items(section))
        for k in items:
            print "%(a)s=%(b)s" % {'a': k, 'b': items[k]}
        print ''
0.006472
def morphological_chan_vese(image, iterations, init_level_set='checkerboard', smoothing=1, lambda1=1, lambda2=1, iter_callback=lambda x: None): """Morphological Active Contours without Edges (MorphACWE) Active contours without edges implemented with morphological operators. It can be used to segment objects in images and volumes without well defined borders. It is required that the inside of the object looks different on average than the outside (i.e., the inner area of the object should be darker or lighter than the outer area on average). Parameters ---------- image : (M, N) or (L, M, N) array Grayscale image or volume to be segmented. iterations : uint Number of iterations to run init_level_set : str, (M, N) array, or (L, M, N) array Initial level set. If an array is given, it will be binarized and used as the initial level set. If a string is given, it defines the method to generate a reasonable initial level set with the shape of the `image`. Accepted values are 'checkerboard' and 'circle'. See the documentation of `checkerboard_level_set` and `circle_level_set` respectively for details about how these level sets are created. smoothing : uint, optional Number of times the smoothing operator is applied per iteration. Reasonable values are around 1-4. Larger values lead to smoother segmentations. lambda1 : float, optional Weight parameter for the outer region. If `lambda1` is larger than `lambda2`, the outer region will contain a larger range of values than the inner region. lambda2 : float, optional Weight parameter for the inner region. If `lambda2` is larger than `lambda1`, the inner region will contain a larger range of values than the outer region. iter_callback : function, optional If given, this function is called once per iteration with the current level set as the only argument. This is useful for debugging or for plotting intermediate results during the evolution. Returns ------- out : (M, N) or (L, M, N) array Final segmentation (i.e., the final level set) See also -------- circle_level_set, checkerboard_level_set Notes ----- This is a version of the Chan-Vese algorithm that uses morphological operators instead of solving a partial differential equation (PDE) for the evolution of the contour. The set of morphological operators used in this algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE (see [1]_). However, morphological operators are do not suffer from the numerical stability issues typically found in PDEs (it is not necessary to find the right time step for the evolution), and are computationally faster. The algorithm and its theoretical derivation are described in [1]_. References ---------- .. [1] A Morphological Approach to Curvature-based Evolution of Curves and Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 2014, DOI 10.1109/TPAMI.2013.106 """ init_level_set = _init_level_set(init_level_set, image.shape) _check_input(image, init_level_set) u = np.int8(init_level_set > 0) iter_callback(u) for _ in range(iterations): # inside = u > 0 # outside = u <= 0 c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8) c1 = (image * u).sum() / float(u.sum() + 1e-8) # Image attachment du = np.gradient(u) abs_du = np.abs(du).sum(0) aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2) u[aux < 0] = 1 u[aux > 0] = 0 # Smoothing for _ in range(smoothing): u = _curvop(u) iter_callback(u) return u
0.000247
def reset_env(exclude=[]):
    """Remove environment variables, used in Jupyter notebooks"""
    if os.getenv(env.INITED):
        wandb_keys = [key for key in os.environ.keys() if key.startswith(
            'WANDB_') and key not in exclude]
        for key in wandb_keys:
            del os.environ[key]
        return True
    else:
        return False
0.002809
def onca(word, max_length=4, zero_pad=True): """Return the Oxford Name Compression Algorithm (ONCA) code for a word. This is a wrapper for :py:meth:`ONCA.encode`. Parameters ---------- word : str The word to transform max_length : int The maximum length (default 5) of the code to return zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The ONCA code Examples -------- >>> onca('Christopher') 'C623' >>> onca('Niall') 'N400' >>> onca('Smith') 'S530' >>> onca('Schmidt') 'S530' """ return ONCA().encode(word, max_length, zero_pad)
0.001404
def load(path=None, **kwargs): ''' Loads the configuration from the file provided onto the device. path (required) Path where the configuration/template file is present. If the file has a ``.conf`` extension, the content is treated as text format. If the file has a ``.xml`` extension, the content is treated as XML format. If the file has a ``.set`` extension, the content is treated as Junos OS ``set`` commands. overwrite : False Set to ``True`` if you want this file is to completely replace the configuration file. replace : False Specify whether the configuration file uses ``replace:`` statements. If ``True``, only those statements under the ``replace`` tag will be changed. format Determines the format of the contents update : False Compare a complete loaded configuration against the candidate configuration. For each hierarchy level or configuration object that is different in the two configurations, the version in the loaded configuration replaces the version in the candidate configuration. When the configuration is later committed, only system processes that are affected by the changed configuration elements parse the new configuration. This action is supported from PyEZ 2.1. template_vars Variables to be passed into the template processing engine in addition to those present in pillar, the minion configuration, grains, etc. You may reference these variables in your template like so: .. code-block:: jinja {{ template_vars["var_name"] }} CLI Examples: .. code-block:: bash salt 'device_name' junos.load 'salt://production/network/routers/config.set' salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}' ''' conn = __proxy__['junos.conn']() ret = {} ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the configuration is present' ret['out'] = False return ret op = {} if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) template_vars = {} if "template_vars" in op: template_vars = op["template_vars"] template_cached_path = salt.utils.files.mkstemp() __salt__['cp.get_template']( path, template_cached_path, template_vars=template_vars) if not os.path.isfile(template_cached_path): ret['message'] = 'Invalid file path.' ret['out'] = False return ret if os.path.getsize(template_cached_path) == 0: ret['message'] = 'Template failed to render' ret['out'] = False return ret op['path'] = template_cached_path if 'format' not in op: if path.endswith('set'): template_format = 'set' elif path.endswith('xml'): template_format = 'xml' else: template_format = 'text' op['format'] = template_format if 'replace' in op and op['replace']: op['merge'] = False del op['replace'] elif 'overwrite' in op and op['overwrite']: op['overwrite'] = True elif 'overwrite' in op and not op['overwrite']: op['merge'] = True del op['overwrite'] try: conn.cu.load(**op) ret['message'] = "Successfully loaded the configuration." except Exception as exception: ret['message'] = 'Could not load configuration due to : "{0}"'.format( exception) ret['format'] = op['format'] ret['out'] = False return ret finally: salt.utils.files.safe_rm(template_cached_path) return ret
0.001211
def find_by_task(self, task, params={}, **options):
    """Returns the compact records for all attachments on the task.

    Parameters
    ----------
    task : {Id} Globally unique identifier for the task.
    [params] : {Object} Parameters for the request
    """
    path = "/tasks/%s/attachments" % (task)
    return self.client.get_collection(path, params, **options)
0.007389
def send(self):
    """
    Send a verification email to the user.
    """
    context = {
        "verification_url": app_settings.EMAIL_VERIFICATION_URL.format(
            key=self.key
        )
    }
    email_utils.send_email(
        context=context,
        from_email=settings.DEFAULT_FROM_EMAIL,
        recipient_list=[self.email.email],
        subject=_("Please Verify Your Email Address"),
        template_name="rest_email_auth/emails/verify-email",
    )

    logger.info(
        "Sent confirmation email to %s for user #%d",
        self.email.email,
        self.email.user.id,
    )
0.002933
def _can_retry(self, batch, error):
    """
    We can retry a send if the error is transient and the number of
    attempts taken is fewer than the maximum allowed
    """
    return (batch.attempts < self.config['retries'] and
            getattr(error, 'retriable', False))
0.006667
def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
    """ Fetch the items from raw or enriched index """
    logging.debug("Creating a elastic items generator.")

    elastic_scroll_id = None
    search_after = search_after_value

    while True:
        if scroll:
            rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
        else:
            rjson = get_elastic_items_search(elastic, search_after, limit)

        if rjson and "_scroll_id" in rjson:
            elastic_scroll_id = rjson["_scroll_id"]

        if rjson and "hits" in rjson:
            if not rjson["hits"]["hits"]:
                break
            for hit in rjson["hits"]["hits"]:
                item = hit['_source']
                if 'sort' in hit:
                    search_after = hit['sort']
                try:
                    backend._fix_item(item)
                except Exception:
                    pass
                yield item
        else:
            logging.error("No results found from %s", elastic.index_url)
            break

    return
0.000917
def make_estimator_input_fn(self,
                            mode,
                            hparams,
                            data_dir=None,
                            force_repeat=False,
                            prevent_repeat=False,
                            dataset_kwargs=None):
    """Return input_fn wrapped for Estimator."""

    def estimator_input_fn(params, config):
        return self.input_fn(
            mode,
            hparams,
            data_dir=data_dir,
            params=params,
            config=config,
            force_repeat=force_repeat,
            prevent_repeat=prevent_repeat,
            dataset_kwargs=dataset_kwargs)

    return estimator_input_fn
0.011544
def minute_change(device):
    '''When we reach a minute change, animate it.'''
    hours = datetime.now().strftime('%H')
    minutes = datetime.now().strftime('%M')

    def helper(current_y):
        with canvas(device) as draw:
            text(draw, (0, 1), hours, fill="white", font=proportional(CP437_FONT))
            text(draw, (15, 1), ":", fill="white", font=proportional(TINY_FONT))
            text(draw, (17, current_y), minutes, fill="white", font=proportional(CP437_FONT))
        time.sleep(0.1)

    for current_y in range(1, 9):
        helper(current_y)
    minutes = datetime.now().strftime('%M')
    for current_y in range(9, 1, -1):
        helper(current_y)
0.005882
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True): """ Performs a sync operation on a queryset, making the contents of the queryset match the contents of model_objs. Note: The definition of a sync requires that we return untouched rows from the upsert opertion. There is no way to turn off returning untouched rows in a sync. Args: queryset (Model|QuerySet): A model or a queryset that defines the collection to sync model_objs (List[Model]): A list of Django models to sync. All models in this list will be bulk upserted and any models not in the table (or queryset) will be deleted if sync=True. unique_fields (List[str]): A list of fields that define the uniqueness of the model. The model must have a unique constraint on these fields update_fields (List[str], default=None): A list of fields to update whenever objects already exist. If an empty list is provided, it is equivalent to doing a bulk insert on the objects that don't exist. If `None`, all fields will be updated. returning (bool|List[str]): If True, returns all fields. If a list, only returns fields in the list. Return values are split in a tuple of created, updated, and deleted models. ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all of the update fields are duplicates Returns: UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched, and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``, and ``deleted`` properties of the result. """ results = upsert2.upsert(queryset, model_objs, unique_fields, update_fields=update_fields, returning=returning, sync=True, ignore_duplicate_updates=ignore_duplicate_updates) post_bulk_operation.send(sender=queryset.model, model=queryset.model) return results
0.007498
def tostring(self, cnf):
    """Convert Cnf object to Dimacs cnf string

    cnf: Cnf object

    In the converted Cnf there will be only numbers for variable names.
    The conversion guarantees that the variables will be numbered
    alphabetically.
    """
    self.varname_dict = {}
    self.varobj_dict = {}

    varis = set()
    for d in cnf.dis:
        for v in d:
            varis.add(v.name)

    ret = "p cnf %d %d" % (len(varis), len(cnf.dis))

    varis = dict(list(zip(sorted(list(varis)),
                          list(map(str, list(range(1, len(varis) + 1)))))))

    for v in varis:
        vo = Variable(v)
        self.varname_dict[vo] = varis[v]
        self.varobj_dict[varis[v]] = vo

    for d in cnf.dis:
        ret += "\n"
        vnamelist = []
        for v in d:
            vnamelist.append(("-" if v.inverted else "") + varis[v.name])
        ret += " ".join(vnamelist) + " 0"

    return ret
0.007913
def data(self): """ Read all of the documents from disk into an in-memory list. """ def read(path): with open(path, 'r', encoding='UTF-8') as f: return f.read() return [ read(f) for f in self.files ]
0.006944
def try_float(s, default=None, minimum=None):
    """ Try parsing a string into a float.
        If None is passed, default is returned.
        On failure, InvalidNumber is raised.
    """
    if not s:
        return default
    try:
        val = float(s)
    except (TypeError, ValueError):
        raise InvalidNumber(s, label='Invalid float value')

    if (minimum is not None) and (val < minimum):
        val = minimum
    return val
0.002273
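A few illustrative calls, assuming InvalidNumber is the exception class defined alongside this helper:

try_float('3.14')              # 3.14
try_float('', default=0.0)     # 0.0 -- empty input falls back to the default
try_float('2.5', minimum=5.0)  # 5.0 -- clamped up to the minimum
try_float('abc')               # raises InvalidNumber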
def switch_charset(characters, target=''):
    '''
    Transforms an iterable of kana characters to its opposite script.
    For example, it can turn [u'あ', u'い'] into [u'ア', u'イ'], or
    {u'ホ': u'ボ'} into {u'ほ': u'ぼ'}.

    There are no safety checks--keep in mind that the correct source and
    target values must be set, otherwise the resulting characters will be
    garbled.
    '''
    if isinstance(characters, dict):
        return _switch_charset_dict(characters, target)
    else:
        return _switch_charset_list(characters, target)
0.001842
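A usage sketch; the KATAKANA/HIRAGANA target constants are assumptions about the surrounding module and may be named differently there:

hiragana = [u'あ', u'い', u'う']
katakana = switch_charset(hiragana, target=KATAKANA)
# -> [u'ア', u'イ', u'ウ']

voiced_map = switch_charset({u'ホ': u'ボ'}, target=HIRAGANA)
# -> {u'ほ': u'ぼ'}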
def _non_framed_body_length(header, plaintext_length): """Calculates the length of a non-framed message body, given a complete header. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :param int plaintext_length: Length of plaintext in bytes :rtype: int """ body_length = header.algorithm.iv_len # IV body_length += 8 # Encrypted Content Length body_length += plaintext_length # Encrypted Content body_length += header.algorithm.auth_len # Authentication Tag return body_length
0.003442
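A worked example of the arithmetic, assuming a typical AES-GCM algorithm suite with a 12-byte IV and a 16-byte authentication tag (the exact values come from the header's algorithm definition):

# iv_len (12) + encrypted-content-length field (8) + plaintext (1024)
# + auth tag (16) = 1060 bytes of non-framed body for a 1 KiB plaintext.
body_len = 12 + 8 + 1024 + 16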
def _pseudodepths_wenner(configs, spacing=1, grid=None): """Given distances between electrodes, compute Wenner pseudo depths for the provided configuration The pseudodepth is computed after Roy & Apparao, 1971, as 0.11 times the distance between the two outermost electrodes. It's not really clear why the Wenner depths are different from the Dipole-Dipole depths, given the fact that Wenner configurations are a complete subset of the Dipole-Dipole configurations. """ if grid is None: xpositions = (configs - 1) * spacing else: xpositions = grid.get_electrode_positions()[configs - 1, 0] z = np.abs(np.max(xpositions, axis=1) - np.min(xpositions, axis=1)) * -0.11 x = np.mean(xpositions, axis=1) return x, z
0.001282
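A small sketch of the expected inputs and outputs, assuming configurations are given as 1-based electrode numbers with one row per four-point spread:

import numpy as np

configs = np.array([
    [1, 2, 3, 4],
    [2, 3, 4, 5],
])
x, z = _pseudodepths_wenner(configs, spacing=1)
# x: midpoint of each spread (1.5, 2.5); z: -0.11 times the outer electrode
# separation, i.e. both spreads here plot at a pseudodepth of -0.33.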
def map(self, mapper: Callable[[Any], Any]) -> 'Observable': r"""Map a function over an observable. Haskell: fmap f m = Cont $ \c -> runCont m (c . f) """ source = self return Observable(lambda on_next: source.subscribe(compose(on_next, mapper)))
0.010453
def searchZone(self, zone, q=None, has_geo=False, callback=None, errback=None): """ Search a zone for a given search query (e.g., for geological data, etc) :param zone: NOT a string like loadZone - an already loaded ns1.zones.Zone, like one returned from loadZone :return: """ import ns1.zones return zone.search(q, has_geo, callback=callback, errback=errback)
0.007194
def p_ExtendedAttributeIdent(p): """ExtendedAttributeIdent : IDENTIFIER "=" IDENTIFIER""" p[0] = model.ExtendedAttribute( name=p[1], value=model.ExtendedAttributeValue(name=p[3]))
0.015707
def writeText (self, filename=None): """Writes a text representation of this sequence to the given filename (defaults to self.txtpath). """ if filename is None: filename = self.txtpath with open(filename, 'wt') as output: self.printText(output)
0.01444
def populate(self, priority, address, rtr, data): """ :return: None """ assert isinstance(data, bytes) self.needs_high_priority(priority) self.needs_no_rtr(rtr) self.needs_data(data, 3) self.set_attributes(priority, address, rtr) self.closed = self.byte_to_channels(data[0]) self.opened = self.byte_to_channels(data[1]) self.closed_long = self.byte_to_channels(data[2])
0.004376
def extract_date_time(self, the_time_stamp):
    """Extract the parts of a date given a timestamp as per below example.

    :param the_time_stamp: The 'event_timestamp' attribute from grid.xml.
    :type the_time_stamp: str

    # now separate out its parts
    # >>> e = "2012-08-07T01:55:12WIB"
    # >>> e[0:10]
    # '2012-08-07'
    # >>> e[11:19]
    # '01:55:12'
    # >>> e[-3:]
    # 'WIB'  (WIB = Western Indonesian Time)
    """
    date_tokens = the_time_stamp[0:10].split('-')
    self.year = int(date_tokens[0])
    self.month = int(date_tokens[1])
    self.day = int(date_tokens[2])

    time_tokens = the_time_stamp[11:19].split(':')
    self.hour = int(time_tokens[0])
    self.minute = int(time_tokens[1])
    self.second = int(time_tokens[2])

    # right now only handles Indonesian Timezones
    tz_dict = {
        'WIB': 'Asia/Jakarta',
        'WITA': 'Asia/Makassar',
        'WIT': 'Asia/Jayapura'
    }
    if self.time_zone in tz_dict:
        self.time_zone = tz_dict.get(self.time_zone, self.time_zone)

    # noinspection PyBroadException
    try:
        if not self.time_zone:
            # default to utc if empty
            tzinfo = pytz.utc
        else:
            tzinfo = timezone(self.time_zone)
    except BaseException:
        tzinfo = pytz.utc

    self.time = datetime(
        self.year,
        self.month,
        self.day,
        self.hour,
        self.minute,
        self.second)
    # For now realtime always uses Western Indonesia Time
    self.time = tzinfo.localize(self.time)
0.001166
def parse_meta(file_content, cable): """\ Extracts the reference id, date/time of creation, the classification, and the origin of the cable and assigns the value to the provided `cable`. """ end_idx = file_content.rindex("</table>") start_idx = file_content.rindex("<table class='cable'>", 0, end_idx) m = _META_PATTERN.search(file_content, start_idx, end_idx) if not m: raise ValueError('Cable table not found') if len(m.groups()) != 4: raise ValueError('Unexpected metadata result: "%r"' % m.groups()) # Table content: # Reference ID | Created | Classification | Origin ref, created, classification, origin = m.groups() if cable.reference_id != ref: reference_id = MALFORMED_CABLE_IDS.get(ref) if reference_id != cable.reference_id: reference_id = INVALID_CABLE_IDS.get(ref) if reference_id != cable.reference_id: raise ValueError('cable.reference_id != ref. reference_id="%s", ref="%s"' % (cable.reference_id, ref)) cable.created = created cable.origin = origin # classifications are usually written in upper case, but you never know.. cable.classification = classification.upper() # Try to find media IRIs start_idx = file_content.rfind(u'Appears in these', start_idx, end_idx) if start_idx > 0: cable.media_uris = _MEDIA_URLS_PATTERN.findall(file_content, start_idx, end_idx) return cable
0.003427
def reorder_matrix(m1, cost='line', verbose=False, H=1e4, Texp=10, T0=1e-3, Hbrk=10):
    '''
    This function rearranges the nodes in matrix M1 such that the matrix
    elements are squeezed along the main diagonal.  The function uses a
    version of simulated annealing.

    Parameters
    ----------
    M1 : NxN np.ndarray
        connection matrix weighted/binary directed/undirected
    cost : str
        'line' or 'circ' for shape of lattice (linear or ring lattice).
        Default is linear lattice.
    verbose : bool
        print out cost at each iteration. Default False.
    H : int
        annealing parameter, default value 1e4
    Texp : int
        annealing parameter, default value 10. Coefficient of H s.t.
        Texp0=1-Texp/H
    T0 : float
        annealing parameter, default value 1e-3
    Hbrk : int
        annealing parameter, default value = 10. Coefficient of H s.t.
        Hbrk0 = H/Hbrk

    Returns
    -------
    Mreordered : NxN np.ndarray
        reordered connection matrix
    Mindices : Nx1 np.ndarray
        reordered indices
    Mcost : float
        objective function cost of reordered matrix

    Notes
    -----
    Note that in general, the outcome will depend on the initial condition
    (the setting of the random number seed). Also, there is no good way to
    determine optimal annealing parameters in advance - these parameters
    will need to be adjusted "by hand" (particularly H, Texp, and T0).
    For large and/or dense matrices, it is highly recommended to perform
    exploratory runs varying the settings of 'H' and 'Texp' and then select
    the best values.

    Based on extensive testing, it appears that T0 and Hbrk can remain
    unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
    example. H is the most important parameter - set to larger values as
    the problem size increases. It is advisable to run this function
    multiple times and select the solution(s) with the lowest 'cost'.

    Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
    instead.
    '''
    from scipy import linalg, stats
    n = len(m1)
    if n < 2:
        raise BCTParamError("align_matrix will infinite loop on a singleton "
                            "or null matrix.")

    # generate cost function
    if cost == 'line':
        profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1]
    elif cost == 'circ':
        profile = stats.norm.pdf(
            range(1, n + 1), loc=n / 2, scale=n / 4)[::-1]
    else:
        raise BCTParamError('cost must be line or circ')

    costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
    costf /= np.sum(costf)

    # establish maxcost, lowcost, mincost
    maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
    lowcost = np.sum(m1 * costf) / maxcost
    mincost = lowcost

    # initialize
    anew = np.arange(n)
    amin = np.arange(n)
    h = 0
    hcnt = 0

    # adjust annealing parameters
    # H determines the maximal number of steps (user specified)
    # Texp determines the steepness of the temperature gradient
    Texp = 1 - Texp / H
    # T0 sets the initial temperature and scales the energy term (user provided)
    # Hbrk sets a break point for the simulation
    Hbrk = H / Hbrk

    while h < H:
        h += 1
        hcnt += 1
        # terminate if no new mincost has been found for some time
        if hcnt > Hbrk:
            break
        T = T0 * Texp**h

        atmp = anew.copy()
        r1, r2 = rng.randint(n, size=(2,))
        while r1 == r2:
            r2 = rng.randint(n)
        atmp[r1] = anew[r2]
        atmp[r2] = anew[r1]
        costnew = np.sum((m1[np.ix_(atmp, atmp)]) * costf) / maxcost

        # annealing
        if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
            anew = atmp
            lowcost = costnew
            # is this a new absolute best?
            if lowcost < mincost:
                amin = anew
                mincost = lowcost
                if verbose:
                    print('step %i ... current lowest cost = %f' % (h, mincost))
                hcnt = 0

    if verbose:
        print('step %i ... final lowest cost = %f' % (h, mincost))

    M_reordered = m1[np.ix_(amin, amin)]
    M_indices = amin
    cost = mincost
    return M_reordered, M_indices, cost
0.001145
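A hedged usage sketch, assuming numpy is imported as np and that the function's module-level dependencies (rng, BCTParamError) are available as in bctpy:

import numpy as np

# A sparse random adjacency matrix; the reordering squeezes its nonzero
# entries toward the main diagonal.
m = (np.random.rand(30, 30) > 0.8).astype(float)
m_reordered, indices, final_cost = reorder_matrix(m, cost='line', H=1e4, Texp=10)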
def write_word_data(self, addr, cmd, val): """Write a word (2 bytes) of data to the specified cmd register of the device. Note that this will write the data in the endianness of the processor running Python (typically little endian)! """ assert self._device is not None, 'Bus must be opened before operations are made against it!' # Construct a string of data to send with the command register and word value. data = struct.pack('=BH', cmd & 0xFF, val & 0xFFFF) # Send the data to the device. self._select_device(addr) self._device.write(data)
0.00641
def next_frame_basic_stochastic(): """Basic 2-frame conv model with stochastic tower.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", -5.0) hparams.add_hparam("num_iterations_1st_stage", 15000) hparams.add_hparam("num_iterations_2nd_stage", 15000) hparams.add_hparam("latent_loss_multiplier", 1e-3) hparams.add_hparam("latent_loss_multiplier_dynamic", False) hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5) hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0) hparams.add_hparam("latent_loss_multiplier_schedule", "constant") hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames. hparams.add_hparam("anneal_end", 50000) hparams.add_hparam("information_capacity", 0.0) return hparams
0.01934
def set_time(self, time):
    """Time status -- return the remaining playing time as MM:SS."""
    rest_time = int(self.song_total_time) - self.time - 1
    minute = int(rest_time) // 60
    sec = int(rest_time) % 60
    return str(minute).zfill(2) + ':' + str(sec).zfill(2)
0.007752
def render(self, is_unicode=False, **kwargs): """Render the graph, and return the svg string""" self.setup(**kwargs) svg = self.svg.render( is_unicode=is_unicode, pretty_print=self.pretty_print ) self.teardown() return svg
0.007092
def lookup_search_result(self, result, **kw):
    """Perform :meth:`lookup` on return value of :meth:`search`."""
    return self.lookup((s['id_str'] for s in result['statuses']), **kw)
0.010471
def check_in(choices, **params): """Checks parameters are in a list of allowed parameters Parameters ---------- choices : array-like, accepted values params : object Named arguments, parameters to be checked Raises ------ ValueError : unacceptable choice of parameters """ for p in params: if params[p] not in choices: raise ValueError( "{} value {} not recognized. Choose from {}".format( p, params[p], choices))
0.001919
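Two illustrative calls, directly reflecting the behaviour above:

check_in(['euclidean', 'cosine'], metric='cosine')     # passes silently
check_in(['euclidean', 'cosine'], metric='manhattan')  # raises ValueError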
def extend_transformations(self, transformations, return_alternatives=False): """ Extends a sequence of transformations to the TransformedStructure. Args: transformations: Sequence of Transformations return_alternatives: Whether to return alternative TransformedStructures for one-to-many transformations. return_alternatives can be a number, which stipulates the total number of structures to return. """ for t in transformations: self.append_transformation(t, return_alternatives=return_alternatives)
0.004304
def parse_macro_params(token): """ Common parsing logic for both use_macro and macro_block """ try: bits = token.split_contents() tag_name, macro_name, values = bits[0], bits[1], bits[2:] except IndexError: raise template.TemplateSyntaxError( "{0} tag requires at least one argument (macro name)".format( token.contents.split()[0])) args = [] kwargs = {} # leaving most validation up to the template.Variable # class, but use regex here so that validation could # be added in future if necessary. kwarg_regex = ( r'^([A-Za-z_][\w_]*)=(".*"|{0}.*{0}|[A-Za-z_][\w_]*)$'.format( "'")) arg_regex = r'^([A-Za-z_][\w_]*|".*"|{0}.*{0}|(\d+))$'.format( "'") for value in values: # must check against the kwarg regex first # because the arg regex matches everything! kwarg_match = regex_match( kwarg_regex, value) if kwarg_match: kwargs[kwarg_match.groups()[0]] = template.Variable( # convert to a template variable here kwarg_match.groups()[1]) else: arg_match = regex_match( arg_regex, value) if arg_match: args.append(template.Variable(arg_match.groups()[0])) else: raise template.TemplateSyntaxError( "Malformed arguments to the {0} tag.".format( tag_name)) return tag_name, macro_name, args, kwargs
0.000642
def get_object_from_classbased_instance( instance, queryset, request, *args, **kwargs): """ Get object from an instance of classbased generic view Parameters ---------- instance : instance An instance of classbased generic view queryset : instance A queryset instance request : instance A instance of HttpRequest Returns ------- instance An instance of model object or None """ from django.views.generic.edit import BaseCreateView # initialize request, args, kwargs of classbased_instance # most of methods of classbased view assumed these attributes # but these attributes is initialized in ``dispatch`` method. instance.request = request instance.args = args instance.kwargs = kwargs # get queryset from class if ``queryset_or_model`` is not specified if hasattr(instance, 'get_queryset') and not queryset: queryset = instance.get_queryset() elif hasattr(instance, 'queryset') and not queryset: queryset = instance.queryset elif hasattr(instance, 'model') and not queryset: queryset = instance.model._default_manager.all() # get object if hasattr(instance, 'get_object'): try: obj = instance.get_object(queryset) except AttributeError as e: # CreateView has ``get_object`` method but CreateView # should not have any object before thus simply set # None if isinstance(instance, BaseCreateView): obj = None else: raise e elif hasattr(instance, 'object'): obj = instance.object else: obj = None return obj
0.000582
def save_params(model_name: str): """Save current global listener params to a file""" with open(model_name + '.params', 'w') as f: json.dump(pr.__dict__, f)
0.005814
def invite(self, email, roles=None): """ Send invitation to email with a list of roles :param email: :param roles: None or "ALL" or list of role_names :return: """ if roles is None: role_ids = [self.roles['Guest'].roleId] elif roles == "ALL": role_ids = list([i.id for i in self.roles]) else: if "Guest" not in roles: roles.append('Guest') role_ids = list([i.id for i in self.roles if i.name in roles]) self._router.invite_user(data=json.dumps({ "organizationId": self.organizationId, "email": email, "roles": role_ids}))
0.002849
def from_diff(diff, options=None, cwd=None): """Create a Radius object from a diff rather than a reposistory. """ return RadiusFromDiff(diff=diff, options=options, cwd=cwd)
0.010204
def save(self, commit=True, **kwargs): """ Saves the considered instances. """ if self.post: for form in self.forms: form.instance.post = self.post super().save(commit)
0.009091
def auth_traps_enabled(name, status=True): ''' Manage the sending of authentication traps. :param bool status: The enabled status. Example of usage: .. code-block:: yaml snmp-auth-traps: win_snmp.auth_traps_enabled: - status: True ''' ret = {'name': name, 'changes': {}, 'comment': six.text_type(), 'result': None} vname = 'EnableAuthenticationTraps' current_status = __salt__['win_snmp.get_auth_traps_enabled']() if status == current_status: ret['comment'] = '{0} already contains the provided value.'.format(vname) ret['result'] = True elif __opts__['test']: ret['comment'] = '{0} will be changed.'.format(vname) ret['changes'] = {'old': current_status, 'new': status} else: ret['comment'] = 'Set {0} to contain the provided value.'.format(vname) ret['changes'] = {'old': current_status, 'new': status} ret['result'] = __salt__['win_snmp.set_auth_traps_enabled'](status=status) return ret
0.002664
def transform_flask_from_import(node): '''Translates a flask.ext from-style import into a non-magical import. Translates: from flask.ext import wtf, bcrypt as fcrypt Into: import flask_wtf as wtf, flask_bcrypt as fcrypt ''' new_names = [] # node.names is a list of 2-tuples. Each tuple consists of (name, as_name). # So, the import would be represented as: # # from flask.ext import wtf as ftw, admin # # node.names = [('wtf', 'ftw'), ('admin', None)] for (name, as_name) in node.names: actual_module_name = 'flask_{}'.format(name) new_names.append((actual_module_name, as_name or name)) new_node = nodes.Import() copy_node_info(node, new_node) new_node.names = new_names mark_transformed(new_node) return new_node
0.00122
def _notify_thing_lid_change(self, from_lid, to_lid): """Used by Thing instances to indicate that a rename operation has happened""" try: with self.__private_things: self.__private_things[to_lid] = self.__private_things.pop(from_lid) except KeyError: logger.warning('Thing %s renamed (to %s), but not in private lookup table', from_lid, to_lid) else: # renaming could happen before get_thing is called on the original try: with self.__new_things: self.__new_things[to_lid] = self.__new_things.pop(from_lid) except KeyError: pass
0.007267
def get_neighbors_of_site_with_index(struct, n, approach="min_dist", delta=0.1, \ cutoff=10.0): """ Returns the neighbors of a given site using a specific neighbor-finding method. Args: struct (Structure): input structure. n (int): index of site in Structure object for which motif type is to be determined. approach (str): type of neighbor-finding approach, where "min_dist" will use the MinimumDistanceNN class, "voronoi" the VoronoiNN class, "min_OKeeffe" the MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class. delta (float): tolerance involved in neighbor finding. cutoff (float): (large) radius to find tentative neighbors. Returns: neighbor sites. """ if approach == "min_dist": return MinimumDistanceNN(tol=delta, cutoff=cutoff).get_nn( struct, n) elif approach == "voronoi": return VoronoiNN(tol=delta, cutoff=cutoff).get_nn( struct, n) elif approach == "min_OKeeffe": return MinimumOKeeffeNN(tol=delta, cutoff=cutoff).get_nn( struct, n) elif approach == "min_VIRE": return MinimumVIRENN(tol=delta, cutoff=cutoff).get_nn( struct, n) else: raise RuntimeError("unsupported neighbor-finding method ({}).".format( approach))
0.002111
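A usage sketch assuming pymatgen is installed; Structure and Lattice are real pymatgen classes, but the exact import path and the shape of the returned neighbor list depend on the pymatgen version:

from pymatgen.core import Lattice, Structure

# CsCl-type structure; ask for the neighbors of site 0 via Voronoi analysis.
struct = Structure(Lattice.cubic(4.2), ['Cs', 'Cl'],
                   [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
neighbors = get_neighbors_of_site_with_index(struct, 0, approach='voronoi')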
def update(self): """Update processes stats using the input method.""" # Init new stats stats = self.get_init_value() if self.input_method == 'local': # Update stats using the standard system lib # Note: Update is done in the processcount plugin # Just return the processes list stats = glances_processes.getlist() elif self.input_method == 'snmp': # No SNMP grab for processes pass # Update the stats self.stats = stats # Get the max values (dict) # Use Deep copy to avoid change between update and display self.max_values = copy.deepcopy(glances_processes.max_values()) return self.stats
0.002653
def get_value(self, field, quick): # type: (Field, bool) -> Any """ Ask user the question represented by this instance. Args: field (Field): The field we're asking the user to provide the value for. quick (bool): Enable quick mode. In quick mode, the form will reduce the number of question asked by using defaults wherever possible. This can greatly reduce the number of interactions required on the user part, but will obviously limit the user choices. This should probably be enabled only by a specific user action (like passing a ``--quick`` flag etc.). Returns: The user response converted to a python type using the :py:attr:`cliform.core.Field.type` converter. """ if callable(field.default): default = field.default(self) else: default = field.default if quick and default is not None: return default shell.cprint('<90>{}', field.help) while True: try: answer = click.prompt(field.pretty_prompt, default=default) return field.type(answer) except ValueError: shell.cprint("<31>Unsupported value")
0.002212
def remove(self, connection): '''Remove a connection''' key = (connection.host, connection.port) with self._lock: found = self._connections.pop(key, None) try: self.close_connection(found) except Exception as exc: logger.warn('Failed to close %s: %s', connection, exc) return found
0.005479