Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def add_user_to_allow(self, name, user):
    """Add a user to the given acl allow block."""
    # Clear user from both allow and deny before adding
    if not self.remove_user_from_acl(name, user):
        return False
    if name not in self._acl:
        return False
    self._acl[name]['allow'].append(user)
    return True
0.005525
def get_tag(self, name):
    """Return the tag as Tag object."""
    res = self.get_request('/tag/' + name)
    return Tag(cloud_manager=self, **res['tag'])
0.011905
def _get_mapping(self, section):
    '''mapping will take the section name from a Singularity recipe
    and return a map function to add it to the appropriate place.
    Any lines that don't cleanly map are assumed to be comments.

    Parameters
    ==========
    section: the name of the Singularity recipe section

    Returns
    =======
    function: to map a line to its command group (e.g., install)
    '''
    # Ensure section is lowercase
    section = section.lower()
    mapping = {"environment": self._env,
               "comments": self._comments,
               "runscript": self._run,
               "labels": self._labels,
               "setup": self._setup,
               "files": self._files,
               "from": self._from,
               "post": self._post,
               "test": self._test,
               "help": self._comments}
    if section in mapping:
        return mapping[section]
    return self._comments
0.004634
def image(self, src, title, text):
    """Rendering an image with title and text.

    :param src: source link of the image.
    :param title: title text of the image.
    :param text: alt text of the image.
    """
    src = escape_link(src)
    text = escape(text, quote=True)
    if title:
        title = escape(title, quote=True)
        html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
    else:
        html = '<img src="%s" alt="%s"' % (src, text)
    if self.options.get('use_xhtml'):
        return '%s />' % html
    return '%s>' % html
0.003221
def visitTripleConstraint(self, ctx: ShExDocParser.TripleConstraintContext):
    """ tripleConstraint: senseFlags? predicate inlineShapeExpression cardinality? annotation* semanticActions """
    # This exists because of the predicate within annotation - if we default to visitchildren, we intercept both
    # predicates
    if ctx.senseFlags():
        self.visit(ctx.senseFlags())
    self.visit(ctx.predicate())
    self.visit(ctx.inlineShapeExpression())
    self._card_annotations_and_semacts(ctx)
0.007477
def pos3(self):
    ''' Use pos-sc1-sc2 as POS '''
    parts = [self.pos]
    if self.sc1 and self.sc1 != '*':
        parts.append(self.sc1)
    if self.sc2 and self.sc2 != '*':
        parts.append(self.sc2)
    return '-'.join(parts)
0.007353
def _linearize(cls, inst_list):
    """
    A generator function which performs linearization of the list of
    instructions; that is, each instruction which should be executed
    will be yielded in turn, recursing into ``Instructions`` instances
    that appear in the list.

    :param inst_list: A list (or other sequence) of instructions.

    :returns: An iterator which returns all instructions.
    """
    for inst in inst_list:
        # Check if we need to recurse
        if isinstance(inst, Instructions):
            for sub_inst in cls._linearize(inst.instructions):
                yield sub_inst
        else:
            yield inst
0.002813
def generate(self, profile, parameters, projectpath, inputfiles, provenancedata=None):
    """Yields (inputtemplate, inputfilename, outputfilename, metadata) tuples"""
    project = os.path.basename(projectpath)
    if self.parent:  #pylint: disable=too-many-nested-blocks
        #We have a parent, infer the correct filename
        #copy filename from parent
        parent = self.getparent(profile)
        #get input files for the parent InputTemplate
        parentinputfiles = parent.matchingfiles(projectpath)
        if not parentinputfiles:
            raise Exception("OutputTemplate '" + self.id + "' has parent '" + self.parent + "', but no matching input files were found!")
        #Do we specify a full filename?
        for seqnr, inputfilename, inputtemplate in parentinputfiles:  #pylint: disable=unused-variable
            if self.filename:
                filename = self.filename
                parentfile = CLAMInputFile(projectpath, inputfilename)
            elif parent:
                filename = inputfilename
                parentfile = CLAMInputFile(projectpath, inputfilename)
            else:
                raise Exception("OutputTemplate '" + self.id + "' has no parent nor filename defined!")
            #Make actual CLAMInputFile objects of ALL relevant input files, that is:
            #all unique=True files and all unique=False files with the same sequence number
            relevantinputfiles = []
            for seqnr2, inputfilename2, inputtemplate2 in inputfiles:
                if seqnr2 == 0 or seqnr2 == seqnr:
                    relevantinputfiles.append((inputtemplate2, CLAMInputFile(projectpath, inputfilename2)))
            #resolve # in filename (done later)
            #if not self.unique:
            #    filename.replace('#', str(seqnr))
            if not self.filename and self.removeextensions:
                #Remove unwanted extensions
                if self.removeextensions is True:
                    #Remove any and all extensions
                    filename = filename.split('.')[0]
                elif isinstance(self.removeextensions, list):
                    #Remove specified extension
                    for ext in self.removeextensions:
                        if ext:
                            if ext[0] != '.' and filename[-len(ext) - 1:] == '.' + ext:
                                filename = filename[:-len(ext) - 1]
                            elif ext[0] == '.' and filename[-len(ext):] == ext:
                                filename = filename[:-len(ext)]
            if self.extension and not self.filename and filename[-len(self.extension) - 1:] != '.' + self.extension:
                #(also prevents duplicate extensions)
                filename += '.' + self.extension
            #Now we create the actual metadata
            metadata = self.generatemetadata(parameters, parentfile, relevantinputfiles, provenancedata)
            #Resolve filename
            filename = resolveoutputfilename(filename, parameters, metadata, self, seqnr, project, inputfilename)
            yield inputtemplate, inputfilename, filename, metadata
    elif self.unique and self.filename:
        #outputtemplate has no parent, but specified a filename and is unique,
        #this implies it is not dependent on input files:
        metadata = self.generatemetadata(parameters, None, [], provenancedata)
        filename = resolveoutputfilename(self.filename, parameters, metadata, self, 0, project, None)
        yield None, None, filename, metadata
    else:
        raise Exception("Unable to generate from OutputTemplate, no parent or filename specified")
0.010644
def create_js_pay_params(self, **package):
    """
    Sign the parameters required by the WeChat JS payment API.
    See the payment development documentation for details ::

        wxclient.create_js_pay_params(
            body=order title,
            out_trade_no=local order number,
            total_fee=price in cents (fen),
            notify_url=notification url,
            spbill_create_ip=payer's ip (recommended),
        )

    :param package: the parameters to be signed
    :return: the object required for payment
    """
    pay_param, sign, sign_type = self._pay_sign_dict(
        package=self.create_js_pay_package(**package)
    )
    pay_param['paySign'] = sign
    pay_param['signType'] = sign_type
    # Tencent's JS API only accepts these keys in camelCase
    for key in ['appId', 'timeStamp', 'nonceStr']:
        pay_param[key] = str(pay_param.pop(key.lower()))
    return pay_param
0.00267
def set_mode_label_to_keywords_creation(self):
    """Set the mode label to the Keywords Creation/Update mode."""
    self.setWindowTitle(self.keyword_creation_wizard_name)
    if self.get_existing_keyword('layer_purpose'):
        mode_name = tr(
            'Keywords update wizard for layer <b>{layer_name}</b>').format(
                layer_name=self.layer.name())
    else:
        mode_name = tr(
            'Keywords creation wizard for layer <b>{layer_name}</b>'
        ).format(layer_name=self.layer.name())
    self.lblSubtitle.setText(mode_name)
0.003339
def list(self, **kwargs):
    """Retrieve a list of objects.

    Args:
        all (bool): If True, return all the items, without pagination
        per_page (int): Number of items to retrieve per request
        page (int): ID of the page to return (starts with page 1)
        as_list (bool): If set to False and no pagination option is
            defined, return a generator instead of a list
        **kwargs: Extra options to send to the server (e.g. sudo)

    Returns:
        list: The list of objects, or a generator if `as_list` is False

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the server cannot perform the request
    """
    # Duplicate data to avoid messing with what the user sent us
    data = kwargs.copy()
    if self.gitlab.per_page:
        data.setdefault('per_page', self.gitlab.per_page)

    # We get the attributes that need some special transformation
    types = getattr(self, '_types', {})
    if types:
        for attr_name, type_cls in types.items():
            if attr_name in data.keys():
                type_obj = type_cls(data[attr_name])
                data[attr_name] = type_obj.get_for_api()

    # Allow to overwrite the path, handy for custom listings
    path = data.pop('path', self.path)

    obj = self.gitlab.http_list(path, **data)
    if isinstance(obj, list):
        return [self._obj_cls(self, item) for item in obj]
    else:
        return base.RESTObjectList(self, self._obj_cls, obj)
0.00122
async def _sync_revoc(self, rr_id: str, rr_size: int = None) -> None:
    """
    Create revoc registry if need be for input revocation registry identifier;
    open and cache tails file reader.

    :param rr_id: revocation registry identifier
    :param rr_size: if new revocation registry necessary, its size (default as per _create_rev_reg())
    """
    LOGGER.debug('Issuer._sync_revoc >>> rr_id: %s, rr_size: %s', rr_id, rr_size)

    (cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id)

    try:
        await self.get_cred_def(cd_id)
    except AbsentCredDef:
        LOGGER.debug(
            'Issuer._sync_revoc: <!< tails tree %s may be for another ledger; no cred def found on %s',
            self._dir_tails,
            cd_id)
        raise AbsentCredDef('Tails tree {} may be for another ledger; no cred def found on {}'.format(
            self._dir_tails,
            cd_id))

    with REVO_CACHE.lock:
        revo_cache_entry = REVO_CACHE.get(rr_id, None)
        tails = None if revo_cache_entry is None else revo_cache_entry.tails
        if tails is None:  # it's a new revocation registry, or not yet set in cache
            try:
                tails = await Tails(self._dir_tails, cd_id, tag).open()
            except AbsentTails:
                await self._create_rev_reg(rr_id, rr_size)  # it's a new revocation registry
                tails = await Tails(self._dir_tails, cd_id, tag).open()  # symlink should exist now

            if revo_cache_entry is None:
                REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
            else:
                REVO_CACHE[rr_id].tails = tails

    LOGGER.debug('Issuer._sync_revoc <<<')
0.006678
def _old_epd_diffmags(coeff, fsv, fdv, fkv, xcc, ycc, bgv, bge, mag):
    '''
    This calculates the difference in mags after EPD coefficients are
    calculated.

    final EPD mags = median(magseries) + epd_diffmags()
    '''
    return -(coeff[0]*fsv**2. +
             coeff[1]*fsv +
             coeff[2]*fdv**2. +
             coeff[3]*fdv +
             coeff[4]*fkv**2. +
             coeff[5]*fkv +
             coeff[6] +
             coeff[7]*fsv*fdv +
             coeff[8]*fsv*fkv +
             coeff[9]*fdv*fkv +
             coeff[10]*np.sin(2*np.pi*xcc) +
             coeff[11]*np.cos(2*np.pi*xcc) +
             coeff[12]*np.sin(2*np.pi*ycc) +
             coeff[13]*np.cos(2*np.pi*ycc) +
             coeff[14]*np.sin(4*np.pi*xcc) +
             coeff[15]*np.cos(4*np.pi*xcc) +
             coeff[16]*np.sin(4*np.pi*ycc) +
             coeff[17]*np.cos(4*np.pi*ycc) +
             coeff[18]*bgv +
             coeff[19]*bge -
             mag)
0.001035
def _add_references(self, rec):
    """ Adds the reference to the record """
    for ref in self.document.getElementsByTagName('ref'):
        for ref_type, doi, authors, collaboration, journal, volume, page, year,\
                label, arxiv, publisher, institution, unstructured_text,\
                external_link, report_no, editors in self._get_reference(ref):
            subfields = []
            if doi:
                subfields.append(('a', doi))
            for author in authors:
                subfields.append(('h', author))
            for editor in editors:
                subfields.append(('e', editor))
            if year:
                subfields.append(('y', year))
            if unstructured_text:
                if page:
                    subfields.append(('m', unstructured_text + ', ' + page))
                else:
                    subfields.append(('m', unstructured_text))
            if collaboration:
                subfields.append(('c', collaboration))
            if institution:
                subfields.append(('m', institution))
            if publisher:
                subfields.append(('p', publisher))
            if arxiv:
                subfields.append(('r', arxiv))
            if report_no:
                subfields.append(('r', report_no))
            if external_link:
                subfields.append(('u', external_link))
            if label:
                subfields.append(('o', label))
            if ref_type == 'book':
                if journal:
                    subfields.append(('t', journal))
                if volume:
                    subfields.append(('m', volume))
                elif page and not unstructured_text:
                    subfields.append(('m', page))
            else:
                if volume and page:
                    subfields.append(('s', journal + "," + volume + "," + page))
                elif journal:
                    subfields.append(('t', journal))
            if ref_type:
                subfields.append(('d', ref_type))
            if not subfields:
                # misc-type references
                try:
                    r = ref.getElementsByTagName('mixed-citation')[0]
                    text = xml_to_text(r)
                    label = text.split()[0]
                    text = " ".join(text.split()[1:])
                    subfields.append(('s', text))
                    record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
                except IndexError:
                    # references without 'mixed-citation' tag
                    try:
                        r = ref.getElementsByTagName('note')[0]
                        subfields.append(('s', xml_to_text(r)))
                        record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
                    except IndexError:
                        # references without 'note' tag
                        subfields.append(('s', xml_to_text(ref)))
                        record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
            else:
                record_add_field(rec, '999', ind1='C', ind2='5', subfields=subfields)
0.003757
def show(self, id):
    """GET /datastores/id: Show a specific item."""
    # url('DataStores', id=ID)
    datastore = meta.Session.query(DataStore).get(id)
    # do not raise RuntimeError from discover_datasources
    # if in "test" mode
    try:
        datasources = discover_datasources(datastore.ogrstring)
    except RuntimeError:
        if "test" in request.params:
            datasources = None
        else:
            raise
    result = datastore.to_json()
    result['datasources'] = datasources
    return result
0.003396
def stop(self):
    """Output Checkstyle XML reports."""
    et = ET.ElementTree(self.checkstyle_element)
    f = BytesIO()
    et.write(f, encoding='utf-8', xml_declaration=True)
    xml = f.getvalue().decode('utf-8')
    if self.output_fd is None:
        print(xml)
    else:
        self.output_fd.write(xml)
    super(CheckstylePlugin, self).stop()
0.005089
def timer(fun, *a, **k):
    """ define a timer for a rule function for log and statistic purposes """
    @wraps(fun)
    def timer(*a, **k):
        start = arrow.now()
        ret = fun(*a, **k)
        end = arrow.now()
        print('timer:fun: %s\n start:%s,end:%s, took [%s]' % (
            str(fun), str(start), str(end), str(end - start)))
        return ret
    return timer
0.002571
def location_path(cls, project, location):
    """Return a fully-qualified location string."""
    return google.api_core.path_template.expand(
        "projects/{project}/locations/{location}",
        project=project,
        location=location,
    )
0.007246
def query(self, query, interpolations=None):
    """
    Queries a timeseries table.

    :param query: The timeseries query.
    :type query: string
    :rtype: :class:`TsObject <riak.ts_object.TsObject>`
    """
    return self._client.ts_query(self, query, interpolations)
0.006601
def columnize_commands(self, commands):
    """List commands arranged in aligned columns"""
    commands.sort()
    width = self.debugger.settings['width']
    return columnize.columnize(commands, displaywidth=width,
                               lineprefix=' ')
0.00692
def _init():
    '''
    Internal switchyard static initialization method.
    '''
    if ApplicationLayer._isinit:
        return
    ApplicationLayer._isinit = True
    ApplicationLayer._to_app = {}
    ApplicationLayer._from_app = Queue()
0.010909
def uninstall(self, package):
    """Uninstalls the given package (given in pip's package syntax or a tuple of
    ('name', 'ver')) from this virtual environment."""
    if isinstance(package, tuple):
        package = '=='.join(package)
    if not self.is_installed(package):
        self._write_to_log('%s is not installed, skipping' % package)
        return
    try:
        self._execute_pip(['uninstall', '-y', package])
    except subprocess.CalledProcessError as e:
        raise PackageRemovalException((e.returncode, e.output, package))
0.005085
def _setup():
    """Add a variety of default schemes."""
    s = str.split
    if sys.version_info < (3, 0):
        # noinspection PyUnresolvedReferences
        s = unicode.split

    def pop_all(some_dict, some_list):
        for scheme in some_list:
            some_dict.pop(scheme)

    global SCHEMES
    SCHEMES = copy.deepcopy(sanscript.SCHEMES)
    pop_all(SCHEMES, [sanscript.ORIYA, sanscript.BENGALI, sanscript.GUJARATI])
    SCHEMES[HK].update({
        'vowels': s("""a A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[HK]['consonants'] + s("""n2 r2 zh""")
    })
    SCHEMES[ITRANS].update({
        'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[ITRANS]['consonants'] + s("""n2 r2 zh""")
    })
    pop_all(SCHEMES[ITRANS].synonym_map, s("""e o"""))
    SCHEMES[OPTITRANS].update({
        'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[OPTITRANS]['consonants'] + s("""n2 r2 zh""")
    })
    pop_all(SCHEMES[OPTITRANS].synonym_map, s("""e o"""))
0.002168
def _sync(self):
    """Write persistent dictionary to disc."""
    _logger.debug("_sync()")
    self._lock.acquire_write()  # TODO: read access is enough?
    try:
        if self._loaded:
            self._dict.sync()
    finally:
        self._lock.release()
0.006826
def add_keywords_from_list(self, keyword_list):
    """To add keywords from a list

    Args:
        keyword_list (list(str)): List of keywords to add

    Examples:
        >>> keyword_processor.add_keywords_from_list(["java", "python"])

    Raises:
        AttributeError: If `keyword_list` is not a list.
    """
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")
    for keyword in keyword_list:
        self.add_keyword(keyword)
0.003711
def timed_connectivity_check(self, event):
    """Tests internet connectivity in regular intervals and updates the nodestate accordingly"""
    self.status = self._can_connect()
    self.log('Timed connectivity check:', self.status, lvl=verbose)
    if self.status:
        if not self.old_status:
            self.log('Connectivity gained')
            self.fireEvent(backend_nodestate_toggle(
                STATE_UUID_CONNECTIVITY, on=True, force=True))
    else:
        if self.old_status:
            self.log('Connectivity lost', lvl=warn)
            self.old_status = False
            self.fireEvent(backend_nodestate_toggle(
                STATE_UUID_CONNECTIVITY, off=True, force=True))
    self.old_status = self.status
0.006631
def build_filter(filter_or_string, *args, **kwargs):
    """
    Overloaded filter construction. If ``filter_or_string`` is a string we
    look up its corresponding class in the filter registry and return it.
    Otherwise, assume ``filter_or_string`` is an instance of a filter.

    :return: :class:`~es_fluent.filters.Filter`
    """
    if isinstance(filter_or_string, basestring):
        # Names that start with `~` indicate a negated filter.
        if filter_or_string.startswith('~'):
            filter_name = filter_or_string[1:]
            return ~FILTER_REGISTRY[filter_name](*args, **kwargs)
        else:
            filter_name = filter_or_string
            return FILTER_REGISTRY[filter_name](*args, **kwargs)
    else:
        return filter_or_string
0.001295
def set(self, key, value, *, section=DataStoreDocumentSection.Data):
    """ Store a value under the specified key in the given section of the document.

    This method stores a value into the specified section of the workflow
    data store document. Any existing value is overridden. Before storing a
    value, any linked GridFS document under the specified key is deleted.

    Args:
        key (str): The key pointing to the value that should be stored/updated.
                   It supports MongoDB's dot notation for nested fields.
        value: The value that should be stored/updated.
        section (DataStoreDocumentSection): The section from which the data should
                                            be retrieved.

    Returns:
        bool: ``True`` if the value could be set/updated, otherwise ``False``.
    """
    key_notation = '.'.join([section, key])
    try:
        self._delete_gridfs_data(self._data_from_dotnotation(key_notation, default=None))
    except KeyError:
        logger.info('Adding new field {} to the data store'.format(key_notation))

    result = self._collection.update_one(
        {"_id": ObjectId(self._workflow_id)},
        {
            "$set": {
                key_notation: self._encode_value(value)
            },
            "$currentDate": {"lastModified": True}
        }
    )
    return result.modified_count == 1
0.005917
def _ordereddict2dict(input_ordered_dict):
    '''
    Convert ordered dictionary to a dictionary
    '''
    return salt.utils.json.loads(salt.utils.json.dumps(input_ordered_dict))
0.005525
def addTerms(self, data, LIMIT=25, _print=True, crawl=False):
    """ need: label <str>
              type <str> (term, cde, annotation, or relationship)
        options: definition <str>  # bug with quotations
                 superclasses [{'id':<int>}]
                 synonyms [{'literal':<str>}]
                 existing_ids [{'iri':<str>,'curie':<str>}]
                 ontologies [{'id':<int>}]
        example: [{'type':'term', 'label':'brain'}]
    """
    needed = set([
        'label',
        'type',
    ])
    url_base = self.base_url + '/api/1/ilx/add'
    terms = []
    for d in data:
        if (set(list(d)) & needed) != needed:
            exit('You need keys: ' + str(needed - set(list(d))))
        if not d.get('label') or not d.get('type'):  # php wont catch empty type!
            exit('=== Data is missing label or type! ===')
        d['term'] = d.pop('label')  # ilx only accepts term, will need to replaced back
        #d['batch-elastic'] = 'True'  # term/add and edit should be ready now
        terms.append((url_base, d))
    primer_responses = self.post(
        terms, action='Priming Terms', LIMIT=LIMIT, _print=_print, crawl=crawl)
    ilx = {}
    for primer_response in primer_responses:
        primer_response['term'] = primer_response['term'].replace('&#39;', "'")
        primer_response['term'] = primer_response['term'].replace('&#34;', '"')
        primer_response['label'] = primer_response.pop('term')
        ilx[primer_response['label'].lower()] = primer_response
    url_base = self.base_url + '/api/1/term/add'
    terms = []
    for d in data:
        d['label'] = d.pop('term')
        d = scicrunch_client_helper.superclasses_bug_fix(d)
        if not ilx.get(d['label'].lower()):  # ilx can be incomplete if errored term
            continue
        try:
            d.update({'ilx': ilx[d['label'].lower()]['ilx']})
        except:
            d.update({'ilx': ilx[d['label'].lower()]['fragment']})
        terms.append((url_base, d))
    return self.post(
        terms, action='Adding Terms', LIMIT=LIMIT, _print=_print, crawl=crawl)
0.00571
def hardware_flexport_flexport_type_skip_deconfig(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
    flexport = ET.SubElement(hardware, "flexport")
    id_key = ET.SubElement(flexport, "id")
    id_key.text = kwargs.pop('id')
    flexport_type = ET.SubElement(flexport, "flexport_type")
    skip_deconfig = ET.SubElement(flexport_type, "skip_deconfig")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004886
def parameter_present(name, db_parameter_group_family, description, parameters=None,
                      apply_method="pending-reboot", tags=None, region=None, key=None,
                      keyid=None, profile=None):
    '''
    Ensure DB parameter group exists and update parameters.

    name
        The name for the parameter group.

    db_parameter_group_family
        The DB parameter group family name. A DB parameter group can be
        associated with one and only one DB parameter group family, and can be
        applied only to a DB instance running a database engine and engine
        version compatible with that DB parameter group family.

    description
        Parameter group description.

    parameters
        The DB parameters that need to be changed of type dictionary.

    apply_method
        The `apply-immediate` method can be used only for dynamic parameters;
        the `pending-reboot` method can be used with MySQL and Oracle DB
        instances for either dynamic or static parameters. For Microsoft SQL
        Server DB instances, the `pending-reboot` method can be used only for
        static parameters.

    tags
        A dict of tags.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    res = __salt__['boto_rds.parameter_group_exists'](name=name, tags=tags, region=region,
                                                      key=key, keyid=keyid, profile=profile)
    if not res.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Parameter group {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto_rds.create_parameter_group'](name=name,
                                                              db_parameter_group_family=db_parameter_group_family,
                                                              description=description,
                                                              tags=tags, region=region,
                                                              key=key, keyid=keyid,
                                                              profile=profile)
        if not created:
            ret['result'] = False
            ret['comment'] = 'Failed to create {0} parameter group.'.format(name)
            return ret
        ret['changes']['New Parameter Group'] = name
        ret['comment'] = 'Parameter group {0} created.'.format(name)
    else:
        ret['comment'] = 'Parameter group {0} present.'.format(name)
    if parameters is not None:
        params = {}
        changed = {}
        for items in parameters:
            for k, value in items.items():
                if type(value) is bool:
                    params[k] = 'on' if value else 'off'
                else:
                    params[k] = six.text_type(value)
        log.debug('Parameters from user are : %s.', params)
        options = __salt__['boto_rds.describe_parameters'](name=name, region=region,
                                                           key=key, keyid=keyid, profile=profile)
        if not options.get('result'):
            ret['result'] = False
            ret['comment'] = os.linesep.join([ret['comment'],
                                              'Failed to get parameters for group {0}.'.format(name)])
            return ret
        for parameter in options['parameters'].values():
            if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != six.text_type(parameter['ParameterValue']):
                log.debug(
                    'Values that are being compared for %s are %s:%s.',
                    parameter['ParameterName'],
                    params.get(parameter['ParameterName']),
                    parameter['ParameterValue']
                )
                changed[parameter['ParameterName']] = params.get(parameter['ParameterName'])
        if changed:
            if __opts__['test']:
                ret['comment'] = os.linesep.join([ret['comment'],
                                                  'Parameters {0} for group {1} are set to be changed.'.format(changed, name)])
                ret['result'] = None
                return ret
            update = __salt__['boto_rds.update_parameter_group'](name, parameters=changed,
                                                                 apply_method=apply_method,
                                                                 tags=tags, region=region,
                                                                 key=key, keyid=keyid,
                                                                 profile=profile)
            if 'error' in update:
                ret['result'] = False
                ret['comment'] = os.linesep.join([ret['comment'],
                                                  'Failed to change parameters {0} for group {1}:'.format(changed, name),
                                                  update['error']['message']])
                return ret
            ret['changes']['Parameters'] = changed
            ret['comment'] = os.linesep.join([ret['comment'],
                                              'Parameters {0} for group {1} are changed.'.format(changed, name)])
        else:
            ret['comment'] = os.linesep.join([ret['comment'],
                                              'Parameters {0} for group {1} are present.'.format(params, name)])
    return ret
0.003959
def _modifies_cart(func):
    ''' Decorator that makes the wrapped function raise ValidationError
    if we're doing something that could modify the cart.

    It also wraps the execution of this function in a database transaction,
    and marks the boundaries of a cart operations batch.
    '''
    @functools.wraps(func)
    def inner(self, *a, **k):
        self._fail_if_cart_is_not_active()
        with transaction.atomic():
            with BatchController.batch(self.cart.user):
                # Mark the version of self in the batch cache as modified
                memoised = self.for_user(self.cart.user)
                memoised._modified_by_batch = True
                return func(self, *a, **k)
    return inner
0.00137
def diff(self, source_path='', target_path='', which=-1):
    """Build the diff between original docstring and proposed docstring.

    :type which: int
        -> -1 means all the docstrings of the file
        -> >=0 means the index of the docstring to proceed
        (Default value = -1)
    :param source_path: (Default value = '')
    :param target_path: (Default value = '')
    :returns: the resulted diff
    :rtype: List[str]
    """
    list_from, list_to = self.compute_before_after()

    if source_path.startswith(os.sep):
        source_path = source_path[1:]
    if source_path and not source_path.endswith(os.sep):
        source_path += os.sep
    if target_path.startswith(os.sep):
        target_path = target_path[1:]
    if target_path and not target_path.endswith(os.sep):
        target_path += os.sep

    fromfile = 'a/' + source_path + os.path.basename(self.input_file)
    tofile = 'b/' + target_path + os.path.basename(self.input_file)
    diff_list = difflib.unified_diff(list_from, list_to, fromfile, tofile)
    return [d for d in diff_list]
0.002606
def process_pgp(self, data, name):
    """
    PGP key processing
    :param data:
    :param name:
    :return:
    """
    ret = []
    try:
        data = to_string(data)
        parts = re.split(r'-{5,}BEGIN', data)
        if len(parts) == 0:
            return

        if len(parts[0]) == 0:
            parts.pop(0)

        crt_arr = ['-----BEGIN' + x for x in parts]
        for idx, pem_rec in enumerate(crt_arr):
            try:
                pem_rec = pem_rec.strip()
                if len(pem_rec) == 0:
                    continue

                ret.append(self.process_pgp_raw(pem_rec.encode(), name, idx))

            except Exception as e:
                logger.error('Exception in processing PGP rec file %s: %s' % (name, e))
                self.trace_logger.log(e)

    except Exception as e:
        logger.error('Exception in processing PGP file %s: %s' % (name, e))
        self.trace_logger.log(e)
    return ret
0.003781
def parse(self, element):
    r"""Parse xml element.

    :param element: an :class:`~xml.etree.ElementTree.Element` instance
    :rtype: dict
    """
    values = {}
    for child in element:
        node = self.get_node(child)
        subs = self.parse(child)
        value = subs or node['value']
        if node['tag'] not in values:
            values[node['tag']] = value
        else:
            if not isinstance(values[node['tag']], list):
                values[node['tag']] = [values.pop(node['tag'])]
            values[node['tag']].append(value)
    return values
0.00312
def include(self, **attrs):
    """Add items to distribution that are named in keyword arguments

    For example, 'dist.include(py_modules=["x"])' would add 'x' to
    the distribution's 'py_modules' attribute, if it was not already
    there.

    Currently, this method only supports inclusion for attributes that are
    lists or tuples.  If you need to add support for adding to other
    attributes in this or a subclass, you can add an '_include_X' method,
    where 'X' is the name of the attribute.  The method will be called with
    the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
    will try to call 'dist._include_foo({"bar":"baz"})', which can then
    handle whatever special inclusion logic is needed.
    """
    for k, v in attrs.items():
        include = getattr(self, '_include_' + k, None)
        if include:
            include(v)
        else:
            self._include_misc(k, v)
0.001996
def render_linked_css(self, css_files: Iterable[str]) -> str:
    """Default method used to render the final css links for the
    rendered webpage.

    Override this method in a sub-classed controller to change the output.
    """
    paths = []
    unique_paths = set()  # type: Set[str]

    for path in css_files:
        if not is_absolute(path):
            path = self.static_url(path)
        if path not in unique_paths:
            paths.append(path)
            unique_paths.add(path)

    return "".join(
        '<link href="' + escape.xhtml_escape(p) + '" '
        'type="text/css" rel="stylesheet"/>'
        for p in paths
    )
0.002801
def _modify(self, **patch):
    '''Modify only draft or legacy policies

    Published policies cannot be modified

    :raises: OperationNotSupportedOnPublishedPolicy
    '''
    legacy = patch.pop('legacy', False)
    tmos_ver = self._meta_data['bigip']._meta_data['tmos_version']
    self._filter_version_specific_options(tmos_ver, **patch)
    if 'Drafts' not in self._meta_data['uri'] and \
            LooseVersion(tmos_ver) >= LooseVersion('12.1.0') and \
            not legacy:
        msg = 'Modify operation not allowed on a published policy.'
        raise OperationNotSupportedOnPublishedPolicy(msg)
    super(Policy, self)._modify(**patch)
0.002837
def email(random=random, *args, **kwargs):
    """
    Return an e-mail address

    >>> mock_random.seed(0)
    >>> email(random=mock_random)
    '[email protected]'
    >>> email(random=mock_random)
    '[email protected]'
    >>> email(random=mock_random, name="charles")
    '[email protected]'
    """
    if 'name' in kwargs and kwargs['name']:
        words = kwargs['name']
    else:
        words = random.choice([
            noun(random=random),
            name(random=random),
            name(random=random) + "+spam",
        ])
    return _slugify(words) + "@" + domain(random=random)
0.00155
def confd_state_internal_callpoints_authorization_callbacks_registration_type_range_path(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
    internal = ET.SubElement(confd_state, "internal")
    callpoints = ET.SubElement(internal, "callpoints")
    authorization_callbacks = ET.SubElement(callpoints, "authorization-callbacks")
    registration_type = ET.SubElement(authorization_callbacks, "registration-type")
    range = ET.SubElement(registration_type, "range")
    path = ET.SubElement(range, "path")
    path.text = kwargs.pop('path')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.007335
def partition_source(src):
    """Partitions source into a list of `CodePartition`s for import
    refactoring.
    """
    # In python2, ast.parse(text_string_with_encoding_pragma) raises
    # SyntaxError: encoding declaration in Unicode string
    ast_obj = ast.parse(src.encode('UTF-8'))
    visitor = TopLevelImportVisitor()
    visitor.visit(ast_obj)

    line_offsets = get_line_offsets_by_line_no(src)

    chunks = []
    startpos = 0
    pending_chunk_type = None
    possible_ending_tokens = None
    seen_import = False
    for (
            token_type, token_text, (srow, scol), (erow, ecol), _,
    ) in tokenize.generate_tokens(io.StringIO(src).readline):
        # Searching for a start of a chunk
        if pending_chunk_type is None:
            if not seen_import and token_type == tokenize.COMMENT:
                if 'noreorder' in token_text:
                    chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                    break
                else:
                    pending_chunk_type = CodeType.PRE_IMPORT_CODE
                    possible_ending_tokens = TERMINATES_COMMENT
            elif not seen_import and token_type == tokenize.STRING:
                pending_chunk_type = CodeType.PRE_IMPORT_CODE
                possible_ending_tokens = TERMINATES_DOCSTRING
            elif scol == 0 and srow in visitor.top_level_import_line_numbers:
                seen_import = True
                pending_chunk_type = CodeType.IMPORT
                possible_ending_tokens = TERMINATES_IMPORT
            elif token_type == tokenize.NL:
                # A NL token is a non-important newline, we'll immediately
                # append a NON_CODE partition
                endpos = line_offsets[erow] + ecol
                srctext = src[startpos:endpos]
                startpos = endpos
                chunks.append(CodePartition(CodeType.NON_CODE, srctext))
            elif token_type == tokenize.COMMENT:
                if 'noreorder' in token_text:
                    chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                    break
                else:
                    pending_chunk_type = CodeType.CODE
                    possible_ending_tokens = TERMINATES_COMMENT
            elif token_type == tokenize.ENDMARKER:
                # Token ended right before end of file or file was empty
                pass
            else:
                # We've reached a `CODE` block, which spans the rest of the
                # file (intentionally timid).  Let's append that block and be
                # done
                chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
                break
        # Attempt to find ending of token
        elif token_type in possible_ending_tokens:
            endpos = line_offsets[erow] + ecol
            srctext = src[startpos:endpos]
            startpos = endpos
            chunks.append(CodePartition(pending_chunk_type, srctext))
            pending_chunk_type = None
            possible_ending_tokens = None
        elif token_type == tokenize.COMMENT and 'noreorder' in token_text:
            chunks.append(CodePartition(CodeType.CODE, src[startpos:]))
            break

    chunks = [chunk for chunk in chunks if chunk.src]

    # Make sure we're not removing any code
    assert _partitions_to_src(chunks) == src
    return chunks
0.000296
def get_upload_path(instance, filename):
    """Overriding to store the original filename"""
    if not instance.name:
        instance.name = filename  # set original filename
    date = timezone.now().date()
    filename = '{name}.{ext}'.format(name=uuid4().hex,
                                     ext=filename.split('.')[-1])

    return os.path.join('post_office_attachments', str(date.year),
                        str(date.month), str(date.day), filename)
0.002155
def _store16(ins):
    """ Stores 2nd operand content into address of 1st operand.
    store16 a, x =>  *(&a) = x
    Use '*' for indirect store on 1st operand.
    """
    output = []
    output = _16bit_oper(ins.quad[2])

    try:
        value = ins.quad[1]
        indirect = False
        if value[0] == '*':
            indirect = True
            value = value[1:]

        value = int(value) & 0xFFFF
        if indirect:
            output.append('ex de, hl')
            output.append('ld hl, (%s)' % str(value))
            output.append('ld (hl), e')
            output.append('inc hl')
            output.append('ld (hl), d')
        else:
            output.append('ld (%s), hl' % str(value))
    except ValueError:
        if value[0] == '_':
            if indirect:
                output.append('ex de, hl')
                output.append('ld hl, (%s)' % str(value))
                output.append('ld (hl), e')
                output.append('inc hl')
                output.append('ld (hl), d')
            else:
                output.append('ld (%s), hl' % str(value))
        elif value[0] == '#':
            value = value[1:]
            if indirect:
                output.append('ex de, hl')
                output.append('ld hl, (%s)' % str(value))
                output.append('ld (hl), e')
                output.append('inc hl')
                output.append('ld (hl), d')
            else:
                output.append('ld (%s), hl' % str(value))
        else:
            output.append('ex de, hl')
            if indirect:
                output.append('pop hl')
                output.append('ld a, (hl)')
                output.append('inc hl')
                output.append('ld h, (hl)')
                output.append('ld l, a')
            else:
                output.append('pop hl')
            output.append('ld (hl), e')
            output.append('inc hl')
            output.append('ld (hl), d')

    return output
0.000511
def max(self):
    """
    Returns the maximum value of the domain.

    :rtype: `float` or `np.inf`
    """
    return int(self._max) if not np.isinf(self._max) else self._max
0.010204
def make_response(event):
    """Make a response from webhook event."""
    code, message = event.status
    response = jsonify(**event.response)
    response.headers['X-Hub-Event'] = event.receiver_id
    response.headers['X-Hub-Delivery'] = event.id
    if message:
        response.headers['X-Hub-Info'] = message
    add_link_header(response, {'self': url_for(
        '.event_item', receiver_id=event.receiver_id, event_id=event.id,
        _external=True
    )})
    return response, code
0.002024
def non_empty_lines(path):
    """
    Yield non-empty lines from file at path
    """
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield line
0.004651
def start(self):
    """Starts watching the path and running the test jobs."""
    assert not self.watching

    def selector(evt):
        if evt.is_directory:
            return False
        path = evt.path
        if path in self._last_fnames:  # Detected a "killing cycle"
            return False
        for pattern in self.skip_pattern.split(";"):
            if fnmatch(path, pattern.strip()):
                return False
        return True

    def watchdog_handler(evt):
        wx.CallAfter(self._watchdog_handler, evt)

    # Force a first event
    self._watching = True
    self._last_fnames = []
    self._evts = [None]
    self._run_subprocess()

    # Starts the watchdog observer
    from .watcher import watcher
    self._watcher = watcher(path=self.directory, selector=selector,
                            handler=watchdog_handler)
    self._watcher.__enter__()
0.010539
def dispatch(self, packet):
    """
    dispatch: XBee data dict -> None

    When called, dispatch checks the given packet against each
    registered callback method and calls each callback whose filter
    function returns true.
    """
    for handler in self.handlers:
        if handler['filter'](packet):
            # Call the handler method with its associated
            # name and the packet which passed its filter check
            handler['callback'](handler['name'], packet)
0.003745
def uninstall_bundle(self, bundle):
    # type: (Bundle) -> None
    """
    Ends the uninstallation of the given bundle (must be called by Bundle)

    :param bundle: The bundle to uninstall
    :raise BundleException: Invalid bundle
    """
    if bundle is None:
        # Do nothing
        return

    with self.__bundles_lock:
        # Stop the bundle first
        bundle.stop()

        bundle_id = bundle.get_bundle_id()
        if bundle_id not in self.__bundles:
            raise BundleException("Invalid bundle {0}".format(bundle))

        # Notify listeners
        self._dispatcher.fire_bundle_event(
            BundleEvent(BundleEvent.UNINSTALLED, bundle)
        )

        # Remove it from the dictionary
        del self.__bundles[bundle_id]

        # Remove it from the system => avoid unintended behaviors and
        # forces a complete module reload if it is re-installed
        name = bundle.get_symbolic_name()
        try:
            del sys.modules[name]
        except KeyError:
            # Ignore
            pass

        try:
            # Clear reference in parent
            parent, basename = name.rsplit(".", 1)
            if parent:
                delattr(sys.modules[parent], basename)
        except (KeyError, AttributeError, ValueError):
            # Ignore errors
            pass
0.002026
def execute(self, run):
    """
    This function executes the tool with a sourcefile with options.
    It also calls functions for output before and after the run.
    """
    self.output_handler.output_before_run(run)
    benchmark = self.benchmark

    memlimit = benchmark.rlimits.get(MEMLIMIT)

    args = run.cmdline()
    logging.debug('Command line of run is %s', args)
    run_result = \
        self.run_executor.execute_run(
            args,
            output_filename=run.log_file,
            output_dir=run.result_files_folder,
            result_files_patterns=benchmark.result_files_patterns,
            hardtimelimit=benchmark.rlimits.get(TIMELIMIT),
            softtimelimit=benchmark.rlimits.get(SOFTTIMELIMIT),
            walltimelimit=benchmark.rlimits.get(WALLTIMELIMIT),
            cores=self.my_cpus,
            memory_nodes=self.my_memory_nodes,
            memlimit=memlimit,
            environments=benchmark.environment(),
            workingDir=benchmark.working_directory(),
            maxLogfileSize=benchmark.config.maxLogfileSize,
            files_count_limit=benchmark.config.filesCountLimit,
            files_size_limit=benchmark.config.filesSizeLimit)

    if self.run_executor.PROCESS_KILLED:
        # If the run was interrupted, we ignore the result and cleanup.
        try:
            if benchmark.config.debug:
                os.rename(run.log_file, run.log_file + ".killed")
            else:
                os.remove(run.log_file)
        except OSError:
            pass
        return 1

    if self.my_cpus:
        run_result['cpuCores'] = self.my_cpus
    if self.my_memory_nodes:
        run_result['memoryNodes'] = self.my_memory_nodes

    run.set_result(run_result)
    self.output_handler.output_after_run(run)
0.001028
def ReleaseTypeEnum(ctx):
    """Types of Releases."""
    return Enum(
        ctx,
        all=0,
        selected=3,
        sametype=4,
        notselected=5,
        inversetype=6,
        default=Pass
    )
0.004717
def bin_query(self, table, chrom, start, end):
    """
    perform an efficient spatial query using the bin column if available.
    The possible bins are calculated from the `start` and `end` sent to
    this function.

    Parameters
    ----------
    table : str or table
        table to query

    chrom : str
        chromosome for the query

    start : int
        0-based start position

    end : int
        0-based end position
    """
    if isinstance(table, six.string_types):
        table = getattr(self, table)

    try:
        tbl = table._table
    except AttributeError:
        tbl = table.column_descriptions[0]['type']._table

    q = table.filter(tbl.c.chrom == chrom)
    if hasattr(tbl.c, "bin"):
        bins = Genome.bins(start, end)
        if len(bins) < 100:
            q = q.filter(tbl.c.bin.in_(bins))

    if hasattr(tbl.c, "txStart"):
        return q.filter(tbl.c.txStart <= end).filter(tbl.c.txEnd >= start)
    return q.filter(tbl.c.chromStart <= end).filter(tbl.c.chromEnd >= start)
0.002632
def player_s(self, sid):
    """Returns the full set of data on a player, no filtering"""
    try:
        url = self.pubg_url_steam.format(str(sid))
        response = requests.request("GET", url, headers=self.headers)
        return json.loads(response.text)
    except BaseException as error:
        print('Unhandled exception: ' + str(error))
        raise
0.007353
def get_parser(commands):
    """
    Generate argument parser given a list of subcommand specifications.

    :type commands: list of (str, function, function)
    :arg  commands:
        Each element must be a tuple ``(name, adder, runner)``.

        :param   name: subcommand
        :param  adder: a function takes one object which is an instance
                       of :class:`argparse.ArgumentParser` and add
                       arguments to it
        :param runner: a function takes keyword arguments which must be
                       specified by the arguments parsed by the parser
                       defined by `adder`.  Docstring of this function
                       will be the description of the subcommand.
    """
    parser = argparse.ArgumentParser(
        formatter_class=Formatter,
        description=__doc__,
        epilog=EPILOG,
    )
    subparsers = parser.add_subparsers()

    for (name, adder, runner) in commands:
        subp = subparsers.add_parser(
            name,
            formatter_class=Formatter,
            description=runner.__doc__ and textwrap.dedent(runner.__doc__))
        adder(subp)
        subp.set_defaults(func=runner)

    return parser
0.000826
def git_available(func):
    """ Check, if a git repository exists in the given folder. """
    def inner(*args):
        os.chdir(APISettings.GIT_DIR)
        if call(['git', 'rev-parse']) == 0:
            return func(*args)
        Shell.fail('There is no git repository!')
        return exit(1)
    return inner
0.003058
def vpn_connections(self):
    """Instance depends on the API version:

    * 2018-04-01: :class:`VpnConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.VpnConnectionsOperations>`
    """
    api_version = self._get_api_version('vpn_connections')
    if api_version == '2018-04-01':
        from .v2018_04_01.operations import VpnConnectionsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
0.009077
def get_resource_url(self):
    """ Get resource complete url """
    name = self.__class__.resource_name
    url = self.__class__.rest_base_url()
    return "%s/%s" % (url, name)
0.010152
def get_export(
    self,
    export_type,
    generate=False,
    wait=False,
    wait_timeout=None,
):
    """
    Downloads a data export over HTTP. Returns a `Requests Response
    <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    object containing the content of the export.

    - **export_type** is a string specifying which type of export should be
      downloaded.

    - **generate** is a boolean specifying whether to generate a new export
      and wait for it to be ready, or to just download the latest export.

    - **wait** is a boolean specifying whether to wait for an in-progress
      export to finish, if there is one. Has no effect if ``generate`` is
      ``True``.

    - **wait_timeout** is the number of seconds to wait if ``wait`` is
      ``True``. Has no effect if ``wait`` is ``False`` or if ``generate``
      is ``True``.

    The returned :py:class:`.Response` object has two additional attributes
    as a convenience for working with the CSV content; **csv_reader** and
    **csv_dictreader**, which are wrappers for :py:meth:`.csv.reader`
    and :py:class:`csv.DictReader` respectively. These wrappers take care
    of correctly decoding the export content for the CSV parser.

    Example::

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_reader():
            print(row)

        classification_export = Project(1234).get_export('classifications')
        for row in classification_export.csv_dictreader():
            print(row)
    """
    if generate:
        self.generate_export(export_type)

    if generate or wait:
        export = self.wait_export(export_type, wait_timeout)
    else:
        export = self.describe_export(export_type)

    if export_type in TALK_EXPORT_TYPES:
        media_url = export['data_requests'][0]['url']
    else:
        media_url = export['media'][0]['src']

    response = requests.get(media_url, stream=True)
    response.csv_reader = functools.partial(
        csv.reader,
        response.iter_lines(decode_unicode=True),
    )
    response.csv_dictreader = functools.partial(
        csv.DictReader,
        response.iter_lines(decode_unicode=True),
    )
    return response
0.001216
def genInterval(self, month=(), day=(), week=(), weekday=(), hour=(), minute=()):
    '''Generate list of config dictionarie(s) that represent an interval of time.
    Used to be passed into add() or remove(). For example::

        genInterval(month=(1,4), week=(1,4))
        # generates a list covering the first to third week in January to March

    Args:
        month (tuple): (start, end) month in a year, from 1 to 12
        week (tuple): (start, end) week in a month, from 1 to 4
        day (tuple): (start, end) day in a month, from 1 to 31
        weekday (tuple): (start, end) weekday in a week, from 0 to 7.
            0 and 7 both represent Sunday
        hour (tuple): (start, end) hour in a day, from 0 to 24
        minute (tuple): (start, end) minute in an hour, from 0 to 59

    Returns:
        list: a list of dictionarie(s) with form [{'Day':12, 'Month':3}, {}, etc]
    '''
    dic = {
        'Month': month,
        'Week': week,
        'Weekday': weekday,
        'Day': day,
        'Hour': hour,
        'Minute': minute
    }
    dic = {k: v for k, v in dic.items() if v != ()}
    # e.g. dic: {'month': (1,5), 'day': (2,4)}
    grandList = []
    for k in dic:
        # e.g. k: 'month', dic[k]: (1,5)
        l = []
        # rangeTuple = (dic[k][0], dic[k][1] + 1)  # e.g. (1,6)
        rangeTuple = dic[k]
        for num in range(rangeTuple[0], rangeTuple[1]):
            # e.g. 1, 2, 3, 4, 5
            l.append({k: num})  # e.g. [{'month': 1}, {'month': 2}]
        grandList.append(l)  # e.g. [[list of month], [list of day]]
    print(grandList)
    # grandList: [[list of month], [list of day]]
    # l: [[a,a1,a2,...], [b,b1,b2,...]]
    # combineDict return: [{a,b}, {a,b1}, {a,b2}, {a1,b}, {a1,b1}, {a1,b2}, {a2,b}, {a2,b1}, {a2,b2}]
    return crossCombine(grandList)
0.006598
def _bool_method_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)

    def na_op(x, y):
        try:
            result = op(x, y)
        except TypeError:
            assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
            if isinstance(y, np.ndarray):
                # bool-bool dtype operations should be OK, should not get here
                assert not (is_bool_dtype(x) and is_bool_dtype(y))
                x = ensure_object(x)
                y = ensure_object(y)
                result = libops.vec_binop(x, y, op)
            else:
                # let null fall thru
                assert lib.is_scalar(y)
                if not isna(y):
                    y = bool(y)
                try:
                    result = libops.scalar_binop(x, y, op)
                except (TypeError, ValueError, AttributeError,
                        OverflowError, NotImplementedError):
                    raise TypeError("cannot compare a dtyped [{dtype}] array "
                                    "with a scalar of type [{typ}]"
                                    .format(dtype=x.dtype,
                                            typ=type(y).__name__))

        return result

    fill_int = lambda x: x.fillna(0)
    fill_bool = lambda x: x.fillna(False).astype(bool)

    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)

        self, other = _align_method_SERIES(self, other, align_asobject=True)
        res_name = get_op_result_name(self, other)

        if isinstance(other, ABCDataFrame):
            # Defer to DataFrame implementation; fail early
            return NotImplemented

        elif isinstance(other, (ABCSeries, ABCIndexClass)):
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)

            ovalues = other.values
            finalizer = lambda x: x

        else:
            # scalars, list, tuple, np.array
            is_other_int_dtype = is_integer_dtype(np.asarray(other))
            if is_list_like(other) and not isinstance(other, np.ndarray):
                # TODO: Can we do this before the is_integer_dtype check?
                # could the is_integer_dtype check be checking the wrong
                # thing?  e.g. other = [[0, 1], [2, 3], [4, 5]]?
                other = construct_1d_object_array_from_listlike(other)

            ovalues = other
            finalizer = lambda x: x.__finalize__(self)

        # For int vs int `^`, `|`, `&` are bitwise operators and return
        # integer dtypes.  Otherwise these are boolean ops
        filler = (fill_int if is_self_int_dtype and is_other_int_dtype
                  else fill_bool)
        res_values = na_op(self.values, ovalues)
        unfilled = self._constructor(res_values,
                                     index=self.index, name=res_name)
        filled = filler(unfilled)
        return finalizer(filled)

    wrapper.__name__ = op_name
    return wrapper
0.001593
def _example_stock_quote(quote_ctx):
    """
    Get a batch of quotes and print: stock name, time, current price, open,
    high, low, previous close, volume, turnover, turnover rate, amplitude,
    and stock status.
    """
    stock_code_list = ["US.AAPL", "HK.00700"]

    # subscribe "QUOTE"
    ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.QUOTE)
    if ret_status != ft.RET_OK:
        print("%s %s: %s" % (stock_code_list, "QUOTE", ret_data))
        exit()

    ret_status, ret_data = quote_ctx.query_subscription()
    if ret_status != ft.RET_OK:
        print(ret_status)
        exit()
    print(ret_data)

    ret_status, ret_data = quote_ctx.get_stock_quote(stock_code_list)
    if ret_status != ft.RET_OK:
        print(ret_data)
        exit()
    quote_table = ret_data

    print("QUOTE_TABLE")
    print(quote_table)
0.00266
def _get_kwsdag(self, goids, go2obj, **kws_all):
    """Get keyword args for a GoSubDag."""
    kws_dag = {}
    # Term Counts for GO Term information score
    tcntobj = self._get_tcntobj(goids, go2obj, **kws_all)  # TermCounts or None
    if tcntobj is not None:
        kws_dag['tcntobj'] = tcntobj
    # GO letters specified by the user
    if 'go_aliases' in kws_all:
        fin_go_aliases = kws_all['go_aliases']
        if os.path.exists(fin_go_aliases):
            go2letter = read_d1_letter(fin_go_aliases)
            if go2letter:
                kws_dag['go2letter'] = go2letter
    return kws_dag
0.004498
def is_blocked(self):
    """:class:`bool`: Checks if the user is blocked.

    .. note::

        This only applies to non-bot accounts.
    """
    r = self.relationship
    if r is None:
        return False
    return r.type is RelationshipType.blocked
0.006944
def simulation(self, ts_length=90, random_state=None):
    """
    Compute a simulated sample path assuming Gaussian shocks.

    Parameters
    ----------
    ts_length : scalar(int), optional(default=90)
        Number of periods to simulate for

    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState
        is used.

    Returns
    -------
    vals : array_like(float)
        A simulation of the model that corresponds to this class

    """
    from scipy.signal import dlsim
    random_state = check_random_state(random_state)

    sys = self.ma_poly, self.ar_poly, 1
    u = random_state.randn(ts_length, 1) * self.sigma
    vals = dlsim(sys, u)[1]

    return vals.flatten()
0.002047
def create_subscription(request):
    "Shows subscriptions options for a new subscriber."
    if request.POST:
        form = NewSubscriptionForm(request.POST)
        if form.is_valid():
            unverified = form.save()

            body = """Please confirm your email address to subscribe to status updates from %(name)s:\n\n%(link)s""" % dict(
                name=conf.NAME,
                link=urlparse.urljoin(conf.BASE_URL,
                                      reverse('overseer:verify_subscription', args=[unverified.ident]))
            )

            # Send verification email
            from_mail = conf.FROM_EMAIL
            if not from_mail:
                from_mail = 'overseer@%s' % request.get_host().split(':', 1)[0]
            send_mail('Confirm Subscription', body, from_mail,
                      [unverified.email], fail_silently=True)

            # Show success page
            return respond('overseer/create_subscription_complete.html', {
                'subscription': unverified,
            }, request)
    else:
        form = NewSubscriptionForm()

    context = csrf(request)
    context.update({
        'form': form,
        'service_list': Service.objects.all(),
    })

    return respond('overseer/create_subscription.html', context, request)
0.005456
def apply(self, img, factor=0, **params):
    """
    Args:
        factor (int): number of times the input will be rotated by 90 degrees.
    """
    return np.ascontiguousarray(np.rot90(img, factor))
0.013575
def _log_board_terrain(self, terrain):
    """
    Tiles are logged counterclockwise beginning from the top-left.
    See module hexgrid (https://github.com/rosshamish/hexgrid) for the tile layout.

    :param terrain: list of catan.board.Terrain objects
    """
    self._logln('terrain: {0}'.format(' '.join(t.value for t in terrain)))
0.00831
def has_regex_namespace_name(self, namespace: str, name: str) -> bool:
    """Check that the namespace is defined as a regular expression and the name matches it."""
    return self.has_regex_namespace(namespace) and self.namespace_to_pattern[namespace].match(name)
0.014652
def loadSVcols(fname, usecols=None, excludecols=None, valuefixer=None,
               colfixer=None, missingvalues=None, fillingvalues=None,
               typeinferer=None, **kwargs):
    """
    Load a separated value text file to a list of column arrays.

    Basically, this function calls loadSVrecs, and transposes the
    string-valued row data returned by that function into a Python list of
    numpy arrays corresponding to columns, each a uniform Python type (int,
    float, str).  Also uses and returns metadata including column names,
    formats, coloring, &c. if these items are determined during the loading
    process.

    **Parameters**

        **fname** : string or file object

            Path (or file object) corresponding to a separated variable
            (CSV) text file.

        **usecols** : sequence of non-negative integers or strings, optional

            Only the columns in *usecols* are loaded and processed.  Columns
            can be described by number, with 0 being the first column; or if
            name metadata is present, then by name; or, if color group
            information is present in the file, then by color group name.
            Default is None, e.g. all columns are loaded.

        **excludecols** : sequence of non-negative integers or strings, optional

            Converse of **usecols**, e.g. all columns EXCEPT those listed
            will be loaded.

        **valuefixer** : callable, or list or dictionary of callables, optional

            These callable(s) are applied to every value in each field.  The
            application is done after line strings are loaded and split into
            fields, but before any typing or missing-value imputation is
            done.  The purpose of the **valuefixer** is to prepare column
            values for typing and imputation.  The valuefixer callable can
            return a string or a python object.  If `valuefixer` is a single
            callable, then that same callable is applied to values in all
            columns; if it is a dictionary, then the keys can be either
            numbers or names and the value for the key will be applied to
            values in the corresponding column with that name or number; if
            it is a list, then the list elements must be in 1-to-1
            correspondence with the loaded columns, and are applied to each
            respectively.

        **colfixer** : callable, or list or dictionary of callables, optional

            Same as **valuefixer**, but instead of being applied to
            individual values, are applied to whole columns (and must return
            columns or numpy arrays of identical length).  Like valuefixer,
            colfixer callable(s) are applied before typing and missing-value
            imputation.

        **missingvalues** : string, callable returning string, or list or dictionary of strings or string-valued callables

            String value(s) to consider as "missing data" and to be replaced
            before typing is done.  If specified as a callable, the callable
            will be applied to the column(s) to determine missing value.  If
            specified as a dictionary, keys are expected to be numbers or
            names of columns, and values are individual missing values for
            those columns (like the **valuefixer** interface).

        **fillingvalues** : string, pair of strings, callable returning string, or list or dictionary of strings or string-valued callables

            Values to be used to replace missing data before typing is done.
            If specified as a single non-callable, non-tuple value, this
            value is used to replace all missing data.  If specified as a
            callable, the callable is applied to the column and returns the
            fill value (e.g. to allow the value to depend on the column
            type).  If specified as a pair of values, the first value acts
            as the missing value and the second as the value to replace
            with.  If a dictionary or list of values, then values are
            applied to corresponding columns.

        NOTE:  all the **missingvalues** and **fillingvalues**
        functionalities can be replicated (and generalized) using the
        **valuefixer** or **colfixer** parameters, by specifying function(s)
        which identify and replace missing values.  While more limited,
        using the **missingvalues** and **fillingvalues** interface is
        easier and gives better performance.

        **typer** : callable taking python list of strings (or other values) and returning 1-d numpy array; or list or dictionary of such callables

            Function used to infer type and convert string lists into typed
            numpy arrays, if no format information has been provided.  When
            applied at all, this function is applied after strings have been
            loaded and split into fields.  This function is expected to
            impute missing values as well, and will override any setting of
            **missingvalues** or **fillingvalues**.  If a callable is
            passed, it is used as typer for all columns, while if a
            dictionary (or list) of callables is passed, they're used on
            corresponding columns.  If needed (e.g. because formatting
            information hasn't been supplied) but **typer** isn't specified
            (at least, for a given column), the constructor defaults to
            using the `utils.DEFAULT_TYPEINFERER` function.

        **kwargs** : keyword argument dictionary of variable length

            Contains various parameters to be passed on to loadSVrecs,
            including **skiprows**, **comments**, **delimiter**,
            **lineterminator**, **uselines**, **metametadata**,
            **namesinheader**, **headerlines**, **linefixer**,
            **delimiter_regex**, **inflines**, **verbosity**, and various
            CSV module parameters like **escapechar**, **quoting**,
            **quotechar**, **doublequote**, **skipinitialspace**.

    **Returns**

        **columns** : list of numpy arrays

            List of arrays corresponding to columns of data.

        **metadata** : dictionary

            Metadata read and constructed during process of reading file.

    **See Also:**

        :func:`tabular.io.loadSV`, :func:`tabular.io.saveSV`,
        :func:`tabular.io.DEFAULT_TYPEINFERER`
    """
    [records, metadata] = loadSVrecs(fname, **kwargs)
    lens = np.array([len(r) for r in records])
    assert (lens == lens[0]).all(), 'Not all records have same number of fields'
    l0 = lens[0]
    processmetadata(metadata, items='types,formats', ncols=l0)
    if usecols is not None:
        getcols = [i if i >= 0 else l0 + i for i in usecols if isinstance(i, int)]
        if 'names' in metadata.keys():
            names = metadata['names']
            getcols += [names.index(c) for c in usecols if c in names]
            if 'coloring' in metadata.keys():
                coloring = metadata['coloring']
                for c in usecols:
                    if c in coloring.keys():
                        getcols += [names.index(n) for n in coloring[c]]
        getcols = uniqify(getcols)
    else:
        if 'names' in metadata.keys():
            names = metadata['names']
            getcols = range(len(names))
        else:
            getcols = range(l0)
        if excludecols is not None:
            dontget = [i if i >= 0 else l0 + i for i in excludecols if isinstance(i, int)]
            if 'names' in metadata.keys():
                dontget += [names.index(c) for c in excludecols if c in names]
                if 'coloring' in metadata.keys():
                    coloring = metadata['coloring']
                    for c in excludecols:
                        if c in coloring.keys():
                            dontget += [names.index(n) for n in coloring[c]]
            getcols = list(set(getcols).difference(dontget))
    getcols.sort()
    if max(getcols) > l0:
        bad = [i for i in getcols if i >= l0]
        getcols = [i for i in getcols if i < l0]
        print 'Too many column names.  Discarding columns,', bad
    metadatacolthreshold(metadata, getcols)
    if 'formats' in metadata.keys() or 'types' in metadata.keys():
        if 'formats' in metadata.keys():
            formats = metadata['formats']
        else:
            formats = metadata['types']
        formats = dict(zip(getcols, formats))
    else:
        formats = dict([(j, None) for j in getcols])
    if 'names' in metadata.keys():
        names = metadata['names']
    else:
        names = None
    valfix = utils.processvfd(valuefixer, numbers=getcols, names=names)
    colfix = utils.processvfd(colfixer, numbers=getcols, names=names)
    missval = utils.processvfd(missingvalues, numbers=getcols, names=names)
    fillval = utils.processvfd(fillingvalues, numbers=getcols, names=names)
    typer = utils.processvfd(typeinferer, numbers=getcols, names=names)
    return [[preparecol(records, j, formats[j], valfix[j], colfix[j],
                        missval[j], fillval[j], typer[j]) for j in getcols],
            metadata]
0.011456
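The options above interact, so a minimal usage sketch may help. The file name, column names, and fixer functions below are hypothetical, and the import path assumes loadSVcols lives in tabular.io alongside loadSV, as the cross-references suggest; the keyword arguments follow the signature documented above.

from tabular.io import loadSVcols

# Hypothetical CSV with a header row naming columns 'name' and 'age'.
columns, metadata = loadSVcols(
    'people.csv',
    usecols=['name', 'age'],           # select columns by name
    valuefixer=lambda v: v.strip(),    # clean every field before typing
    missingvalues='NA',                # treat 'NA' as missing in all columns
    fillingvalues={'age': '0'},        # impute missing ages with '0' before typing
)
print(metadata['names'])               # e.g. ['name', 'age']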
def cast_scalar_indexer(val): """ To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar """ # assumes lib.is_scalar(val) if lib.is_float(val) and val == int(val): return int(val) return val
0.003021
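A quick illustration of the contract (a sketch; relies on pandas' `lib.is_float` check as used above):

cast_scalar_indexer(3.0)    # -> 3, an int, since 3.0 == int(3.0)
cast_scalar_indexer(3.5)    # -> 3.5, non-integral floats pass through
cast_scalar_indexer('key')  # -> 'key', non-float scalars pass through untouched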
def get(self, name_or_klass): """ Gets a specific panel instance. :param name_or_klass: Name or class of the panel to retrieve. :return: The specified panel instance. """ if not is_text_string(name_or_klass): name_or_klass = name_or_klass.__name__ for zone in range(4): try: panel = self._panels[zone][name_or_klass] except KeyError: pass else: return panel raise KeyError(name_or_klass)
0.00365
def newVersion(): """increments version counter in swhlab/version.py""" version=None fname='../swhlab/version.py' with open(fname) as f: raw=f.read().split("\n") for i,line in enumerate(raw): if line.startswith("__counter__"): if version is None: version = int(line.split("=")[1]) raw[i]="__counter__=%d"%(version+1) with open(fname,'w') as f: f.write("\n".join(raw)) print("upgraded from version %03d to %03d"%(version,version+1))
0.018416
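newVersion assumes ../swhlab/version.py contains a line beginning with __counter__; a minimal illustrative file (contents hypothetical) looks like:

# ../swhlab/version.py (illustrative contents)
__version__="0.1.2"
__counter__=41   # rewritten in place to __counter__=42 by newVersion()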
def _get_backtrace(self, frames, inspect_packages=False, depth=0): ''' get a nicely formatted backtrace since -- 7-6-12 frames -- list -- the frame_tuple frames to format inspect_packages -- boolean -- by default, this only prints code of packages that are not in the pythonN directories, which cuts out a lot of the noise; set this to True if you want a full stacktrace depth -- integer -- how deep you want the stack trace to print (i.e., if you only care about the last three calls, pass in depth=3 so you only get the last 3 rows of the stack) return -- list -- each line will be a nicely formatted entry of the backtrace ''' calls = [] for count, f in enumerate(frames, 1): call = self.call_class(f) s = self._get_call_summary(call, inspect_packages=inspect_packages, index=count) calls.append(s) if depth and (count > depth): break # reverse the order on return so most recent is on the bottom return calls[::-1]
0.011119
def get_model(app_dot_model): """ Returns Django model class corresponding to passed-in `app_dot_model` string. This is helpful for preventing circular-import errors in a Django project. Positional Arguments: ===================== - `app_dot_model`: Django's `<app_name>.<model_name>` syntax. For example, the default Django User model would be `auth.User`, where `auth` is the app and `User` is the model. """ try: app, model = app_dot_model.split('.') except ValueError: msg = (f'Passed in value \'{app_dot_model}\' was not in the format ' '`<app_name>.<model_name>`.') raise ValueError(msg) return apps.get_app_config(app).get_model(model)
0.001292
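A usage sketch built on the docstring's own auth.User example; deferring the lookup into a function body is what sidesteps import cycles at module load time:

def get_default_owner():
    # Resolved at call time, not import time, so the auth app can finish loading.
    User = get_model('auth.User')
    return User.objects.first()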
def check_authorization(self): """ Check for the presence of a basic auth Authorization header and whether the credentials contained within it are valid. :return: Whether or not the credentials are valid. :rtype: bool """ try: store = self.__config.get('basic_auth') if store is None: return True auth_info = self.headers.get('Authorization') if not auth_info: return False auth_info = auth_info.split() if len(auth_info) != 2 or auth_info[0] != 'Basic': return False auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding()) username = auth_info.split(':')[0] password = ':'.join(auth_info.split(':')[1:]) password_bytes = password.encode(sys.getdefaultencoding()) if hasattr(self, 'custom_authentication'): if self.custom_authentication(username, password): self.basic_auth_user = username return True return False if username not in store: self.server.logger.warning('received invalid username: ' + username) return False password_data = store[username] if password_data['type'] == 'plain': if password == password_data['value']: self.basic_auth_user = username return True elif hashlib.new(password_data['type'], password_bytes).digest() == password_data['value']: self.basic_auth_user = username return True self.server.logger.warning('received invalid password from user: ' + username) except Exception: pass return False
0.030591
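A sketch of the 'basic_auth' credential store this method expects, derived directly from the checks in the code (user names and passwords are illustrative): each entry carries a 'type' that is either 'plain' or a hashlib algorithm name, and a 'value' holding the plain password or the raw digest bytes.

import hashlib

basic_auth_store = {
    'alice': {'type': 'plain', 'value': 'secret'},
    # Hashed entries are compared via hashlib.new(type, password).digest(),
    # so 'value' must be the raw digest, not a hex string.
    'bob': {'type': 'sha256', 'value': hashlib.sha256(b'hunter2').digest()},
}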
def _load_child_state_models(self, load_meta_data): """Adds models for each child state of the state :param bool load_meta_data: Whether to load the meta data of the child state """ self.states = {} # Create model for each child class child_states = self.state.states for child_state in child_states.values(): # Create hierarchy model_class = get_state_model_class_for_state(child_state) if model_class is not None: self._add_model(self.states, child_state, model_class, child_state.state_id, load_meta_data) else: logger.error("Unknown state type '{type:s}'. Cannot create model.".format(type=type(child_state)))
0.006667
def destroy_webdriver(driver): """ Destroy a driver """ # This is some very flaky code in selenium. Hence the retries # and catch-all exceptions try: retry_call(driver.close, tries=2) except Exception: pass try: driver.quit() except Exception: pass
0.003155
def edges_dump(self): """Dump the entire contents of the edges table.""" self._flush_edges() for ( graph, orig, dest, idx, branch, turn, tick, extant ) in self.sql('edges_dump'): yield ( self.unpack(graph), self.unpack(orig), self.unpack(dest), idx, branch, turn, tick, bool(extant) )
0.004132
def string_class(cls): """Define __unicode__ and __str__ methods on the given class in Python 2. The given class must define a __str__ method returning a unicode string, otherwise a TypeError is raised. Under Python 3, the class is returned as is. """ if not PY3: if '__str__' not in cls.__dict__: raise TypeError('the given class has no __str__ method') cls.__unicode__, cls.__str__ = ( cls.__str__, lambda self: self.__unicode__().encode('utf-8')) return cls
0.00188
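A usage sketch under Python 2, where the decorator actually rewires the methods (class name and string are illustrative):

@string_class
class Greeting(object):
    def __str__(self):  # must return unicode, per the decorator's contract
        return u'h\xe9llo'

g = Greeting()
# Python 2: unicode(g) -> u'h\xe9llo'; str(g) -> 'h\xc3\xa9llo' (UTF-8 bytes)
# Python 3: the class is returned unchanged.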
def float2dec(ft, decimal_digits): """ Convert float (or int) to Decimal (rounding up) with the requested number of decimal digits. Arguments: ft (float, int): Number to convert decimal_digits (int): Number of digits after the decimal point Return: Decimal: Number converted to Decimal """ with decimal.localcontext() as ctx: ctx.rounding = decimal.ROUND_UP places = decimal.Decimal(10)**(-decimal_digits) return decimal.Decimal.from_float(float(ft)).quantize(places)
0.001876
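Two worked examples of the round-up behavior:

>>> float2dec(1 / 3.0, 2)
Decimal('0.34')
>>> float2dec(2, 3)
Decimal('2.000')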
def make_encrypted_token(self, key): """Encrypts the payload. Creates a JWE token with the header as the JWE protected header and the claims as the plaintext. See (:class:`jwcrypto.jwe.JWE`) for details on the exceptions that may be raised. :param key: A (:class:`jwcrypto.jwk.JWK`) key. """ t = JWE(self.claims, self.header) t.add_recipient(key) self.token = t
0.004577
def sendNotification(snmpEngine, authData, transportTarget, contextData, notifyType, *varBinds, **options): """Send SNMP notification. Based on passed parameters, prepares SNMP TRAP or INFORM notification (:RFC:`1905#section-4.2.6`) and schedules its transmission by I/O framework at a later point of time. Parameters ---------- snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :py:class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. notifyType : str Indicates type of notification to be sent. Recognized literal values are *trap* or *inform*. \*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType` One or more objects representing MIB variables to place into SNMP notification. It could be tuples of OID-values or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances or :py:class:`~pysnmp.smi.rfc1902.NotificationType` class instances. SNMP Notification PDU includes some housekeeping items that are required for SNMP to function. Agent information: * SNMPv2-MIB::sysUpTime.0 = <agent uptime> * SNMPv2-SMI::snmpTrapOID.0 = {SNMPv2-MIB::coldStart, ...} Applicable to SNMP v1 TRAP: * SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP> * SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name> * SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID> .. note:: Unless user passes some of these variable-bindings, `.sendNotification()` call will fill in the missing items. User variable-bindings: * SNMPv2-SMI::NOTIFICATION-TYPE * SNMPv2-SMI::OBJECT-TYPE .. note:: The :py:class:`~pysnmp.smi.rfc1902.NotificationType` object ensures properly formed SNMP notification (to comply with the MIB definition). If you build notification PDU out of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects or simple tuples of OID-value objects, it is your responsibility to provide well-formed notification payload. Other Parameters ---------------- \*\*options: * lookupMib: bool `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. * cbFun: callable user-supplied callable that is invoked to pass SNMP response to *INFORM* notification or error to user at a later point of time. The `cbFun` callable is never invoked for *TRAP* notifications. * cbCtx: object user-supplied object passing additional parameters to/from `cbFun` Notes ----- User-supplied `cbFun` callable must have the following call signature: * snmpEngine (:py:class:`~pysnmp.hlapi.SnmpEngine`): Class instance representing SNMP engine. * sendRequestHandle (int): Unique request identifier. Can be used for matching multiple ongoing *INFORM* notifications with received responses. * errorIndication (str): True value indicates SNMP engine error. * errorStatus (str): True value indicates SNMP PDU error. * errorIndex (int): Non-zero value refers to `varBinds[errorIndex-1]` * varBinds (tuple): A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing MIB variables returned in SNMP response in exactly the same order as `varBinds` in request. 
* `cbCtx` : Original user-supplied object. Returns ------- sendRequestHandle : int Unique request identifier. Can be used for matching received responses with ongoing *INFORM* requests. Returns `None` for *TRAP* notifications. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Examples -------- >>> from pysnmp.hlapi.asyncore import * >>> >>> snmpEngine = SnmpEngine() >>> sendNotification( ... snmpEngine, ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 162)), ... ContextData(), ... 'trap', ... NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')), ... ) >>> snmpEngine.transportDispatcher.runDispatcher() >>> """ # noinspection PyShadowingNames def __cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx): lookupMib, cbFun, cbCtx = cbCtx varBinds = VB_PROCESSOR.unmakeVarBinds( snmpEngine.cache, varBinds, lookupMib) return cbFun and cbFun( snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx) notifyName = LCD.configure(snmpEngine, authData, transportTarget, notifyType, contextData.contextName) varBinds = VB_PROCESSOR.makeVarBinds(snmpEngine.cache, varBinds) return ntforg.NotificationOriginator().sendVarBinds( snmpEngine, notifyName, contextData.contextEngineId, contextData.contextName, varBinds, __cbFun, (options.get('lookupMib', True), options.get('cbFun'), options.get('cbCtx')))
0.001859
def expect_column_value_lengths_to_equal(self, column, value, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None ): """Expect column entries to be strings with length equal to the provided value. This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError. expect_column_value_lengths_to_equal is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): \ The column name. value (int or None): \ The expected value for a column entry length. Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly percent of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \ For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_value_lengths_to_be_between """
0.007715
def reload_including_local(module): """ Reload a module. If it isn't found, try to include the local service directory. This must be called from a thread that has acquired the import lock. :param module: the module to reload. """ try: reload(module) except ImportError: # This can happen if the module was loaded in the immediate script # directory. Add the service path and try again. if not hasattr(cherrypy.thread_data, "modulepath"): raise path = os.path.abspath(cherrypy.thread_data.modulepath) root = os.path.abspath(cherrypy.config.get("webroot")) if path not in sys.path and (path == root or path.startswith(root + os.path.sep)): oldpath = sys.path try: sys.path = [path] + sys.path reload(module) finally: sys.path = oldpath else: raise
0.002099
def action_verify_checksum(self, ids): """Run fixity checks on the selected file instances.""" try: count = 0 for file_id in ids: f = FileInstance.query.filter_by( id=uuid.UUID(file_id)).one_or_none() if f is None: raise ValueError(_("Cannot find file instance.")) verify_checksum.delay(file_id) count += 1 if count > 0: flash(_('Fixity check(s) sent to queue.'), 'success') except Exception as exc: if not self.handle_view_exception(exc): raise current_app.logger.exception(str(exc)) # pragma: no cover flash(_('Failed to run fixity checks.'), 'error')
0.002591
def digest(self): """Return final digest value. """ if self._digest is None: if self._buf: self._add_block(self._buf) self._buf = EMPTY ctx = self._blake2s(0, 1, True) for t in self._thread: ctx.update(t.digest()) self._digest = ctx.digest() return self._digest
0.005141
def store_context(context): """Persist a furious.context.Context object to the datastore by loading it into a FuriousContext ndb.Model. """ logging.debug("Attempting to store Context %s.", context.id) entity = FuriousContext.from_context(context) # TODO: Handle exceptions and retries here. marker = FuriousCompletionMarker(id=context.id) key, _ = ndb.put_multi((entity, marker)) logging.debug("Stored Context with key: %s.", key) return key
0.002058
def _cast(self, value): """ Cast the specified value to the type of this setting. """ if self.type != 'text': value = utils.cast(self.TYPES.get(self.type)['cast'], value) return value
0.009132
def _message_to_payload(cls, message): '''Returns a Python object or a ProtocolError.''' try: return json.loads(message.decode()) except UnicodeDecodeError: message = 'messages must be encoded in UTF-8' except json.JSONDecodeError: message = 'invalid JSON' raise cls._error(cls.PARSE_ERROR, message, True, None)
0.005168
def item_sectie_adapter(obj, request): """ Adapter for rendering an object of :class: `crabpy.gateway.capakey.Sectie` to json. """ return { 'id': obj.id, 'afdeling': { 'id': obj.afdeling.id, 'naam': obj.afdeling.naam, 'gemeente': { 'id': obj.afdeling.gemeente.id, 'naam': obj.afdeling.gemeente.naam }, }, 'centroid': obj.centroid, 'bounding_box': obj.bounding_box }
0.001965
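The (obj, request) signature matches the adapter convention of Pyramid's pluggable JSON renderer, so registration presumably looks something like the following sketch (config is the Pyramid Configurator; the import path for Sectie follows the docstring):

from pyramid.renderers import JSON
from crabpy.gateway.capakey import Sectie

json_renderer = JSON()
json_renderer.add_adapter(Sectie, item_sectie_adapter)
config.add_renderer('json', json_renderer)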
def status(self, status: int) -> None: """ Set the response status code and the corresponding status message. """ self._status = status self._message = STATUS_CODES[status]
0.013158
def request_signed_by_signing_keys(keyjar, msreq, iss, lifetime, kid=''): """ A metadata statement signing request with 'signing_keys' signed by one of the keys in 'signing_keys'. :param keyjar: A KeyJar instance with the private signing key :param msreq: Metadata statement signing request. A MetadataStatement instance. :param iss: Issuer of the signing request, also the owner of the signing keys. :param lifetime: Lifetime of the signed JWT, in seconds. :param kid: Key ID of the signing key to use, if a specific key is required. :return: Signed JWT where the body is the metadata statement """ try: jwks_to_keyjar(msreq['signing_keys'], iss) except KeyError: jwks = keyjar.export_jwks(issuer=iss) msreq['signing_keys'] = jwks _jwt = JWT(keyjar, iss=iss, lifetime=lifetime) return _jwt.pack(owner=iss, kid=kid, payload=msreq.to_dict())
0.003755
def t_binaryValue(t): r'[+-]?[0-9]+[bB]' # We must match [0-9], and then check the validity of the binary number. # If we matched only [0-1], the invalid binary number "2b" would instead match as 'decimalValue' 2 followed by 'IDENTIFIER' b. if re.search(r'[2-9]', t.value) is not None: msg = _format("Invalid binary number {0!A}", t.value) t.lexer.last_msg = msg t.type = 'error' # Setting the type to 'error' causes the value to be automatically skipped else: t.value = int(t.value[0:-1], 2) return t
0.00188
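An illustration of the rule's effect, calling it directly on a minimal stand-in for PLY's LexToken (normally PLY drives the regex match itself; the stand-in only carries the attributes the rule touches):

class _Tok(object):
    pass

t = _Tok()
t.lexer = _Tok()       # only consulted on the error path
t.type = 'binaryValue'
t.value = '101b'
t_binaryValue(t)
assert t.value == 5    # int('101', 2)

bad = _Tok()
bad.lexer = _Tok()
bad.type = 'binaryValue'
bad.value = '2b'       # contains a digit outside [0-1]
t_binaryValue(bad)
assert bad.type == 'error'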
def email(self, comment, content_object, request): """ Overwritten for a better email notification. """ if not self.email_notification: return send_comment_posted(comment, request)
0.008584
def html(self) -> str: """Return string representation of this. Used in start tag of HTML representation of the Element node. """ if self._owner and self.name in self._owner._special_attr_boolean: return self.name else: value = self.value if isinstance(value, str): value = html_.escape(value) return '{name}="{value}"'.format(name=self.name, value=value)
0.004348
def gradient_factory(name): """Create gradient `Functional` for some ufuncs.""" if name == 'sin': def gradient(self): """Return the gradient operator.""" return cos(self.domain) elif name == 'cos': def gradient(self): """Return the gradient operator.""" return -sin(self.domain) elif name == 'tan': def gradient(self): """Return the gradient operator.""" return 1 + square(self.domain) * self elif name == 'sqrt': def gradient(self): """Return the gradient operator.""" return FunctionalQuotient(ConstantFunctional(self.domain, 0.5), self) elif name == 'square': def gradient(self): """Return the gradient operator.""" return ScalingFunctional(self.domain, 2.0) elif name == 'log': def gradient(self): """Return the gradient operator.""" return reciprocal(self.domain) elif name == 'exp': def gradient(self): """Return the gradient operator.""" return self elif name == 'reciprocal': def gradient(self): """Return the gradient operator.""" return FunctionalQuotient(ConstantFunctional(self.domain, -1.0), square(self.domain)) elif name == 'sinh': def gradient(self): """Return the gradient operator.""" return cosh(self.domain) elif name == 'cosh': def gradient(self): """Return the gradient operator.""" return sinh(self.domain) else: # Fallback to default gradient = Functional.gradient return gradient
0.000565
def logs_sidecars_jobs(job_uuid: str, job_name: str, log_lines: Optional[Union[str, Iterable[str]]]) -> None: """Signal handling for sidecars logs.""" handle_job_logs(job_uuid=job_uuid, job_name=job_name, log_lines=log_lines) publisher.publish_job_log( log_lines=log_lines, job_uuid=job_uuid, job_name=job_name, send_task=False )
0.00216
def main(): """Filters the document AST.""" # pylint: disable=global-statement global PANDOCVERSION global AttrTable # Get the output format and document fmt = args.fmt doc = json.loads(STDIN.read()) # Initialize pandocxnos # pylint: disable=too-many-function-args PANDOCVERSION = pandocxnos.init(args.pandocversion, doc) # Element primitives AttrTable = elt('Table', 6) # Chop up the doc meta = doc['meta'] if PANDOCVERSION >= '1.18' else doc[0]['unMeta'] blocks = doc['blocks'] if PANDOCVERSION >= '1.18' else doc[1:] # Process the metadata variables process(meta) # First pass detach_attrs_table = detach_attrs_factory(Table) insert_secnos = insert_secnos_factory(Table) delete_secnos = delete_secnos_factory(Table) altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta), [attach_attrs_table, insert_secnos, process_tables, delete_secnos, detach_attrs_table], blocks) # Second pass process_refs = process_refs_factory(references.keys()) replace_refs = replace_refs_factory(references, use_cleveref_default, False, plusname if not capitalize else [name.title() for name in plusname], starname, 'table') altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta), [repair_refs, process_refs, replace_refs], altered) # Insert supporting TeX if fmt in ['latex']: rawblocks = [] if has_unnumbered_tables: rawblocks += [RawBlock('tex', TEX0), RawBlock('tex', TEX1), RawBlock('tex', TEX2)] if captionname != 'Table': rawblocks += [RawBlock('tex', TEX3 % captionname)] insert_rawblocks = insert_rawblocks_factory(rawblocks) altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta), [insert_rawblocks], altered) # Update the doc if PANDOCVERSION >= '1.18': doc['blocks'] = altered else: doc = doc[:1] + altered # Dump the results json.dump(doc, STDOUT) # Flush stdout STDOUT.flush()
0.000812
def main(): """ Sends API AT commands to read addressing parameters (DH, DL, MY, CE) from an XBee Series 1 and prints each response """ try: # Open serial port ser = serial.Serial('/dev/ttyUSB0', 9600) # Create XBee Series 1 object xbee = XBee(ser) # Send AT packet xbee.send('at', frame_id='A', command='DH') # Wait for response response = xbee.wait_read_frame() print(response) # Send AT packet xbee.send('at', frame_id='B', command='DL') # Wait for response response = xbee.wait_read_frame() print(response) # Send AT packet xbee.send('at', frame_id='C', command='MY') # Wait for response response = xbee.wait_read_frame() print(response) # Send AT packet xbee.send('at', frame_id='D', command='CE') # Wait for response response = xbee.wait_read_frame() print(response) except KeyboardInterrupt: pass finally: ser.close()
0.001889