Columns: code (string, length 75 to 104k characters) and docstring (string, length 1 to 46.9k characters).
def get_es_requirements(es_version): '''Get the requirements string for elasticsearch-py library Returns a suitable requirements string for the elasticsearch-py library according to the elasticsearch version to be supported (es_version)''' # accepts version range in the form `2.x` es_version = es_version.replace('x', '0') es_version = list(map(int, es_version.split('.'))) if es_version >= [6]: return ">=6.0.0, <7.0.0" elif es_version >= [5]: return ">=5.0.0, <6.0.0" elif es_version >= [2]: return ">=2.0.0, <3.0.0" elif es_version >= [1]: return ">=1.0.0, <2.0.0" else: return "<1.0.0"
Get the requirements string for elasticsearch-py library Returns a suitable requirements string for the elasticsearch-py library according to the elasticsearch version to be supported (es_version)
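A quick usage sketch of the helper above (the loop and the requirement-string concatenation are illustrative, not part of the original code):

# Assumes get_es_requirements from the row above is in scope.
for version in ('1.x', '2.x', '5.x', '6.x'):
    print(version, '->', 'elasticsearch' + get_es_requirements(version))
# 1.x -> elasticsearch>=1.0.0, <2.0.0
# ...
# 6.x -> elasticsearch>=6.0.0, <7.0.0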
def help_center_article_subscriptions(self, article_id, locale=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-article-subscriptions" api_path = "/api/v2/help_center/articles/{article_id}/subscriptions.json" api_path = api_path.format(article_id=article_id) if locale: api_opt_path = "/api/v2/help_center/{locale}/articles/{article_id}/subscriptions.json" api_path = api_opt_path.format(article_id=article_id, locale=locale) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-article-subscriptions
def on_create_view(self): """ Trigger the click """ d = self.declaration changed = not d.condition if changed: d.condition = True view = self.get_view() if changed: self.ready.set_result(True) return view
Trigger the click
def required_fields(self): """The normal required fields (e.g., no magic fields like _id are included)""" return {f:v for f, v in self.normal_fields.items() if v.required}
The normal required fields (e.g., no magic fields like _id are included)
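A minimal standalone sketch of the same filtering pattern, using a made-up Field stand-in rather than the library's real field descriptors:

from collections import namedtuple

Field = namedtuple("Field", ["required"])  # hypothetical stand-in for the real field descriptor
normal_fields = {"name": Field(required=True), "nickname": Field(required=False)}
required = {f: v for f, v in normal_fields.items() if v.required}
print(list(required))  # ['name']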
def updateVocalAuto(self, component, files): """Updates the auto-parameter with selected *component* to have *files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected component (the one given). If length of files < 1, removes the auto-parameter from the model. :param component: Component that the auto-parameter is modifying :type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` :param files: list of file names to act as the auto-parameter list :type files: list<str> """ auto_model = self.model().autoParams() row = auto_model.fileParameter(component) if len(files) > 1: clean_component = self.model().data(self.model().indexByComponent(component), AbstractDragView.DragRole) p = {'parameter' : 'filename', 'names' : files, 'selection' : [clean_component] } if row is None: auto_model.insertItem(auto_model.index(0,0), p) else: auto_model.setData(auto_model.index(row,0),p) elif row is not None: # remove the autoparameter auto_model.removeRow(row) # if row is none and len(files) == 1 then we don't need to do anything self.countChanged.emit()
Updates the auto-parameter with selected *component* to have *files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected component (the one given). If length of files < 1, removes the auto-parameter from the model. :param component: Component that the auto-parameter is modifying :type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` :param files: list of file names to act as the auto-parameter list :type files: list<str>
def parse_annotation(code): """Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@') """ module = ast.parse(code) assert type(module) is ast.Module, 'internal error #1' assert len(module.body) == 1, 'Annotation contains more than one expression' assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' return module.body[0]
Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@')
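What the helper produces for a typical annotation body (the annotation text below is hypothetical; only the standard-library ast module is used):

import ast

# What parse_annotation does for an annotation body such as "require(x > 0)"
# (the leading '@' has already been stripped by the caller):
module = ast.parse("require(x > 0)")
expr = module.body[0]                 # the single ast.Expr node that gets returned
print(type(expr).__name__)            # Expr
print(type(expr.value).__name__)      # Call -- the require(...) call expression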
def value_validate(self, value): """ Converts the input single value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Returns the converted value. Subclasses should override this. """ if not isinstance(value, datetime.datetime): raise tldap.exceptions.ValidationError("is invalid date time") return value
Converts the input single value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Returns the converted value. Subclasses should override this.
def _write( df, filename=None, schema='newick', taxon_col='uid', taxon_annotations=[], node_col='uid', node_annotations=[], branch_lengths=True, **kwargs ): """Write a phylopandas tree DataFrame to various formats. Parameters ---------- df : DataFrame DataFrame containing tree data. filename : str filepath to write out tree. If None, will return string. schema : str tree format to write out. taxon_col : str (optional) Column in dataframe to label the taxon. If None, the index will be used. taxon_annotations : str List of columns to annotate in the tree taxon. node_col : str (optional) Column in dataframe to label the nodes. If None, the index will be used. node_annotations : str List of columns to annotate in the node taxon. branch_lengths : bool If True, includes branch lengths. """ tree = _pandas_df_to_dendropy_tree( df, taxon_col=taxon_col, taxon_annotations=taxon_annotations, node_col=node_col, node_annotations=node_annotations, branch_lengths=branch_lengths, ) # Write out format print(schema) if filename is not None: tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs) else: return tree.as_string(schema=schema)
Write a phylopandas tree DataFrame to various formats. Parameters ---------- df : DataFrame DataFrame containing tree data. filename : str filepath to write out tree. If None, will return string. schema : str tree format to write out. taxon_col : str (optional) Column in dataframe to label the taxon. If None, the index will be used. taxon_annotations : str List of columns to annotate in the tree taxon. node_col : str (optional) Column in dataframe to label the nodes. If None, the index will be used. node_annotations : str List of columns to annotate in the node taxon. branch_lengths : bool If True, includes branch lengths.
def remove(self, items, working_tree=False, **kwargs): """Remove the given items from the index and optionally from the working tree as well. :param items: Multiple types of items are supported which may be freely mixed. - path string Remove the given path at all stages. If it is a directory, you must specify the r=True keyword argument to remove all file entries below it. If absolute paths are given, they will be converted to a path relative to the git repository directory containing the working tree The path string may include globs, such as *.c. - Blob Object Only the path portion is used in this case. - BaseIndexEntry or compatible type The only relevant information here is the path. The stage is ignored. :param working_tree: If True, the entry will also be removed from the working tree, physically removing the respective file. This may fail if there are uncommitted changes in it. :param kwargs: Additional keyword arguments to be passed to git-rm, such as 'r' to allow recursive removal of :return: List(path_string, ...) list of repository relative paths that have been removed effectively. This is interesting to know in case you have provided a directory or globs. Paths are relative to the repository. """ args = [] if not working_tree: args.append("--cached") args.append("--") # preprocess paths paths = self._items_to_rela_paths(items) removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines() # process output to gain proper paths # rm 'path' return [p[4:-1] for p in removed_paths]
Remove the given items from the index and optionally from the working tree as well. :param items: Multiple types of items are supported which may be freely mixed. - path string Remove the given path at all stages. If it is a directory, you must specify the r=True keyword argument to remove all file entries below it. If absolute paths are given, they will be converted to a path relative to the git repository directory containing the working tree The path string may include globs, such as *.c. - Blob Object Only the path portion is used in this case. - BaseIndexEntry or compatible type The only relevant information here is the path. The stage is ignored. :param working_tree: If True, the entry will also be removed from the working tree, physically removing the respective file. This may fail if there are uncommitted changes in it. :param kwargs: Additional keyword arguments to be passed to git-rm, such as 'r' to allow recursive removal of :return: List(path_string, ...) list of repository relative paths that have been removed effectively. This is interesting to know in case you have provided a directory or globs. Paths are relative to the repository.
def _compute_total_chunks(self, chunk_size): # type: (Descriptor, int) -> int """Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks """ try: if self._src_block_list is not None: blen = len(self._src_block_list) if blen > 0: return blen else: return 1 else: return int(math.ceil(self._src_ase.size / chunk_size)) except ZeroDivisionError: return 1
Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks
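The non-block-list branch is just a ceiling division of the source size by the chunk size; a worked sketch with made-up numbers:

import math

size, chunk_size = 10 * 1024 * 1024, 4 * 1024 * 1024   # 10 MiB source, 4 MiB chunks
print(int(math.ceil(size / chunk_size)))                # 3 chunks (two full + one partial)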
def persist(self): """ Banana banana """ if self.app.dry: return for proj in self.subprojects.values(): proj.persist()
Banana banana
def locate(cls): """Locates the active PEX bootstrap. :rtype: :class:`Bootstrap` """ if cls._INSTANCE is None: bootstrap_path = __file__ module_import_path = __name__.split('.') # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import # path pex.bootstrap; so we walk back through all the module components of our import path to # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example). for _ in module_import_path: bootstrap_path = os.path.dirname(bootstrap_path) cls._INSTANCE = cls(sys_path_entry=bootstrap_path) return cls._INSTANCE
Locates the active PEX bootstrap. :rtype: :class:`Bootstrap`
def auto_track_url(track): """ Automatically sets the bigDataUrl for `track`. Requirements: * the track must be fully connected, such that its root is a Hub object * the root Hub object must have the Hub.url attribute set * the track must have the `source` attribute set """ hub = track.root(cls=Hub) if hub is None: raise ValueError( "track is not fully connected because the root is %s" % repr(hub)) if hub.url is None: raise ValueError("hub.url is not set") if track.source is None: raise ValueError("track.source is not set")
Automatically sets the bigDataUrl for `track`. Requirements: * the track must be fully connected, such that its root is a Hub object * the root Hub object must have the Hub.url attribute set * the track must have the `source` attribute set
def btc_tx_witness_strip( tx_serialized ): """ Strip the witness information from a serialized transaction """ if not btc_tx_is_segwit(tx_serialized): # already stripped return tx_serialized tx = btc_tx_deserialize(tx_serialized) for inp in tx['ins']: del inp['witness_script'] tx_stripped = btc_tx_serialize(tx) return tx_stripped
Strip the witness information from a serialized transaction
def delete(self, *args, **kwargs): """ This method implements retries for object deletion. """ count = 0 max_retries=3 while True: try: return super(BaseModel, self).delete(*args, **kwargs) except django.db.utils.OperationalError: if count >= max_retries: raise count += 1
This method implements retries for object deletion.
def unquoted(self): """ Return *key* with one level of double quotes removed. Redshift stores some identifiers without quotes in internal tables, even though the name must be quoted elsewhere. In particular, this happens for tables named as a keyword. """ key = str(self) if key.startswith('"') and key.endswith('"'): return key[1:-1] return key
Return *key* with one level of double quotes removed. Redshift stores some identifiers without quotes in internal tables, even though the name must be quoted elsewhere. In particular, this happens for tables named as a keyword.
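A standalone restatement of the quote-stripping rule, so the behavior can be seen outside the class (the function below is a sketch, not the library's own):

def unquoted(key):
    # Strip one surrounding pair of double quotes, if present.
    if key.startswith('"') and key.endswith('"'):
        return key[1:-1]
    return key

print(unquoted('"table"'))    # table
print(unquoted('my_schema'))  # my_schema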
def mutate(self,p_i,func_set,term_set): #, max_depth=2 """point mutation, addition, removal""" self.point_mutate(p_i,func_set,term_set)
point mutation, addition, removal
def createGroups(self, configFiles, dateTimeFormat=None): """Parses a JSON configuration file to create groups. Args: configFiles (list): A list of JSON files on disk containing configuration data for creating groups. dateTimeFormat (str): A valid date formatting directive, as understood by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e., ``'%Y-%m-%d %H:%M'``. """ groupInfo = None groupFile = None iconPath = None startTime = None thumbnail = None result = None config = None sciptPath = None orgTools = None if dateTimeFormat is None: dateTimeFormat = '%Y-%m-%d %H:%M' scriptStartTime = datetime.datetime.now() try: print ("********************Create Groups********************") print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat)) if self.securityhandler is None or \ self.securityhandler.valid == False: print ("Login required") else: orgTools = orgtools.orgtools(securityinfo=self) if orgTools is None: print ("Error creating orgtools") else: for configFile in configFiles: config = common.init_config_json(config_file=configFile) if config is not None: startTime = datetime.datetime.now() print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))) groupInfo = config['Groups'] groupFile = groupInfo['GroupInfo'] iconPath = groupInfo['IconPath'] if os.path.isfile(groupFile): with open(groupFile, 'rb') as csvfile: for row in csv.DictReader(csvfile,dialect='excel'): if os.path.isfile(os.path.join(iconPath,row['thumbnail'])): thumbnail = os.path.join(iconPath,row['thumbnail']) if not os.path.isabs(thumbnail): sciptPath = os.getcwd() thumbnail = os.path.join(sciptPath,thumbnail) result = orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \ isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly'],thumbnail=thumbnail) else: result = orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \ isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly']) if result is None: pass else: print ("Group created: " + result.title) print ("Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))) else: print ("Config %s not found" % configFile) except(TypeError,ValueError,AttributeError) as e: print (e) except (common.ArcRestHelperError) as e: print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) except Exception as e: if (reportToolsInstalled): if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)): print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % filename) print ("with error message: %s" % synerror) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % 
filename) print ("with error message: %s" % synerror) finally: print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)) print ("###############Create Groups Completed#################") print ("") #if orgTools is not None: #orgTools.dispose() groupInfo = None groupFile = None iconPath = None startTime = None thumbnail = None result = None config = None sciptPath = None orgTools = None del groupInfo del groupFile del iconPath del startTime del thumbnail del result del config del sciptPath del orgTools gc.collect()
Parses a JSON configuration file to create groups. Args: configFiles (list): A list of JSON files on disk containing configuration data for creating groups. dateTimeFormat (str): A valid date formatting directive, as understood by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e., ``'%Y-%m-%d %H:%M'``.
def get_char_weights(doc_weighted_spans, preserve_density=None): # type: (DocWeightedSpans, Optional[bool]) -> np.ndarray """ Return character weights for a text document with highlighted features. If preserve_density is True, then color for longer fragments will be less intensive than for shorter fragments, so that "sum" of intensities will correspond to feature weight. If preserve_density is None, then it's value is taken from the corresponding attribute of doc_weighted_spans. """ if preserve_density is None: preserve_density = doc_weighted_spans.preserve_density char_weights = np.zeros(len(doc_weighted_spans.document)) feature_counts = Counter(f for f, _, __ in doc_weighted_spans.spans) for feature, spans, weight in doc_weighted_spans.spans: for start, end in spans: # start can be -1 for char_wb at the start of the document. start = max(0, start) if preserve_density: weight /= (end - start) weight /= feature_counts[feature] char_weights[start:end] += weight return char_weights
Return character weights for a text document with highlighted features. If preserve_density is True, then color for longer fragments will be less intensive than for shorter fragments, so that "sum" of intensities will correspond to feature weight. If preserve_density is None, then it's value is taken from the corresponding attribute of doc_weighted_spans.
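A tiny numeric sketch of the density-preserving spread the loop performs (NumPy only; the DocWeightedSpans container and the per-feature counting are omitted):

import numpy as np

doc = "good movie"
weight, start, end = 1.0, 0, 4                        # feature "good" covers characters 0..3
char_weights = np.zeros(len(doc))
char_weights[start:end] += weight / (end - start)     # preserve_density: 0.25 per character
print(char_weights)                                   # [0.25 0.25 0.25 0.25 0. 0. 0. 0. 0. 0.]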
def interactive(plugin): '''A run mode for the CLI that runs the plugin in a loop based on user input. ''' items = [item for item in once(plugin) if not item.get_played()] parent_stack = [] # Keep track of parents so we can have a '..' option selected_item = get_user_choice(items) while selected_item is not None: if parent_stack and selected_item == parent_stack[-1]: # User selected the parent item, remove from list parent_stack.pop() else: # User selected non parent item, add current url to parent stack parent_stack.append(ListItem.from_dict(label='..', path=plugin.request.url)) patch_plugin(plugin, selected_item.get_path()) items = [item for item in once(plugin, parent_stack=parent_stack) if not item.get_played()] selected_item = get_user_choice(items)
A run mode for the CLI that runs the plugin in a loop based on user input.
def get_kubernetes_configuration(self, mount_point='kubernetes'): """GET /auth/<mount_point>/config :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes". :type mount_point: str. :return: Parsed JSON response from the config GET request :rtype: dict. """ url = '/v1/auth/{0}/config'.format(mount_point) return self._adapter.get(url).json()
GET /auth/<mount_point>/config :param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes". :type mount_point: str. :return: Parsed JSON response from the config GET request :rtype: dict.
def setQuickColor( self, color ): """ Sets the quick color for the palette to the given color. :param color | <QColor> """ colorset = XPaletteColorSet() colorset.setPalette(QPalette(color)) self.setColorSet(colorset)
Sets the quick color for the palette to the given color. :param color | <QColor>
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".", make_dir_if_not_present=True, subfolder=None, sanitize=False, include_cif=False, **kwargs): """ Batch write vasp input for a sequence of structures to output_dir, following the format output_dir/{group}/{formula}_{number}. Args: structures ([Structure]): Sequence of Structures. vasp_input_set (VaspInputSet): VaspInputSet class that creates vasp input files from structures. Note that a class should be supplied. Defaults to MPRelaxSet. output_dir (str): Directory to output files. Defaults to current directory ".". make_dir_if_not_present (bool): Create the directory if not present. Defaults to True. subfolder (callable): Function to create subdirectory name from structure. Defaults to simply "formula_count". sanitize (bool): Boolean indicating whether to sanitize the structure before writing the VASP input files. Sanitized output are generally easier for viewing and certain forms of analysis. Defaults to False. include_cif (bool): Whether to output a CIF as well. CIF files are generally better supported in visualization programs. \\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class in addition to structure. """ output_dir = Path(output_dir) for i, s in enumerate(structures): formula = re.sub(r'\s+', "", s.formula) if subfolder is not None: subdir = subfolder(s) d = output_dir / subdir else: d = output_dir / '{}_{}'.format(formula, i) if sanitize: s = s.copy(sanitize=True) v = vasp_input_set(s, **kwargs) v.write_input(str(d), make_dir_if_not_present=make_dir_if_not_present, include_cif=include_cif)
Batch write vasp input for a sequence of structures to output_dir, following the format output_dir/{group}/{formula}_{number}. Args: structures ([Structure]): Sequence of Structures. vasp_input_set (VaspInputSet): VaspInputSet class that creates vasp input files from structures. Note that a class should be supplied. Defaults to MPRelaxSet. output_dir (str): Directory to output files. Defaults to current directory ".". make_dir_if_not_present (bool): Create the directory if not present. Defaults to True. subfolder (callable): Function to create subdirectory name from structure. Defaults to simply "formula_count". sanitize (bool): Boolean indicating whether to sanitize the structure before writing the VASP input files. Sanitized output are generally easier for viewing and certain forms of analysis. Defaults to False. include_cif (bool): Whether to output a CIF as well. CIF files are generally better supported in visualization programs. \\*\\*kwargs: Additional kwargs are passed to the vasp_input_set class in addition to structure.
def parse_config(self, device=None, profile=None, native=None, attrs=None): """ Parse native configuration and load it into the corresponding models. Only models that have been added to the root object will be parsed. If ``native`` is passed to the method that's what we will parse, otherwise, we will use the ``device`` to retrieve it. Args: device (NetworkDriver): Device to load the configuration from. profile (list): Profiles that the device supports. If no ``profile`` is passed it will be read from ``device``. native (list of strings): Native configuration to parse. Examples: >>> # Load from device >>> running_config = napalm_yang.base.Root() >>> running_config.add_model(napalm_yang.models.openconfig_interfaces) >>> running_config.parse_config(device=d) >>> # Load from file >>> with open("junos.config", "r") as f: >>> config = f.read() >>> >>> running_config = napalm_yang.base.Root() >>> running_config.add_model(napalm_yang.models.openconfig_interfaces) >>> running_config.parse_config(native=[config], profile="junos") """ if attrs is None: attrs = self.elements().values() for v in attrs: parser = Parser( v, device=device, profile=profile, native=native, is_config=True ) parser.parse()
Parse native configuration and load it into the corresponding models. Only models that have been added to the root object will be parsed. If ``native`` is passed to the method that's what we will parse, otherwise, we will use the ``device`` to retrieve it. Args: device (NetworkDriver): Device to load the configuration from. profile (list): Profiles that the device supports. If no ``profile`` is passed it will be read from ``device``. native (list of strings): Native configuration to parse. Examples: >>> # Load from device >>> running_config = napalm_yang.base.Root() >>> running_config.add_model(napalm_yang.models.openconfig_interfaces) >>> running_config.parse_config(device=d) >>> # Load from file >>> with open("junos.config", "r") as f: >>> config = f.read() >>> >>> running_config = napalm_yang.base.Root() >>> running_config.add_model(napalm_yang.models.openconfig_interfaces) >>> running_config.parse_config(native=[config], profile="junos")
def read_node(self, name, **kwargs): # noqa: E501 """read_node # noqa: E501 read the specified Node # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_node(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Node (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Node If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_node_with_http_info(name, **kwargs) # noqa: E501 else: (data) = self.read_node_with_http_info(name, **kwargs) # noqa: E501 return data
read_node # noqa: E501 read the specified Node # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_node(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Node (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1Node If the method is called asynchronously, returns the request thread.
def list_database_names(self, session=None): """Get a list of the names of all databases on the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6 """ return [doc["name"] for doc in self.list_databases(session, nameOnly=True)]
Get a list of the names of all databases on the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionadded:: 3.6
def emitRecordMiddleClicked(self, item): """ Emits the record middle-clicked signal for the given item, provided the signals are not currently blocked. :param item | <QTreeWidgetItem> """ # emit that the record has been middle clicked if isinstance(item, XOrbRecordItem) and not self.signalsBlocked(): self.recordMiddleClicked.emit(item.record())
Emits the record middle-clicked signal for the given item, provided the signals are not currently blocked. :param item | <QTreeWidgetItem>
def toggle_autojump(): """Toggles Autojump""" if not autojump_enabled(): with open(AUTOJUMP_FILE, 'w+') as ajfile: ajfile.write("enabled") else: os.remove(AUTOJUMP_FILE)
Toggles Autojump
def all(self, audience=None, page=None, per_page=None, include_totals=False, client_id=None): """Retrieves all client grants. Args: audience (str, optional): URL encoded audience of a Resource Server to filter page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. client_id (string, optional): The id of a client to filter See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants """ params = { 'audience': audience, 'page': page, 'per_page': per_page, 'include_totals': str(include_totals).lower(), 'client_id': client_id, } return self.client.get(self._url(), params=params)
Retrieves all client grants. Args: audience (str, optional): URL encoded audience of a Resource Server to filter page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. client_id (string, optional): The id of a client to filter See: https://auth0.com/docs/api/management/v2#!/Client_Grants/get_client_grants
def visit_attribute(self, node): """check that the accessed attribute exists to avoid too much false positives for now, we'll consider the code as correct if a single of the inferred nodes has the accessed attribute. function/method, super call and metaclasses are ignored """ for pattern in self.config.generated_members: # attribute is marked as generated, stop here if re.match(pattern, node.attrname): return if re.match(pattern, node.as_string()): return try: inferred = list(node.expr.infer()) except exceptions.InferenceError: return # list of (node, nodename) which are missing the attribute missingattr = set() non_opaque_inference_results = [ owner for owner in inferred if owner is not astroid.Uninferable and not isinstance(owner, astroid.nodes.Unknown) ] if ( len(non_opaque_inference_results) != len(inferred) and self.config.ignore_on_opaque_inference ): # There is an ambiguity in the inference. Since we can't # make sure that we won't emit a false positive, we just stop # whenever the inference returns an opaque inference object. return for owner in non_opaque_inference_results: name = getattr(owner, "name", None) if _is_owner_ignored( owner, name, self.config.ignored_classes, self.config.ignored_modules ): continue try: if not [ n for n in owner.getattr(node.attrname) if not isinstance(n.statement(), astroid.AugAssign) ]: missingattr.add((owner, name)) continue except AttributeError: # XXX method / function continue except exceptions.NotFoundError: # This can't be moved before the actual .getattr call, # because there can be more values inferred and we are # stopping after the first one which has the attribute in question. # The problem is that if the first one has the attribute, # but we continue to the next values which doesn't have the # attribute, then we'll have a false positive. # So call this only after the call has been made. if not _emit_no_member( node, owner, name, ignored_mixins=self.config.ignore_mixin_members, ignored_none=self.config.ignore_none, ): continue missingattr.add((owner, name)) continue # stop on the first found break else: # we have not found any node with the attributes, display the # message for infered nodes done = set() for owner, name in missingattr: if isinstance(owner, astroid.Instance): actual = owner._proxied else: actual = owner if actual in done: continue done.add(actual) msg, hint = self._get_nomember_msgid_hint(node, owner) self.add_message( msg, node=node, args=(owner.display_type(), name, node.attrname, hint), confidence=INFERENCE, )
check that the accessed attribute exists to avoid too much false positives for now, we'll consider the code as correct if a single of the inferred nodes has the accessed attribute. function/method, super call and metaclasses are ignored
def add_field(self, field_instance_or_string): """ Appends a field, can be a :class:`~es_fluent.fields.Field` or string. """ if isinstance(field_instance_or_string, basestring): field_instance = Field(field_instance_or_string) elif isinstance(field_instance_or_string, Field): field_instance = field_instance_or_string else: raise ValueError('Expected a basestring or Field instance') self.fields.append(field_instance) return self
Appends a field, can be a :class:`~es_fluent.fields.Field` or string.
def camelize(word): """Convert a word from lower_with_underscores to CamelCase. Args: word: The string to convert. Returns: The modified string. """ return ''.join(w[0].upper() + w[1:] for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))
Convert a word from lower_with_underscores to CamelCase. Args: word: The string to convert. Returns: The modified string.
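Usage sketch; the helper is restated verbatim so the snippet runs on its own:

import re

def camelize(word):
    # restated from the row above so this snippet is self-contained
    return ''.join(w[0].upper() + w[1:]
                   for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))

print(camelize('lower_with_underscores'))  # LowerWithUnderscores
print(camelize('foo_bar_baz'))             # FooBarBaz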
def _is_instance(type_to_check, element, condition="any", deep=False): """ ----- Brief ----- Function that verifies when "all" or "any" elements of the list "element" have the type specified in "type_to_check" input. ----------- Description ----------- In some biosignalsnotebooks functions their implementation is extremely dependent on a specific criterion, i.e., 'all' list entries should be of a specific data type. In order to ensure this functionality _is_instance function was implemented. For example, when plotting data through 'plot' function of 'visualise' module, 'all' entries of time axis and data samples lists need to be 'Numeric'. In order to this condition be checked _is_instance should be called with the following input values: _is_instance(Number, [1, 2, 3, True, ...], 'all') Sometimes is also relevant to check if at least one of list entries belongs to a data type, for cases like this, the argument "condition" should have value equal to "any". -------- Examples -------- >>> _is_instance(Number, [1, 2, 3, True], 'all') False >>> _is_instance(Number, [1, 1.2, 3, 5], 'all') True ---------- Parameters ---------- type_to_check : type element Data type (all or any elements of 'element' list must be of the type specified in the current input). element : list List where condition specified in "condition" will be checked. condition : str String with values "any" or "all" verifying when "any" or "all" element entries have the specified type. deep : bool Flag that identifies when element is in a matrix format and each of its elements should be verified iteratively. Returns ------- out : boolean Returns True when the "condition" is verified for the entries of "element" list. """ out = None # Direct check of "condition" in "element". if deep is False: if condition == "any": out = any(isinstance(el, type_to_check) for el in element) elif condition == "all": out = all(isinstance(el, type_to_check) for el in element) # Since "element" is in a matrix format, then it will be necessary to check each dimension. else: for row in range(0, len(element)): for column in range(0, len(element[row])): flag = _is_instance(type_to_check, element[column][row], "all", deep=False) if flag is False: out = flag else: out = True return out
----- Brief ----- Function that verifies when "all" or "any" elements of the list "element" have the type specified in "type_to_check" input. ----------- Description ----------- In some biosignalsnotebooks functions their implementation is extremely dependent on a specific criterion, i.e., 'all' list entries should be of a specific data type. In order to ensure this functionality _is_instance function was implemented. For example, when plotting data through 'plot' function of 'visualise' module, 'all' entries of time axis and data samples lists need to be 'Numeric'. In order to this condition be checked _is_instance should be called with the following input values: _is_instance(Number, [1, 2, 3, True, ...], 'all') Sometimes is also relevant to check if at least one of list entries belongs to a data type, for cases like this, the argument "condition" should have value equal to "any". -------- Examples -------- >>> _is_instance(Number, [1, 2, 3, True], 'all') False >>> _is_instance(Number, [1, 1.2, 3, 5], 'all') True ---------- Parameters ---------- type_to_check : type element Data type (all or any elements of 'element' list must be of the type specified in the current input). element : list List where condition specified in "condition" will be checked. condition : str String with values "any" or "all" verifying when "any" or "all" element entries have the specified type. deep : bool Flag that identifies when element is in a matrix format and each of its elements should be verified iteratively. Returns ------- out : boolean Returns True when the "condition" is verified for the entries of "element" list.
def lookup_field_class(self, field, obj=None, default=None): """ Looks up any additional class we should include when rendering this field """ css = "" # is there a class specified for this field if field in self.field_config and 'class' in self.field_config[field]: css = self.field_config[field]['class'] # if we were given a default, use that elif default: css = default return css
Looks up any additional class we should include when rendering this field
def validateOneElement(self, doc, elem): """Try to validate a single element and its attributes, basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC: Required Attribute ] Then call xmlValidateOneAttribute() for each attribute present. The ID/IDREF checks are done separately """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o) return ret
Try to validate a single element and its attributes, basically it does the following checks as described by the XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC: Required Attribute ] Then call xmlValidateOneAttribute() for each attribute present. The ID/IDREF checks are done separately
def set_tempo(self, bpm): """Convert the bpm to a midi event and write it to the track_data.""" self.bpm = bpm self.track_data += self.set_tempo_event(self.bpm)
Convert the bpm to a midi event and write it to the track_data.
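The library's set_tempo_event is not shown here, but the MIDI convention it presumably encodes stores tempo as microseconds per quarter note in a 3-byte big-endian payload; a standalone sketch of that arithmetic:

def tempo_to_bytes(bpm):
    # Standard MIDI set-tempo payload: microseconds per quarter note, big-endian, 3 bytes.
    mpqn = int(60_000_000 / bpm)
    return mpqn.to_bytes(3, 'big')

print(tempo_to_bytes(120).hex())  # 07a120  (500000 microseconds per beat)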
def check_pre_approval_notification(self, code): """ check a notification by its code """ response = self.get( url=self.config.PRE_APPROVAL_NOTIFICATION_URL % code) return PagSeguroPreApprovalNotificationResponse( response.content, self.config)
check a notification by its code
def _CheckStorageFile(self, storage_file_path): # pylint: disable=arguments-differ """Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid. """ if os.path.exists(storage_file_path): if not os.path.isfile(storage_file_path): raise errors.BadConfigOption( 'Storage file: {0:s} already exists and is not a file.'.format( storage_file_path)) logger.warning('Appending to an already existing storage file.') dirname = os.path.dirname(storage_file_path) if not dirname: dirname = '.' # TODO: add a more thorough check to see if the storage file really is # a plaso storage file. if not os.access(dirname, os.W_OK): raise errors.BadConfigOption( 'Unable to write to storage file: {0:s}'.format(storage_file_path))
Checks if the storage file path is valid. Args: storage_file_path (str): path of the storage file. Raises: BadConfigOption: if the storage file path is invalid.
def onPublish(self, topic, payload, qos, dup, retain, msgId): ''' Callback Receiving messages from publisher ''' log.debug("msg={payload}", payload=payload)
Callback Receiving messages from publisher
def parse_stdout(self, filelike): """Parse the formulae from the content written by the script to standard out. :param filelike: filelike object of stdout :returns: an exit code in case of an error, None otherwise """ from aiida.orm import Dict formulae = {} content = filelike.read().strip() if not content: return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE try: for line in content.split('\n'): datablock, formula = re.split(r'\s+', line.strip(), 1) formulae[datablock] = formula except Exception: # pylint: disable=broad-except self.logger.exception('Failed to parse formulae from the stdout file\n%s', traceback.format_exc()) return self.exit_codes.ERROR_PARSING_OUTPUT_DATA else: self.out('formulae', Dict(dict=formulae)) return
Parse the formulae from the content written by the script to standard out. :param filelike: filelike object of stdout :returns: an exit code in case of an error, None otherwise
def audit_1_15(self): """1.15 Ensure IAM policies are attached only to groups or roles (Scored)""" for policy in resources.iam.policies.all(): self.assertEqual(len(list(policy.attached_users.all())), 0, "{} has users attached to it".format(policy))
1.15 Ensure IAM policies are attached only to groups or roles (Scored)
def _integrate_plugins(): """Integrate plugins to the context""" import sys from airflow.plugins_manager import macros_modules for macros_module in macros_modules: sys.modules[macros_module.__name__] = macros_module globals()[macros_module._name] = macros_module
Integrate plugins to the context
def expect_file_line_regex_match_count_to_be_between(self, regex, expected_min_count=0, expected_max_count=None, skip=None, mostly=None, null_lines_regex=r"^\s*$", result_format=None, include_config=False, catch_exceptions=None, meta=None, _lines=None): """ Expect the number of times a regular expression appears on each line of a file to be between a maximum and minimum value. Args: regex: \ A string that can be compiled as valid regular expression to match expected_min_count (None or nonnegative integer): \ Specifies the minimum number of times regex is expected to appear on each line of the file expected_max_count (None or nonnegative integer): \ Specifies the maximum number of times regex is expected to appear on each line of the file Keyword Args: skip (None or nonnegative integer): \ Integer specifying the first lines in the file the method should skip before assessing expectations mostly (None or number between 0 and 1): \ Specifies an acceptable error for expectations. If the percentage of unexpected lines is less than mostly, the method still returns true even if all lines don't match the expectation criteria. null_lines_regex (valid regular expression or None): \ If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. _lines (list): \ The lines over which to operate (provided by the file_lines_map_expectation decorator) Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. 
""" try: comp_regex = re.compile(regex) except: raise ValueError("Must enter valid regular expression for regex") if expected_min_count != None: try: assert float(expected_min_count).is_integer() assert float(expected_min_count) >= 0 except: raise ValueError("expected_min_count must be a non-negative \ integer or None") if expected_max_count != None: try: assert float(expected_max_count).is_integer() assert float(expected_max_count) >= 0 except: raise ValueError("expected_max_count must be a non-negative \ integer or None") if expected_max_count != None and expected_min_count != None: try: assert expected_max_count >= expected_min_count except: raise ValueError("expected_max_count must be greater than or \ equal to expected_min_count") if expected_max_count != None and expected_min_count != None: truth_list = [True if(len(comp_regex.findall(line)) >= expected_min_count and \ len(comp_regex.findall(line)) <= expected_max_count) else False \ for line in _lines] elif expected_max_count != None: truth_list = [True if(len(comp_regex.findall(line)) <= expected_max_count) else False \ for line in _lines] elif expected_min_count != None: truth_list = [True if(len(comp_regex.findall(line)) >= expected_min_count) else False \ for line in _lines] else: truth_list = [True for line in _lines] return truth_list
Expect the number of times a regular expression appears on each line of a file to be between a maximum and minimum value. Args: regex: \ A string that can be compiled as valid regular expression to match expected_min_count (None or nonnegative integer): \ Specifies the minimum number of times regex is expected to appear on each line of the file expected_max_count (None or nonnegative integer): \ Specifies the maximum number of times regex is expected to appear on each line of the file Keyword Args: skip (None or nonnegative integer): \ Integer specifying the first lines in the file the method should skip before assessing expectations mostly (None or number between 0 and 1): \ Specifies an acceptable error for expectations. If the percentage of unexpected lines is less than mostly, the method still returns true even if all lines don't match the expectation criteria. null_lines_regex (valid regular expression or None): \ If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. _lines (list): \ The lines over which to operate (provided by the file_lines_map_expectation decorator) Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
def addGenotype( self, genotype_id, genotype_label, genotype_type=None, genotype_description=None ): """ If a genotype_type is not supplied, we will default to 'intrinsic_genotype' :param genotype_id: :param genotype_label: :param genotype_type: :param genotype_description: :return: """ if genotype_type is None: genotype_type = self.globaltt['intrinsic_genotype'] self.model.addIndividualToGraph( genotype_id, genotype_label, genotype_type, genotype_description) return
If a genotype_type is not supplied, we will default to 'intrinsic_genotype' :param genotype_id: :param genotype_label: :param genotype_type: :param genotype_description: :return:
def viewinfo(self, postinfo): ''' View the info ''' out_json = { 'uid': postinfo.uid, 'time_update': postinfo.time_update, 'title': postinfo.title, 'cnt_html': tornado.escape.xhtml_unescape(postinfo.cnt_html), } self.write(json.dumps(out_json))
View the info
def get_random(self): """ Returns a random statement from the database. """ import random Statement = self.get_model('statement') session = self.Session() count = self.count() if count < 1: raise self.EmptyDatabaseException() random_index = random.randrange(0, count) random_statement = session.query(Statement)[random_index] statement = self.model_to_object(random_statement) session.close() return statement
Returns a random statement from the database.
def calc_el_lz_v1(self): """Calculate lake evaporation. Required control parameters: |NmbZones| |ZoneType| |TTIce| Required derived parameters: |RelZoneArea| Required fluxes sequences: |TC| |EPC| Updated state sequence: |LZ| Basic equations: :math:`\\frac{dLZ}{dt} = -EL` \n :math:`EL = \\Bigl \\lbrace { {EPC \\ | \\ TC > TTIce} \\atop {0 \\ | \\ TC \\leq TTIce} }` Examples: Six zones of the same size are initialized. The first three zones are no internal lakes, they can not exhibit any lake evaporation. Of the last three zones, which are internal lakes, only the last one evaporates water. For zones five and six, evaporation is suppressed due to an assumed ice layer, whenever the associated theshold temperature is not exceeded: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(6) >>> zonetype(FIELD, FOREST, GLACIER, ILAKE, ILAKE, ILAKE) >>> ttice(-1.0) >>> derived.relzonearea = 1.0/6.0 >>> fluxes.epc = 0.6 >>> fluxes.tc = 0.0, 0.0, 0.0, 0.0, -1.0, -2.0 >>> states.lz = 10.0 >>> model.calc_el_lz_v1() >>> fluxes.el el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0) >>> states.lz lz(9.9) Note that internal lakes always contain water. Hence, the HydPy-H-Land model allows for negative values of the lower zone storage: >>> states.lz = 0.05 >>> model.calc_el_lz_v1() >>> fluxes.el el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0) >>> states.lz lz(-0.05) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nmbzones): if (con.zonetype[k] == ILAKE) and (flu.tc[k] > con.ttice[k]): flu.el[k] = flu.epc[k] sta.lz -= der.relzonearea[k]*flu.el[k] else: flu.el[k] = 0.
Calculate lake evaporation. Required control parameters: |NmbZones| |ZoneType| |TTIce| Required derived parameters: |RelZoneArea| Required fluxes sequences: |TC| |EPC| Updated state sequence: |LZ| Basic equations: :math:`\\frac{dLZ}{dt} = -EL` \n :math:`EL = \\Bigl \\lbrace { {EPC \\ | \\ TC > TTIce} \\atop {0 \\ | \\ TC \\leq TTIce} }` Examples: Six zones of the same size are initialized. The first three zones are no internal lakes, they can not exhibit any lake evaporation. Of the last three zones, which are internal lakes, only the last one evaporates water. For zones five and six, evaporation is suppressed due to an assumed ice layer, whenever the associated theshold temperature is not exceeded: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(6) >>> zonetype(FIELD, FOREST, GLACIER, ILAKE, ILAKE, ILAKE) >>> ttice(-1.0) >>> derived.relzonearea = 1.0/6.0 >>> fluxes.epc = 0.6 >>> fluxes.tc = 0.0, 0.0, 0.0, 0.0, -1.0, -2.0 >>> states.lz = 10.0 >>> model.calc_el_lz_v1() >>> fluxes.el el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0) >>> states.lz lz(9.9) Note that internal lakes always contain water. Hence, the HydPy-H-Land model allows for negative values of the lower zone storage: >>> states.lz = 0.05 >>> model.calc_el_lz_v1() >>> fluxes.el el(0.0, 0.0, 0.0, 0.6, 0.0, 0.0) >>> states.lz lz(-0.05)
def _grabix_index(data): """Create grabix index of bgzip input file. grabix does not allow specification of output file, so symlink the original file into a transactional directory. """ in_file = data["bgzip_file"] config = data["config"] grabix = config_utils.get_program("grabix", config) gbi_file = _get_grabix_index(in_file) # We always build grabix input so we can use it for counting reads and doing downsampling if not gbi_file or _is_partial_index(gbi_file): if gbi_file: utils.remove_safe(gbi_file) else: gbi_file = in_file + ".gbi" with file_transaction(data, gbi_file) as tx_gbi_file: tx_in_file = os.path.splitext(tx_gbi_file)[0] utils.symlink_plus(in_file, tx_in_file) do.run([grabix, "index", tx_in_file], "Index input with grabix: %s" % os.path.basename(in_file)) assert utils.file_exists(gbi_file) return [gbi_file]
Create grabix index of bgzip input file. grabix does not allow specification of output file, so symlink the original file into a transactional directory.
def from_proto(cls, repeated_split_infos): """Returns a new SplitDict initialized from the `repeated_split_infos`.""" split_dict = cls() for split_info_proto in repeated_split_infos: split_info = SplitInfo() split_info.CopyFrom(split_info_proto) split_dict.add(split_info) return split_dict
Returns a new SplitDict initialized from the `repeated_split_infos`.
def mark_clean(self, entity): """ Marks the given entity as CLEAN. This is done when an entity is loaded fresh from the repository or after a commit. """ state = EntityState.get_state(entity) state.status = ENTITY_STATUS.CLEAN state.is_persisted = True
Marks the given entity as CLEAN. This is done when an entity is loaded fresh from the repository or after a commit.
def get_smtp_mail(self): """ Returns the SMTP formatted email, as it may be passed to sendmail. :rtype: string :return: The SMTP formatted mail. """ header = self.get_smtp_header() body = self.get_body().replace('\n', '\r\n') return header + '\r\n' + body + '\r\n'
Returns the SMTP formatted email, as it may be passed to sendmail. :rtype: string :return: The SMTP formatted mail.
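The only real work is newline normalization to the CRLF line endings SMTP expects; a minimal standalone illustration (the header and body below are made up for the demo):

header = "Subject: hi\r\nFrom: a@example.com\r\nTo: b@example.com\r\n"
body = "line one\nline two"
smtp_mail = header + '\r\n' + body.replace('\n', '\r\n') + '\r\n'
print(repr(smtp_mail))
# 'Subject: hi\r\nFrom: a@example.com\r\nTo: b@example.com\r\n\r\nline one\r\nline two\r\n'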
def _all_tag(self): """Return the all tag of the Glances/Docker configuration file. # By default, Glances only display running containers # Set the following key to True to display all containers all=True """ all_tag = self.get_conf_value('all') if len(all_tag) == 0: return False else: return all_tag[0].lower() == 'true'
Return the all tag of the Glances/Docker configuration file. # By default, Glances only display running containers # Set the following key to True to display all containers all=True
def nifti_copy(filename,prefix=None,gzip=True): ''' creates a ``.nii`` copy of the given dataset and returns the filename as a string''' # I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean if prefix==None: prefix = filename nifti_filename = globals()['prefix'](prefix) + ".nii" if gzip: nifti_filename += '.gz' if not os.path.exists(nifti_filename): try: subprocess.check_call(['3dAFNItoNIFTI','-prefix',nifti_filename,str(filename)]) except subprocess.CalledProcessError: nl.notify('Error: could not convert "%s" to NIFTI dset!' % filename,level=nl.level.error) return None return nifti_filename
creates a ``.nii`` copy of the given dataset and returns the filename as a string
def memory_write32(self, addr, data, zone=None): """Writes words to memory of a target system. Args: self (JLink): the ``JLink`` instance addr (int): start address to write to data (list): list of words to write zone (str): optional memory zone to access Returns: Number of words written to target. Raises: JLinkException: on memory access error. """ return self.memory_write(addr, data, zone, 32)
Writes words to memory of a target system. Args: self (JLink): the ``JLink`` instance addr (int): start address to write to data (list): list of words to write zone (str): optional memory zone to access Returns: Number of words written to target. Raises: JLinkException: on memory access error.
def clean(self): """ Check user has cookies enabled """ if self.request: if not self.request.session.test_cookie_worked(): raise forms.ValidationError("Cookies must be enabled.") return self.cleaned_data
Check user has cookies enabled
def count(self): """ Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results. """ if not self.query.store.autocommit: self.query.store.checkpoint() target = ', '.join([ tableClass.storeID.getColumnName(self.query.store) for tableClass in self.query.tableClass ]) sql, args = self.query._sqlAndArgs( 'SELECT DISTINCT', target) sql = 'SELECT COUNT(*) FROM (' + sql + ')' result = self.query.store.querySQL(sql, args) assert len(result) == 1, 'more than one result: %r' % (result,) return result[0][0] or 0
Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results.
def GetBatchJobHelper(self, version=sorted(_SERVICE_MAP.keys())[-1], server=None): """Returns a BatchJobHelper to work with the BatchJobService. This is a convenience method. It is functionally identical to calling BatchJobHelper(adwords_client, version). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: An initialized BatchJobHelper tied to this client. """ if not server: server = _DEFAULT_ENDPOINT request_builder = BatchJobHelper.GetRequestBuilder( self, version=version, server=server) response_parser = BatchJobHelper.GetResponseParser() return BatchJobHelper(request_builder, response_parser)
Returns a BatchJobHelper to work with the BatchJobService. This is a convenience method. It is functionally identical to calling BatchJobHelper(adwords_client, version). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: An initialized BatchJobHelper tied to this client.
def set_palette_name(self, palette_name): """If the given palette matches an existing one, shows it in the combobox """ combo = self.get_widget('palette_name') found = False log.debug("wanting palette: %r", palette_name) for i in combo.get_model(): if i[0] == palette_name: combo.set_active_iter(i.iter) found = True break if not found: combo.set_active(self.custom_palette_index)
If the given palette matches an existing one, shows it in the combobox
def _domain_differs(self, href): """ Check that a link is not on the same domain as the source URL """ target = utils.get_domain(href) if not target: return False origin = utils.get_domain(self.url) return target != origin
Check that a link is not on the same domain as the source URL
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor, gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5): """ Vel factory function """ return TrpoPolicyGradient( max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters, discount_factor=discount_factor, gae_lambda=gae_lambda, improvement_acceptance_ratio=improvement_acceptance_ratio, max_grad_norm=max_grad_norm )
Vel factory function
def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
    """A source that fetches the content of a given website as DOM nodes or a
    string. Loopable.

    context : pipe2py.Context object
    _INPUT : pipeforever pipe or an iterable of items or fields
    conf : dict
       URL -- url object contain the URL to download
       xpath -- xpath to extract
       html5 -- use html5 parser?
       useAsString -- emit items as string?

       TODOS:
        - don't retrieve pages larger than 1.5MB
        - don't retrieve if page is not indexable.

    Yields
    ------
    _OUTPUT : items
    """
    conf = DotDict(conf)
    urls = utils.listize(conf['URL'])

    for item in _INPUT:
        for item_url in urls:
            url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
            url = utils.get_abspath(url)
            f = urlopen(url)

            # TODO: it seems that Yahoo! converts relative links to
            # absolute. This needs to be done on the content but seems to
            # be a non-trivial task in python?
            content = unicode(f.read(), 'utf-8')

            if context and context.verbose:
                print '............Content .................'
                print content
                print '...............EOF...................'

            xpath = conf.get('xpath', **kwargs)
            html5 = conf.get('html5', **kwargs) == 'true'
            use_as_string = conf.get('useAsString', **kwargs) == 'true'

            # Parse the content string read above: the urlopen file object is
            # already exhausted by f.read(), so parsing `f` again would yield
            # an empty document.
            root = html5parser.fromstring(content) if html5 else html.fromstring(content)
            items = root.xpath(xpath)

            if context and context.verbose:
                print 'XPathFetchPage: found count items:', len(items)

            for etree in items:
                i = utils.etree_to_dict(etree)

                if context and context.verbose:
                    print '--------------item data --------------------'
                    print i
                    print '--------------EOF item data ----------------'

                if use_as_string:
                    yield {'content': unicode(i)}
                else:
                    yield i

        if item.get('forever'):
            # _INPUT is pipeforever and not a loop,
            # so we just yield our item once
            break
A source that fetches the content of a given website as DOM nodes or a string. Loopable. context : pipe2py.Context object _INPUT : pipeforever pipe or an iterable of items or fields conf : dict URL -- url object contain the URL to download xpath -- xpath to extract html5 -- use html5 parser? useAsString -- emit items as string? TODOS: - don't retrieve pages larger than 1.5MB - don't retrieve if page is not indexable. Yields ------ _OUTPUT : items
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location._to_dict() if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'section_title') and self.section_title is not None: _dict['section_title'] = self.section_title._to_dict() if hasattr(self, 'table_headers') and self.table_headers is not None: _dict['table_headers'] = [x._to_dict() for x in self.table_headers] if hasattr(self, 'row_headers') and self.row_headers is not None: _dict['row_headers'] = [x._to_dict() for x in self.row_headers] if hasattr(self, 'column_headers') and self.column_headers is not None: _dict['column_headers'] = [ x._to_dict() for x in self.column_headers ] if hasattr(self, 'key_value_pairs') and self.key_value_pairs is not None: _dict['key_value_pairs'] = [ x._to_dict() for x in self.key_value_pairs ] if hasattr(self, 'body_cells') and self.body_cells is not None: _dict['body_cells'] = [x._to_dict() for x in self.body_cells] return _dict
Return a json dictionary representing this model.
def get_parent_aligned_annotation(self, ref_id):
    """Give the alignment annotation that a reference annotation belongs to
    directly, or indirectly through other reference annotations.

    :param str ref_id: Id of a reference annotation.
    :raises KeyError: If no annotation exists with the id or if it belongs
        to an alignment annotation.
    :returns: The alignment annotation at the end of the reference chain.
    """
    parentTier = self.tiers[self.annotations[ref_id]]
    while "PARENT_REF" in parentTier[2] and len(parentTier[2]) > 0:
        ref_id = parentTier[1][ref_id][0]
        parentTier = self.tiers[self.annotations[ref_id]]

    return parentTier[0][ref_id]
Give the alignment annotation that a reference annotation belongs to directly, or indirectly through other reference annotations. :param str ref_id: Id of a reference annotation. :raises KeyError: If no annotation exists with the id or if it belongs to an alignment annotation. :returns: The alignment annotation at the end of the reference chain.
def add_jump(self, name, min, max, num, warp=None, var_type=float):
    """ An integer/float-valued enumerable with `num` items, bounded
    between [`min`, `max`]. Note that the right endpoint of the interval
    includes `max`. This is a wrapper around the add_enum. `jump` can be
    a float or int.
    """
    if not isinstance(var_type, type):
        if var_type == 'int':
            var_type = int
        elif var_type == 'float':
            var_type = float
        else:
            raise ValueError('var_type (%s) is not supported. use '
                             '"int" or "float"' % (var_type))

    min, max = map(var_type, (min, max))
    num = int(num)
    if not warp:
        choices = np.linspace(min, max, num=num, dtype=var_type)
    elif (min >= 0) and warp == 'log':
        choices = np.logspace(np.log10(min), np.log10(max), num=num,
                              dtype=var_type)
    elif (min <= 0) and warp == 'log':
        raise ValueError('variable %s: log-warping requires min > 0' % name)
    else:
        raise ValueError('variable %s: warp=%s is not supported. use '
                         'None or "log"' % (name, warp))

    self.variables[name] = EnumVariable(name, choices.tolist())
An integer/float-valued enumerable with `num` items, bounded between [`min`, `max`]. Note that the right endpoint of the interval includes `max`. This is a wrapper around the add_enum. `jump` can be a float or int.
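A minimal, standalone illustration of the warp branch above (the numbers are made up): warp=None spaces the choices linearly, while warp='log' spaces them evenly in log10-space.

import numpy as np

# warp=None: evenly spaced values, right endpoint included.
print(np.linspace(1.0, 10.0, num=4))                        # -> 1, 4, 7, 10

# warp='log': evenly spaced exponents, so values cluster near the minimum.
print(np.logspace(np.log10(1.0), np.log10(1000.0), num=4))  # -> 1, 10, 100, 1000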
def connect(config_dir=None, optional_config_files=None, cron_cfg="cron"): """ Initialize everything for interactive use. Returns a ready-to-use RtorrentEngine object. """ from pyrocore.scripts.base import ScriptBase from pyrocore.util import load_config ScriptBase.setup(cron_cfg=cron_cfg) load_config.ConfigLoader(config_dir).load(optional_config_files or []) from pyrocore import config config.engine.open() return config.engine
Initialize everything for interactive use. Returns a ready-to-use RtorrentEngine object.
def open_fileswitcher_dlg(self): """Open file list management dialog box""" if not self.tabs.count(): return if self.fileswitcher_dlg is not None and \ self.fileswitcher_dlg.is_visible: self.fileswitcher_dlg.hide() self.fileswitcher_dlg.is_visible = False return self.fileswitcher_dlg = FileSwitcher(self, self, self.tabs, self.data, ima.icon('TextFileIcon')) self.fileswitcher_dlg.sig_goto_file.connect(self.set_stack_index) self.fileswitcher_dlg.show() self.fileswitcher_dlg.is_visible = True
Open file list management dialog box
def modify_order(self, modify_order_op, order_id, qty, price, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
    """
    See the base class interface description for details, with the following differences:
    modifying an order is not supported; orders can be cancelled; deleting an order is a
    local operation only.

    :param modify_order_op:
    :param order_id:
    :param qty:
    :param price:
    :param adjust_limit:
    :param trd_env:
    :param acc_id:
    :return:
    """
    return super(OpenHKCCTradeContext, self).modify_order(modify_order_op=modify_order_op,
                                                          order_id=order_id,
                                                          qty=qty,
                                                          price=price,
                                                          adjust_limit=adjust_limit,
                                                          trd_env=trd_env,
                                                          acc_id=acc_id,
                                                          acc_index=acc_index)
See the base class interface description for details, with the following differences: modifying an order is not supported; orders can be cancelled; deleting an order is a local operation only. :param modify_order_op: :param order_id: :param qty: :param price: :param adjust_limit: :param trd_env: :param acc_id: :return:
def my_main(context): """ The starting point for your app.""" print('starting MyApp...') if context['debug']: print('Context:') for k in context: print('Key: {}\nValue: {}'.format(k, context[k])) print('Done!') return 0
The starting point for your app.
def generate_random_string(size=6, chars=string.ascii_uppercase + string.digits): """Generate random string. :param size: Length of the returned string. Default is 6. :param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits. :type size: int :type chars: str :return: The random string. :rtype: str """ return ''.join(random.choice(chars) for _ in range(size))
Generate random string. :param size: Length of the returned string. Default is 6. :param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits. :type size: int :type chars: str :return: The random string. :rtype: str
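Example calls (outputs are random; the values shown are only illustrative):

import string

print(generate_random_string())                                        # e.g. 'K2R9ZQ'
print(generate_random_string(size=10, chars=string.ascii_lowercase))   # e.g. 'qhdjxunmwe'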
def pbkdf2(seed: str or bytes, dk_len: int) -> bytes:
    """
    Derive one key from a seed.

    :param seed: the secret pass phrase to generate the keys from.
    :param dk_len: the length in bytes of the derived key.
    :return: the derived key, ``dk_len`` bytes long.
    """
    key = b''
    index = 1
    bytes_seed = str_to_bytes(seed)
    while len(key) < dk_len:
        key += Digest.sha256(b''.join([bytes_seed, index.to_bytes(4, 'big', signed=True)]))
        index += 1
    return key[:dk_len]
Derive one key from a seed. :param seed: the secret pass phrase to generate the keys from. :param dk_len: the length in bytes of the derived key. :return: the derived key, ``dk_len`` bytes long.
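A self-contained sketch of the same counter-based derivation using only hashlib; it mirrors the loop above but does not depend on the library's str_to_bytes/Digest helpers, and (like the original) it is a simple SHA-256 counter construction rather than PBKDF2 as defined in RFC 2898.

import hashlib

def derive_key(seed: str, dk_len: int) -> bytes:
    # Hash seed || 4-byte big-endian counter until enough bytes have accumulated.
    key = b''
    index = 1
    seed_bytes = seed.encode('utf-8')
    while len(key) < dk_len:
        key += hashlib.sha256(seed_bytes + index.to_bytes(4, 'big')).digest()
        index += 1
    return key[:dk_len]

print(derive_key('my pass phrase', 16).hex())  # 32 hex characters = 16 bytes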
def build_url(base_url, partial_url):
    """
    Makes sure the URL is built properly.

    >>> urllib.parse.urljoin('https://test.com/1/', '2/3')
    https://test.com/1/2/3
    >>> urllib.parse.urljoin('https://test.com/1/', '/2/3')
    https://test.com/2/3
    >>> urllib.parse.urljoin('https://test.com/1', '2/3')
    https://test.com/2/3
    """
    if not base_url.endswith('/'):
        base_url += '/'
    if partial_url.startswith('/'):
        partial_url = partial_url[1:]
    return urlparse.urljoin(base_url, partial_url)
Makes sure the URL is built properly. >>> urllib.parse.urljoin('https://test.com/1/', '2/3') https://test.com/1/2/3 >>> urllib.parse.urljoin('https://test.com/1/', '/2/3') https://test.com/2/3 >>> urllib.parse.urljoin('https://test.com/1', '2/3') https://test.com/2/3
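Illustrative calls showing why the slash normalization matters (outputs follow directly from the urljoin rules quoted in the docstring):

# Plain urljoin drops the last path segment when the base has no trailing slash:
#   urljoin('https://test.com/1', '2/3') -> 'https://test.com/2/3'
# build_url normalizes both sides first, so the partial path is always appended:
print(build_url('https://test.com/1', '2/3'))    # https://test.com/1/2/3
print(build_url('https://test.com/1/', '/2/3'))  # https://test.com/1/2/3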
def input_validate_yubikey_secret(data, name='data'): """ Input validation for YHSM_YubiKeySecret or string. """ if isinstance(data, pyhsm.aead_cmd.YHSM_YubiKeySecret): data = data.pack() return input_validate_str(data, name)
Input validation for YHSM_YubiKeySecret or string.
def _clear_surface(self, surface, rect=None): """ Clear the buffer, taking in account colorkey or alpha :return: """ clear_color = self._rgb_clear_color if self._clear_color is None else self._clear_color surface.fill(clear_color, rect)
Clear the buffer, taking in account colorkey or alpha :return:
def random_tickers( length, n_tickers, endswith=None, letters=None, slicer=itertools.islice ): """Generate a length-n_tickers list of unique random ticker symbols. Parameters ---------- length : int The length of each ticker string. n_tickers : int Number of tickers to generate. endswith : str, default None Specify the ending element(s) of each ticker (for example, 'X'). letters : sequence, default None Sequence of possible letters to choose from. If None, defaults to `string.ascii_uppercase`. Returns ------- list of str Examples -------- >>> from pyfinance import utils >>> utils.random_tickers(length=5, n_tickers=4, endswith='X') ['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX'] >>> utils.random_tickers(3, 8) ['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST'] """ # The trick here is that we need uniqueness. That defeats the # purpose of using NumPy because we need to generate 1x1. # (Although the alternative is just to generate a "large # enough" duplicated sequence and prune from it.) if letters is None: letters = string.ascii_uppercase if endswith: # Only generate substrings up to `endswith` length = length - len(endswith) join = "".join def yield_ticker(rand=random.choices): if endswith: while True: yield join(rand(letters, k=length)) + endswith else: while True: yield join(rand(letters, k=length)) tickers = itertools.islice(unique_everseen(yield_ticker()), n_tickers) return list(tickers)
Generate a length-n_tickers list of unique random ticker symbols. Parameters ---------- length : int The length of each ticker string. n_tickers : int Number of tickers to generate. endswith : str, default None Specify the ending element(s) of each ticker (for example, 'X'). letters : sequence, default None Sequence of possible letters to choose from. If None, defaults to `string.ascii_uppercase`. Returns ------- list of str Examples -------- >>> from pyfinance import utils >>> utils.random_tickers(length=5, n_tickers=4, endswith='X') ['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX'] >>> utils.random_tickers(3, 8) ['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST']
def radius_server_host_protocol(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa") host = ET.SubElement(radius_server, "host") hostname_key = ET.SubElement(host, "hostname") hostname_key.text = kwargs.pop('hostname') use_vrf_key = ET.SubElement(host, "use-vrf") use_vrf_key.text = kwargs.pop('use_vrf') protocol = ET.SubElement(host, "protocol") protocol.text = kwargs.pop('protocol') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _Open(self, path_spec, mode='rb'): """Opens the file system defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') file_object = resolver.Resolver.OpenFileObject( path_spec.parent, resolver_context=self._resolver_context) cpio_archive_file = cpio.CPIOArchiveFile() try: cpio_archive_file.Open(file_object) except: file_object.close() raise self._file_object = file_object self._cpio_archive_file = cpio_archive_file
Opens the file system defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
def _raw_open(self, flags, mode=0o777): """ Open the file pointed by this path and return a file descriptor, as os.open() does. """ return self._accessor.open(self, flags, mode)
Open the file pointed by this path and return a file descriptor, as os.open() does.
def is_cython_function(fn): """Checks if a function is compiled w/Cython.""" if hasattr(fn, "__func__"): fn = fn.__func__ # Class method, static method name = type(fn).__name__ return ( name == "method_descriptor" or name == "cython_function_or_method" or name == "builtin_function_or_method" )
Checks if a function is compiled w/Cython.
def duration(self): """Get or set the duration of the event. | Will return a timedelta object. | May be set to anything that timedelta() understands. | May be set with a dict ({"days":2, "hours":6}). | If set to a non null value, removes any already existing end time. """ if self._duration: return self._duration elif self.end: # because of the clever getter for end, this also takes care of all_day events return self.end - self.begin else: # event has neither start, nor end, nor duration return None
Get or set the duration of the event. | Will return a timedelta object. | May be set to anything that timedelta() understands. | May be set with a dict ({"days":2, "hours":6}). | If set to a non null value, removes any already existing end time.
def get_annotation_values(graph, annotation: str) -> Set[str]: """Get all values for the given annotation. :param pybel.BELGraph graph: A BEL graph :param annotation: The annotation to summarize :return: A set of all annotation values """ return set(iter_annotation_values(graph, annotation))
Get all values for the given annotation. :param pybel.BELGraph graph: A BEL graph :param annotation: The annotation to summarize :return: A set of all annotation values
def sanitize(self): ''' Check if the current settings conform to the LISP specifications and fix them where possible. ''' super(MapNotifyMessage, self).sanitize() # The first bit after the Type field in a Map-Notify message is # allocated as the "I" bit. I bit indicates that a 128 bit xTR-ID and # 64 bit site-ID field is present at the end of the Map-Notify message, # following the final Record in the Map-Notify. if not isinstance(self.xtr_id, numbers.Integral) \ or self.xtr_id < 0 or self.xtr_id >= 2 ** 128: raise ValueError('Invalid xTR-ID') # site-ID is a 64 bit field at the end of the Map-Register message, # following the xTR-ID. if not isinstance(self.site_id, numbers.Integral) \ or self.site_id < 0 or self.site_id >= 2 ** 64: raise ValueError('Invalid site-ID') # Nonce: This 8-octet Nonce field is set to 0 in Map-Register # messages. Since the Map-Register message is authenticated, the # nonce field is not currently used for any security function but # may be in the future as part of an anti-replay solution. if len(bytes(self.nonce)) != 8: raise ValueError('Invalid nonce') # Key ID: A configured ID to find the configured Message # Authentication Code (MAC) algorithm and key value used for the # authentication function. See Section 14.4 for codepoint # assignments. if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96, KEY_ID_HMAC_SHA_256_128): raise ValueError('Invalid Key ID') # Authentication Data: The message digest used from the output of the # Message Authentication Code (MAC) algorithm. The entire Map- # Register payload is authenticated with this field preset to 0. # After the MAC is computed, it is placed in this field. # Implementations of this specification MUST include support for # HMAC-SHA-1-96 [RFC2404] and support for HMAC-SHA-256-128 [RFC6234] # is RECOMMENDED. if not isinstance(self.authentication_data, bytes): raise ValueError('Invalid authentication data') # Map-Reply Record: When the M bit is set, this field is the size of a # single "Record" in the Map-Reply format. This Map-Reply record # contains the EID-to-RLOC mapping entry associated with the Source # EID. This allows the ETR which will receive this Map-Request to # cache the data if it chooses to do so. for record in self.records: if not isinstance(record, MapRegisterRecord): raise ValueError('Invalid record') record.sanitize()
Check if the current settings conform to the LISP specifications and fix them where possible.
def _fast_hit_windows(ref, est, window): '''Fast calculation of windowed hits for time events. Given two lists of event times ``ref`` and ``est``, and a tolerance window, computes a list of pairings ``(i, j)`` where ``|ref[i] - est[j]| <= window``. This is equivalent to, but more efficient than the following: >>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est)) ... <= window) Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values est : np.ndarray, shape=(m,) Array of estimated values window : float >= 0 Size of the tolerance window Returns ------- hit_ref : np.ndarray hit_est : np.ndarray indices such that ``|hit_ref[i] - hit_est[i]| <= window`` ''' ref = np.asarray(ref) est = np.asarray(est) ref_idx = np.argsort(ref) ref_sorted = ref[ref_idx] left_idx = np.searchsorted(ref_sorted, est - window, side='left') right_idx = np.searchsorted(ref_sorted, est + window, side='right') hit_ref, hit_est = [], [] for j, (start, end) in enumerate(zip(left_idx, right_idx)): hit_ref.extend(ref_idx[start:end]) hit_est.extend([j] * (end - start)) return hit_ref, hit_est
Fast calculation of windowed hits for time events. Given two lists of event times ``ref`` and ``est``, and a tolerance window, computes a list of pairings ``(i, j)`` where ``|ref[i] - est[j]| <= window``. This is equivalent to, but more efficient than the following: >>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est)) ... <= window) Parameters ---------- ref : np.ndarray, shape=(n,) Array of reference values est : np.ndarray, shape=(m,) Array of estimated values window : float >= 0 Size of the tolerance window Returns ------- hit_ref : np.ndarray hit_est : np.ndarray indices such that ``|hit_ref[i] - hit_est[i]| <= window``
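A small worked example with made-up event times:

import numpy as np

ref = np.array([0.0, 1.0, 2.0])
est = np.array([0.1, 2.4])

hit_ref, hit_est = _fast_hit_windows(ref, est, window=0.5)
# ref[0]=0.0 matches est[0]=0.1 and ref[2]=2.0 matches est[1]=2.4;
# ref[1]=1.0 is farther than 0.5 from every estimate, so it never appears.
print(hit_ref, hit_est)  # [0, 2] [0, 1]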
def updateHistory(self, activeCells, forceOutput=False): """ Computes one cycle of the Union Pooler algorithm. Return the union SDR Parameters: ---------------------------- @param activeCells: A list that stores indices of active cells @param forceOutput: if True, a union will be created without regard to minHistory """ self._activeCellsHistory.append(activeCells) if len(self._activeCellsHistory) > self._historyLength: self._activeCellsHistory.pop(0) self._unionSDR = numpy.zeros(shape=(self._numInputs,)) if (len(self._activeCellsHistory) >= self._minHistory) or forceOutput: for i in self._activeCellsHistory: self._unionSDR[i] = 1 return self._unionSDR
Computes one cycle of the Union Pooler algorithm. Return the union SDR Parameters: ---------------------------- @param activeCells: A list that stores indices of active cells @param forceOutput: if True, a union will be created without regard to minHistory
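A stripped-down sketch of the union step outside the class (the input size and history are made up): the union SDR is just the OR of the active-cell sets kept in the sliding history window.

import numpy as np

num_inputs = 10
history = [[1, 3], [3, 5], [7]]   # active cell indices from the last few cycles

union_sdr = np.zeros(num_inputs)
for active_cells in history:
    union_sdr[active_cells] = 1

print(union_sdr)  # [0. 1. 0. 1. 0. 1. 0. 1. 0. 0.]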
def resurrect(self, force=False):
    """
    Attempt to resurrect a connection from the dead pool. It will try to
    locate one (not all) eligible (its timeout is over) connection to
    return to the live pool.

    :arg force: resurrect a connection even if there is none eligible (used
        when we have no live connections)
    """
    # no dead connections
    if self.dead.empty():
        return

    try:
        # retrieve a connection to check
        timeout, connection = self.dead.get(block=False)
    except Empty:
        # other thread has been faster and the queue is now empty
        return

    if not force and timeout > time.time():
        # return it back if not eligible and not forced
        self.dead.put((timeout, connection))
        return

    # either we were forced or the connection is eligible to be retried
    self.connections.append(connection)
    logger.info('Resurrecting connection %r (force=%s).', connection, force)
Attempt to resurrect a connection from the dead pool. It will try to locate one (not all) eligible (its timeout is over) connection to return to the live pool. :arg force: resurrect a connection even if there is none eligible (used when we have no live connections)
def all(cls): '''Return all tags that are currently applied to any dataset. :returns: a list of all tags that are currently applied to any dataset :rtype: list of ckan.model.tag.Tag objects ''' # if vocab_id_or_name: # vocab = vocabulary.Vocabulary.get(vocab_id_or_name) # if vocab is None: # # The user specified an invalid vocab. # raise ckan.logic.NotFound("could not find vocabulary '%s'" # % vocab_id_or_name) # query = meta.Session.query(Tag).filter(Tag.vocabulary_id==vocab.id) # else: query = meta.Session.query(SemanticTag) query = query.distinct().join(TagSemanticTag) # query = query.filter_by(state='active') return query
Return all tags that are currently applied to any dataset. :returns: a list of all tags that are currently applied to any dataset :rtype: list of ckan.model.tag.Tag objects
def set_instrument(self, channel, instr, bank=1): """Add a program change and bank select event to the track_data.""" self.track_data += self.select_bank(channel, bank) self.track_data += self.program_change_event(channel, instr)
Add a program change and bank select event to the track_data.
def protected_resource_view(scopes=None): """ View decorator. The client accesses protected resources by presenting the access token to the resource server. https://tools.ietf.org/html/rfc6749#section-7 """ if scopes is None: scopes = [] def wrapper(view): def view_wrapper(request, *args, **kwargs): access_token = extract_access_token(request) try: try: kwargs['token'] = Token.objects.get(access_token=access_token) except Token.DoesNotExist: logger.debug('[UserInfo] Token does not exist: %s', access_token) raise BearerTokenError('invalid_token') if kwargs['token'].has_expired(): logger.debug('[UserInfo] Token has expired: %s', access_token) raise BearerTokenError('invalid_token') if not set(scopes).issubset(set(kwargs['token'].scope)): logger.debug('[UserInfo] Missing openid scope.') raise BearerTokenError('insufficient_scope') except BearerTokenError as error: response = HttpResponse(status=error.status) response['WWW-Authenticate'] = 'error="{0}", error_description="{1}"'.format( error.code, error.description) return response return view(request, *args, **kwargs) return view_wrapper return wrapper
View decorator. The client accesses protected resources by presenting the access token to the resource server. https://tools.ietf.org/html/rfc6749#section-7
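Hypothetical usage as a Django view decorator (the view body, scope name and returned claims are illustrative, not taken from the source):

from django.http import JsonResponse

@protected_resource_view(scopes=['openid'])
def userinfo(request, *args, **kwargs):
    # kwargs['token'] is the validated Token instance injected by the decorator.
    token = kwargs['token']
    return JsonResponse({'sub': str(token.user.pk)})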
def seek(self, offset, whence=SEEK_SET):
    """Seek pointer in lob data buffer to requested position.

    Might trigger further loading of data from the database if the pointer is beyond currently read data.
    """
    # A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easier calculation.
    # This will not add any data to the buffer however - very convenient!
    self.data.seek(offset, whence)
    new_pos = self.data.tell()
    missing_bytes_to_read = new_pos - self._current_lob_length
    if missing_bytes_to_read > 0:
        # Trying to seek beyond currently available LOB data, so need to load some more first.

        # We try to be smart here:
        # If a user sets a certain file position s/he probably wants to read data from
        # there. So already read some extra data to avoid yet another immediate
        # reading step. Try with EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK additional items (bytes/chars).

        # jump to the end of the current buffer and read the new data:
        self.data.seek(0, SEEK_END)
        self.read(missing_bytes_to_read + self.EXTRA_NUM_ITEMS_TO_READ_AFTER_SEEK)
        # reposition file pointer at the originally desired position:
        self.data.seek(new_pos)
    return new_pos
Seek pointer in lob data buffer to requested position. Might trigger further loading of data from the database if the pointer is beyond currently read data.
def run_failure_step_group(pipeline, context): """Run the on_failure step group if it exists. This function will swallow all errors, to prevent obfuscating the error condition that got it here to begin with. """ logger.debug("starting") try: assert pipeline # if no on_failure exists, it'll do nothing. run_step_group(pipeline_definition=pipeline, step_group_name='on_failure', context=context) except Exception as exception: logger.error("Failure handler also failed. Swallowing.") logger.error(exception) logger.debug("done")
Run the on_failure step group if it exists. This function will swallow all errors, to prevent obfuscating the error condition that got it here to begin with.
def _provision_vm(name=None, session=None): ''' Provision vm right after clone/copy ''' if session is None: session = _get_session() log.info('Provisioning VM %s', name) vm = _get_vm(name, session) task = session.xenapi.Async.VM.provision(vm) _run_async_task(task, session)
Provision vm right after clone/copy
def run(self, **kwargs): """ Drive servo to the position set in the `position_sp` attribute. """ for key in kwargs: setattr(self, key, kwargs[key]) self.command = self.COMMAND_RUN
Drive servo to the position set in the `position_sp` attribute.
def getAll(self): '''Return a dictionary with all variables''' if not bool(len(self.ATTRIBUTES)): self.load_attributes() return eval(str(self.ATTRIBUTES))
Return a dictionary with all variables
def tokenize(self, path): """Tokenizes a text file.""" assert os.path.exists(path) # Add words to the dictionary with open(path, 'r') as f: tokens = 0 for line in f: words = line.split() + ['<eos>'] tokens += len(words) for word in words: self.dictionary.add_word(word) # Tokenize file content with open(path, 'r') as f: ids = np.zeros((tokens,), dtype='int32') token = 0 for line in f: words = line.split() + ['<eos>'] for word in words: ids[token] = self.dictionary.word2idx[word] token += 1 return mx.nd.array(ids, dtype='int32')
Tokenizes a text file.
def is_all_field_none(self): """ :rtype: bool """ if self._UserLight is not None: return False if self._UserPerson is not None: return False if self._UserCompany is not None: return False if self._UserApiKey is not None: return False return True
:rtype: bool
def AgregarTambo(self, nro_tambo_interno, nro_renspa,
                 fecha_venc_cert_tuberculosis, fecha_venc_cert_brucelosis,
                 nro_tambo_provincial=None, **kwargs):
    "Add the producer's data to the settlement (liquidación)."
    tambo = {'nroTamboInterno': nro_tambo_interno,
             'nroTamboProvincial': nro_tambo_provincial,
             'nroRenspa': nro_renspa,
             'ubicacionTambo': {},
             'fechaVencCertTuberculosis': fecha_venc_cert_tuberculosis,
             'fechaVencCertBrucelosis': fecha_venc_cert_brucelosis}
    self.solicitud['tambo'] = tambo
    return True
Add the producer's data to the settlement (liquidación).
def open_zip(path_or_file, *args, **kwargs): """A with-context for zip files. Passes through *args and **kwargs to zipfile.ZipFile. :API: public :param path_or_file: Full path to zip file. :param args: Any extra args accepted by `zipfile.ZipFile`. :param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`. :raises: `InvalidZipPath` if path_or_file is invalid. :raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file. :returns: `class 'contextlib.GeneratorContextManager`. """ if not path_or_file: raise InvalidZipPath('Invalid zip location: {}'.format(path_or_file)) allowZip64 = kwargs.pop('allowZip64', True) try: zf = zipfile.ZipFile(path_or_file, *args, allowZip64=allowZip64, **kwargs) except zipfile.BadZipfile as bze: # Use the realpath in order to follow symlinks back to the problem source file. raise zipfile.BadZipfile("Bad Zipfile {0}: {1}".format(os.path.realpath(path_or_file), bze)) try: yield zf finally: zf.close()
A with-context for zip files. Passes through *args and **kwargs to zipfile.ZipFile. :API: public :param path_or_file: Full path to zip file. :param args: Any extra args accepted by `zipfile.ZipFile`. :param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`. :raises: `InvalidZipPath` if path_or_file is invalid. :raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file. :returns: `class 'contextlib.GeneratorContextManager`.
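Typical usage, assuming the generator above is wrapped with contextlib.contextmanager as its docstring implies (the path is hypothetical):

with open_zip('/tmp/example.zip') as zf:
    print(zf.namelist())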
def modutf7_encode(data: str) -> bytes: """Encode the string using modified UTF-7. Args: data: The input string to encode. """ ret = bytearray() is_usascii = True encode_start = None for i, symbol in enumerate(data): charpoint = ord(symbol) if is_usascii: if charpoint == 0x26: ret.extend(b'&-') elif 0x20 <= charpoint <= 0x7e: ret.append(charpoint) else: encode_start = i is_usascii = False else: if 0x20 <= charpoint <= 0x7e: to_encode = data[encode_start:i] encoded = _modified_b64encode(to_encode) ret.append(0x26) ret.extend(encoded) ret.extend((0x2d, charpoint)) is_usascii = True if not is_usascii: to_encode = data[encode_start:] encoded = _modified_b64encode(to_encode) ret.append(0x26) ret.extend(encoded) ret.append(0x2d) return bytes(ret)
Encode the string using modified UTF-7. Args: data: The input string to encode.
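Illustrative encodings under IMAP's modified UTF-7 rules, assuming the module's _modified_b64encode helper performs the RFC 3501 base64 variant; the 'Entwürfe' mailbox name is a commonly cited example.

print(modutf7_encode('Drafts'))         # b'Drafts'        (printable ASCII passes through)
print(modutf7_encode('A & B'))          # b'A &- B'        ('&' is escaped as '&-')
print(modutf7_encode('Entw\u00fcrfe'))  # b'Entw&APw-rfe'  (non-ASCII runs are base64-encoded)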
def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array:
    """
    x: int.
    y: int.
    count: int.
    Return `ctypes.Array`, an iterable array of int values in ARGB form,
    for `count` pixels starting at point (x, y) and running horizontally.
    """
    arrayType = ctypes.c_uint32 * count
    values = arrayType()
    _DllClient.instance().dll.BitmapGetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)
    return values
x: int. y: int. count: int. Return `ctypes.Array`, an iterable array of int values in ARGB form, for `count` pixels starting at point (x, y) and running horizontally.
def ignore_whitespace_text_nodes(cls, wrapped_node):
    """
    Find and delete any text nodes containing nothing but whitespace in
    the given node and its descendants.

    This is useful for cleaning up excess low-value text nodes in a
    document DOM after parsing a pretty-printed XML document.
    """
    for child in wrapped_node.children:
        if child.is_text and child.value.strip() == '':
            child.delete()
        else:
            cls.ignore_whitespace_text_nodes(child)
Find and delete any text nodes containing nothing but whitespace in the given node and its descendants. This is useful for cleaning up excess low-value text nodes in a document DOM after parsing a pretty-printed XML document.
def fit_class1_pan_allele_models( self, n_models, architecture_hyperparameters, alleles, peptides, affinities, inequalities, models_dir_for_save=None, verbose=1, progress_preamble="", progress_print_interval=5.0): """ Fit one or more pan-allele predictors using a single neural network architecture. The new predictors are saved in the Class1AffinityPredictor instance and will be used on subsequent calls to `predict`. Parameters ---------- n_models : int Number of neural networks to fit architecture_hyperparameters : dict alleles : list of string Allele names (not sequences) corresponding to each peptide peptides : `EncodableSequences` or list of string affinities : list of float nM affinities inequalities : list of string, each element one of ">", "<", or "=" See Class1NeuralNetwork.fit for details. models_dir_for_save : string, optional If specified, the Class1AffinityPredictor is (incrementally) written to the given models dir after each neural network is fit. verbose : int Keras verbosity progress_preamble : string Optional string of information to include in each progress update progress_print_interval : float How often (in seconds) to print progress. Set to None to disable. Returns ------- list of `Class1NeuralNetwork` """ alleles = pandas.Series(alleles).map(mhcnames.normalize_allele_name) allele_encoding = AlleleEncoding( alleles, allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence) encodable_peptides = EncodableSequences.create(peptides) models = [] for i in range(n_models): logging.info("Training model %d / %d" % (i + 1, n_models)) model = Class1NeuralNetwork(**architecture_hyperparameters) model.fit( encodable_peptides, affinities, inequalities=inequalities, allele_encoding=allele_encoding, verbose=verbose, progress_preamble=progress_preamble, progress_print_interval=progress_print_interval) model_name = self.model_name("pan-class1", i) self.class1_pan_allele_models.append(model) row = pandas.Series(collections.OrderedDict([ ("model_name", model_name), ("allele", "pan-class1"), ("config_json", json.dumps(model.get_config())), ("model", model), ])).to_frame().T self._manifest_df = pandas.concat( [self.manifest_df, row], ignore_index=True) if models_dir_for_save: self.save( models_dir_for_save, model_names_to_write=[model_name]) models.append(model) self.clear_cache() return models
Fit one or more pan-allele predictors using a single neural network architecture. The new predictors are saved in the Class1AffinityPredictor instance and will be used on subsequent calls to `predict`. Parameters ---------- n_models : int Number of neural networks to fit architecture_hyperparameters : dict alleles : list of string Allele names (not sequences) corresponding to each peptide peptides : `EncodableSequences` or list of string affinities : list of float nM affinities inequalities : list of string, each element one of ">", "<", or "=" See Class1NeuralNetwork.fit for details. models_dir_for_save : string, optional If specified, the Class1AffinityPredictor is (incrementally) written to the given models dir after each neural network is fit. verbose : int Keras verbosity progress_preamble : string Optional string of information to include in each progress update progress_print_interval : float How often (in seconds) to print progress. Set to None to disable. Returns ------- list of `Class1NeuralNetwork`