Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def _installed_snpeff_genome(base_name, config):
    """Find the most recent installed genome for snpEff with the given name.
    """
    snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"),
                                      "snpEff.config")
    if os.path.exists(snpeff_config_file):
        data_dir = _find_snpeff_datadir(snpeff_config_file)
        dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
               if os.path.isdir(d)]
    else:
        data_dir = None
        dbs = []
    if len(dbs) == 0:
        raise ValueError("No database found in %s for %s" % (data_dir, base_name))
    else:
        return data_dir, os.path.split(dbs[0])[-1]
0.00545
def ToABMag(self, wave, flux, **kwargs):
    """Convert to ``abmag``.

    .. math::

        \\textnormal{AB}_{\\nu} = -2.5 \\; \\log(h \\lambda \\; \\textnormal{photlam}) - 48.6

    where :math:`h` is as defined in :ref:`pysynphot-constants`.

    Parameters
    ----------
    wave, flux : number or array_like
        Wavelength and flux values to be used for conversion.

    kwargs : dict
        Extra keywords (not used).

    Returns
    -------
    result : number or array_like
        Converted values.

    """
    arg = H * flux * wave
    return -1.085736 * N.log(arg) + ABZERO
0.004539
def calculate_dependencies():
    """Calculate test dependencies

    First do a topological sorting based on the dependencies.
    Then sort the different dependency groups based on priorities.
    """
    order = []
    for group in toposort(dependencies):
        priority_sorted_group = sorted(group, key=lambda x: (priorities[x], x))
        order.extend(priority_sorted_group)
    return order
0.006897
def run(self, dag):
    """
    Run one pass of cx cancellation on the circuit

    Args:
        dag (DAGCircuit): the directed acyclic graph to run on.

    Returns:
        DAGCircuit: Transformed DAG.
    """
    cx_runs = dag.collect_runs(["cx"])
    for cx_run in cx_runs:
        # Partition the cx_run into chunks with equal gate arguments
        partition = []
        chunk = []
        for i in range(len(cx_run) - 1):
            chunk.append(cx_run[i])

            qargs0 = cx_run[i].qargs
            qargs1 = cx_run[i + 1].qargs

            if qargs0 != qargs1:
                partition.append(chunk)
                chunk = []
        chunk.append(cx_run[-1])
        partition.append(chunk)
        # Simplify each chunk in the partition
        for chunk in partition:
            if len(chunk) % 2 == 0:
                for n in chunk:
                    dag.remove_op_node(n)
            else:
                for n in chunk[1:]:
                    dag.remove_op_node(n)
    return dag
0.00177
def get_feed(self, buckets=None, since=None, results=15, start=0): """ Returns feed (news, blogs, reviews, audio, video) for the catalog artists; response depends on requested buckets Args: Kwargs: buckets (list): A list of strings specifying which feed items to retrieve results (int): An integer number of results to return start (int): An integer starting value for the result set Returns: A list of news, blogs, reviews, audio or video document dicts; Example: >>> c <catalog - my_artists> >>> c.get_feed(results=15) {u'date_found': u'2011-02-06T07:50:25', u'date_posted': u'2011-02-06T07:50:23', u'id': u'caec686c0dff361e4c53dceb58fb9d2f', u'name': u'Linkin Park \u2013 \u201cWaiting For The End\u201d + \u201cWhen They Come For Me\u201d 2/5 SNL', u'references': [{u'artist_id': u'ARQUMH41187B9AF699', u'artist_name': u'Linkin Park'}], u'summary': u'<span>Linkin</span> <span>Park</span> performed "Waiting For The End" and "When They Come For Me" on Saturday Night Live. Watch the videos below and pick up their album A Thousand Suns on iTunes, Amazon MP3, CD Social Bookmarking ... ', u'type': u'blogs', u'url': u'http://theaudioperv.com/2011/02/06/linkin-park-waiting-for-the-end-when-they-come-for-me-25-snl/'} >>> """ kwargs = {} kwargs['bucket'] = buckets or [] if since: kwargs['since']=since response = self.get_attribute("feed", results=results, start=start, **kwargs) rval = ResultList(response['feed']) return rval
0.019324
def update(self, request, *args, **kwargs):
    """Update the ``Relation`` object.

    Reject the update if user doesn't have ``EDIT`` permission on
    the collection referenced in the ``Relation``.
    """
    instance = self.get_object()

    if (not request.user.has_perm('edit_collection', instance.collection)
            and not request.user.is_superuser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    return super().update(request, *args, **kwargs)
0.003899
def __disambiguate_with_lexicon(self, docs, lexicon, hiddenWords):
    """ Disambiguates ambiguous morphological analyses based on a lemma lexicon
        - removes the superfluous analyses.
        Relies on the "one sense per text" idea: if a lemma that is still ambiguous
        also occurs elsewhere in the text/corpus and, on the whole, occurs more
        frequently than the alternative analyses, then it is most likely the correct
        lemma/analysis;
    """
    for d in range(len(docs)):
        for w in range(len(docs[d][WORDS])):
            word = docs[d][WORDS][w]
            # Skip the so-called hidden words
            if (d, w) in hiddenWords:
                continue
            # Only consider words whose analyses are still ambiguous
            if len(word[ANALYSIS]) > 1:
                # 1) Find the highest occurrence frequency among the ambiguous lemmas
                highestFreq = 0
                for analysis in word[ANALYSIS]:
                    lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
                    if lemma in lexicon and lexicon[lemma] > highestFreq:
                        highestFreq = lexicon[lemma]
                if highestFreq > 0:
                    # 2) Discard all analyses whose lemma occurrence frequency is
                    #    lower than the highest frequency;
                    toDelete = []
                    for analysis in word[ANALYSIS]:
                        lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
                        freq = lexicon[lemma] if lemma in lexicon else 0
                        if freq < highestFreq:
                            toDelete.append(analysis)
                    for analysis in toDelete:
                        word[ANALYSIS].remove(analysis)
0.005219
def sanitize(name):
    """
    Sanitize the specified ``name`` for use with breathe directives.

    **Parameters**
        ``name`` (:class:`python:str`)
            The name to be sanitized.

    **Return**
        :class:`python:str`
            The input ``name`` sanitized for use with breathe directives
            (primarily ``.. doxygenfunction::``).  Replacements such as
            ``"&lt;" -> "<"`` are performed, as well as the removal of spaces
            (``"< " -> "<"``).  Breathe is particularly sensitive with respect
            to whitespace.
    """
    return name.replace(
        "&lt;", "<"
    ).replace(
        "&gt;", ">"
    ).replace(
        "&amp;", "&"
    ).replace(
        "< ", "<"
    ).replace(
        " >", ">"
    ).replace(
        " &", "&"
    ).replace(
        "& ", "&"
    )
0.004957
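A quick usage sketch of the sanitize helper above; the Doxygen-style input string is a made-up example, and the function body is copied verbatim so the snippet runs on its own.

def sanitize(name):
    # Verbatim (condensed) copy of the sanitize() helper above.
    return name.replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&") \
               .replace("< ", "<").replace(" >", ">").replace(" &", "&").replace("& ", "&")

print(sanitize("std::vector&lt; int &gt;&amp;"))  # std::vector<int>&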
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None): """Compute the joint site frequency spectrum between two populations, scaled such that a constant value is expected across the spectrum for neutral variation, constant population size and unrelated populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1) Array where the (i, j)th element is the scaled frequency of variant sites with i derived alleles in the first population and j derived alleles in the second population. """ # compute site frequency spectrum s = joint_sfs(dac1, dac2, n1=n1, n2=n2) # apply scaling s = scale_joint_sfs(s) return s
0.000962
def formatDateParms(context, date_id):
    """ Obtain and reformat the from and to dates
        into a printable date parameter construct
    """
    from_date = context.REQUEST.get('%s_fromdate' % date_id, None)
    to_date = context.REQUEST.get('%s_todate' % date_id, None)

    date_parms = {}
    if from_date and to_date:
        date_parms = 'from %s to %s' % (from_date, to_date)
    elif from_date:
        date_parms = 'from %s' % (from_date)
    elif to_date:
        date_parms = 'to %s' % (to_date)

    return date_parms
0.001873
def _src_path_stats(self, src_path):
    """
    Return a dict of statistics for the source file at `src_path`.
    """
    # Find violation lines
    violation_lines = self.violation_lines(src_path)
    violations = sorted(self._diff_violations()[src_path].violations)

    # Load source snippets (if the report will display them)
    # If we cannot load the file, then fail gracefully
    if self.INCLUDE_SNIPPETS:
        try:
            snippets = Snippet.load_snippets_html(src_path, violation_lines)
        except IOError:
            snippets = []
    else:
        snippets = []

    return {
        'percent_covered': self.percent_covered(src_path),
        'violation_lines': TemplateReportGenerator.combine_adjacent_lines(violation_lines),
        'violations': violations,
        'snippets_html': snippets
    }
0.004386
def notify(self, event_id):
    """Let the FlowControl system know that there is an event."""
    self._event_buffer.extend([event_id])
    self._event_count += 1
    if self._event_count >= self.threshold:
        logger.debug("Eventcount >= threshold")
        self.make_callback(kind="event")
0.00627
def create_grid_layout(rowNum=8, colNum=12, row_labels=None, col_labels=None, xlim=None, ylim=None, xlabel=None, ylabel=None, row_label_xoffset=None, col_label_yoffset=None, hide_tick_labels=True, hide_tick_lines=False, hspace=0, wspace=0, row_labels_kwargs={}, col_labels_kwargs={}, subplot_kw={}, xscale=None, yscale=None, plotFuncList=None): """ Creates a figure with a 2d matrix of subplots (rows=rowNum, cols=colNum), automatically annotating and setting default plotting settings for the subplots. The function creates a main axes that is invisible but can be used for setting x and y labels. On top of the main axes, it creates the subplots, which are used for plotting. Parameters ---------- rowNum : int Specifies the number of rows colNum : int Specifies the number of columns row_labels : [list of str, None] Used for labeling each row of subplots If set to None, no labels are written. Default is None. col_labels : [list of str, None] Used for labeling each col of subplots. If set to None, no labels are written. Default is None. {_graph_grid_layout} plotFuncList : list of callable functions Each function must accept row, col arguments Each of these functions must know how to plot using the row, col arguments Returns ------- {_graph_grid_layout_returns} How to use ---------- Call this function with the desired parameters. Use the references to the individual subplot axis to plot. Call autoscale() command after plotting on the subplots in order to adjust their limits properly. """ fig = plt.gcf() # get reference to current open figure # Configure main axis appearance ax_main = plt.gca() # Hides the main axis but retains the ability to use xlabel, ylabel for spine in ax_main.spines.values(): spine.set_visible(False) ax_main.patch.set_alpha(0) # ax_main.axison = False # If true, xlabel and ylabel do not work _set_tick_lines_visibility(ax_main, False) _set_tick_labels_visibility(ax_main, False) # Configure subplot appearance subplot_kw.update(xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale) # This could potentially confuse a user plt.subplots_adjust(wspace=wspace, hspace=hspace) _, ax_subplots = plt.subplots(rowNum, colNum, squeeze=False, subplot_kw=subplot_kw, num=fig.number) # configure defaults for appearance of row and col labels row_labels_kwargs.setdefault('horizontalalignment', 'right') row_labels_kwargs.setdefault('verticalalignment', 'center') row_labels_kwargs.setdefault('size', 'x-large') col_labels_kwargs.setdefault('horizontalalignment', 'center') col_labels_kwargs.setdefault('verticalalignment', 'top') col_labels_kwargs.setdefault('size', 'x-large') # Translate using figure coordinates row_label_translation = -0.10 if row_label_xoffset: row_label_translation -= row_label_xoffset col_label_translation = -0.10 if col_label_yoffset: col_label_translation -= col_label_yoffset offset_row_labels = transforms.ScaledTranslation(row_label_translation, 0, fig.dpi_scale_trans) offset_col_labels = transforms.ScaledTranslation(0, col_label_translation, fig.dpi_scale_trans) for (row, col), ax in numpy.ndenumerate(ax_subplots): plt.sca(ax) # Sets the current axis for all plotting operations if row_labels is not None and col == 0: plt.text(0, 0.5, '{0}'.format(row_labels[row]), transform=(ax.transAxes + offset_row_labels), **row_labels_kwargs) # plt.text(0 + row_label_translation, 0.5, '{0}'.format(row_labels[row]), transform=(ax.transAxes), **row_labels_kwargs) if col_labels is not None and row == (rowNum - 1): plt.text(0.5, 0, '{0}'.format(col_labels[col]), transform=(ax.transAxes + 
offset_col_labels), **col_labels_kwargs) # plt.text(0.5, 0+ col_label_translation, '{0}'.format(col_labels[col]), transform=(ax.transAxes), **col_labels_kwargs) if row == 0 and col == colNum - 1: visible = [False, False] if xlabel: ax.xaxis.tick_top() ax.xaxis.set_label_position('top') ax.set_xlabel(xlabel, fontsize='large', labelpad=5) visible[0] = True if ylabel: ax.yaxis.tick_right() ax.yaxis.set_label_position('right') ax.set_ylabel(ylabel, fontsize='large', labelpad=5) visible[1] = True _set_tick_lines_visibility(ax, visible) _set_tick_labels_visibility(ax, visible) else: if hide_tick_lines: _set_tick_lines_visibility(ax, False) if hide_tick_labels: _set_tick_labels_visibility(ax, False) if plotFuncList is not None: for plot_function in plotFuncList: plot_function(row, col) # Make sure this is right before the return statement. # It is here to comply with a user's expectation to calls that make reference to gca(). plt.sca(ax_main) return (ax_main, ax_subplots)
0.002158
def build_request_signature(self, saml_request, relay_state, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
    """
    Builds the Signature of the SAML Request.

    :param saml_request: The SAML Request
    :type saml_request: string

    :param relay_state: The target URL the user should be redirected to
    :type relay_state: string

    :param sign_algorithm: Signature algorithm method
    :type sign_algorithm: string
    """
    return self.__build_signature(saml_request, relay_state, 'SAMLRequest', sign_algorithm)
0.007018
def to_neo4j(graph, neo_connection, use_tqdm=False): """Upload a BEL graph to a Neo4j graph database using :mod:`py2neo`. :param pybel.BELGraph graph: A BEL Graph :param neo_connection: A :mod:`py2neo` connection object. Refer to the `py2neo documentation <http://py2neo.org/v3/database.html#the-graph>`_ for how to build this object. :type neo_connection: str or py2neo.Graph Example Usage: >>> import py2neo >>> import pybel >>> from pybel.examples import sialic_acid_graph >>> neo_graph = py2neo.Graph("http://localhost:7474/db/data/") # use your own connection settings >>> pybel.to_neo4j(sialic_acid_graph, neo_graph) """ import py2neo if isinstance(neo_connection, str): neo_connection = py2neo.Graph(neo_connection) tx = neo_connection.begin() node_map = {} nodes = list(graph) if use_tqdm: nodes = tqdm(nodes, desc='nodes') for node in nodes: if NAMESPACE not in node or VARIANTS in node or MEMBERS in node or FUSION in node: attrs = {'name': node.as_bel()} else: attrs = {'namespace': node.namespace} if node.name and node.identifier: attrs['name'] = node.name attrs['identifier'] = node.identifier elif node.identifier and not node.name: attrs['name'] = node.identifier elif node.name and not node.identifier: attrs['name'] = node.name node_map[node] = py2neo.Node(node.function, **attrs) tx.create(node_map[node]) edges = graph.edges(keys=True, data=True) if use_tqdm: edges = tqdm(edges, desc='edges') for u, v, key, node in edges: rel_type = node[RELATION] d = node.copy() del d[RELATION] attrs = {} annotations = d.pop(ANNOTATIONS, None) if annotations: for annotation, values in annotations.items(): attrs[annotation] = list(values) citation = d.pop(CITATION, None) if citation: attrs[CITATION] = '{}:{}'.format(citation[CITATION_TYPE], citation[CITATION_REFERENCE]) if EVIDENCE in d: attrs[EVIDENCE] = d[EVIDENCE] for side in (SUBJECT, OBJECT): side_data = d.get(side) if side_data: attrs.update(flatten_dict(side_data, parent_key=side)) rel = py2neo.Relationship(node_map[u], rel_type, node_map[v], key=key, **attrs) tx.create(rel) tx.commit()
0.002359
def get_design_run_results(self, data_view_id, run_uuid):
    """
    Retrieves the results of an existing design run

    :param data_view_id: The ID number of the data view to which the
        run belongs, as a string
    :type data_view_id: str
    :param run_uuid: The UUID of the design run to retrieve results from
    :type run_uuid: str
    :return: A :class:`DesignResults` object
    """
    url = routes.get_data_view_design_results(data_view_id, run_uuid)
    response = self._get(url).json()
    result = response["data"]
    return DesignResults(
        best_materials=result.get("best_material_results"),
        next_experiments=result.get("next_experiment_results")
    )
0.002642
def __build_sign_query(saml_data, relay_state, algorithm, saml_type, lowercase_urlencoding=False):
    """
    Build sign query

    :param saml_data: The Request data
    :type saml_data: str

    :param relay_state: The Relay State
    :type relay_state: str

    :param algorithm: The Signature Algorithm
    :type algorithm: str

    :param saml_type: The type of the message: SAMLRequest | SAMLResponse
    :type saml_type: string

    :param lowercase_urlencoding: Whether to use lowercase URL encoding
    :type lowercase_urlencoding: boolean
    """
    sign_data = ['%s=%s' % (saml_type, OneLogin_Saml2_Utils.escape_url(saml_data, lowercase_urlencoding))]
    if relay_state is not None:
        sign_data.append('RelayState=%s' % OneLogin_Saml2_Utils.escape_url(relay_state, lowercase_urlencoding))
    sign_data.append('SigAlg=%s' % OneLogin_Saml2_Utils.escape_url(algorithm, lowercase_urlencoding))
    return '&'.join(sign_data)
0.005911
def read_table_pattern(self, header_pattern, row_pattern, footer_pattern, postprocess=str, attribute_name=None, last_one_only=True): """ Parse table-like data. A table composes of three parts: header, main body, footer. All the data matches "row pattern" in the main body will be returned. Args: header_pattern (str): The regular expression pattern matches the table header. This pattern should match all the text immediately before the main body of the table. For multiple sections table match the text until the section of interest. MULTILINE and DOTALL options are enforced, as a result, the "." meta-character will also match "\n" in this section. row_pattern (str): The regular expression matches a single line in the table. Capture interested field using regular expression groups. footer_pattern (str): The regular expression matches the end of the table. E.g. a long dash line. postprocess (callable): A post processing function to convert all matches. Defaults to str, i.e., no change. attribute_name (str): Name of this table. If present the parsed data will be attached to "data. e.g. self.data["efg"] = [...] last_one_only (bool): All the tables will be parsed, if this option is set to True, only the last table will be returned. The enclosing list will be removed. i.e. Only a single table will be returned. Default to be True. Returns: List of tables. 1) A table is a list of rows. 2) A row if either a list of attribute values in case the the capturing group is defined without name in row_pattern, or a dict in case that named capturing groups are defined by row_pattern. """ with zopen(self.filename, 'rt') as f: text = f.read() table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + \ row_pattern + r")+)\s+" + footer_pattern table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL) rp = re.compile(row_pattern) tables = [] for mt in table_pattern.finditer(text): table_body_text = mt.group("table_body") table_contents = [] for line in table_body_text.split("\n"): ml = rp.search(line) d = ml.groupdict() if len(d) > 0: processed_line = {k: postprocess(v) for k, v in d.items()} else: processed_line = [postprocess(v) for v in ml.groups()] table_contents.append(processed_line) tables.append(table_contents) if last_one_only: retained_data = tables[-1] else: retained_data = tables if attribute_name is not None: self.data[attribute_name] = retained_data return retained_data
0.003119
def load_file(self, path=None, just_settings=False): """ Loads a data file. After the file is loaded, calls self.after_load_file(self), which you can overwrite if you like! just_settings=True will only load the configuration of the controls, and will not plot anything or run after_load_file """ # if it's just the settings file, make a new databox if just_settings: d = _d.databox() header_only = True # otherwise use the internal databox else: d = self header_only = False # import the settings if they exist in the header if not None == _d.databox.load_file(d, path, filters=self.file_type, header_only=header_only, quiet=just_settings): # loop over the autosettings and update the gui for x in self._autosettings_controls: self._load_gui_setting(x,d) # always sync the internal data self._synchronize_controls() # plot the data if this isn't just a settings load if not just_settings: self.plot() self.after_load_file()
0.006867
def list_permissions(self, group_name=None, resource=None):
    """
    List permission sets, optionally filtering by group and/or resource.

    Args:
        group_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data
            model object to operate on.

    Returns:
        (list): List of permissions.

    Raises:
        requests.HTTPError on failure.
    """
    self.project_service.set_auth(self._token_project)
    return self.project_service.list_permissions(group_name, resource)
0.003361
def to_array(self):
    """
    Serializes this VideoNote to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(VideoNote, self).to_array()
    array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
    array['length'] = int(self.length)  # type int
    array['duration'] = int(self.duration)  # type int
    if self.thumb is not None:
        array['thumb'] = self.thumb.to_array()  # type PhotoSize
    if self.file_size is not None:
        array['file_size'] = int(self.file_size)  # type int
    return array
0.003082
def dynamic_content_item_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/dynamic_content#show-item"
    api_path = "/api/v2/dynamic_content/items/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
0.010601
def pipe(*args):
    """
    Takes as parameters several dicts, each with the same
    parameters passed to popen.

    Runs the various processes in a pipeline, connecting
    the stdout of every process except the last with the stdin
    of the next process.

    Adapted from http://www.enricozini.org/2009/debian/python-pipes/
    """
    if len(args) < 2:
        raise ValueError("pipe needs at least 2 processes")

    # Set stdout=PIPE in every subprocess except the last
    for i in args[:-1]:
        i["stdout"] = subprocess.PIPE

    # Runs all subprocesses connecting stdins and stdouts to create the
    # pipeline. Closes stdouts to avoid deadlocks.
    popens = [popen_sp(**args[0])]
    for i in range(1, len(args)):
        args[i]["stdin"] = popens[i - 1].stdout
        popens.append(popen_sp(**args[i]))
        popens[i - 1].stdout.close()

    # Returns the array of subprocesses just created
    return popens
0.00107
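A small, hedged usage sketch of the pipe helper above. The popen_sp helper it calls is not shown in that row; here it is assumed to be a thin wrapper around subprocess.Popen, which may differ from the original. The function body is otherwise copied so the snippet runs on its own (on a Unix-like system).

import subprocess

def popen_sp(**kwargs):
    # Assumed stand-in for the popen_sp helper used by pipe() above.
    return subprocess.Popen(**kwargs)

def pipe(*args):
    # Condensed copy of the pipe() function above.
    if len(args) < 2:
        raise ValueError("pipe needs at least 2 processes")
    for i in args[:-1]:
        i["stdout"] = subprocess.PIPE
    popens = [popen_sp(**args[0])]
    for i in range(1, len(args)):
        args[i]["stdin"] = popens[i - 1].stdout
        popens.append(popen_sp(**args[i]))
        popens[i - 1].stdout.close()
    return popens

# Equivalent of the shell pipeline `echo hello | tr a-z A-Z`:
procs = pipe({"args": ["echo", "hello"]},
             {"args": ["tr", "a-z", "A-Z"], "stdout": subprocess.PIPE})
print(procs[-1].communicate()[0])  # b'HELLO\n'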
def make_flask_url_dispatcher(): """Return an URL dispatcher based on the current :ref:`request context <flask:request-context>`. You generally don’t need to call this directly. The context is used when the dispatcher is first created but not afterwards. It is not required after this function has returned. Dispatch to the context’s app URLs below the context’s root URL. If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also accept URLs that have that domain name or a subdomain thereof. """ def parse_netloc(netloc): """Return (hostname, port).""" parsed = urlparse.urlsplit('http://' + netloc) return parsed.hostname, parsed.port app = current_app._get_current_object() root_path = request.script_root server_name = app.config.get('SERVER_NAME') if server_name: hostname, port = parse_netloc(server_name) def accept(url): """Accept any URL scheme; also accept subdomains.""" return url.hostname is not None and ( url.hostname == hostname or url.hostname.endswith('.' + hostname)) else: scheme = request.scheme hostname, port = parse_netloc(request.host) if (scheme, port) in DEFAULT_PORTS: port = None def accept(url): """Do not accept subdomains.""" return (url.scheme, url.hostname) == (scheme, hostname) def dispatch(url_string): if isinstance(url_string, bytes): url_string = url_string.decode('utf8') url = urlparse.urlsplit(url_string) url_port = url.port if (url.scheme, url_port) in DEFAULT_PORTS: url_port = None if accept(url) and url_port == port and url.path.startswith(root_path): netloc = url.netloc if url.port and not url_port: netloc = netloc.rsplit(':', 1)[0] # remove default port base_url = '%s://%s%s' % (url.scheme, netloc, root_path) path = url.path[len(root_path):] if url.query: path += '?' + url.query # Ignore url.fragment return app, base_url, path return dispatch
0.000448
def get_version():
    """
    Get version of pydf and wkhtmltopdf binary

    :return: version string
    """
    try:
        wk_version = _string_execute('-V')
    except Exception as e:
        # we catch all errors here to make sure we get a version no matter what
        wk_version = '%s: %s' % (e.__class__.__name__, e)
    return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)
0.002463
def SyncManagedObject(self, difference, deleteNotPresent=False, noVersionFilter=False, dumpXml=None): """ Syncs Managed Object. Method takes the difference object (output of CompareManagedObject) and applies the differences on reference Managed Object. - difference specifies the Difference object (output of ComparesManagedObject) which has differences of the properties of two or more Managed Objects. - deleteNotPresent, if set as True, any missing MOs in reference Managed Object set will be deleted. - noVersionFilter, If set as True, minversion for Mos or properties to be added in reference Managed Object will not be checked. """ from UcsBase import WriteUcsWarning, WriteObject, UcsUtils, ManagedObject, AbstractFilter, GenericMO, \ SyncAction, UcsException, UcsValidationException from Ucs import ClassFactory, Pair, ConfigMap if ((difference == None) or (isinstance(difference, list) and (len(difference) == 0))): raise UcsValidationException("difference object parameter is not provided.") # raise Exception("[Error]: SyncManagedObject: Difference Object can not be Null") configMap = ConfigMap() configDoc = xml.dom.minidom.parse(UcsUtils.GetSyncMoConfigFilePath()) moConfigMap = {} moConfigMap = UcsUtils.GetSyncMoConfig(configDoc) for moDiff in difference: mo = moDiff.InputObject classId = mo.classId gMoDiff = None metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(classId) if metaClassId == None: # TODO: Add Warning/Error messages in Logger. WriteUcsWarning("Ignoring [%s]. Unknown ClassId [%s]." % (moDiff.InputObject.getattr("Dn"), classId)) continue moConfigs = [] moConfig = None if UcsUtils.WordL(classId) in moConfigMap: moConfigs = moConfigMap[UcsUtils.WordL(classId)] if moConfigs: for conf in moConfigs: if (self._version.CompareTo(conf.getActionVersion()) == 0): moConfig = conf # Removes Difference Object. if ((moDiff.SideIndicator == UcsMoDiff.REMOVE) and (deleteNotPresent)): if ( moConfig is not None and moConfig.getAction() == SyncAction.ignore and moConfig.getStatus() is not None and moConfig.getStatus().contains( Status.DELETED)): if (moConfig.getIgnoreReason() is None or len(moConfig.getIgnoreReason()) == 0): continue ir = moConfig.getIgnoreReason() try: for prop in ir: propValue = ir[prop] if mo.getattr(prop): attrValue = mo.getattr(prop) else: attrValue = None if (not propValue or not attrValue or propValue != attrValue): ignore = False break except Exception, err: ignore = False if ignore: continue gMoDiff = ManagedObject(classId) gMoDiff.setattr("Dn", mo.getattr("Dn")) gMoDiff.setattr("Status", Status().DELETED) gMoDiff = GenericMO(gMoDiff, WriteXmlOption.AllConfig) # gMoDiff should be generic object if moDiff.SideIndicator == UcsMoDiff.ADD_MODIFY: gMoDiff = ManagedObject(classId) addExists = False moMeta = UcsUtils.IsPropertyInMetaIgnoreCase(classId, "Meta") if ((moMeta != None) and ('Add' in moMeta.verbs)): addExists = True # Add Difference Object. 
if ((addExists) and ((moDiff.DiffProperty == None) or (len(moDiff.DiffProperty) == 0))): if ( moConfig is not None and moConfig.getAction() == SyncAction.ignore and moConfig.getStatus() is not None and moConfig.getStatus().contains( Status.CREATED)): if (moConfig.getIgnoreReason() is None or len(moConfig.getIgnoreReason()) == 0): continue ir = moConfig.getIgnoreReason() try: for prop in ir: propValue = ir[prop] if mo.getattr(prop): attrValue = mo.getattr(prop) else: attrValue = None if (not propValue or not attrValue or propValue != attrValue): ignore = False break except Exception, err: ignore = False if ignore: continue for prop in mo.__dict__: propMoMeta = UcsUtils.IsPropertyInMetaIgnoreCase(classId, prop) if (propMoMeta != None): if ( prop.lower() == "rn" or prop.lower() == "dn" or propMoMeta.access == UcsPropertyMeta.ReadOnly): continue exclude = False if (moConfig is not None and moConfig.getExcludeList() is not None): for exProp in moConfig.getExcludeList(): if prop.lower() == exProp.lower(): exclude = True if not exclude: gMoDiff.setattr(propMoMeta.name, mo.getattr(prop)) gMoDiff.setattr("Dn", mo.getattr("Dn")) if (moConfig is not None and moConfig.getAction() == SyncAction.statusChange): gMoDiff.setattr("Status", moConfig.getStatus()) else: gMoDiff.setattr("Status", Status().CREATED) gMoDiff = GenericMO(gMoDiff, WriteXmlOption.AllConfig) # gMoDiff should be generic object if (not noVersionFilter): hReference = mo.GetHandle() if ((hReference != None) and (hReference._version != None)): gMoDiff.FilterVersion(hReference._version) # Modify the Managed Object else: if ( moConfig is not None and moConfig.getAction() == SyncAction.ignore and moConfig.getStatus() is not None and moConfig.getStatus().contains( Status.DELETED)): if (moConfig.getIgnoreReason() is None or len(moConfig.getIgnoreReason()) == 0): continue ir = moConfig.getIgnoreReason() try: for prop in ir: propValue = ir[prop] if mo.getattr(prop): attrValue = mo.getattr(prop) else: attrValue = None if (not propValue or not attrValue or propValue != attrValue): ignore = False break except Exception, err: ignore = False if ignore: continue if ((moDiff.DiffProperty == None) or (len(moDiff.DiffProperty) == 0)): # TODO: Add Warning/Error messages in Logger. WriteUcsWarning('Add not supported for classId ' + classId + '. Reverting to modify.') continue finalDiffPropStr = None finalDiffProps = moDiff.DiffProperty gMoDiff = ManagedObject(classId) for diffprop in finalDiffProps: exclude = False if (moConfig is not None and moConfig.getExcludeList() is not None): for exProp in moConfig.getExcludeList(): if diffprop.lower() == exProp.lower(): exclude = True if not exclude: finalDiffPropStr = finalDiffPropStr + "," for prop in finalDiffProps: propMoMeta = UcsUtils.IsPropertyInMetaIgnoreCase(classId, prop) if (propMoMeta != None): if ( prop.lower() == "rn" or prop.lower() == "dn" or propMoMeta.access == UcsPropertyMeta.ReadOnly): continue exclude = False if (moConfig is not None and moConfig.getExcludeList() is not None): for exProp in moConfig.getExcludeList(): if diffprop.lower() == exProp.lower(): exclude = True if not exclude: gMoDiff.setattr(propMoMeta.name, mo.getattr(prop)) gMoDiff.setattr("Dn", mo.getattr("Dn")) gMoDiff.setattr("Status", Status().MODIFIED) gMoDiff = GenericMO(gMoDiff, WriteXmlOption.AllConfig) # gMoDiff should be generic object to apply FilterVersion on it. # TODO: NoversionFilter functionality discussion. 
if ((gMoDiff != None) and (not noVersionFilter)): gMoMeta = UcsUtils.GetUcsPropertyMeta(gMoDiff.classId, "Meta") if ((gMoMeta != None) and (self._version != None)): if self._version < gMoMeta.version: # TODO: Add Warning/Error messages in Logger. WriteUcsWarning( 'Ignoring unsupported classId %s for Dn %s.' % (gMoDiff.classId, gMoDiff.getattr("Dn"))) gMoDiff = None if ((gMoDiff != None) and (self._version != None)): gMoDiff.FilterVersion(self._version) if (gMoDiff != None): if gMoDiff.__dict__.has_key("_excludePropList"): for prop in gMoDiff.__dict__["_excludePropList"]: if prop == "XtraProperty": gMoDiff.__dict__[prop] = {} continue gMoDiff.__dict__[prop] = None if (gMoDiff != None): pair = Pair() pair.setattr("Key", gMoDiff.getattr("Dn")) pair.AddChild(gMoDiff) configMap.AddChild(pair) if configMap.GetChildCount() == 0: return None ccm = self.ConfigConfMos(configMap, False, dumpXml) if ccm.errorCode == 0: moList = [] for child in ccm.OutConfigs.GetChild(): if (isinstance(child, Pair) == True): for mo in child.GetChild(): moList.append(mo) elif (isinstance(child, ManagedObject) == True): moList.append(child) # WriteObject(moList) return moList else: raise UcsException(ccm.errorCode, ccm.errorDescr)
0.029048
def zero_disk(self, disk_xml=None):
    """ Collect and publish metrics for disks that have not been zeroed """
    troubled_disks = 0
    for filer_disk in disk_xml:
        raid_state = filer_disk.find('raid-state').text

        if not raid_state == 'spare':
            continue

        is_zeroed = filer_disk.find('is-zeroed').text
        if is_zeroed == 'false':
            troubled_disks += 1

    self.push('not_zeroed', 'disk', troubled_disks)
0.004132
def set_cursor_enter_callback(window, cbfun): """ Sets the cursor enter/exit callback. Wrapper for: GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun cbfun); """ window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value if window_addr in _cursor_enter_callback_repository: previous_callback = _cursor_enter_callback_repository[window_addr] else: previous_callback = None if cbfun is None: cbfun = 0 c_cbfun = _GLFWcursorenterfun(cbfun) _cursor_enter_callback_repository[window_addr] = (cbfun, c_cbfun) cbfun = c_cbfun _glfw.glfwSetCursorEnterCallback(window, cbfun) if previous_callback is not None and previous_callback[0] != 0: return previous_callback[0]
0.002345
def approxMeanOneLognormal(N, sigma=1.0, **kwargs):
    '''
    Calculate a discrete approximation to a mean one lognormal distribution.
    Based on function approxLognormal; see that function's documentation for
    further notes.

    Parameters
    ----------
    N : int
        Size of discrete space vector to be returned.
    sigma : float
        standard deviation associated with underlying normal probability distribution.

    Returns
    -------
    X : np.array
        Discrete points for discrete probability mass function.
    pmf : np.array
        Probability associated with each point in X.

    Written by Nathan M. Palmer
    Based on Matlab function "setup_shocks.m," from Chris Carroll's
    [Solution Methods for Microeconomic Dynamic Optimization Problems]
    (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 01 May 2015
    '''
    mu_adj = -0.5 * sigma**2
    pmf, X = approxLognormal(N=N, mu=mu_adj, sigma=sigma, **kwargs)
    return [pmf, X]
0.004921
def prefix(self, sign: Optional[PrefixSign] = None, symbol: bool = False) -> str:
    """Get a random prefix for the International System of Units.

    :param sign: Sign of number.
    :param symbol: Return symbol of prefix.
    :return: Prefix for SI.
    :raises NonEnumerableError: if sign is not supported.

    :Example:
        mega
    """
    prefixes = SI_PREFIXES_SYM if symbol else SI_PREFIXES
    key = self._validate_enum(item=sign, enum=PrefixSign)
    return self.random.choice(prefixes[key])
0.005155
def _create_snapshot(volume):
    """ Create a new snapshot

    :type volume: boto.ec2.volume.Volume
    :param volume: Volume to snapshot
    :returns: boto.ec2.snapshot.Snapshot -- The new snapshot
    """
    logger.info('Creating new snapshot for {}'.format(volume.id))
    snapshot = volume.create_snapshot(
        description="Automatic snapshot by Automated EBS Snapshots")
    logger.info('Created snapshot {} for volume {}'.format(
        snapshot.id, volume.id))
    return snapshot
0.002016
def has_permission(self, request, view):
    """
    If the JWT has a user filter, verify that the filtered
    user value matches the user in the URL.
    """
    user_filter = self._get_user_filter(request)
    if not user_filter:
        # no user filters are present in the token to limit access
        return True
    username_param = get_username_param(request)
    allowed = user_filter == username_param
    if not allowed:
        log.warning(
            u"Permission JwtHasUserFilterForRequestedUser: user_filter %s doesn't match username %s.",
            user_filter,
            username_param,
        )
    return allowed
0.004243
def add_message_for(users, level, message_text, extra_tags='', date=None, url=None, fail_silently=False):
    """
    Send a message to a list of users without passing through `django.contrib.messages`

    :param users: an iterable containing the recipients of the messages
    :param level: message level
    :param message_text: the string containing the message
    :param extra_tags: like the Django api, a string containing extra tags for the message
    :param date: a date, different than the default timezone.now
    :param url: an optional url
    :param fail_silently: not used at the moment
    """
    BackendClass = stored_messages_settings.STORAGE_BACKEND
    backend = BackendClass()
    m = backend.create_message(level, message_text, extra_tags, date, url)
    backend.archive_store(users, m)
    backend.inbox_store(users, m)
0.004739
def save_metadata(hparams): """Saves FLAGS and hparams to output_dir.""" output_dir = os.path.expanduser(FLAGS.output_dir) if not tf.gfile.Exists(output_dir): tf.gfile.MakeDirs(output_dir) # Save FLAGS in txt file if hasattr(FLAGS, "flags_into_string"): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] flags_str = "\n".join( ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) t2t_flags_str = None flags_txt = os.path.join(output_dir, "flags.txt") with tf.gfile.Open(flags_txt, "w") as f: f.write(flags_str) if t2t_flags_str: t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") with tf.gfile.Open(t2t_flags_txt, "w") as f: f.write(t2t_flags_str) # Save hparams as hparams.json new_hparams = hparams_lib.copy_hparams(hparams) # Modality class is not JSON serializable so remove. new_hparams.del_hparam("modality") hparams_fname = os.path.join(output_dir, "hparams.json") with tf.gfile.Open(hparams_fname, "w") as f: f.write(new_hparams.to_json(indent=0, sort_keys=True))
0.013481
def main(): """ NAME lnp_magic.py DESCRIPTION makes equal area projections site by site from specimen formatted file with Fisher confidence ellipse using McFadden and McElhinny (1988) technique for combining lines and planes SYNTAX lnp_magic [command line options] INPUT takes magic formatted specimens file OUPUT prints site_name n_lines n_planes K alpha95 dec inc R OPTIONS -h prints help message and quits -f FILE: specify input file, default is 'specimens.txt', ('pmag_specimens.txt' for legacy data model 2) -fsa FILE: specify samples file, required to plot by site for data model 3 (otherwise will plot by sample) default is 'samples.txt' -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is specimen -fmt [svg,png,jpg] format for plots, default is svg -sav save plots and quit -P: do not plot -F FILE, specify output file of dec, inc, alpha95 data for plotting with plotdi_a and plotdi_e -exc use criteria in criteria table # NOT IMPLEMENTED -DM NUMBER MagIC data model (2 or 3, default 3) """ if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", ".") data_model = int(float(pmag.get_named_arg("-DM", 3))) fmt = pmag.get_named_arg("-fmt", 'svg') if data_model == 2: in_file = pmag.get_named_arg('-f', 'pmag_specimens.txt') crit_file = "pmag_criteria.txt" else: in_file = pmag.get_named_arg('-f', 'specimens.txt') samp_file = pmag.get_named_arg('-fsa', 'samples.txt') crit_file = "criteria.txt" in_file = pmag.resolve_file_name(in_file, dir_path) dir_path = os.path.split(in_file)[0] if data_model == 3: samp_file = pmag.resolve_file_name(samp_file, dir_path) if '-crd' in sys.argv: ind = sys.argv.index("-crd") crd = sys.argv[ind+1] if crd == 's': coord = "-1" if crd == 'g': coord = "0" if crd == 't': coord = "100" else: coord = "-1" out_file = pmag.get_named_arg('-F', '') if out_file: out = open(dir_path+'/'+out_file, 'w') if '-P' in sys.argv: make_plots = 0 # do not plot else: make_plots = 1 # do plot if '-sav' in sys.argv: plot = 1 # save plots and quit else: plot = 0 # show plots intereactively (if make_plots) # if data_model == 2: Specs, file_type = pmag.magic_read(in_file) if 'specimens' not in file_type: print('Error opening ', in_file, file_type) sys.exit() else: fnames = {'specimens': in_file, 'samples': samp_file} con = cb.Contribution(dir_path, read_tables=['samples', 'specimens'], custom_filenames=fnames) con.propagate_name_down('site', 'specimens') if 'site' in con.tables['specimens'].df.columns: site_col = 'site' else: site_col = 'sample' tilt_corr_col = "dir_tilt_correction" mad_col = "dir_mad_free" alpha95_col = "dir_alpha95" site_alpha95_col = "dir_alpha95" dec_col = "dir_dec" inc_col = "dir_inc" num_meas_col = "dir_n_measurements" k_col = "dir_k" cols = [site_col, tilt_corr_col, mad_col, alpha95_col, dec_col, inc_col] con.tables['specimens'].front_and_backfill(cols) con.tables['specimens'].df = con.tables['specimens'].df.where(con.tables['specimens'].df.notnull(), "") Specs = con.tables['specimens'].convert_to_pmag_data_list() ## using criteria file was never fully implemented #if '-exc' in sys.argv: # Crits, file_type = pmag.magic_read(pmag.resolve_file_name(crit_file, dir_path)) # for crit in Crits: # if mad_col in crit: # M = float(crit['specimen_mad']) # if num_meas_col in crit: # N = float(crit['specimen_n']) # if site_alpha95_col in crit and 'site' in crit: # acutoff = float(crit['site_alpha95']) # if k_col in crit: # kcutoff = float(crit['site_k']) #else: # Crits = "" sitelist = [] # initialize some variables 
FIG = {} # plot dictionary FIG['eqarea'] = 1 # eqarea is figure 1 M, N, acutoff, kcutoff = 180., 1, 180., 0. if data_model == 2: site_col = 'er_site_name' tilt_corr_col = "specimen_tilt_correction" mad_col = "specimen_mad" alpha95_col = 'specimen_alpha95' dec_col = "specimen_dec" inc_col = "specimen_inc" num_meas_col = "specimen_n" site_alpha95_col = "site_alpha95" else: # data model 3 pass for rec in Specs: if rec[site_col] not in sitelist: sitelist.append(rec[site_col]) sitelist.sort() if make_plots == 1: EQ = {} EQ['eqarea'] = 1 for site in sitelist: pmagplotlib.plot_init(EQ['eqarea'], 4, 4) print(site) data = [] for spec in Specs: if tilt_corr_col not in list(spec.keys()): spec[tilt_corr_col] = '-1' # assume unoriented if spec[site_col] == site: if mad_col not in list(spec.keys()) or spec[mad_col] == "": if alpha95_col in list(spec.keys()) and spec[alpha95_col] != "": spec[mad_col] = spec[alpha95_col] else: spec[mad_col] = '180' if not spec[num_meas_col]: continue if (float(spec[tilt_corr_col]) == float(coord)) and (float(spec[mad_col]) <= M) and (float(spec[num_meas_col]) >= N): rec = {} for key in list(spec.keys()): rec[key] = spec[key] rec["dec"] = float(spec[dec_col]) rec["inc"] = float(spec[inc_col]) rec["tilt_correction"] = spec[tilt_corr_col] data.append(rec) if len(data) > 2: fpars = pmag.dolnp(data, 'specimen_direction_type') print("Site lines planes kappa a95 dec inc") print(site, fpars["n_lines"], fpars["n_planes"], fpars["K"], fpars["alpha95"], fpars["dec"], fpars["inc"], fpars["R"]) if out_file != "": if float(fpars["alpha95"]) <= acutoff and float(fpars["K"]) >= kcutoff: out.write('%s %s %s\n' % (fpars["dec"], fpars['inc'], fpars['alpha95'])) print('% tilt correction: ', coord) if make_plots == 1: files = {} files['eqarea'] = site+'_'+crd+'_'+'eqarea'+'.'+fmt pmagplotlib.plot_lnp(EQ['eqarea'], site, data, fpars, 'specimen_direction_type') if plot == 0: pmagplotlib.draw_figs(EQ) ans = input( "s[a]ve plot, [q]uit, <return> to continue:\n ") if ans == "a": pmagplotlib.save_plots(EQ, files) if ans == "q": sys.exit() else: pmagplotlib.save_plots(EQ, files) else: print('skipping site - not enough data with specified coordinate system')
0.002762
def cmd_host(verbose): """Collect information about the host where habu is running. Example: \b $ habu.host { "kernel": [ "Linux", "demo123", "5.0.6-200.fc29.x86_64", "#1 SMP Wed Apr 3 15:09:51 UTC 2019", "x86_64", "x86_64" ], "distribution": [ "Fedora", "29", "Twenty Nine" ], "libc": [ "glibc", "2.2.5" ], "arch": "x86_64", "python_version": "3.7.3", "os_name": "Linux", "cpu": "x86_64", "static_hostname": "demo123", "fqdn": "demo123.lab.sierra" } """ if verbose: logging.basicConfig(level=logging.INFO, format='%(message)s') print("Gather information about the host...", file=sys.stderr) result = gather_details() if result: print(json.dumps(result, indent=4)) else: print("[X] Unable to gather information") return True
0.000964
def check_clang_apply_replacements_binary(args):
    """Checks if invoking supplied clang-apply-replacements binary works."""
    try:
        subprocess.check_call([args.clang_apply_replacements_binary, '--version'])
    except:
        print('Unable to run clang-apply-replacements. Is clang-apply-replacements '
              'binary correctly specified?', file=sys.stderr)
        traceback.print_exc()
        sys.exit(1)
0.015
def trigger_switch_all_to(request, switch):
    """
    switch the status of all the "my" triggers then go back home

    :param request: request object
    :param switch: the switch value
    :type request: HttpRequest object
    :type switch: string off or on
    """
    now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ss')
    status = True
    if switch == 'off':
        status = False
    if status:
        TriggerService.objects.filter(user=request.user).update(status=status, date_triggered=now)
    else:
        TriggerService.objects.filter(user=request.user).update(status=status)

    return HttpResponseRedirect(reverse('base'))
0.002924
def likelihood_data_given_model(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=False): """ computes the likelihood of the data given a model This is specified with the non-linear parameters and a linear inversion and prior marginalisation. :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return: log likelihood (natural logarithm) (sum of the log likelihoods of the individual images) """ # generate image logL = 0 for i in range(self._num_bands): if self._compute_bool[i] is True: logL += self._imageModel_list[i].likelihood_data_given_model(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, source_marg=source_marg) return logL
0.008073
def read_column(self, col, **keys): """ Read the specified column Alternatively, you can use slice notation fits=fitsio.FITS(filename) fits[ext][colname][:] fits[ext][colname][2:5] fits[ext][colname][200:235:2] fits[ext][colname][rows] Note, if reading multiple columns, it is more efficient to use read(columns=) or slice notation with a list of column names. parameters ---------- col: string/int, required The column name or number. rows: optional An optional set of row numbers to read. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. """ res = self.read_columns([col], **keys) colname = res.dtype.names[0] data = res[colname] self._maybe_trim_strings(data, **keys) return data
0.001938
def remove_positive_resample(X, y, model_generator, method_name, num_fcounts=11):
    """ Remove Positive (resample)

    xlabel = "Max fraction of features removed"
    ylabel = "Negative mean model output"
    transform = "negate"
    sort_order = 13
    """
    return __run_measure(measures.remove_resample, X, y, model_generator, method_name, 1, num_fcounts, __mean_pred)
0.008
def _set_config_keystone(self, username, password):
    """ Set config to Keystone """
    self._keystone_auth = KeystoneAuth(
        settings.KEYSTONE_AUTH_URL,
        settings.KEYSTONE_PROJECT_NAME,
        username,
        password,
        settings.KEYSTONE_USER_DOMAIN_NAME,
        settings.KEYSTONE_PROJECT_DOMAIN_NAME,
        settings.KEYSTONE_TIMEOUT)
0.011331
def _server_response_handler(self, response: Dict[str, Any]):
    """Handle status codes in the 100-199 range, acting on the different server responses.

    Parameters:
        (response): - the response data as a python dict

    Return:
        (bool): - strictly speaking, True is returned whenever no error occurred
    """
    code = response.get("CODE")
    if code == 100:
        if self.debug:
            print("auth succeed")
        self._login_fut.set_result(response)
    if code == 101:
        if self.debug:
            print('pong')
    return True
0.003937
def _extract_hunt_results(self, output_file_path): """Open a hunt output archive and extract files. Args: output_file_path: The path where the hunt archive is downloaded to. Returns: list: tuples containing: str: The name of the client from where the files were downloaded. str: The directory where the files were downloaded to. """ # Extract items from archive by host for processing collection_paths = [] client_ids = set() client_id_to_fqdn = {} hunt_dir = None try: with zipfile.ZipFile(output_file_path) as archive: items = archive.infolist() for f in items: if not hunt_dir: hunt_dir = f.filename.split('/')[0] # If we're dealing with client_info.yaml, use it to build a client # ID to FQDN correspondence table & skip extraction. if f.filename.split('/')[-1] == 'client_info.yaml': client_id, fqdn = self._get_client_fqdn(archive.read(f)) client_id_to_fqdn[client_id] = fqdn continue client_id = f.filename.split('/')[1] if client_id.startswith('C.'): if client_id not in client_ids: client_directory = os.path.join(self.output_path, hunt_dir, client_id) collection_paths.append((client_id, client_directory)) client_ids.add(client_id) try: archive.extract(f, self.output_path) except KeyError as exception: print('Extraction error: {0:s}'.format(exception)) return [] except OSError as exception: msg = 'Error manipulating file {0:s}: {1!s}'.format( output_file_path, exception) self.state.add_error(msg, critical=True) return [] except zipfile.BadZipfile as exception: msg = 'Bad zipfile {0:s}: {1!s}'.format( output_file_path, exception) self.state.add_error(msg, critical=True) return [] try: os.remove(output_file_path) except OSError as exception: print('Output path {0:s} could not be removed: {1:s}'.format( output_file_path, exception)) # Translate GRR client IDs to FQDNs with the information retrieved # earlier fqdn_collection_paths = [] for client_id, path in collection_paths: fqdn = client_id_to_fqdn.get(client_id, client_id) fqdn_collection_paths.append((fqdn, path)) if not fqdn_collection_paths: self.state.add_error('Nothing was extracted from the hunt archive', critical=True) return [] return fqdn_collection_paths
0.009665
def expire(self, key, timeout): """Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A key with an associated timeout is often said to be volatile in Redis terminology. The timeout is cleared only when the key is removed using the :meth:`~tredis.RedisClient.delete` method or overwritten using the :meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset` methods. This means that all the operations that conceptually alter the value stored at the key without replacing it with a new one will leave the timeout untouched. For instance, incrementing the value of a key with :meth:`~tredis.RedisClient.incr`, pushing a new value into a list with :meth:`~tredis.RedisClient.lpush`, or altering the field value of a hash with :meth:`~tredis.RedisClient.hset` are all operations that will leave the timeout untouched. The timeout can also be cleared, turning the key back into a persistent key, using the :meth:`~tredis.RedisClient.persist` method. If a key is renamed with :meth:`~tredis.RedisClient.rename`, the associated time to live is transferred to the new key name. If a key is overwritten by :meth:`~tredis.RedisClient.rename`, like in the case of an existing key ``Key_A`` that is overwritten by a call like ``client.rename(Key_B, Key_A)`` it does not matter if the original ``Key_A`` had a timeout associated or not, the new key ``Key_A`` will inherit all the characteristics of ``Key_B``. .. note:: **Time complexity**: ``O(1)`` :param key: The key to set an expiration for :type key: :class:`str`, :class:`bytes` :param int timeout: The number of seconds to set the timeout to :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute( [b'EXPIRE', key, ascii(timeout).encode('ascii')], 1)
0.000977
def latmio_dir(R, itr, D=None, seed=None): ''' This function "latticizes" a directed network, while preserving the in- and out-degree distributions. In weighted networks, the function preserves the out-strength but not the in-strength distributions. Parameters ---------- R : NxN np.ndarray directed binary/weighted connection matrix itr : int rewiring parameter. Each edge is rewired approximately itr times. D : np.ndarray | None distance-to-diagonal matrix. Defaults to the actual distance matrix if not specified. seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- Rlatt : NxN np.ndarray latticized network in original node ordering Rrp : NxN np.ndarray latticized network in node ordering used for latticization ind_rp : Nx1 np.ndarray node ordering used for latticization eff : int number of actual rewirings carried out ''' rng = get_rng(seed) n = len(R) ind_rp = rng.permutation(n) # randomly reorder matrix R = R.copy() R = R[np.ix_(ind_rp, ind_rp)] # create distance to diagonal matrix if not specified by user if D is None: D = np.zeros((n, n)) un = np.mod(range(1, n), n) um = np.mod(range(n - 1, 0, -1), n) u = np.append((0,), np.where(un < um, un, um)) for v in range(int(np.ceil(n / 2))): D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1]) D[v, :] = D[n - v - 1, :][::-1] i, j = np.where(R) k = len(i) itr *= k # maximal number of rewiring attempts per iteration max_attempts = np.round(n * k / (n * (n - 1))) # actual number of successful rewirings eff = 0 for it in range(itr): att = 0 while att <= max_attempts: # while not rewired while True: e1 = rng.randint(k) e2 = rng.randint(k) while e1 == e2: e2 = rng.randint(k) a = i[e1] b = j[e1] c = i[e2] d = j[e2] if a != c and a != d and b != c and b != d: break # rewiring condition if not (R[a, d] or R[c, b]): # lattice condition if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]): R[a, d] = R[a, b] R[a, b] = 0 R[c, b] = R[c, d] R[c, d] = 0 j.setflags(write=True) j[e1] = d j[e2] = b # reassign edge indices eff += 1 break att += 1 Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation return Rlatt, R, ind_rp, eff
0.001317
def createEditor(self, parent, column, op, value):
    """
    Creates a new editor for the parent based on the plugin parameters.

    :param      parent | <QWidget>

    :return     <QWidget> || None
    """
    try:
        cls = self._operatorMap[nativestring(op)].cls
    except (KeyError, AttributeError):
        return None

    # create the new editor
    if cls:
        widget = cls(parent)
        widget.setAttribute(Qt.WA_DeleteOnClose)
        projexui.setWidgetValue(widget, value)
        return widget
    return None
0.009231
def _should_skip_entry(self, entry): """Determine if this oplog entry should be skipped. This has the possible side effect of modifying the entry's namespace and filtering fields from updates and inserts. """ # Don't replicate entries resulting from chunk moves if entry.get("fromMigrate"): return True, False # Ignore no-ops if entry["op"] == "n": return True, False ns = entry["ns"] if "." not in ns: return True, False coll = ns.split(".", 1)[1] # Ignore system collections if coll.startswith("system."): return True, False # Ignore GridFS chunks if coll.endswith(".chunks"): return True, False is_gridfs_file = False if coll.endswith(".files"): ns = ns[: -len(".files")] if self.namespace_config.gridfs_namespace(ns): is_gridfs_file = True else: return True, False # Commands should not be ignored, filtered, or renamed. Renaming is # handled by the DocManagers via the CommandHelper class. if coll == "$cmd": return False, False # Rename or filter out namespaces that are ignored keeping # included gridfs namespaces. namespace = self.namespace_config.lookup(ns) if namespace is None: LOG.debug( "OplogThread: Skipping oplog entry: " "'%s' is not in the namespace configuration." % (ns,) ) return True, False # Update the namespace. entry["ns"] = namespace.dest_name # Take fields out of the oplog entry that shouldn't be replicated. # This may nullify the document if there's nothing to do. if not self.filter_oplog_entry( entry, include_fields=namespace.include_fields, exclude_fields=namespace.exclude_fields, ): return True, False return False, is_gridfs_file
0.000961
def referenceframe(self, event):
    """Handles navigational reference frame updates.

    These are necessary to assign geo coordinates to alerts
    and other misc things.

    :param event: event with incoming referenceframe message
    """
    self.log("Got a reference frame update! ", event, lvl=events)

    self.reference_frame = event.data
0.00542
def drag_to(self, target, duration=2.0):
    """
    Similar to swipe action, but the end point is provided by a UI proxy or by fixed coordinates.

    Args:
        target (:py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`): a UI proxy or 2-list/2-tuple
            coordinates (x, y) in NormalizedCoordinate system
        duration (:py:obj:`float`): time interval in which the action is performed

    Raises:
        PocoNoSuchNodeException: raised when the UI element does not exist
    """
    try:
        duration = float(duration)
    except ValueError:
        raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))

    if type(target) in (list, tuple):
        target_pos = target
    else:
        target_pos = target.get_position()
    origin_pos = self.get_position()
    dir_ = [target_pos[0] - origin_pos[0], target_pos[1] - origin_pos[1]]
    return self.swipe(dir_, duration=duration)
0.0059
def _isCheckpointDir(checkpointDir): """Return true iff checkpointDir appears to be a checkpoint directory.""" lastSegment = os.path.split(checkpointDir)[1] if lastSegment[0] == '.': return False if not checkpointDir.endswith(g_defaultCheckpointExtension): return False if not os.path.isdir(checkpointDir): return False return True
0.019553
def _compose_restart(services): """Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318""" def _restart_container(client, container): log_to_client('Restarting {}'.format(get_canonical_container_name(container))) client.restart(container['Id'], timeout=1) assembled_specs = get_assembled_specs() if services == []: services = [spec.name for spec in assembled_specs.get_apps_and_services()] logging.info('Restarting service containers from list: {}'.format(services)) client = get_docker_client() for service in services: container = get_container_for_app_or_service(service, include_exited=True) if container is None: log_to_client('No container found for {}'.format(service)) continue stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs) if stopped_linked_containers: log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format( stopped_linked_containers, service)) else: _restart_container(client, container)
0.004727
def __Delete(self, path, request, headers):
        """Azure Cosmos 'DELETE' http request.

        :param str path:
        :param request:
        :param dict headers:

        :return:
            Tuple of (result, headers).
        :rtype:
            tuple of (dict, dict)

        """
        return synchronized_request.SynchronizedRequest(self,
                                                        request,
                                                        self._global_endpoint_manager,
                                                        self.connection_policy,
                                                        self._requests_session,
                                                        'DELETE',
                                                        path,
                                                        request_data=None,
                                                        query_params=None,
                                                        headers=headers)
0.002947
def parse_pasv_response(s): """ Parsing `PASV` server response. :param s: response line :type s: :py:class:`str` :return: (ip, port) :rtype: (:py:class:`str`, :py:class:`int`) """ sub, *_ = re.findall(r"[^(]*\(([^)]*)", s) nums = tuple(map(int, sub.split(","))) ip = ".".join(map(str, nums[:4])) port = (nums[4] << 8) | nums[5] return ip, port
0.004535
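A quick self-contained check of the PASV parsing above with a typical FTP reply (the address in the sample line is made up): the last two numbers encode the port as high_byte * 256 + low_byte.

import re

line = "227 Entering Passive Mode (192,168,1,2,19,136)."
sub, *_ = re.findall(r"[^(]*\(([^)]*)", line)
nums = tuple(map(int, sub.split(",")))
ip = ".".join(map(str, nums[:4]))
port = (nums[4] << 8) | nums[5]
print(ip, port)  # 192.168.1.2 5000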
def imports_on_separate_lines(logical_line): r"""Place imports on separate lines. Okay: import os\nimport sys E401: import sys, os Okay: from subprocess import Popen, PIPE Okay: from myclas import MyClass Okay: from foo.bar.yourclass import YourClass Okay: import myclass Okay: import foo.bar.yourclass """ line = logical_line if line.startswith('import '): found = line.find(',') if -1 < found and ';' not in line[:found]: yield found, "E401 multiple imports on one line"
0.001832
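The check above only looks at bare 'import' lines and reports the offset of the first comma; the standalone sketch below (check_e401 is just an illustrative re-implementation, not the pycodestyle entry point) shows what gets flagged.

def check_e401(line):
    if line.startswith('import '):
        found = line.find(',')
        if -1 < found and ';' not in line[:found]:
            return found, "E401 multiple imports on one line"
    return None

print(check_e401("import sys, os"))                       # (10, 'E401 multiple imports on one line')
print(check_e401("import os"))                            # None
print(check_e401("from subprocess import Popen, PIPE"))   # None -- only bare 'import' lines are checked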
def reduce_paths(G): """ Make graph into a directed acyclic graph (DAG). """ from jcvi.algorithms.lpsolve import min_feedback_arc_set while not nx.is_directed_acyclic_graph(G): edges = [] for a, b, w in G.edges_iter(data=True): w = w['weight'] edges.append((a, b, w)) mf, mf_score = min_feedback_arc_set(edges) for a, b, w in mf: G.remove_edge(a, b) assert nx.is_directed_acyclic_graph(G) G = transitive_reduction(G) return G
0.001898
def objHasUnsavedChanges(self): ''' objHasUnsavedChanges - @see ForeignLinkData.objHasUnsavedChanges True if ANY object has unsaved changes. ''' if not self.obj: return False for thisObj in self.obj: if not thisObj: continue if thisObj.hasUnsavedChanges(cascadeObjects=True): return True return False
0.041667
def pre_scan(self, func=operator.add, seed=0): ''' An exclusive prefix sum which returns the cumulative application of the supplied function up to but excluding the current element. Args: func: An optional binary function which is commutative - that is, the order of the arguments is unimportant. Defaults to a summing operator. seed: The first element of the prefix sum and therefore also the first element of the returned sequence. Returns: A Queryable such that the nth element is the sum of the first n-1 elements of the source sequence. Raises: ValueError: If the Queryable has been closed. TypeError: If func is not callable. ''' if self.closed(): raise ValueError("Attempt to call pre_scan() on a " "closed Queryable.") if not is_callable(func): raise TypeError("pre_scan() parameter func={0} is " "not callable".format(repr(func))) return self._create(self._generate_pre_scan_result(func, seed))
0.001672
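A small standalone illustration of the exclusive-scan semantics described above, using itertools.accumulate rather than the Queryable machinery (the pre_scan function here is just a sketch with the default operator.add and seed 0):

import itertools
import operator

def pre_scan(seq, func=operator.add, seed=0):
    # nth output = seed combined with the first n-1 inputs (current element excluded)
    return list(itertools.accumulate([seed] + list(seq[:-1]), func))

print(pre_scan([1, 2, 3]))      # [0, 1, 3]
print(pre_scan([4, 4, 4, 4]))   # [0, 4, 8, 12]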
def dirlist(self, path): """ This method returns the directory list for the path specified where SAS is running """ code = """ data _null_; spd = '""" + path + """'; rc = filename('saspydir', spd); did = dopen('saspydir'); if did > 0 then do; memcount = dnum(did); put 'MEMCOUNT=' memcount; do while (memcount > 0); name = dread(did, memcount); memcount = memcount - 1; qname = spd || '"""+self.hostsep+"""' || name; rc = filename('saspydq', qname); dq = dopen('saspydq'); if dq NE 0 then do; dname = strip(name) || '"""+self.hostsep+"""'; put 'DIR=' dname; rc = dclose(dq); end; else put 'FILE=' name; end; put 'MEMEND'; rc = dclose(did); end; else do; put 'MEMCOUNT=0'; put 'MEMEND'; end; rc = filename('saspydq'); rc = filename('saspydir'); run; """ if self.nosub: print(code) return None else: ll = self.submit(code, results='text') dirlist = [] l2 = ll['LOG'].rpartition("MEMCOUNT=")[2].partition("\n") memcount = int(l2[0]) l3 = l2[2].rpartition("MEMEND")[0] for row in l3.split(sep='\n'): i = row.partition('=') if i[0] in ['FILE', 'DIR']: dirlist.append(i[2]) if memcount != len(dirlist): print("Some problem parsing list. Should be " + str(memcount) + " entries but got " + str( len(dirlist)) + " instead.") return dirlist
0.00255
async def call(self, methname, *args, **kwargs): ''' Call a remote method by name. Args: methname (str): The name of the remote method. *args: Arguments to the method call. **kwargs: Keyword arguments to the method call. Most use cases will likely use the proxy methods directly: The following two are effectively the same: valu = proxy.getFooBar(x, y) valu = proxy.call('getFooBar', x, y) ''' todo = (methname, args, kwargs) return await self.task(todo)
0.003436
def analytics(account=None, *args, **kwargs): """ Simple Google Analytics integration. First looks for an ``account`` parameter. If not supplied, uses Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If account not set, raises ``TemplateSyntaxError``. :param account: Google Analytics account id to be used. """ if not account: try: account = settings.GOOGLE_ANALYTICS_ACCOUNT except: raise template.TemplateSyntaxError( "Analytics account could not found either " "in tag parameters or settings") return {'account': account, 'params':kwargs }
0.008982
def get_netconf_client_capabilities_output_session_session_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities") config = get_netconf_client_capabilities output = ET.SubElement(get_netconf_client_capabilities, "output") session = ET.SubElement(output, "session") session_id = ET.SubElement(session, "session-id") session_id.text = kwargs.pop('session_id') callback = kwargs.pop('callback', self._callback) return callback(config)
0.004831
def cookiejar_from_dict(cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. """ # return cookiejar if one was passed in if isinstance(cookie_dict, cookielib.CookieJar): return cookie_dict # create cookiejar cj = cookielib.CookieJar() cj = add_dict_to_cookiejar(cj, cookie_dict) return cj
0.00241
def ensure(data_type, check_value, default_value=None):
    """
    Ensure the given check value is of the given data type. If it already is, return it
    directly; otherwise try to convert it, falling back to the default value on failure.
    :param data_type: the expected type: any built-in or user-defined type, e.g. int, str, list, tuple
    :param check_value: the value to check (and convert if necessary)
    :param default_value: None or a value of data_type, returned when the conversion fails
    :return: the check value, its converted form, or the default value
    """
    if default_value is not None and not isinstance(default_value, data_type):
        raise ValueError("default_value must be a value of the given data "
                         "type.")
    elif isinstance(check_value, data_type):
        return check_value
    try:
        new_value = data_type(check_value)
    except Exception:
        return default_value
    return new_value
0.002167
def proc_check_guard(self, instance, sql):
        """
        Check whether the guard SQL returns a single column containing 0 or 1.
        Returns True if the value is 1, else False.
        """
        self.open_db_connections(instance, self.PROC_GUARD_DB_KEY)
        cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY)

        should_run = False
        try:
            cursor.execute(sql, ())
            result = cursor.fetchone()
            should_run = result[0] == 1
        except Exception as e:
            self.log.error("Failed to run proc_only_if sql {} : {}".format(sql, e))

        self.close_cursor(cursor)
        self.close_db_connections(instance, self.PROC_GUARD_DB_KEY)
        return should_run
0.004167
def log_level(self, subsystem, level, **kwargs): r"""Changes the logging output of a running daemon. .. code-block:: python >>> c.log_level("path", "info") {'Message': "Changed log level of 'path' to 'info'\n"} Parameters ---------- subsystem : str The subsystem logging identifier (Use ``"all"`` for all subsystems) level : str The desired logging level. Must be one of: * ``"debug"`` * ``"info"`` * ``"warning"`` * ``"error"`` * ``"fatal"`` * ``"panic"`` Returns ------- dict : Status message """ args = (subsystem, level) return self._client.request('/log/level', args, decoder='json', **kwargs)
0.002312
def by_median_household_income(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.median_household_income.name, ascending=False, returns=DEFAULT_LIMIT): """ Search zipcode information by median household income. """ return self.query( median_household_income_lower=lower, median_household_income_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
0.01238
def _copy_problem_hparams(p_hparams): """Use input modality, vocab, and space id for target.""" p = p_hparams # Duplicate input modality. p.modality["targets"] = p.modality["inputs"] # Duplicate input vocab size. p.vocab_size["targets"] = p.vocab_size["inputs"] # Duplicate input vocabulary. p.vocabulary["targets"] = p.vocabulary["inputs"] # Duplicate input space ids. p.target_space_id = p.input_space_id # Mark that p was reversed. p.was_copy = True
0.027311
def init_app(self, app, sqla, namespace='api', route_prefix='/api'): """ Initialize the adapter if it hasn't already been initialized. :param app: Flask application :param sqla: Flask-SQLAlchemy instance :param namespace: Prefixes all generated routes :param route_prefix: The base path for the generated routes """ self.app = app self.sqla = sqla self._setup_adapter(namespace, route_prefix)
0.004219
def raise_on_wrong_settings(self): """ Validates the configuration settings and raises RuntimeError on error """ self.__ensure_dir_exists(self.working_directory, 'working directory') for idir in self.include_paths: self.__ensure_dir_exists(idir, 'include directory') if self.__xml_generator not in ["castxml", "gccxml"]: msg = ('xml_generator("%s") should either be ' + '"castxml" or "gccxml".') % self.xml_generator raise RuntimeError(msg)
0.003683
def __score_method(X, y, fcounts, model_generator, score_function, method_name, nreps=10, test_size=100, cache_dir="/tmp"): """ Test an explanation method. """ old_seed = np.random.seed() np.random.seed(3293) # average the method scores over several train/test splits method_reps = [] data_hash = hashlib.sha256(__toarray(X).flatten()).hexdigest() + hashlib.sha256(__toarray(y)).hexdigest() for i in range(nreps): X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=test_size, random_state=i) # define the model we are going to explain, caching so we onlu build it once model_id = "model_cache__v" + "__".join([__version__, data_hash, model_generator.__name__])+".pickle" cache_file = os.path.join(cache_dir, model_id + ".pickle") if os.path.isfile(cache_file): with open(cache_file, "rb") as f: model = pickle.load(f) else: model = model_generator() model.fit(X_train, y_train) with open(cache_file, "wb") as f: pickle.dump(model, f) attr_key = "_".join([model_generator.__name__, method_name, str(test_size), str(nreps), str(i), data_hash]) def score(attr_function): def cached_attr_function(X_inner): if attr_key not in _attribution_cache: _attribution_cache[attr_key] = attr_function(X_inner) return _attribution_cache[attr_key] #cached_attr_function = lambda X: __check_cache(attr_function, X) if fcounts is None: return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model, i) else: scores = [] for f in fcounts: scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model, i)) return np.array(scores) # evaluate the method (only building the attribution function if we need to) if attr_key not in _attribution_cache: method_reps.append(score(getattr(methods, method_name)(model, X_train))) else: method_reps.append(score(None)) np.random.seed(old_seed) return np.array(method_reps).mean(0)
0.00564
def person_involved(self, person): """Return True if the given person has been involved in the discussion, False otherwise. """ return any(message.posted_by == person for message in self.discussion)
0.008658
def plotConvergenceByColumnTopology(results, columnRange, featureRange, networkType, numTrials): """ Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f, c, t] = how long it took it to converge with f unique # features, c columns and topology t. convergence = numpy.zeros((max(featureRange), max(columnRange) + 1, len(networkType))) networkTypeNames = {} for i, topologyType in enumerate(networkType): if "Topology" in topologyType: networkTypeNames[i] = "Normal" else: networkTypeNames[i] = "Dense" for r in results: convergence[r["numFeatures"] - 1, r["numColumns"], networkType.index(r["networkType"])] += r["convergencePoint"] convergence /= numTrials # For each column, print convergence as fct of number of unique features for c in range(1, max(columnRange) + 1): for t in range(len(networkType)): print c, convergence[:, c, t] # Print everything anyway for debugging print "Average convergence array=", convergence ######################################################################## # # Create the plot. x-axis= plt.figure() plotPath = os.path.join("plots", "convergence_by_column_topology.pdf") # Plot each curve legendList = [] colormap = plt.get_cmap("jet") colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(networkType))] for i in range(len(featureRange)): for t in range(len(networkType)): f = featureRange[i] print columnRange print convergence[f-1,columnRange, t] legendList.append('Unique features={}, topology={}'.format(f, networkTypeNames[t])) plt.plot(columnRange, convergence[f-1,columnRange, t], color=colorList[i*len(networkType) + t]) # format plt.legend(legendList, loc="upper right") plt.xlabel("Number of columns") plt.xticks(columnRange) plt.yticks(range(0,int(convergence.max())+1)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (multiple columns)") # save plt.savefig(plotPath) plt.close()
0.022825
def get_formset(self, request, obj=None, **kwargs): """ Load Synchronizer schema to display specific fields in admin """ if obj is not None: try: # this is enough to load the new schema obj.external except LayerExternal.DoesNotExist: pass return super(LayerExternalInline, self).get_formset(request, obj=None, **kwargs)
0.006961
def acceptNavigationRequest(self, url, kind, is_main_frame): """Open external links in browser and internal links in the webview""" ready_url = url.toEncoded().data().decode() is_clicked = kind == self.NavigationTypeLinkClicked if is_clicked and self.root_url not in ready_url: QtGui.QDesktopServices.openUrl(url) return False return super(WebPage, self).acceptNavigationRequest(url, kind, is_main_frame)
0.00641
def load_obsdata(self, idx: int) -> None:
        """Load the next obs sequence value (of the given index)."""
        if self._obs_ramflag:
            self.obs[0] = self._obs_array[idx]
        elif self._obs_diskflag:
            raw = self._obs_file.read(8)
            self.obs[0] = struct.unpack('d', raw)[0]
0.006431
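A standalone reminder of why indexing the struct.unpack result matters when reading one 8-byte double at a time: unpack always returns a tuple, even for a single value.

import struct

raw = struct.pack('d', 3.5)
print(len(raw))                     # 8
print(struct.unpack('d', raw))      # (3.5,)
print(struct.unpack('d', raw)[0])   # 3.5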
def to_capabilities(self): """Marshals the IE options to the correct object.""" caps = self._caps opts = self._options.copy() if len(self._arguments) > 0: opts[self.SWITCHES] = ' '.join(self._arguments) if len(self._additional) > 0: opts.update(self._additional) if len(opts) > 0: caps[Options.KEY] = opts return caps
0.004854
def shorten_text(self, text): """Shortens text to fit into the :attr:`width`.""" if len(text) > self.width: return text[:self.width - 3] + '...' return text
0.010417
def notification_message(cls, item): '''Convert an RPCRequest item to a message.''' assert isinstance(item, Notification) return cls.encode_payload(cls.request_payload(item, None))
0.009804
def serve(args): """Start a server which will watch .md and .rst files for changes. If a md file changes, the Home Documentation is rebuilt. If a .rst file changes, the updated sphinx project is rebuilt Args: args (ArgumentParser): flags from the CLI """ # Sever's parameters port = args.serve_port or PORT host = "0.0.0.0" # Current working directory dir_path = Path().absolute() web_dir = dir_path / "site" # Update routes utils.set_routes() # Offline mode if args.offline: os.environ["MKINX_OFFLINE"] = "true" _ = subprocess.check_output("mkdocs build > /dev/null", shell=True) utils.make_offline() class MkinxHTTPHandler(SimpleHTTPRequestHandler): """Class routing urls (paths) to projects (resources) """ def translate_path(self, path): # default root -> cwd location = str(web_dir) route = location if len(path) != 0 and path != "/": for key, loc in utils.get_routes(): if path.startswith(key): location = loc path = path[len(key) :] break if location[-1] == "/" or not path or path[0] == "/": route = location + path else: route = location + "/" + path return route.split("?")[0] # Serve as deamon thread success = False count = 0 print("Waiting for server port...") try: while not success: try: httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler) success = True except OSError: count += 1 finally: if not success and count > 20: s = "port {} seems occupied. Try with {} ? (y/n)" if "y" in input(s.format(port, port + 1)): port += 1 count = 0 else: print("You can specify a custom port with mkinx serve -s") return time.sleep(0.5) except KeyboardInterrupt: print("Aborting.") return httpd.allow_reuse_address = True print("\nServing at http://{}:{}\n".format(host, port)) thread = threading.Thread(target=httpd.serve_forever) thread.daemon = True thread.start() # Watch for changes event_handler = utils.MkinxFileHandler( patterns=["*.rst", "*.md", "*.yml", "*.yaml"] ) observer = Observer() observer.schedule(event_handler, path=str(dir_path), recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() httpd.server_close() observer.join()
0.001041
def contains_set(self, other):
        """Return ``True`` if ``other`` is a subset of the real numbers.

        Returns
        -------
        contained : bool
            ``True`` if ``other`` is an instance of `RealNumbers` or
            `Integers`, ``False`` otherwise.

        Examples
        --------
        >>> real_numbers = RealNumbers()
        >>> real_numbers.contains_set(RealNumbers())
        True
        """
        if other is self:
            return True

        return (isinstance(other, RealNumbers) or
                isinstance(other, Integers))
0.003534
def field_items(self, path=str(), **options): """ Returns a **flatten** list of ``('field path', field item)`` tuples for the `Pointer` field itself and for each :class:`Field` *nested* in the :attr:`data` object referenced by the `Pointer` field. :param str path: path of the `Pointer` field. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`data` object referenced by the `Pointer` field lists their referenced :attr:`~Pointer.data` object field items as well (chained method call). """ items = list() # Field items.append((path if path else 'field', self)) # Data Object data_path = '{0}.{1}'.format(path, 'data') if path else 'data' # Container if is_container(self._data): for field_item in self._data.field_items(data_path, **options): items.append(field_item) # Pointer elif is_pointer(self._data) and get_nested(options): for field_item in self._data.field_items(data_path, **options): items.append(field_item) # Field elif is_field(self._data): items.append((data_path, self._data)) return items
0.002344
def unstub(*objs): """Unstubs all stubbed methods and functions If you don't pass in any argument, *all* registered mocks and patched modules, classes etc. will be unstubbed. Note that additionally, the underlying registry will be cleaned. After an `unstub` you can't :func:`verify` anymore because all interactions will be forgotten. """ if objs: for obj in objs: mock_registry.unstub(obj) else: mock_registry.unstub_all()
0.002041
def update(self): """Once you open a dataset, it activates all the widgets. """ self.info.display_dataset() self.overview.update() self.labels.update(labels=self.info.dataset.header['chan_name']) self.channels.update() try: self.info.markers = self.info.dataset.read_markers() except FileNotFoundError: lg.info('No notes/markers present in the header of the file') else: self.notes.update_dataset_marker()
0.003891
def n2s(n): """ Number to string. """ s = hex(n)[2:].rstrip("L") if len(s) % 2 != 0: s = "0" + s return s.decode("hex")
0.006623
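The snippet above is Python 2 only (the 'L' suffix and str.decode("hex")); a rough Python 3 equivalent using int.to_bytes is sketched below (n2s_py3 is an illustrative name).

def n2s_py3(n):
    length = max(1, (n.bit_length() + 7) // 8)
    return n.to_bytes(length, "big")

print(n2s_py3(0x414243))  # b'ABC'
print(n2s_py3(0))         # b'\x00'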
def generate_random_string(length=6): ''' Returns a random string of a specified length. >>> len(generate_random_string(length=25)) 25 Test randomness. Try N times and observe no duplicaton >>> N = 100 >>> len(set(generate_random_string(10) for i in range(N))) == N True ''' n = int(length / 2 + 1) x = binascii.hexlify(os.urandom(n)) s = x[:length] return s.decode('utf-8')
0.002336
def get_versioned_references_for(self, instance): """Returns the versioned references for the given instance """ vrefs = [] # Retrieve the referenced objects refs = instance.getRefs(relationship=self.relationship) ref_versions = getattr(instance, REFERENCE_VERSIONS, None) # No versions stored, return the original references if ref_versions is None: return refs for ref in refs: uid = api.get_uid(ref) # get the linked version to the reference version = ref_versions.get(uid) # append the versioned reference vrefs.append(self.retrieve_version(ref, version)) return vrefs
0.002747
def apply_obb(self): """ Transform the current path so that its OBB is axis aligned and OBB center is at the origin. """ if len(self.root) == 1: matrix, bounds = polygons.polygon_obb( self.polygons_closed[self.root[0]]) self.apply_transform(matrix) return matrix else: raise ValueError('Not implemented for multibody geometry')
0.004556
def update_subnet(self, subnet, name=None): ''' Updates a subnet ''' subnet_id = self._find_subnet_id(subnet) return self.network_conn.update_subnet( subnet=subnet_id, body={'subnet': {'name': name}})
0.007937
def _get_app_libs_volume_mounts(app_name, assembled_specs): """ Returns a list of the formatted volume mounts for all libs that an app uses """ volumes = [] for lib_name in assembled_specs['apps'][app_name]['depends']['libs']: lib_spec = assembled_specs['libs'][lib_name] volumes.append("{}:{}".format(Repo(lib_spec['repo']).vm_path, container_code_path(lib_spec))) return volumes
0.007282
def __erase_primes(self): """Erase all prime markings""" for i in range(self.n): for j in range(self.n): if self.marked[i][j] == 2: self.marked[i][j] = 0
0.009217
def clr(M, **kwargs):
    """Implementation of the Context Likelihood or Relatedness (CLR) network algorithm.

    Args:
        M (numpy.ndarray): a square relevance matrix where M[i, j] represents the
            similarity content between nodes i and j. Elements of the matrix should be
            non-negative.

    Returns:
        R (numpy.ndarray): Output CLR matrix. Each component combines the row-wise
            z-scores of the corresponding relevance value and represents the direct
            edge weight of the observed interaction.

    .. note::
       Ref: Jeremiah J. Faith, Boris Hayete, Joshua T. Thaden, Ilaria Mogno, Jamey
       Wierzbowski, Guillaume Cottarel, Simon Kasif, James J. Collins, and Timothy S.
       Gardner. Large-scale mapping and validation of Escherichia coli transcriptional
       regulation from a compendium of expression profiles. PLoS Biology, 2007
    """
    R = np.zeros(M.shape)
    Id = [[0, 0] for i in range(M.shape[0])]
    for i in range(M.shape[0]):
        mu_i = np.mean(M[i, :])
        sigma_i = np.std(M[i, :])
        Id[i] = [mu_i, sigma_i]

    for i in range(M.shape[0]):
        for j in range(i + 1, M.shape[0]):
            # z-score of M[i, j] with respect to row i and row j statistics, clipped at zero
            z_i = np.max([0, (M[i, j] - Id[i][0]) / Id[i][1]])
            z_j = np.max([0, (M[i, j] - Id[j][0]) / Id[j][1]])
            R[i, j] = np.sqrt(z_i**2 + z_j**2)
            R[j, i] = R[i, j]  # Symmetric

    return R
0.004264
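For a symmetric relevance matrix the double loop above collapses to a few numpy operations; the sketch below (the input values are made up) computes the same row-wise z-score combination.

import numpy as np

M = np.array([[1.0, 0.8, 0.1],
              [0.8, 1.0, 0.3],
              [0.1, 0.3, 1.0]])
mu = M.mean(axis=1, keepdims=True)
sigma = M.std(axis=1, keepdims=True)
Z = np.maximum(0, (M - mu) / sigma)   # row-wise z-scores, negatives clipped to 0
R = np.sqrt(Z ** 2 + Z.T ** 2)        # combine both directions symmetrically
np.fill_diagonal(R, 0)                # the loop above never touches the diagonal
print(np.round(R, 3))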
def read_config(cls, configparser): """Read configuration file options.""" config = dict() config[cls._filename_re_key] = configparser.get(cls.__name__, cls._filename_re_key) \ if configparser.has_option(cls.__name__, cls._filename_re_key) else None return config
0.013029
def _convert_axes_to_absolute(dshape, axes):
    """axes = (-2, -1) does not work in reikna, so we have to convert them to
    absolute (non-negative) indices"""
    if axes is None:
        return None
    elif isinstance(axes, (tuple, list)):
        return tuple(np.arange(len(dshape))[list(axes)])
    else:
        raise NotImplementedError("axes %s is of unsupported type %s " % (str(axes), type(axes)))
0.008152
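A standalone check of the negative-axis conversion above for a 3-d shape: (-2, -1) becomes the absolute indices 1 and 2.

import numpy as np

dshape = (8, 16, 32)
axes = (-2, -1)
print(tuple(int(a) for a in np.arange(len(dshape))[list(axes)]))  # (1, 2)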
def on_presence(self, session, presence):
        """Handles presence stanzas"""
        from_jid = presence.getFrom()
        is_member = self.is_member(from_jid.getStripped())
        if is_member:
            member = self.get_member(from_jid.getStripped())
        else:
            member = None

        logger.info('presence: from=%s is_member=%s type=%s' % (from_jid, is_member, presence.getType()))
        if presence.getType() == 'subscribed':
            if is_member:
                logger.info('[%s] accepted their invitation' % (from_jid,))
                member['STATUS'] = 'ACTIVE'
            else:
                #TODO: user accepted, but is no longer on the roster, unsubscribe?
                pass
        elif presence.getType() == 'subscribe':
            if is_member:
                logger.info('Acknowledging subscription request from [%s]' % (from_jid,))
                self.client.sendPresence(jid=from_jid, typ='subscribed')
                member['STATUS'] = 'ACTIVE'
                self.broadcast('%s has accepted their invitation!' % (from_jid,))
            else:
                #TODO: show that a user has requested membership?
                pass
        elif presence.getType() is None:
            if is_member:
                member['ONLINE'] += 1
        elif presence.getType() == 'unavailable':
            if is_member:
                member['ONLINE'] -= 1
        else:
            logger.info('Unhandled presence stanza of type [%s] from [%s]' % (presence.getType(), from_jid))
0.00651
def locate_module(module_id: str, module_type: str = None): """ Locate module by module ID Args: module_id: Module ID module_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'`` """ entry_point = None if module_type: entry_point = 'ehforwarderbot.%s' % module_type module_id = module_id.split('#', 1)[0] if entry_point: for i in pkg_resources.iter_entry_points(entry_point): if i.name == module_id: return i.load() return pydoc.locate(module_id)
0.003509
def _opening_bracket_index(self, text, bpair=('(', ')')): """Return the index of the opening bracket that matches the closing bracket at the end of the text.""" level = 1 for i, char in enumerate(reversed(text[:-1])): if char == bpair[1]: level += 1 elif char == bpair[0]: level -= 1 if level == 0: return len(text) - i - 2
0.006961
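A small standalone run of the bracket-matching logic above; the original is a method, so find_opening below is just an illustrative free-function copy.

def find_opening(text, bpair=('(', ')')):
    level = 1
    for i, char in enumerate(reversed(text[:-1])):
        if char == bpair[1]:
            level += 1
        elif char == bpair[0]:
            level -= 1
            if level == 0:
                return len(text) - i - 2

text = "foo(a, (b), c)"
idx = find_opening(text)
print(idx, text[idx])  # 3 (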
def solve(self,stops, method="POST", barriers=None, polylineBarriers=None, polygonBarriers=None, travelMode=None, attributeParameterValues=None, returnDirections=None, returnRoutes=True, returnStops=False, returnBarriers=False, returnPolylineBarriers=True, returnPolygonBarriers=True, outSR=None, ignoreInvalidLocations=True, outputLines=None, findBestSequence=False, preserveFirstStop=True, preserveLastStop=True, useTimeWindows=False, startTime=None, startTimeIsUTC=False, accumulateAttributeNames=None, impedanceAttributeName=None, restrictionAttributeNames=None, restrictUTurns=None, useHierarchy=True, directionsLanguage=None, directionsOutputType=None, directionsStyleName=None, directionsLengthUnits=None, directionsTimeAttributeName=None, outputGeometryPrecision=None, outputGeometryPrecisionUnits=None, returnZ=False ): """The solve operation is performed on a network layer resource. The solve operation is supported on a network layer whose layerType is esriNAServerRouteLayer. You can provide arguments to the solve route operation as query parameters. Inputs: stops - The set of stops loaded as network locations during analysis. Stops can be specified using a simple comma / semi-colon based syntax or as a JSON structure. If stops are not specified, preloaded stops from the map document are used in the analysis. barriers - The set of barriers loaded as network locations during analysis. Barriers can be specified using a simple comma / semi-colon based syntax or as a JSON structure. If barriers are not specified, preloaded barriers from the map document are used in the analysis. If an empty json object is passed ('{}') preloaded barriers are ignored. polylineBarriers - The set of polyline barriers loaded as network locations during analysis. If polyline barriers are not specified, preloaded polyline barriers from the map document are used in the analysis. If an empty json object is passed ('{}') preloaded polyline barriers are ignored. polygonBarriers - The set of polygon barriers loaded as network locations during analysis. If polygon barriers are not specified, preloaded polygon barriers from the map document are used in the analysis. If an empty json object is passed ('{}') preloaded polygon barriers are ignored. travelMode - Travel modes provide override values that help you quickly and consistently model a vehicle or mode of transportation. The chosen travel mode must be preconfigured on the network dataset that the routing service references. attributeParameterValues - A set of attribute parameter values that can be parameterized to determine which network elements can be used by a vehicle. returnDirections - If true, directions will be generated and returned with the analysis results. Default is true. returnRoutes - If true, routes will be returned with the analysis results. Default is true. returnStops - If true, stops will be returned with the analysis results. Default is false. returnBarriers - If true, barriers will be returned with the analysis results. Default is false. returnPolylineBarriers - If true, polyline barriers will be returned with the analysis results. Default is false. returnPolygonBarriers - If true, polygon barriers will be returned with the analysis results. Default is false. outSR - The spatial reference of the geometries returned with the analysis results. ignoreInvalidLocations - If true, the solver will ignore invalid locations. Otherwise, it will raise an error. The default is as defined in the network layer. 
outputLines - The type of output lines to be generated in the result. The default is as defined in the network layer. findBestSequence - If true, the solver should re-sequence the route in the optimal order. The default is as defined in the network layer. preserveFirstStop - If true, the solver should keep the first stop fixed in the sequence. The default is as defined in the network layer. preserveLastStop - If true, the solver should keep the last stop fixed in the sequence. The default is as defined in the network layer. useTimeWindows - If true, the solver should consider time windows. The default is as defined in the network layer. startTime - The time the route begins. If not specified, the solver will use the default as defined in the network layer. startTimeIsUTC - The time zone of the startTime parameter. accumulateAttributeNames - The list of network attribute names to be accumulated with the analysis. The default is as defined in the network layer. The value should be specified as a comma separated list of attribute names. You can also specify a value of none to indicate that no network attributes should be accumulated. impedanceAttributeName - The network attribute name to be used as the impedance attribute in analysis. The default is as defined in the network layer. restrictionAttributeNames -The list of network attribute names to be used as restrictions with the analysis. The default is as defined in the network layer. The value should be specified as a comma separated list of attribute names. You can also specify a value of none to indicate that no network attributes should be used as restrictions. restrictUTurns - Specifies how U-Turns should be restricted in the analysis. The default is as defined in the network layer. Values: esriNFSBAllowBacktrack | esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack | esriNFSBAtDeadEndsAndIntersections useHierarchy - If true, the hierarchy attribute for the network should be used in analysis. The default is as defined in the network layer. directionsLanguage - The language to be used when computing directions. The default is as defined in the network layer. The list of supported languages can be found in REST layer description. directionsOutputType - Defines content, verbosity of returned directions. The default is esriDOTStandard. Values: esriDOTComplete | esriDOTCompleteNoEvents | esriDOTInstructionsOnly | esriDOTStandard | esriDOTSummaryOnly directionsStyleName - The style to be used when returning the directions. The default is as defined in the network layer. The list of supported styles can be found in REST layer description. directionsLengthUnits - The length units to use when computing directions. The default is as defined in the network layer. Values: esriNAUFeet | esriNAUKilometers | esriNAUMeters | esriNAUMiles | esriNAUNauticalMiles | esriNAUYards | esriNAUUnknown directionsTimeAttributeName - The name of network attribute to use for the drive time when computing directions. The default is as defined in the network layer. outputGeometryPrecision - The precision of the output geometry after generalization. If 0, no generalization of output geometry is performed. The default is as defined in the network service configuration. outputGeometryPrecisionUnits - The units of the output geometry precision. The default value is esriUnknownUnits. 
Values: esriUnknownUnits | esriCentimeters | esriDecimalDegrees | esriDecimeters | esriFeet | esriInches | esriKilometers | esriMeters | esriMiles | esriMillimeters | esriNauticalMiles | esriPoints | esriYards returnZ - If true, Z values will be included in the returned routes and compressed geometry if the network dataset is Z-aware. The default is false. """ if not self.layerType == "esriNAServerRouteLayer": raise ValueError("The solve operation is supported on a network " "layer of Route type only") url = self._url + "/solve" params = { "f" : "json", "stops": stops } if not barriers is None: params['barriers'] = barriers if not polylineBarriers is None: params['polylineBarriers'] = polylineBarriers if not polygonBarriers is None: params['polygonBarriers'] = polygonBarriers if not travelMode is None: params['travelMode'] = travelMode if not attributeParameterValues is None: params['attributeParameterValues'] = attributeParameterValues if not returnDirections is None: params['returnDirections'] = returnDirections if not returnRoutes is None: params['returnRoutes'] = returnRoutes if not returnStops is None: params['returnStops'] = returnStops if not returnBarriers is None: params['returnBarriers'] = returnBarriers if not returnPolylineBarriers is None: params['returnPolylineBarriers'] = returnPolylineBarriers if not returnPolygonBarriers is None: params['returnPolygonBarriers'] = returnPolygonBarriers if not outSR is None: params['outSR'] = outSR if not ignoreInvalidLocations is None: params['ignoreInvalidLocations'] = ignoreInvalidLocations if not outputLines is None: params['outputLines'] = outputLines if not findBestSequence is None: params['findBestSequence'] = findBestSequence if not preserveFirstStop is None: params['preserveFirstStop'] = preserveFirstStop if not preserveLastStop is None: params['preserveLastStop'] = preserveLastStop if not useTimeWindows is None: params['useTimeWindows'] = useTimeWindows if not startTime is None: params['startTime'] = startTime if not startTimeIsUTC is None: params['startTimeIsUTC'] = startTimeIsUTC if not accumulateAttributeNames is None: params['accumulateAttributeNames'] = accumulateAttributeNames if not impedanceAttributeName is None: params['impedanceAttributeName'] = impedanceAttributeName if not restrictionAttributeNames is None: params['restrictionAttributeNames'] = restrictionAttributeNames if not restrictUTurns is None: params['restrictUTurns'] = restrictUTurns if not useHierarchy is None: params['useHierarchy'] = useHierarchy if not directionsLanguage is None: params['directionsLanguage'] = directionsLanguage if not directionsOutputType is None: params['directionsOutputType'] = directionsOutputType if not directionsStyleName is None: params['directionsStyleName'] = directionsStyleName if not directionsLengthUnits is None: params['directionsLengthUnits'] = directionsLengthUnits if not directionsTimeAttributeName is None: params['directionsTimeAttributeName'] = directionsTimeAttributeName if not outputGeometryPrecision is None: params['outputGeometryPrecision'] = outputGeometryPrecision if not outputGeometryPrecisionUnits is None: params['outputGeometryPrecisionUnits'] = outputGeometryPrecisionUnits if not returnZ is None: params['returnZ'] = returnZ if method.lower() == "post": return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) else: return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, 
proxy_port=self._proxy_port)
0.008649
def get_bugs(self, project, status=None):
        """
        Retrieves a list of bugs for a given project.

        By default, this will only return active bugs. If you wish to
        retrieve a non-active bug then specify the status through the status
        parameter.

        :param project: The formal project name.
        :param status: Allows filtering of bugs by current status.
        """
        uri = '{base}/{project}'.format(base=self.BASE_URI, project=project)
        parameters = {'ws.op': 'searchTasks'}
        if status:
            parameters['status'] = status
        resp = self._client.get(uri, model=models.Bug, is_collection=True,
                                params=parameters)
        return resp
0.002747
def active_conf_set_name(self): '''The name of the currently-active configuration set.''' with self._mutex: if not self.conf_sets: return '' if not self._active_conf_set: return '' return self._active_conf_set
0.006826