def _expectation(p, mean, none, kern, feat, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,.} :: Linear kernel

    or the equivalent for MarkovGaussian

    :return: NxDxM
    """
    return tf.matrix_transpose(expectation(p, (kern, feat), mean))
def load(self, typedef, value, **kwargs):
    """
    Return the result of the bound load method for a typedef

    Looks up the load function that was bound to the engine for a typedef,
    and returns the result of passing the given `value` and any `context`
    to that function.

    Parameters
    ----------
    typedef : :class:`~TypeDefinition`
        The typedef whose bound load method should be used
    value : object
        The value to be passed into the bound load method
    **kwargs : kwargs
        Context for the value being loaded

    Returns
    -------
    loaded_value : object
        The return value of the load function for the input value

    Raises
    ------
    exc : :class:`DeclareException`
        If the input typedef is not bound to this engine

    Example
    -------
    .. code-block:: python

        class Account(TypeDefinition):
            prefix = "::account"

            def load(self, value, **context):
                return value + Account.prefix

            def dump(self, value, **context):
                return value[:-len(Account.prefix)]

        typedef = Account()
        engine = TypeEngine("accounts")
        engine.register(typedef)
        engine.bind()
        assert engine.dump(typedef, "Jill::account") == "Jill"
    """
    try:
        bound_type = self.bound_types[typedef]
    except KeyError:
        raise DeclareException(
            "Can't load unknown type {}".format(typedef))
    else:
        # Don't need to try/catch since load/dump are bound together
        return bound_type["load"](value, **kwargs)
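A complementary usage sketch for the bound load path, mirroring the ``Account`` example from the docstring above (``TypeEngine``, ``TypeDefinition``, ``register`` and ``bind`` are taken from that example; the symmetric ``engine.load`` call is the assumption being illustrated):

class Account(TypeDefinition):
    prefix = "::account"

    def load(self, value, **context):
        # loading appends the prefix...
        return value + Account.prefix

    def dump(self, value, **context):
        # ...and dumping strips it again
        return value[:-len(Account.prefix)]

typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.load(typedef, "Jill") == "Jill::account"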
def dispatch(self, *args, **kwargs):
    '''
    Entry point for this class, here we decide basic stuff
    '''

    # Get if this class is working as only a base render and List functionality shouldn't be enabled
    onlybase = getattr(self, "onlybase", False)

    # REST not available when onlybase is enabled
    if not onlybase:
        # Check if this is a REST query to push the answer to respond in JSON
        if bool(self.request.META.get('HTTP_X_REST', False)):
            self.json = True
            if self.request.GET.get('json', self.request.POST.get('json', None)) is None:
                newget = {}
                newget['json'] = "{}"
                for key in self.request.GET:
                    newget[key] = self.request.GET[key]
                self.request.GET = QueryDict('').copy()
                self.request.GET.update(newget)
                # return HttpResponseBadRequest(_("The service requires you to set a GET argument named json={} which will contains all the filters you can apply to a list"))

        # Check if this is a REST query to add an element
        if self.request.method == 'POST':
            target = get_class(resolve("{}/add".format(self.request.META.get("REQUEST_URI"))).func)
            target.json = True
            return target.as_view()(self.request)

    # Set class internal variables
    self._setup(self.request)

    # Deprecations
    deprecated = [('retrictions', '2016061000')]
    for (depre, version) in deprecated:
        if hasattr(self, depre):
            raise IOError("The attribute '{}' has been deprecated in version '{}' and it is not available anymore".format(depre, version))

    # Build extracontext
    if not hasattr(self, 'extra_context'):
        self.extra_context = {}
    if not hasattr(self, 'client_context'):
        self.client_context = {}

    # Attach user to the extra_context
    self.extra_context['user'] = self.user

    # Attach WS entry point and STATIC entry point
    self.extra_context['ws_entry_point'] = self.BASE_URL + getattr(self, "ws_entry_point", "{0}/{1}".format(self._appname, "{0}s".format(self._modelname.lower())))
    static_partial_row_path = getattr(self, 'static_partial_row', "{0}/{1}_rows.html".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_partial_row'] = get_static(static_partial_row_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_ROWS, 'html', relative=True)
    static_partial_header_path = getattr(self, 'static_partial_header', "{0}/{1}_header.html".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_partial_header'] = get_static(static_partial_header_path, self.user, self.language, None, 'html', relative=True)
    static_partial_summary_path = getattr(self, 'static_partial_summary', "{0}/{1}_summary.html".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_partial_summary'] = get_static(static_partial_summary_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_SUMMARY, 'html', relative=True)
    static_app_row_path = getattr(self, 'static_app_row', "{0}/{1}_app.js".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_app_row'] = get_static(static_app_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/app.js'), 'js', relative=True)
    static_controllers_row_path = getattr(self, 'static_controllers_row', "{0}/{1}_controllers.js".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_controllers_row'] = get_static(static_controllers_row_path, self.user, self.language, None, 'js', relative=True)
    static_filters_row_path = getattr(self, 'static_filters_row', "{0}/{1}_filters.js".format(self._appname, "{0}s".format(self._modelname.lower())))
    self.extra_context['static_filters_row'] = get_static(static_filters_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/rows.js'), 'js', relative=True)
    self.extra_context['field_delete'] = getattr(self, 'field_delete', False)
    self.extra_context['field_check'] = getattr(self, 'field_check', None)

    # Default value for extends_base
    if hasattr(self, 'extends_base'):
        self.extra_context['extends_base'] = self.extends_base

    # Get if this is a template only answer
    self.__authtoken = bool(getattr(self.request, "authtoken", False))
    self.json_worker = hasattr(self, 'json_builder') or self.__authtoken or (self.json is True)
    if self.json_worker:
        # Check if the request has some json query, if not, just render the template
        if self.request.GET.get('json', self.request.POST.get('json', None)) is None:
            # Calculate tabs
            if getattr(self, 'show_details', False):
                self.extra_context['tabs_js'] = json.dumps(self.get_tabs_js())
            # Silence the normal execution from this class
            self.get_queryset = lambda: None
            self.get_context_data = lambda **kwargs: self.extra_context
            self.render_to_response = lambda context, **response_kwargs: super(GenList, self).render_to_response(context, **response_kwargs)
            # Call the base implementation and finish execution here
            return super(GenList, self).dispatch(*args, **kwargs)

    # The system is requesting a list, we are not allowed
    if onlybase:
        json_answer = {"error": True, "errortxt": _("Not allowed, this kind of requests has been prohibited for this view!")}
        return HttpResponse(json.dumps(json_answer), content_type='application/json')

    # Initialize a default context
    self.__kwargs = kwargs
    self.__context = {}

    # Force export list
    self.export = getattr(self, 'export', self.request.GET.get('export', self.request.POST.get('export', None)))

    # Call the base implementation
    return super(GenList, self).dispatch(*args, **kwargs)
def get(self):
    """
    *get the cone_search object*

    **Return:**
        - ``results`` -- the results of the conesearch
    """
    self.log.info('starting the ``get`` method')

    # sort results by angular separation
    from operator import itemgetter
    results = list(self.squareResults)
    results = sorted(
        results, key=itemgetter('separation_arcsec'), reverse=True)

    # order of results
    headers = ["sdss_name", "type", "ra", "dec", "specz", "specz_err",
               "photoz", "photoz_err", "separation_arcsec",
               "separation_north_arcsec", "separation_east_arcsec"]

    import collections
    orderDict = collections.OrderedDict(sorted({}.items()))

    # filter out results greater than the search radius
    filteredResults = []
    for row in results:
        if float(row["separation_arcsec"]) < self.searchRadius:
            orderDict = collections.OrderedDict(sorted({}.items()))
            for h in headers:
                if h in row.keys():
                    orderDict[h] = row[h]
            filteredResults.append(orderDict)
        else:
            pass

    if self.nearest and len(filteredResults):
        orderDict = collections.OrderedDict(sorted({}.items()))
        for h in headers:
            if h in filteredResults[0].keys():
                # read from the nearest match rather than the stale loop variable
                orderDict[h] = filteredResults[0][h]
        filteredResults = [orderDict]

    if not len(filteredResults):
        orderDict = collections.OrderedDict(sorted({}.items()))
        for h in headers:
            if self.galaxyType == "all" or self.galaxyType == False or (self.galaxyType == "specz" and h not in ["photoz_err", "photoz"]) or (self.galaxyType == "photoz" and h not in ["specz", "specz_err"]):
                orderDict[h] = ""
        filteredResults = [orderDict]

    # pretty format print
    dataSet = list_of_dictionaries(
        log=self.log,
        listOfDictionaries=list(reversed(filteredResults))
    )
    if self.outputFormat == "csv":
        results = dataSet.csv()
    else:
        results = dataSet.table()

    # sdss only allows 60 hits per minute
    sleep(1)

    self.log.info('completed the ``get`` method')
    return results
def __find_index(alig_file_pth, idx_extensions):
    """
    Find an index file for a genome alignment file in the same directory.

    :param alig_file_pth: path to the alignment file.
    :param idx_extensions: check for index files with these extensions
    :return: path to first index file that matches the name of the alignment
             file and has one of the specified extensions.
    """
    if idx_extensions is None:
        return None
    base, _ = os.path.splitext(alig_file_pth)
    for idx_ext in idx_extensions:
        candidate = base + os.extsep + idx_ext
        if os.path.isfile(candidate):
            return candidate
    return None
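A minimal usage sketch (the alignment path and the ``idx``/``fai`` extensions here are hypothetical; ``__find_index`` is called from within its own module, where name mangling does not apply):

# Given /data/aln.maf with a sibling /data/aln.idx on disk, this returns
# '/data/aln.idx'; if no candidate index file exists, it returns None.
idx_path = __find_index("/data/aln.maf", ["idx", "fai"])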
def tabulate(self, restricted_predicted_column_indices=[], restricted_predicted_column_names=[], dataset_name=None):
    '''Returns summary analysis from the dataframe as a DataTable object.

       DataTables are wrapped pandas dataframes which can be combined if they have the same width.
       This is useful for combining multiple analyses. DataTables can be printed to terminal as a
       tabular string using their representation function (i.e. print(data_table)).

       This function (tabulate) looks at specific analysis; this class (DatasetDataFrame) can be
       subclassed for custom tabulation.'''

    self._analyze()
    data_series = self.get_series_names(column_indices=restricted_predicted_column_indices,
                                        column_names=restricted_predicted_column_names)

    # Determine the multi-index headers
    group_names = []
    for l in self.index_layers:
        group_names.append(l)

    # Set up the table headers
    headers = ['Dataset'] + group_names + ['n', 'R', 'rho', 'MAE', 'Fraction correct', 'FC sign', 'SB sensitivity', 'SB specificity']

    table_rows = []
    for dseries in data_series:
        if isinstance(dseries, tuple):
            dseries_l = list(dseries)
        else:
            assert(isinstance(dseries, basestring))
            dseries_l = [dseries]
        results = []
        assert(len(self.index_layers) == len(dseries))
        if self.analysis.get(dseries, {}).get('partial') and self.analysis.get(dseries, {}).get('full'):
            results.append((dseries_l[:-1] + [dseries_l[-1] + '*'], self.analysis[dseries]['partial']))
            results.append((dseries_l[:-1] + [dseries_l[-1]], self.analysis[dseries]['full']))
        elif self.analysis.get(dseries, {}).get('partial'):
            results.append((dseries_l[:-1] + [dseries_l[-1] + '*'], self.analysis[dseries]['partial']))
        elif self.analysis.get(dseries, {}).get('full'):
            results = [(dseries, self.analysis[dseries]['full'])]
        for result in results:
            n = result[1]['data']['n']
            R = result[1]['data']['pearsonr'][0]
            rho = result[1]['data']['spearmanr'][0]
            mae = result[1]['data']['MAE']
            fraction_correct = result[1]['data']['fraction_correct']
            accuracy = result[1]['data']['accuracy']
            SBSensitivity = '{0:.3f} / {1}'.format(result[1]['data']['significant_beneficient_sensitivity'][0], result[1]['data']['significant_beneficient_sensitivity'][1])
            SBSpecificity = '{0:.3f} / {1}'.format(result[1]['data']['significant_beneficient_specificity'][0], result[1]['data']['significant_beneficient_specificity'][1])
            method = result[0]
            if isinstance(method, tuple):
                method = list(method)
            table_rows.append([dataset_name or self.reference_dataset_name] + method + [n, R, rho, mae, fraction_correct, accuracy, SBSensitivity, SBSpecificity])

    # Convert the lists into a (wrapped) pandas dataframe to make use of the pandas formatting code to save reinventing the wheel...
    return DataTable(pandas.DataFrame(table_rows, columns=headers), self.index_layers)
def validate_config(config):
    """
    Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.

    :param config: The configuration file that contains the specification of the extractor

    :return: True if config is valid, else raises an exception that specifies the correction to be made
    """
    fields = [f for f in get_fields(config)]
    if len(fields) != len(set(fields)):
        # parenthesize the subtraction: "..." % a - b would try to subtract from the string
        raise InvalidConfigException(
            "Invalid configuration file - %d duplicate field names" %
            (len(fields) - len(set(fields)))
        )
    return True
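A quick sketch of the failure mode this guards against, assuming ``get_fields`` yields every field name declared in the config (the dict layout below is illustrative):

config = {"fields": [{"name": "title"}, {"name": "title"}]}  # one duplicate
try:
    validate_config(config)
except InvalidConfigException as exc:
    print(exc)  # "Invalid configuration file - 1 duplicate field names"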
def run_download_media(filename=None):
    """
    Downloads the media dump from the server into your local machine.

    In order to import the downloaded media dump, run ``fab import_media``

    Usage::

        fab prod run_download_media
        fab prod run_download_media:filename=foobar.tar.gz
    """
    if not filename:
        filename = settings.MEDIA_DUMP_FILENAME
    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)
    local('scp {0}:{1}{2} .'.format(
        ssh, settings.FAB_SETTING('SERVER_MEDIA_BACKUP_DIR'), filename))
def parse(self, tolerance=0, downsample=None, evidence=2, use_gene_names=False):
    """Divide out the transcripts, allowing junction tolerance if wanted"""
    g = Graph()
    nodes = [Node(x) for x in self._transcripts]
    for n in nodes:
        g.add_node(n)
    for i in range(0, len(nodes)):
        for j in range(0, len(nodes)):
            if i == j:
                continue
            jov = nodes[i].payload.junction_overlap(nodes[j].payload, tolerance)
            sub = jov.is_subset()
            if not sub:
                continue
            if sub == 1:
                g.add_edge(Edge(nodes[i], nodes[j]))
                g.add_edge(Edge(nodes[j], nodes[i]))
            if sub == 2:
                g.add_edge(Edge(nodes[i], nodes[j]))
    g.merge_cycles()
    roots = g.roots
    groups = []
    for r in roots:
        g2 = g.get_root_graph(r)
        c = CompatibleGraph(g2, tolerance, downsample, evidence, use_gene_names=use_gene_names)
        groups.append(c)
    return groups
def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
    "Compute the edges for the histogram bins to keep it the same for all nodes."

    if use_orig_distr:
        print('Using original distribution (without histogram) to compute edge weights!')
        edges = None
        return edges

    if edge_range is None:
        if trim_outliers:
            # percentiles to keep: [trim_percentile, 100 - trim_percentile], e.g. [5, 95]
            edges_of_edges = np.array([np.percentile(features, trim_percentile),
                                       np.percentile(features, 100 - trim_percentile)])
        else:
            edges_of_edges = np.array([np.min(features), np.max(features)])
    else:
        edges_of_edges = edge_range

    # Edges computed using data from all nodes, in order to establish correspondence
    edges = np.linspace(edges_of_edges[0], edges_of_edges[1], num=num_bins, endpoint=True)

    return edges
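A small sanity check of the edge computation (data and parameters here are illustrative):

import numpy as np

features = np.random.default_rng(0).normal(size=1000)
# trimming at the 5th/95th percentiles keeps outliers from stretching the bins
edges = compute_bin_edges(features, num_bins=20, edge_range=None,
                          trim_outliers=True, trim_percentile=5)
assert len(edges) == 20 and np.all(np.diff(edges) > 0)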
def iter_prio_dict(prio_dict):
    """
    Iterate over a priority dictionary. A priority dictionary is a
    dictionary keyed by integer priority, with the values being lists
    of objects. This generator will iterate over the dictionary in
    priority order (from lowest integer value to highest integer
    value), yielding each object in the lists in turn.

    :param prio_dict: A priority dictionary, as described above.

    :returns: An iterator that yields each object in the correct
              order, based on priority and ordering within the
              priority values.
    """
    for _prio, objs in sorted(prio_dict.items(), key=lambda x: x[0]):
        for obj in objs:
            yield obj
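For example, a priority dictionary yields its objects lowest priority first, preserving order within each list:

prio_dict = {10: ["c", "d"], 1: ["a"], 5: ["b"]}
assert list(iter_prio_dict(prio_dict)) == ["a", "b", "c", "d"]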
def save(self, filething=None, padding=None):
    """save(filething=None, padding=None)

    Save a tag to a file.

    If no filename is given, the one most recently loaded is used.

    Args:
        filething (filething)
        padding (:obj:`mutagen.PaddingFunction`)
    Raises:
        mutagen.MutagenError
    """
    try:
        self.tags._inject(filething.fileobj, padding)
    except (IOError, error) as e:
        reraise(self._Error, e, sys.exc_info()[2])
    except EOFError:
        raise self._Error("no appropriate stream found")
def features_node_edge_graph(obj):
    """
    Transform the features into a more graph-like structure by
    appropriately splitting LineString features into two-point
    "edges" that connect Point "nodes".
    """
    points = {}
    features = obj['features']
    for feature in tqdm(obj['features']):
        for (lon, lat) in geojson.utils.coords(feature):
            points.setdefault((lon, lat), 0)
            points[(lon, lat)] += 1
    # a "node" is any coordinate shared by more than one feature
    points = [p for (p, c) in points.items() if c > 1]
    features = [geojson.Point(p) for p in points]

    # For each feature, split it into "edge" features
    # that occur between every point.
    for f in tqdm(obj['features']):
        seqs = []
        seq = []
        for point in geojson.utils.coords(f):
            if len(seq) > 0:
                seq.append(point)
            if point in points:
                # close off the current edge and start a new one at this node
                if len(seq) > 1 and seq[0] in points:
                    seqs.append(seq)
                seq = [point]
        for seq in seqs:
            features.append(geojson.Feature(geometry={"coordinates": seq, "type": f['geometry']['type']},
                                            properties=f['properties'], type=f['type']))
    obj['features'] = features
    return obj
def most_by_mask(self, mask, y, mult):
    """
    Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities

    Arguments:
        mask (numpy.ndarray): the mask of probabilities specific to the selected class;
            a boolean array with shape (num_of_samples,) which contains True where
            class==selected_class, and False everywhere else
        y (int): the selected class
        mult (int): sets the ordering; -1 descending, 1 ascending

    Returns:
        idxs (ndarray): An array of indexes of length 4
    """
    idxs = np.where(mask)[0]
    cnt = min(4, len(idxs))
    return idxs[np.argsort(mult * self.probs[idxs, y])[:cnt]]
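The selection logic in isolation, with illustrative data in place of ``self.probs`` (``mult=-1`` surfaces the most confident predictions for class ``y`` first):

import numpy as np

probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.3, 0.7], [0.55, 0.45]])
mask = np.array([True, False, True, False, True])  # samples of class 0
idxs = np.where(mask)[0]                           # -> array([0, 2, 4])
top = idxs[np.argsort(-1 * probs[idxs, 0])[:4]]
print(top)  # [0 2 4], ordered by descending confidence in class 0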
def get_form(self, request, obj=None, **kwargs):
    """
    Returns a Form class for use in the admin add view.
    This is used by add_view and change_view.
    """
    parent_id = request.GET.get('parent_id', None)
    if not parent_id:
        parent_id = request.POST.get('parent_id', None)
    if parent_id:
        return AddFolderPopupForm
    else:
        folder_form = super(FolderAdmin, self).get_form(
            request, obj=None, **kwargs)

        def folder_form_clean(form_obj):
            cleaned_data = form_obj.cleaned_data
            folders_with_same_name = self.get_queryset(request).filter(
                parent=form_obj.instance.parent,
                name=cleaned_data['name'])
            if form_obj.instance.pk:
                folders_with_same_name = folders_with_same_name.exclude(
                    pk=form_obj.instance.pk)
            if folders_with_same_name.exists():
                raise ValidationError(
                    'Folder with this name already exists.')
            return cleaned_data

        # attach clean to the default form rather than defining a new form class
        folder_form.clean = folder_form_clean
        return folder_form
def reset(self):
    """
    Reset all internal storage to initial status

    Returns
    -------
    None
    """
    self.solved = False
    self.niter = 0
    self.iter_mis = []
    self.F = None
    self.system.dae.factorize = True
def gfrepi(window, begmss, endmss):
    """
    This entry point initializes a search progress report.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfrepi_c.html

    :param window: A window over which a job is to be performed.
    :type window: spiceypy.utils.support_types.SpiceCell
    :param begmss: Beginning of the text portion of the output message.
    :type begmss: str
    :param endmss: End of the text portion of the output message.
    :type endmss: str
    """
    begmss = stypes.stringToCharP(begmss)
    endmss = stypes.stringToCharP(endmss)
    # don't do anything if we were given a pointer to a SpiceCell, like if we were in a callback
    if not isinstance(window, ctypes.POINTER(stypes.SpiceCell)):
        assert isinstance(window, stypes.SpiceCell)
        assert window.is_double()
        window = ctypes.byref(window)
    libspice.gfrepi_c(window, begmss, endmss)
def parse_value(named_reg_value):
    """
    Convert the value returned from EnumValue to a (name, value) tuple
    using the value classes.
    """
    name, value, value_type = named_reg_value
    value_class = REG_VALUE_TYPE_MAP[value_type]
    return name, value_class(value)
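A hedged sketch of the expected call flow on Windows, assuming ``REG_VALUE_TYPE_MAP`` maps registry type constants (e.g. ``winreg.REG_SZ``) to value classes:

import winreg

key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                     r"SOFTWARE\Microsoft\Windows NT\CurrentVersion")
# EnumValue returns (name, value, type) triples, which parse_value unpacks
name, value = parse_value(winreg.EnumValue(key, 0))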
def _getModelPosterior(self, min):
    """
    USES LAPLACE APPROXIMATION TO CALCULATE THE BAYESIAN MODEL POSTERIOR
    """
    Sigma = self._getLaplaceCovar(min)
    n_params = self.vd.getNumberScales()
    ModCompl = 0.5 * n_params * sp.log(2 * sp.pi) + 0.5 * sp.log(sp.linalg.det(Sigma))
    RV = min['LML'] + ModCompl
    return RV
def row_wise_rescale(matrix):
    """
    Row-wise rescale of a given matrix.

    For fMRI data (num_voxels x num_time_points), this would translate to voxel-wise normalization over time.

    Parameters
    ----------
    matrix : ndarray
        Input rectangular matrix, typically a carpet of size num_voxels x num_4th_dim,
        4th_dim could be time points or gradients or other appropriate

    Returns
    -------
    normed : ndarray
        normalized matrix
    """
    if matrix.shape[0] <= matrix.shape[1]:
        raise ValueError('Number of voxels is less than the number of time points!! '
                         'Are you sure data is reshaped correctly?')

    min_ = matrix.min(axis=1)
    range_ = np.ptp(matrix, axis=1)  # ptp : peak to peak, max - min
    min_tile = np.tile(min_, (matrix.shape[1], 1)).T
    range_tile = np.tile(range_, (matrix.shape[1], 1)).T
    # avoiding any numerical difficulties
    range_tile[range_tile < np.finfo(float).eps] = 1.0

    normed = (matrix - min_tile) / range_tile

    del min_, range_, min_tile, range_tile

    return normed
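A quick check of the rescaling behaviour (the shape is chosen so voxels outnumber time points, as the guard requires):

import numpy as np

carpet = np.random.default_rng(1).normal(size=(100, 20))  # 100 voxels, 20 time points
normed = row_wise_rescale(carpet)
assert np.allclose(normed.min(axis=1), 0.0)
assert np.allclose(normed.max(axis=1), 1.0)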
def batch_insert(self, records, typecast=False):
    """
    Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)
    To change the rate limit use ``airtable.API_LIMIT = 0.2``
    (5 per second)

    >>> records = [{'Name': 'John'}, {'Name': 'Marc'}]
    >>> airtable.batch_insert(records)

    Args:
        records(``list``): Records to insert
        typecast(``boolean``): Automatic data conversion from string values.

    Returns:
        records (``list``): list of added records
    """
    return self._batch_request(self.insert, records)
def to_graph_decomposition(H):
    """Returns an UndirectedHypergraph object that has the same nodes
    (and corresponding attributes) as the given hypergraph H, except that
    for all hyperedges in the given H, each node in the hyperedge is
    pairwise connected to every other node also in that hyperedge in the
    new H. Said another way, each of the original hyperedges are
    decomposed in the new H into cliques (aka the "2-section" or
    "clique graph").

    :param H: the hypergraph to decompose into a graph.
    :returns: UndirectedHypergraph -- the decomposed hypergraph.
    :raises: TypeError -- Transformation only applicable to undirected
             hypergraphs

    """
    if not isinstance(H, UndirectedHypergraph):
        raise TypeError("Transformation only applicable to \
                        undirected hypergraphs")

    G = UndirectedHypergraph()

    # Copy the nodes (and their attributes) from the input hypergraph; the
    # original iterated over the empty G and passed an undefined name to
    # get_node_attributes.
    nodes = [(node, H.get_node_attributes(node))
             for node in H.node_iterator()]
    G.add_nodes(nodes)

    edges = [(node_a, node_b)
             for hyperedge_id in H.hyperedge_id_iterator()
             for node_a in H.get_hyperedge_nodes(hyperedge_id)
             for node_b in H.get_hyperedge_nodes(hyperedge_id)
             if node_a != node_b]

    G.add_hyperedges(edges)

    return G
def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system.

    name
        Absolute path to file where to save the configuration.
        To push the files to the Master, use
        :mod:`cp.push <salt.modules.cp.push>` Execution function.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.
        The default mode for new files and directories corresponds to the
        umask of the salt process. The mode of existing files and directories
        will only be changed if ``mode`` is specified.

        .. note::
            This option is **not** supported on Windows.

    attrs
        The attributes to have on this file, e.g. ``a``, ``i``. The attributes
        can be any or a combination of the following characters:
        ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        If set to ``True``, then the parent directories will be created to
        facilitate the creation of the named file. If ``False``, and the
        parent directory of the destination file doesn't exist, the state
        will fail.

    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions by adding the execute bit to the mode of
        the files.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's
        not enforced.

    replace: ``True``
        If set to ``False`` and the file already exists, the file will not be
        modified even if changes would otherwise be made. Permissions and
        ownership will still be enforced, however.

    backup
        Overrides the default backup mode for this specific file. See
        :ref:`backup_mode documentation <file-state-backups>` for more
        details.

    show_changes: ``True``
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create: ``True``
        If set to ``False``, then the file will only be managed if the file
        already exists on the system.

    encoding
        If specified, then the specified encoding will be used. Otherwise,
        the file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

    encoding_errors: ``'strict'``
        Error encoding scheme. Default is ``'strict'``. See
        https://docs.python.org/2/library/codecs.html#codec-base-classes
        for the list of available schemes.

    allow_empty: ``True``
        If set to ``False``, then the state will fail if the contents
        specified by ``contents_pillar`` or ``contents_grains`` are empty.

    follow_symlinks: ``True``
        If the desired path is a symlink follow it and make changes to the
        file to which the symlink points.

    check_cmd
        The specified command will be run with an appended argument of a
        *temporary* file containing the new managed contents. If the command
        exits with a zero status the new managed contents will be written to
        the managed destination. If the command exits with a nonzero exit
        code, the state will fail and no changes will be made to the file.

    tmp_dir
        Directory for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file location (e.g. daemons restricted to their
        own config directories by an apparmor profile).

    tmp_ext
        Suffix for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file extension (e.g. the init-checkconf upstart
        config checker).

    win_owner: ``None``
        The owner of the directory. If this is not passed, user will be used.
        If user is not passed, the account under which Salt is running will
        be used.

    win_perms: ``None``
        A dictionary containing permissions to grant and their propagation.
        For example: ``{'Administrators': {'perms': 'full_control'}}``. Can
        be a single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        A dictionary containing permissions to deny and their propagation.
        For example: ``{'Administrators': {'perms': 'full_control'}}``. Can
        be a single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_inheritance: ``True``
        True to inherit permissions from the parent directory, False not to
        inherit permission.

    win_perms_reset: ``False``
        If ``True`` the existing DACL will be cleared and replaced with the
        settings defined in this function. If ``False``, new entries will be
        appended to the existing DACL. Default is ``False``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by
    the Minion ID, in chronological files. For example, if the state is
    executed on the 3rd of August 2018, at 5:15PM, on the Minion
    ``core1.lon01``, the configuration would be saved in the file:
    ``/var/backups/core1.lon01/1533316558.cfg``
    '''
    ret = __salt__['net.config'](source=source)
    if not ret['result']:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ret['comment']
        }
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)
def CI_calc(mean, SE, CV=1.96):
    """
    Calculate confidence interval.

    :param mean: mean of data
    :type mean: float
    :param SE: standard error of data
    :type SE: float
    :param CV: critical value
    :type CV: float
    :return: confidence interval as tuple
    """
    try:
        CI_down = mean - CV * SE
        CI_up = mean + CV * SE
        return (CI_down, CI_up)
    except Exception:
        return ("None", "None")
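A worked example: a mean of 10.0 with a standard error of 0.5 at the default critical value of 1.96 gives a 95% interval of (9.02, 10.98):

lower, upper = CI_calc(10.0, 0.5)  # CV defaults to 1.96
assert (lower, upper) == (10.0 - 1.96 * 0.5, 10.0 + 1.96 * 0.5)  # (9.02, 10.98)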
def initSchd_1_to_4(self):
    """ Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. """
    self.m_schd_1_to_4["reserved_40"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
    # Schedules 1-4 each hold four (Hour, Min, Tariff) periods with an
    # identical field layout, so build the block in a loop; insertion order
    # matches the original field-by-field assignments.
    reserved = {1: ("reserved_41", 24), 2: ("reserved_42", 24), 3: ("reserved_43", 24)}
    for schedule in range(1, 5):
        for period in range(1, 5):
            for part in ("Hour", "Min", "Tariff"):
                field = "Schedule_{0}_Period_{1}_{2}".format(schedule, period, part)
                self.m_schd_1_to_4[field] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
        if schedule in reserved:
            name, size = reserved[schedule]
            self.m_schd_1_to_4[name] = [size, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_schd_1_to_4["reserved_44"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
    self.m_schd_1_to_4["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
def get_current_future_chain(self, continuous_future, dt):
    """
    Retrieves the future chain for the contract at the given `dt` according
    to the `continuous_future` specification.

    Returns
    -------
    future_chain : list[Future]
        A list of active futures, where the first index is the current
        contract specified by the continuous future definition, the second is
        the next upcoming contract and so on.
    """
    rf = self._roll_finders[continuous_future.roll_style]
    session = self.trading_calendar.minute_to_session_label(dt)
    contract_center = rf.get_contract_center(
        continuous_future.root_symbol, session, continuous_future.offset)
    oc = self.asset_finder.get_ordered_contracts(
        continuous_future.root_symbol)
    chain = oc.active_chain(contract_center, session.value)
    return self.asset_finder.retrieve_all(chain)
def line(x_fn, y_fn, *, options={}, **interact_params):
    """
    Generates an interactive line chart that allows users to change the
    parameters of the inputs x_fn and y_fn.

    Args:
        x_fn (Array | (*args -> Array str | Array int | Array float)):
            If array, uses array values for x-coordinates. If function, must
            take parameters to interact with and return an array of strings
            or numbers. These will become the x-coordinates of the line plot.

        y_fn (Array | (Array, *args -> Array int | Array float)):
            If array, uses array values for y-coordinates. If function, must
            take in the output of x_fn as its first parameter and optionally
            other parameters to interact with. Must return an array of
            numbers. These will become the y-coordinates of the line plot.

    Kwargs:
        {options}

        interact_params (dict): Keyword arguments in the same format as
            `ipywidgets.interact`. One argument is required for each argument
            of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
            parameter names, prefix the corresponding kwargs with `x__` and
            `y__`.

    Returns:
        VBox with two children: the interactive controls and the figure.

    >>> line([1, 2, 3], [4, 7, 10])
    VBox(...)

    >>> def x_values(max): return np.arange(0, max)
    >>> def y_values(xs, sd):
    ...     return xs + np.random.normal(len(xs), scale=sd)
    >>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
    VBox(...)
    """
    fig = options.get('_fig', False) or _create_fig(options=options)
    [line] = _create_marks(fig=fig, marks=[bq.Lines], options=options)
    _add_marks(fig, [line])

    def wrapped(**interact_params):
        x_data = util.maybe_call(x_fn, interact_params, prefix='x')
        line.x = x_data
        y_bound = util.maybe_curry(y_fn, x_data)
        line.y = util.maybe_call(y_bound, interact_params, prefix='y')

    controls = widgets.interactive(wrapped, **interact_params)
    return widgets.VBox([controls, fig])
def _apply_theme(self):
    """
    Apply theme attributes to Matplotlib objects
    """
    self.theme.apply_axs(self.axs)
    self.theme.apply_figure(self.figure)
def unblock_username(username, pipe=None):
    """ unblock the given Username """
    do_commit = False
    if not pipe:
        pipe = REDIS_SERVER.pipeline()
        do_commit = True
    if username:
        pipe.delete(get_username_attempt_cache_key(username))
        pipe.delete(get_username_blocked_cache_key(username))
        if do_commit:
            pipe.execute()
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
    '''
    Remove a gem source.

    :param source_uri: string
        The source URI to remove.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.sources_remove http://rubygems.org/
    '''
    return _gem(['sources', '--remove', source_uri],
                ruby,
                gem_bin=gem_bin,
                runas=runas)
def map_lazy(
    self,
    target: Callable,
    map_iter: Sequence[Any] = None,
    *,
    map_args: Sequence[Sequence[Any]] = None,
    args: Sequence = None,
    map_kwargs: Sequence[Mapping[str, Any]] = None,
    kwargs: Mapping = None,
    pass_state: bool = False,
    num_chunks: int = None,
) -> SequenceTaskResult:
    r"""
    Functional equivalent of the ``map()`` built-in, but executed in a
    parallel fashion.

    Distributes the iterables provided in the ``map_*`` arguments to
    ``num_chunks`` worker nodes.

    The idea is to:

    1. Split the iterables provided in the ``map_*`` arguments into
       ``num_chunks`` equally sized chunks.
    2. Send these chunks to ``num_chunks`` worker nodes.
    3. Wait for all these worker nodes to finish their task(s).
    4. Combine the acquired results in the same sequence as provided in the
       ``map_*`` arguments.
    5. Return the combined results.

    *Steps 3-5 can be done lazily, on the fly with the help of an iterator.*

    :param target:
        The ``Callable`` to be invoked inside a :py:class:`Process`.

        *It is invoked with the following signature:*

        ``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``

        *Where:*

        - ``i`` is the index of the n\ :sup:`th` element of the Iterable(s)
          provided in the ``map_*`` arguments.
        - ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.

        The ``pass_state`` keyword argument allows you to include the
        ``state`` arg.
    :param map_iter:
        A sequence whose elements are supplied as the *first* positional
        argument to the ``target``.
    :param map_args:
        A sequence whose elements are supplied as positional arguments
        (``*args``) to the ``target``.
    :param map_kwargs:
        A sequence whose elements are supplied as keyword arguments
        (``**kwargs``) to the ``target``.
    :param args:
        The argument tuple for ``target``, supplied after ``map_iter`` and
        ``map_args``. By default, it is an empty ``tuple``.
    :param kwargs:
        A dictionary of keyword arguments for ``target``. By default, it is
        an empty ``dict``.
    :param pass_state:
        Whether this process needs to access the state.

        If this is set to ``False``, then the ``state`` argument won't be
        provided to the ``target``. If this is set to ``True``, then a
        :py:class:`State` object is provided as the first argument to the
        ``target``.

        Unlike :py:class:`Process`, it is set to ``False`` by default
        (to retain a similar API to the built-in ``map()``).
    :param num_chunks:
        The number of worker nodes to use. By default, it is set to
        ``multiprocessing.cpu_count()`` (the number of CPU cores on your
        system).
    :return:
        The result is quite similar to the ``map()`` built-in. It returns an
        :py:class:`Iterable` which contains the return values of the
        ``target`` function when applied to every item of the Iterables
        provided in the ``map_*`` arguments.

        The actual "processing" starts as soon as you call this function.
        The returned :py:class:`Iterable` only fetches the results from the
        worker processes.

    .. note::
        - If ``len(map_iter) != len(map_args) != len(map_kwargs)``, then the
          results will be cut-off at the shortest Sequence.

    See :ref:`worker_map` for Examples.
    """
    if num_chunks is None:
        num_chunks = multiprocessing.cpu_count()

    lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
    assert (
        lengths
    ), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."

    length = min(lengths)
    assert (
        length > num_chunks
    ), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)

    chunk_length, extra = divmod(length, num_chunks)
    if extra:
        chunk_length += 1

    task_id = util.generate_task_id((chunk_length, length, num_chunks))

    iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
    args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
    kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)

    target_bytes = serializer.dumps_fn(target)

    for index in range(num_chunks):
        params = (
            iter_chunks[index],
            args_chunks[index],
            args,
            kwargs_chunks[index],
            kwargs,
        )
        task = (params, pass_state, self.namespace)

        self._task_push.send_multipart(
            [
                util.encode_chunk_id(task_id, index),
                target_bytes,
                serializer.dumps(task),
            ]
        )

    return SequenceTaskResult(self.server_address, task_id)
r""" Functional equivalent of ``map()`` in-built function, but executed in a parallel fashion. Distributes the iterables, provided in the ``map_*`` arguments to ``num_chunks`` no of worker nodes. The idea is to: 1. Split the the iterables provided in the ``map_*`` arguments into ``num_chunks`` no of equally sized chunks. 2. Send these chunks to ``num_chunks`` number of worker nodes. 3. Wait for all these worker nodes to finish their task(s). 4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments. 5. Return the combined results. *Steps 3-5 can be done lazily, on the fly with the help of an iterator* :param target: The ``Callable`` to be invoked inside a :py:class:`Process`. *It is invoked with the following signature:* ``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)`` *Where:* - ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments. - ``args`` and ``kwargs`` are passed from the ``**process_kwargs``. The ``pass_state`` Keyword Argument of allows you to include the ``state`` arg. :param map_iter: A sequence whose elements are supplied as the *first* positional argument to the ``target``. :param map_args: A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``. :param map_kwargs: A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``. :param args: The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``. By default, it is an empty ``tuple``. :param kwargs: A dictionary of keyword arguments for ``target``. By default, it is an empty ``dict``. :param pass_state: Weather this process needs to access the state. If this is set to ``False``, then the ``state`` argument won't be provided to the ``target``. If this is set to ``True``, then a :py:class:`State` object is provided as the first Argument to the ``target``. Unlike :py:class:`Process` it is set to ``False`` by default. (To retain a similar API to in-built ``map()``) :param num_chunks: The number of worker nodes to use. By default, it is set to ``multiprocessing.cpu_count()`` (The number of CPU cores on your system) :param lazy: Wheteher to return immediately put :return: The result is quite similar to ``map()`` in-built function. It returns a :py:class:`Iterable` which contatins, the return values of the ``target`` function, when applied to every item of the Iterables provided in the ``map_*`` arguments. The actual "processing" starts as soon as you call this function. The returned :py:class:`Iterable` only fetches the results from the worker processes. .. note:: - If ``len(map_iter) != len(maps_args) != len(map_kwargs)``, then the results will be cut-off at the shortest Sequence. See :ref:`worker_map` for Examples.
def bin(args):
    """
    %prog bin data.tsv

    Convert tsv to binary format.
    """
    p = OptionParser(bin.__doc__)
    p.add_option("--dtype", choices=("float32", "int32"),
                 help="dtype of the matrix")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    tsvfile, = args
    dtype = opts.dtype
    if dtype is None:  # Guess
        dtype = np.int32 if "data" in tsvfile else np.float32
    else:
        dtype = np.int32 if dtype == "int32" else np.float32

    print("dtype: {}".format(dtype), file=sys.stderr)
    fp = open(tsvfile)
    next(fp)
    arrays = []
    for i, row in enumerate(fp):
        a = np.fromstring(row, sep="\t", dtype=dtype)
        a = a[1:]
        arrays.append(a)
        print(i, a, file=sys.stderr)

    print("Merging", file=sys.stderr)
    b = np.concatenate(arrays)
    print("Binary shape: {}".format(b.shape), file=sys.stderr)
    binfile = tsvfile.rsplit(".", 1)[0] + ".bin"
    b.tofile(binfile)
%prog bin data.tsv

Convert tsv to binary format.
def remove_bucket(self, bucket_name): """ Remove a bucket. :param bucket_name: Bucket to remove """ is_valid_bucket_name(bucket_name) self._url_open('DELETE', bucket_name=bucket_name) # Make sure to purge bucket_name from region cache. self._delete_bucket_region(bucket_name)
Remove a bucket. :param bucket_name: Bucket to remove
def raise_301(instance, location): """Abort the current request with a 301 (Moved Permanently) response code. Sets the Location header correctly. If the location does not start with a slash, the path of the current request is prepended. :param instance: Resource instance (used to access the response) :type instance: :class:`webob.resource.Resource` :raises: :class:`webob.exceptions.ResponseException` of status 301 """ _set_location(instance, location) instance.response.status = 301 raise ResponseException(instance.response)
Abort the current request with a 301 (Moved Permanently) response code. Sets the Location header correctly. If the location does not start with a slash, the path of the current request is prepended. :param instance: Resource instance (used to access the response) :type instance: :class:`webob.resource.Resource` :raises: :class:`webob.exceptions.ResponseException` of status 301
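A short usage sketch of raise_301; the ``OldEndpoint`` resource and its ``GET`` method are hypothetical names, assuming a webob-style Resource class as described in the docstring.

# Hypothetical resource that permanently redirects to a new path.
class OldEndpoint(Resource):
    def GET(self):
        # Aborts the request with "301 Moved Permanently" and a
        # Location header pointing at /new-endpoint.
        raise_301(self, '/new-endpoint')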
def startup_config_content(self, startup_config): """ Update the startup config :param startup_config: content of the startup configuration file """ try: startup_config_path = os.path.join(self.working_dir, "startup-config.cfg") if startup_config is None: startup_config = '' # We disallow erasing the startup config file if len(startup_config) == 0 and os.path.exists(startup_config_path): return with open(startup_config_path, 'w+', encoding='utf-8') as f: if len(startup_config) == 0: f.write('') else: startup_config = startup_config.replace("%h", self._name) f.write(startup_config) vlan_file = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id)) if os.path.exists(vlan_file): try: os.remove(vlan_file) except OSError as e: log.error("Could not delete VLAN file '{}': {}".format(vlan_file, e)) except OSError as e: raise IOUError("Can't write startup-config file '{}': {}".format(startup_config_path, e))
Update the startup config :param startup_config: content of the startup configuration file
def querytime(self, value): """ Sets self._querytime as well as self.query.querytime. :param value: None or datetime :return: """ self._querytime = value self.query.querytime = value
Sets self._querytime as well as self.query.querytime. :param value: None or datetime :return:
def weighted_axioms(self, x, y, xg): """ return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping between x and y. See kboom paper """ # TODO: allow additional weighting # weights are log odds w=log(p/(1-p)) # (Sub,Sup,Eq,Other) scope_pairs = [ ('label', 'label', 0.0, 0.0, 3.0,-0.8), ('label', 'exact', 0.0, 0.0, 2.5,-0.5), ('label', 'broad', -1.0, 1.0, 0.0, 0.0), ('label', 'narrow', 1.0,-1.0, 0.0, 0.0), ('label', 'related', 0.0, 0.0, 0.0, 0.0), ('exact', 'exact', 0.0, 0.0, 2.5,-0.5), ('exact', 'broad', -1.0, 1.0, 0.0, 0.0), ('exact', 'narrow', 1.0,-1.0, 0.0, 0.0), ('exact', 'related', 0.0, 0.0, 0.0, 0.0), ('related', 'broad', -0.5, 0.5, 0.0, 0.0), ('related', 'narrow', 0.5,-0.5, 0.0, 0.0), ('related', 'related', 0.0, 0.0, 0.0, 0.0), ('broad', 'broad', 0.0, 0.0, 0.0, 1.0), ('broad', 'narrow', -0.5, 0.5, 0.0, 0.2), ('narrow', 'narrow', 0.0, 0.0, 0.0, 0.0) ] # populate symmetric lookup matrix scope_map = defaultdict(dict) for (l,r,w1,w2,w3,w4) in scope_pairs: l = l.upper() r = r.upper() scope_map[l][r] = np.array((w1,w2,w3,w4)) scope_map[r][l] = np.array((w2,w1,w3,w4)) # TODO: get prior based on ontology pair # cumulative sum of weights WS = None pfx1 = self._id_to_ontology(x) pfx2 = self._id_to_ontology(y) for mw in self.config.get('match_weights', []): mpfx1 = mw.get('prefix1','') mpfx2 = mw.get('prefix2','') X = np.array(mw['weights']) if mpfx1 == pfx1 and mpfx2 == pfx2: WS = X elif mpfx2 == pfx1 and mpfx1 == pfx2: WS = self._flipweights(X) elif mpfx1 == pfx1 and mpfx2 == '' and WS is None: WS = X elif mpfx2 == pfx1 and mpfx1 == '' and WS is None: WS = self._flipweights(X) if WS is None: WS = np.array((0.0, 0.0, 0.0, 0.0)) # defaults WS += np.array(self.config.get('default_weights', [0.0, 0.0, 1.5, -0.1])) logging.info('WS defaults={}'.format(WS)) for xw in self.config.get('xref_weights', []): left = xw.get('left','') right = xw.get('right','') X = np.array(xw['weights']) if x == left and y == right: WS += X logging.info('MATCH: {} for {}-{}'.format(X, x, y)) elif y == left and x == right: WS += self._flipweights(X) logging.info('IMATCH: {}'.format(X)) smap = self.smap # TODO: symmetrical WT = np.array((0.0, 0.0, 0.0, 0.0)) WBESTMAX = np.array((0.0, 0.0, 0.0, 0.0)) n = 0 for sx in smap[x]: WBEST, _ = self._best_match_syn(sx, smap[y], scope_map) if WBEST is not None: WT += WBEST n += 1 if max(abs(WBEST)) > max(abs(WBESTMAX)): WBESTMAX = WBEST for sy in smap[y]: WBEST, _ = self._best_match_syn(sy, smap[x], scope_map) if WBEST is not None: WT += WBEST n += 1 # average best match if n > 0: logging.info('Adding BESTMAX={}'.format(WBESTMAX)) WS += WBESTMAX # TODO: xref, many to many WS += self._graph_weights(x, y, xg) # TODO: include additional defined weights, eg ORDO logging.info('Adding WS, gw={}'.format(WS)) # jaccard similarity (ss1,ss2) = xg[x][y][self.SIMSCORES] WS[3] += ((1-ss1) + (1-ss2)) / 2 # reciprocal best hits are higher confidence of equiv rs = xg[x][y]['reciprocal_score'] if rs == 4: WS[2] += 0.5 if rs == 0: WS[2] -= 0.2 #P = np.expit(WS) P = 1/(1+np.exp(-WS)) logging.info('Final WS={}, init P={}'.format(WS, P)) # probs should sum to 1.0 P = P / np.sum(P) return P
return a tuple (sub,sup,equiv,other) indicating estimated prior probabilities for an interpretation of a mapping between x and y. See kboom paper
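The final weights-to-probabilities step above can be illustrated in isolation. A minimal sketch with made-up weights, not real data:

import numpy as np

# Convert summed log-odds weights for (sub, sup, equiv, other) into a
# probability vector: elementwise logistic, then renormalize to sum to 1,
# exactly as done at the end of weighted_axioms().
WS = np.array([0.5, -0.2, 2.0, -0.1])  # example weights, not real data
P = 1 / (1 + np.exp(-WS))
P = P / np.sum(P)
print(P)  # four probabilities summing to 1.0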
def obfn_fvar(self, i): r"""Variable to be evaluated in computing :math:`f_i(\cdot)`, depending on the ``fEvalX`` option value. """ return self.X[..., i] if self.opt['fEvalX'] else self.Y
r"""Variable to be evaluated in computing :math:`f_i(\cdot)`, depending on the ``fEvalX`` option value.
def set_node_as_int(self, dst, src): """ Set a node to a value captured from another node example:: R = [ In : node #setcapture(_, node) ] """ dst.value = self.value(src) return True
Set a node to a value captured from another node example:: R = [ In : node #setcapture(_, node) ]
def citation_count(doi, url = "http://www.crossref.org/openurl/",
  key = "[email protected]", **kwargs):
    '''
    Get a citation count with a DOI

    :param doi: [String] DOI, digital object identifier
    :param url: [String] the API url for the function (should be left to default)
    :param key: [String] your API key

    See http://labs.crossref.org/openurl/ for more info on this
    Crossref API service.

    Usage::

        from habanero import counts
        counts.citation_count(doi = "10.1371/journal.pone.0042793")
        counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
        # DOI not found ## FIXME
        counts.citation_count(doi = "10.1016/j.fbr.2012")
    '''
    args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
    args = dict((k, v) for k, v in args.items() if v)
    res = requests.get(url, params = args, headers = make_ua(), **kwargs)
    xmldoc = minidom.parseString(res.content)
    val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value
    return int(str(val))
Get a citation count with a DOI

:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param key: [String] your API key

See http://labs.crossref.org/openurl/ for more info on this
Crossref API service.

Usage::

    from habanero import counts
    counts.citation_count(doi = "10.1371/journal.pone.0042793")
    counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
    # DOI not found ## FIXME
    counts.citation_count(doi = "10.1016/j.fbr.2012")
def readline(self, size=-1): "Ignore the `size` since a complete line must be processed." while True: try: record = next(self.reader) except StopIteration: break # Ensure this is a valid record if checks.record_is_valid(record): if self.use_cache: # Ensures the cache is updated and available self.variants.ensure_cache(record) # Calculate the MD5 of the variant itself (not the record) md5 = calculate_md5(record) # Ensure this variant is not already loaded if not self.use_cache or md5 not in self.variants: cleaned = self.process_line(record) cleaned.append(md5) return self.outdel.join(cleaned) + '\n' return ''
Ignore the `size` since a complete line must be processed.
def inline(args): """ Parse input file with the specified parser and post messages based on lint output :param args: Contains the following interface: How are we going to post comments? owner: Username of repo owner repo: Repository name pr: Pull request ID token: Authentication for repository url: Root URL of repository (not your project) Default: https://github.com dryrun: Prints instead of posting comments. zero_exit: If true: always return a 0 exit code. install: If true: install linters. max_comments: Maximum comments to write :return: Exit code. 1 if there are any comments, 0 if there are none. """ # don't load trusted value from config because we don't trust the config trusted = args.trusted args = load_config(args) print("Args:") pprint.pprint(args) ret_code = 0 # TODO: consider moving this git parsing stuff into the github interface url = args.url if args.repo_slug: owner = args.repo_slug.split("/")[0] repo = args.repo_slug.split("/")[1] else: owner = args.owner repo = args.repo if args.url: try: url_to_parse = args.url # giturlparse won't parse URLs that don't end in .git if not url_to_parse.endswith(".git"): url_to_parse += ".git" parsed = giturlparse.parse(str(url_to_parse)) url = parsed.resource if not url.startswith("https://"): url = "https://" + url if parsed.owner: owner = parsed.owner if parsed.name: repo = parsed.name except giturlparse.parser.ParserError: pass if not args.dryrun and args.interface not in interfaces.INTERFACES: print("Valid inline-plz config not found") return 1 print("Using interface: {0}".format(args.interface)) my_interface = None filenames = None if not args.dryrun: my_interface = interfaces.INTERFACES[args.interface]( owner, repo, args.pull_request, args.branch, args.token, url, args.commit, args.ignore_paths, args.prefix, args.autofix, args.set_status, ) if not my_interface.is_valid(): print("Invalid review. Exiting.") return 0 filenames = my_interface.filenames my_interface.start_review() try: linter_runner = LinterRunner( args.install, args.autorun, args.ignore_paths, args.config_dir, args.enabled_linters, args.disabled_linters, args.autofix, trusted, filenames, ) messages = linter_runner.run_linters() except Exception: # pylint: disable=broad-except print("Linting failed:\n{}".format(traceback.format_exc())) print("inline-plz version: {}".format(__version__)) print("Python version: {}".format(sys.version)) ret_code = 1 if my_interface: my_interface.finish_review(error=True) return ret_code print("{} lint messages found".format(len(messages))) print("inline-plz version: {}".format(__version__)) print("Python version: {}".format(sys.version)) # TODO: implement dryrun as an interface instead of a special case here if args.dryrun: print_messages(messages) write_messages_to_json(messages) return ret_code try: if my_interface.post_messages(messages, args.max_comments): if not args.zero_exit: ret_code = 1 if args.delete_outdated: my_interface.clear_outdated_messages() my_interface.finish_review(success=False) write_messages_to_json(messages) return ret_code if args.delete_outdated: my_interface.clear_outdated_messages() my_interface.finish_review(success=True) except KeyError: print("Interface not found: {}".format(args.interface)) traceback.print_exc() write_messages_to_json(messages) return ret_code
Parse input file with the specified parser and post messages based on lint output :param args: Contains the following interface: How are we going to post comments? owner: Username of repo owner repo: Repository name pr: Pull request ID token: Authentication for repository url: Root URL of repository (not your project) Default: https://github.com dryrun: Prints instead of posting comments. zero_exit: If true: always return a 0 exit code. install: If true: install linters. max_comments: Maximum comments to write :return: Exit code. 1 if there are any comments, 0 if there are none.
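The URL-normalization step inside inline() can be tried on its own. Note that the code appends ``.git`` before parsing because giturlparse won't parse URLs without that suffix; the repository URL below is a made-up example.

import giturlparse

# Owner/repo extraction, as done in inline() above.
parsed = giturlparse.parse("https://github.com/octocat/hello-world.git")
print(parsed.resource)  # github.com
print(parsed.owner)     # octocat
print(parsed.name)      # hello-world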
def _run_pre_command(self, pre_cmd):
    '''
    Run a pre command to get external args for a command
    '''
    logger.debug('Executing pre-command: %s', pre_cmd)
    try:
        pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    except OSError as err:
        if err.errno == errno.ENOENT:
            logger.debug('Command %s not found', pre_cmd)
            # Return an empty list (not None) so callers always get a list
            return []
    stdout, stderr = pre_proc.communicate()
    the_return_code = pre_proc.poll()
    logger.debug("Pre-command results:")
    logger.debug("STDOUT: %s", stdout)
    logger.debug("STDERR: %s", stderr)
    logger.debug("Return Code: %s", the_return_code)
    if the_return_code != 0:
        return []
    if six.PY3:
        stdout = stdout.decode('utf-8')
    return stdout.splitlines()
Run a pre command to get external args for a command
def list(self, log=values.unset, message_date_before=values.unset, message_date=values.unset, message_date_after=values.unset, limit=None, page_size=None): """ Lists NotificationInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode log: Filter by log level :param date message_date_before: Filter by date :param date message_date: Filter by date :param date message_date_after: Filter by date :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.call.notification.NotificationInstance] """ return list(self.stream( log=log, message_date_before=message_date_before, message_date=message_date, message_date_after=message_date_after, limit=limit, page_size=page_size, ))
Lists NotificationInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode log: Filter by log level :param date message_date_before: Filter by date :param date message_date: Filter by date :param date message_date_after: Filter by date :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.call.notification.NotificationInstance]
def main(): '''main routine''' # create parser arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-s', required=True, action='store', help='VM Scale Set name') arg_parser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group', action='store', help='Resource group name') arg_parser.add_argument('--newversion', '-n', dest='newversion', action='store', help='New platform image version string') arg_parser.add_argument('--customuri', '-c', dest='customuri', action='store', help='New custom image URI string') arg_parser.add_argument('--updatedomain', '-u', dest='updatedomain', action='store', type=int, help='Update domain (int)') arg_parser.add_argument('--vmid', '-i', dest='vmid', action='store', type=int, help='Single VM ID (int)') arg_parser.add_argument('--vmlist', '-l', dest='vmlist', action='store', help='List of VM IDs e.g. "["1", "2"]"') arg_parser.add_argument('--nowait', '-w', action='store_true', default=False, help='Start upgrades and then exit without waiting') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show additional information') arg_parser.add_argument('-y', dest='noprompt', action='store_true', default=False, help='Do not prompt for confirmation') args = arg_parser.parse_args() # switches to determine program behavior # go ahead and upgrade without waiting for confirmation when True noprompt = args.noprompt nowait = args.nowait # don't loop waiting for upgrade provisioning to complete when True verbose = args.verbose # print extra status information when True vmssname = args.vmssname resource_group = args.resource_group if args.newversion is not None: newversion = args.newversion storagemode = 'platform' elif args.customuri is not None: customuri = args.customuri storagemode = 'custom' else: arg_parser.error( 'You must specify a new version for platform images or a custom uri for custom images') if args.updatedomain is not None: updatedomain = args.updatedomain upgrademode = 'updatedomain' elif args.vmid is not None: vmid = args.vmid upgrademode = 'vmid' elif args.vmlist is not None: vmlist = args.vmlist upgrademode = 'vmlist' else: arg_parser.error( 'You must specify an update domain, a vm id, or a vm list') # Load Azure app defaults try: with open('vmssconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: print("Error: Expecting vmssconfig.json in current folder") sys.exit() tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get the vmss model vmssmodel = azurerm.get_vmss( access_token, subscription_id, resource_group, vmssname) # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': '))) if storagemode == 'platform': # check current version imgref = \ vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference'] print('Current image reference in Scale Set model:') print(json.dumps(imgref, sort_keys=False, indent=2, separators=(',', ': '))) # compare current version with new version if imgref['version'] == newversion: print('Scale Set model version is already set to ' + newversion + ', skipping model update.') else: if not noprompt: response = input( 'Confirm version upgrade to: ' + newversion + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version 
vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference']['version'] = newversion # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('OS version updated to ' + newversion + ' in model for VM Scale Set: ' + vmssname) else: # storagemode = custom # check current uri oldimageuri = \ vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] print('Current image URI in Scale Set model:' + oldimageuri) # compare current uri with new uri if oldimageuri == customuri: print('Scale Set model version is already set to ' + customuri + ', skipping model update.') else: if not noprompt: response = input('Confirm uri upgrade to: ' + customuri + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = customuri # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('Image URI updated to ' + customuri + ' in model for VM Scale Set: ' + vmssname) # build the list of VMs to upgrade depending on the upgrademode setting if upgrademode == 'updatedomain': # list the VMSS VM instance views to determine their update domains print('Examining the scale set..') udinstancelist = get_vm_ids_by_ud( access_token, subscription_id, resource_group, vmssname, updatedomain) print('VM instances in UD: ' + str(updatedomain) + ' to upgrade:') print(udinstancelist) vmids = json.dumps(udinstancelist) print('Upgrading VMs in UD: ' + str(updatedomain)) elif upgrademode == 'vmid': vmids = json.dumps([str(vmid)]) print('Upgrading VM ID: ' + str(vmid)) else: # upgrademode = vmlist vmids = vmlist print('Upgrading VM IDs: ' + vmlist) # do manualupgrade on the VMs in the list upgraderesult = azurerm.upgrade_vmss_vms( access_token, subscription_id, resource_group, vmssname, vmids) print(upgraderesult) # now wait for upgrade to complete # query VM scale set instance view if not nowait: updatecomplete = False provisioningstate = '' while not updatecomplete: vmssinstance_view = azurerm.get_vmss_instance_view( access_token, subscription_id, resource_group, vmssname) for status in vmssinstance_view['statuses']: provisioningstate = status['code'] if provisioningstate == 'ProvisioningState/succeeded': updatecomplete = True if verbose: print(provisioningstate) time.sleep(5) else: print('Check Scale Set provisioning state to determine when upgrade is complete.')
main routine
def list(self, request, *args, **kwargs): """ To get a list of SSH keys, run **GET** against */api/keys/* as authenticated user. A new SSH key can be created by any active users. Example of a valid request: .. code-block:: http POST /api/keys/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "name": "ssh_public_key1", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDURXDP5YhOQUYoDuTxJ84DuzqMJYJqJ8+SZT28 TtLm5yBDRLKAERqtlbH2gkrQ3US58gd2r8H9jAmQOydfvgwauxuJUE4eDpaMWupqquMYsYLB5f+vVGhdZbbzfc6DTQ2rY dknWoMoArlG7MvRMA/xQ0ye1muTv+mYMipnd7Z+WH0uVArYI9QBpqC/gpZRRIouQ4VIQIVWGoT6M4Kat5ZBXEa9yP+9du D2C05GX3gumoSAVyAcDHn/xgej9pYRXGha4l+LKkFdGwAoXdV1z79EG1+9ns7wXuqMJFHM2KDpxAizV0GkZcojISvDwuh vEAFdOJcqjyyH4FOGYa8usP1 [email protected]", } """ return super(SshKeyViewSet, self).list(request, *args, **kwargs)
To get a list of SSH keys, run **GET** against */api/keys/* as authenticated user. A new SSH key can be created by any active users. Example of a valid request: .. code-block:: http POST /api/keys/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "name": "ssh_public_key1", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDURXDP5YhOQUYoDuTxJ84DuzqMJYJqJ8+SZT28 TtLm5yBDRLKAERqtlbH2gkrQ3US58gd2r8H9jAmQOydfvgwauxuJUE4eDpaMWupqquMYsYLB5f+vVGhdZbbzfc6DTQ2rY dknWoMoArlG7MvRMA/xQ0ye1muTv+mYMipnd7Z+WH0uVArYI9QBpqC/gpZRRIouQ4VIQIVWGoT6M4Kat5ZBXEa9yP+9du D2C05GX3gumoSAVyAcDHn/xgej9pYRXGha4l+LKkFdGwAoXdV1z79EG1+9ns7wXuqMJFHM2KDpxAizV0GkZcojISvDwuh vEAFdOJcqjyyH4FOGYa8usP1 [email protected]", }
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """
    Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores whether comments are enabled.
    :type enable_comments_field: str
    """
    attrs = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    ModerationClass = type(ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), attrs)
    moderator.register(ParentModel, ModerationClass)
Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

:param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
:param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
:type publication_date_field: str
:param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores whether comments are enabled.
:type enable_comments_field: str
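A hedged registration sketch for moderate_model; the ``Blog`` model, its app path, and its field names are assumptions for illustration only.

# Hypothetical registration: Blog is assumed to have a
# `publication_date` DateTimeField and a `comments_enabled` BooleanField.
from blog.models import Blog

moderate_model(
    Blog,
    publication_date_field='publication_date',
    enable_comments_field='comments_enabled',
)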
def sign(self, msg, key): """ Create a signature over a message as defined in RFC7515 using an RSA key :param msg: the message. :type msg: bytes :returns: bytes, the signature of data. :rtype: bytes """ if not isinstance(key, rsa.RSAPrivateKey): raise TypeError( "The key must be an instance of rsa.RSAPrivateKey") sig = key.sign(msg, self.padding, self.hash) return sig
Create a signature over a message as defined in RFC7515 using an RSA key :param msg: the message. :type msg: bytes :returns: bytes, the signature of data. :rtype: bytes
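A minimal sketch of calling the signer above with the ``cryptography`` library; ``signer`` stands in for an instance of the class that defines sign() (its ``padding`` and ``hash`` attributes are assumed to be set by the class), and the no-backend key generation call assumes cryptography >= 3.1.

from cryptography.hazmat.primitives.asymmetric import rsa

# Generate a throwaway 2048-bit key and sign a message with it.
# `signer` is assumed to be an instance of the signing class above.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
signature = signer.sign(b"payload", private_key)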
def ngettext(*args, **kwargs): """ Like :func:`gettext`, except it supports pluralization. """ is_plural = args[2] > 1 if not is_plural: key = args[0] key_match = TRANSLATION_KEY_RE.match(key) else: key = args[1] key_match = PLURAL_TRANSLATION_KEY_RE.match(key) translation = _ngettext(*args, **kwargs) if not key_match or translation != key: return translation return _get_domain(key_match).ngettext(*args, **kwargs)
Like :func:`gettext`, except it supports pluralization.
def _get_module_filename(module):
    """
    Return the filename of `module` if it can be imported.

    If `module` is a package, its directory will be returned.

    If it cannot be imported ``None`` is returned.

    If the ``__file__`` attribute is missing, or the module or package is a
    compiled egg, then an :class:`Unparseable` instance is returned, since
    the source can't be retrieved.

    :param module: A module name, such as ``'test.test_config'``
    :type module: str
    """
    # Split up the module and its containing package, if it has one
    module = module.split('.')
    package = '.'.join(module[:-1])
    module = module[-1]

    try:
        if not package:
            # We aren't accessing a module within a package, but rather a top
            # level package, so it's a straight up import
            module = __import__(module)
        else:
            # Import the package containing our desired module
            package = __import__(package, fromlist=[module])
            # Get the module from that package
            module = getattr(package, module, None)

        filename = getattr(module, '__file__', None)
        if not filename:
            # No filename? Nothing to do here
            return Unparseable()

        # If we get a .pyc, strip the c to get .py so we can parse the source
        if filename.endswith('.pyc'):
            filename = filename[:-1]
        if not os.path.exists(filename):
            # If there's only a .pyc and no .py it's a compiled package or
            # egg and we can't get at the source for parsing
            return Unparseable()

        # If we have a package, we want the directory not the init file
        if filename.endswith('__init__.py'):
            filename = filename[:-11]

        # Yey, we found it
        return filename
    except ImportError:
        # Definitely not a valid module or package
        return
Return the filename of `module` if it can be imported. If `module` is a package, its directory will be returned. If it cannot be imported ``None`` is returned. If the ``__file__`` attribute is missing, or the module or package is a compiled egg, then an :class:`Unparseable` instance is returned, since the source can't be retrieved. :param module: A module name, such as ``'test.test_config'`` :type module: str
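Example calls for _get_module_filename; the exact return values depend on the local installation, so the paths shown are illustrative only.

# A plain module resolves to its .py file, a package to its directory,
# and an unimportable name to None.
print(_get_module_filename('os'))            # e.g. /usr/lib/python3.x/os.py
print(_get_module_filename('json'))          # e.g. /usr/lib/python3.x/json
print(_get_module_filename('no_such_mod'))   # None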
def mask_plane(data, wcs, region, negate=False):
    """
    Mask a 2d image (data) such that pixels within 'region' are set to nan.

    Parameters
    ----------
    data : 2d-array
        Image array.

    wcs : astropy.wcs.WCS
        WCS for the image in question.

    region : :class:`AegeanTools.regions.Region`
        A region within which the image pixels will be masked.

    negate : bool
        If True then pixels *outside* the region are masked.
        Default = False.

    Returns
    -------
    masked : 2d-array
        The original array, but masked as required.
    """
    # create an array but don't set the values (they are random)
    indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int)
    # since I know exactly what the index array needs to look like I can construct
    # it faster than list comprehension would allow
    # we do this only once and then recycle it
    idx = np.array([(j, 0) for j in range(data.shape[1])])
    j = data.shape[1]
    for i in range(data.shape[0]):
        idx[:, 1] = i
        indexes[i*j:(i+1)*j] = idx

    # put ALL the pixels into our vectorized functions and minimise our overheads
    ra, dec = wcs.wcs_pix2world(indexes, 1).transpose()
    bigmask = region.sky_within(ra, dec, degin=True)
    if not negate:
        bigmask = np.bitwise_not(bigmask)
    # rework our 1d list into a 2d array
    bigmask = bigmask.reshape(data.shape)
    # and apply the mask
    data[bigmask] = np.nan
    return data
Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required.
def _dimension_keys(self): """ Helper for __mul__ that returns the list of keys together with the dimension labels. """ return [tuple(zip([d.name for d in self.kdims], [k] if self.ndims == 1 else k)) for k in self.keys()]
Helper for __mul__ that returns the list of keys together with the dimension labels.
def validate_query_params(self, strict=True):
    """Check if the request is valid and can be sent, raise ValueError if
    not.

    `strict` is a boolean argument that defaults to True which means an
    exception is raised on every invalid query parameter, if set to False
    an exception is raised only when the search request cannot be performed
    because required query params are missing.
    """
    if not (self.api_key or default_api_key):
        raise ValueError('API key is missing')
    if strict and self.query_params_mode not in (None, 'and', 'or'):
        raise ValueError('query_params_mode should be one of "and"/"or"')
    if not self.person.is_searchable:
        raise ValueError('No valid name/username/phone/email in request')
    if strict and self.person.unsearchable_fields:
        raise ValueError('Some fields are unsearchable: %s'
                         % self.person.unsearchable_fields)
Check if the request is valid and can be sent, raise ValueError if not. `strict` is a boolean argument that defaults to True which means an exception is raised on every invalid query parameter, if set to False an exception is raised only when the search request cannot be performed because required query params are missing.
def prepare_state_m_for_insert_as(state_m_to_insert, previous_state_size):
    """Prepares and scales the meta data to fit into actual size of the state."""
    # TODO check how much code is duplicated or could be reused for library fit functionality meta data helper
    # TODO DO REFACTORING !!! and maybe move the whole method to meta data and rename it
    if isinstance(state_m_to_insert, AbstractStateModel) and \
            not gui_helper_meta_data.model_has_empty_meta(state_m_to_insert):

        if isinstance(state_m_to_insert, ContainerStateModel):
            # print("TARGET1", state_m_to_insert.state.state_element_attrs)
            models_dict = {'state': state_m_to_insert}

            for state_element_key in state_m_to_insert.state.state_element_attrs:
                state_element_list = getattr(state_m_to_insert, state_element_key)
                # Some models are held in a gtkmvc3.support.wrappers.ObsListWrapper, not a list
                if hasattr(state_element_list, 'keys'):
                    state_element_list = state_element_list.values()
                models_dict[state_element_key] = {elem.core_element.core_element_id: elem for elem in state_element_list}

            resize_factor = gui_helper_meta_data.scale_meta_data_according_state(models_dict, as_template=True)
            gui_helper_meta_data.resize_income_of_state_m(state_m_to_insert, resize_factor)

        elif isinstance(state_m_to_insert, StateModel):
            # print("TARGET2", state_m_to_insert.state.state_element_attrs)
            if previous_state_size:
                current_size = state_m_to_insert.get_meta_data_editor()['size']
                factor = gui_helper_meta_data.divide_two_vectors(current_size, previous_state_size)
                state_m_to_insert.set_meta_data_editor('size', previous_state_size)
                factor = (min(*factor), min(*factor))
                gui_helper_meta_data.resize_state_meta(state_m_to_insert, factor)
            else:
                logger.debug("For insert as template of {0} no resize of state meta data is performed because "
                             "the meta data has empty fields.".format(state_m_to_insert))
        # library state is not resized because its ports became resized indirectly -> see was resized flag
        elif not isinstance(state_m_to_insert, LibraryStateModel):
            raise TypeError("For insert as template of {0} no resize of state meta data is performed because "
                            "state model type is not ContainerStateModel or StateModel".format(state_m_to_insert))
    else:
        logger.info("For insert as template of {0} no resize of state meta data is performed because the meta data has "
                    "empty fields.".format(state_m_to_insert))
Prepares and scales the meta data to fit into actual size of the state.
def from_xdr_object(cls, op_xdr_object): """Creates a :class:`Payment` object from an XDR Operation object. """ if not op_xdr_object.sourceAccount: source = None else: source = encode_check( 'account', op_xdr_object.sourceAccount[0].ed25519).decode() destination = encode_check( 'account', op_xdr_object.body.paymentOp.destination.ed25519).decode() asset = Asset.from_xdr_object(op_xdr_object.body.paymentOp.asset) amount = Operation.from_xdr_amount(op_xdr_object.body.paymentOp.amount) return cls( source=source, destination=destination, asset=asset, amount=amount, )
Creates a :class:`Payment` object from an XDR Operation object.
def _unrecognised(achr): """ Handle unrecognised characters. """ if options['handleUnrecognised'] == UNRECOGNISED_ECHO: return achr elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE: return options['substituteChar'] else: raise KeyError(achr)
Handle unrecognised characters.
def execute_pool_txns(self, three_pc_batch) -> List: """ Execute a transaction that involves consensus pool management, like adding a node, client or a steward. :param ppTime: PrePrepare request time :param reqs_keys: requests keys to be committed """ committed_txns = self.default_executer(three_pc_batch) for txn in committed_txns: self.poolManager.onPoolMembershipChange(txn) return committed_txns
Execute a transaction that involves consensus pool management, like adding a node, client or a steward. :param ppTime: PrePrepare request time :param reqs_keys: requests keys to be committed
def _get_app_auth_headers(self): """Set the correct auth headers to authenticate against GitHub.""" now = datetime.now(timezone.utc) expiry = now + timedelta(minutes=5) data = {"iat": now, "exp": expiry, "iss": self.app_id} app_token = jwt.encode(data, self.app_key, algorithm="RS256").decode("utf-8") headers = { "Accept": PREVIEW_JSON_ACCEPT, "Authorization": "Bearer {}".format(app_token), } return headers
Set the correct auth headers to authenticate against GitHub.
def _resolve_model(obj): """ Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. String representations should have the format: 'appname.ModelName' """ if isinstance(obj, six.string_types) and len(obj.split('.')) == 2: app_name, model_name = obj.split('.') resolved_model = apps.get_model(app_name, model_name) if resolved_model is None: msg = "Django did not return a model for {0}.{1}" raise ImproperlyConfigured(msg.format(app_name, model_name)) return resolved_model elif inspect.isclass(obj) and issubclass(obj, models.Model): return obj raise ValueError("{0} is not a Django model".format(obj))
Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. String representations should have the format: 'appname.ModelName'
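Both accepted input forms for _resolve_model, assuming Django's app registry has been loaded (e.g. inside a configured project):

from django.contrib.auth.models import User

# String reference and model class resolve to the same class.
assert _resolve_model('auth.User') is User
assert _resolve_model(User) is User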
def delete(self, using=None): """ Deletes the post instance. """ if self.is_alone: # The default way of operating is to trigger the deletion of the associated topic # only if the considered post is the only post embedded in the topic self.topic.delete() else: super(AbstractPost, self).delete(using) self.topic.update_trackers()
Deletes the post instance.
def dir(self, filetype, **kwargs): """Return the directory containing a file of a given type. Parameters ---------- filetype : str File type parameter. Returns ------- dir : str Directory containing the file. """ full = kwargs.get('full', None) if not full: full = self.full(filetype, **kwargs) return os.path.dirname(full)
Return the directory containing a file of a given type. Parameters ---------- filetype : str File type parameter. Returns ------- dir : str Directory containing the file.
def get_objective(self, objective_id=None): """Gets the Objective specified by its Id. In plenary mode, the exact Id is found or a NotFound results. Otherwise, the returned Objective may have a different Id than requested, such as the case where a duplicate Id was assigned to an Objective and retained for compatibility. arg: objectiveId (osid.id.Id): Id of the Objective return: (osid.learning.Objective) - the objective raise: NotFound - objectiveId not found raise: NullArgument - objectiveId is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method is must be implemented. """ if objective_id is None: raise NullArgument() url_path = construct_url('objectives', obj_id=objective_id) return objects.Objective(self._get_request(url_path))
Gets the Objective specified by its Id. In plenary mode, the exact Id is found or a NotFound results. Otherwise, the returned Objective may have a different Id than requested, such as the case where a duplicate Id was assigned to an Objective and retained for compatibility. arg: objectiveId (osid.id.Id): Id of the Objective return: (osid.learning.Objective) - the objective raise: NotFound - objectiveId not found raise: NullArgument - objectiveId is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method is must be implemented.
def cast_to_subclass(self): """ Load the bundle file from the database to get the derived bundle class, then return a new bundle built on that class :return: """ self.import_lib() self.load_requirements() try: self.commit() # To ensure the rollback() doesn't clear out anything important bsf = self.build_source_files.file(File.BSFILE.BUILD) except Exception as e: self.log('Error trying to create a bundle source file ... {} '.format(e)) raise self.rollback() return self try: clz = bsf.import_bundle() except Exception as e: raise BundleError('Failed to load bundle code file, skipping : {}'.format(e)) b = clz(self._dataset, self._library, self._source_url, self._build_url) b.limited_run = self.limited_run b.capture_exceptions = self.capture_exceptions b.multi = self.multi return b
Load the bundle file from the database to get the derived bundle class, then return a new bundle built on that class :return:
def cloud_front_origin_access_identity_exists(Id, region=None, key=None, keyid=None, profile=None): ''' Return True if a CloudFront origin access identity exists with the given Resource ID or False otherwise. Id Resource ID of the CloudFront origin access identity. region Region to connect to. key Secret key to use. keyid Access key to use. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.cloud_front_origin_access_identity_exists Id=E30RBTSABCDEF0 ''' authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} oais = list_cloud_front_origin_access_identities(**authargs) or [] return bool([i['Id'] for i in oais if i['Id'] == Id])
Return True if a CloudFront origin access identity exists with the given Resource ID or False otherwise. Id Resource ID of the CloudFront origin access identity. region Region to connect to. key Secret key to use. keyid Access key to use. profile Dict, or pillar key pointing to a dict, containing AWS region/key/keyid. CLI Example: .. code-block:: bash salt myminion boto_cloudfront.cloud_front_origin_access_identity_exists Id=E30RBTSABCDEF0
def makeFigFromFile(filename,*args,**kwargs): """ Renders an image in a matplotlib figure, so it can be added to reports args and kwargs are passed to plt.subplots """ import matplotlib.pyplot as plt img = plt.imread(filename) fig,ax = plt.subplots(*args,**kwargs) ax.axis('off') ax.imshow(img) return fig
Renders an image in a matplotlib figure, so it can be added to reports args and kwargs are passed to plt.subplots
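A quick usage sketch for makeFigFromFile; the image and output paths are hypothetical.

# Embed an existing image in a figure, e.g. to drop into a report.
fig = makeFigFromFile('results/plot.png', figsize=(6, 4))  # hypothetical path
fig.savefig('report_page.pdf')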
def t_ID(self, t): r'[a-z][a-zA-Z0-9_]*' t.type = self.reserved.get(t.value, 'ID') if t.type == 'ID': t.value = node.Id(t.value, self.lineno, self.filename) return t
r'[a-z][a-zA-Z0-9_]*
def resource(resource_id): """Show a resource.""" resource_obj = app.db.resource(resource_id) if 'raw' in request.args: return send_from_directory(os.path.dirname(resource_obj.path), os.path.basename(resource_obj.path)) return render_template('resource.html', resource=resource_obj)
Show a resource.
def global_set_option(self, opt, value): """set option on the correct option provider""" self._all_options[opt].set_option(opt, value)
set option on the correct option provider
def get_urls(self): """ Appends the custom will_not_clone url to the admin site """ not_clone_url = [url(r'^(.+)/will_not_clone/$', admin.site.admin_view(self.will_not_clone))] restore_url = [ url(r'^(.+)/restore/$', admin.site.admin_view(self.restore))] return not_clone_url + restore_url + super(VersionedAdmin, self).get_urls()
Appends the custom will_not_clone url to the admin site
def get_color_label(self): """Text for colorbar label """ if self.args.norm: return 'Normalized to {}'.format(self.args.norm) if len(self.units) == 1 and self.usetex: return r'ASD $\left({0}\right)$'.format( self.units[0].to_string('latex').strip('$')) elif len(self.units) == 1: return 'ASD ({0})'.format(self.units[0].to_string('generic')) return super(Spectrogram, self).get_color_label()
Text for colorbar label
def load_plug_in(self, name): """Loads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' loads all installed plug-ins. return plug_in_name of type str The name of the loaded plug-in. """ if not isinstance(name, basestring): raise TypeError("name can only be an instance of type basestring") plug_in_name = self._call("loadPlugIn", in_p=[name]) return plug_in_name
Loads a DBGF plug-in. in name of type str The plug-in name or DLL. Special name 'all' loads all installed plug-ins. return plug_in_name of type str The name of the loaded plug-in.
def represent_datetime(self, d): """ turns a given datetime obj into a string representation. This will: 1) look if a fixed 'timestamp_format' is given in the config 2) check if a 'timestamp_format' hook is defined 3) use :func:`~alot.helper.pretty_datetime` as fallback """ fixed_format = self.get('timestamp_format') if fixed_format: rep = string_decode(d.strftime(fixed_format), 'UTF-8') else: format_hook = self.get_hook('timestamp_format') if format_hook: rep = string_decode(format_hook(d), 'UTF-8') else: rep = pretty_datetime(d) return rep
turns a given datetime obj into a string representation. This will: 1) look if a fixed 'timestamp_format' is given in the config 2) check if a 'timestamp_format' hook is defined 3) use :func:`~alot.helper.pretty_datetime` as fallback
def _command(self, event, command, *args, **kwargs): """ Context state controller. Check whether the transition is possible or not, it executes it and triggers the Hooks with the pre_* and post_* events. @param event: (str) event generated by the command. @param command: (virDomain.method) state transition to impose. @raise: RuntimeError. """ self._assert_transition(event) self.trigger('pre_%s' % event, **kwargs) self._execute_command(command, *args) self.trigger('post_%s' % event, **kwargs)
Context state controller. Check whether the transition is possible or not, it executes it and triggers the Hooks with the pre_* and post_* events. @param event: (str) event generated by the command. @param command: (virDomain.method) state transition to impose. @raise: RuntimeError.
def encipher(self,string): """Encipher string using Delastelle cipher according to initialised key. Example:: ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext. """ string = self.remove_punctuation(string,filter='[^'+self.key+']') ctext = "" for c in string: ctext += ''.join([str(i) for i in L2IND[c]]) return ctext
Encipher string using Delastelle cipher according to initialised key. Example:: ciphertext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').encipher(plaintext) :param string: The string to encipher. :returns: The enciphered string. The ciphertext will be 3 times the length of the plaintext.
def _do_connection(self, wgt, sig, func): """ Make a connection between a GUI widget and a callable. wgt and sig are strings with widget and signal name func is a callable for that signal """ #new style (we use this) #self.btn_name.clicked.connect(self.on_btn_name_clicked) #old style #self.connect(self.btn_name, SIGNAL('clicked()'), self.on_btn_name_clicked) if hasattr(self, wgt): wgtobj = getattr(self, wgt) if hasattr(wgtobj, sig): sigobj = getattr(wgtobj, sig) if isinstance(sigobj, Signal): sigobj.connect(func) return 0 return 1
Make a connection between a GUI widget and a callable. wgt and sig are strings with widget and signal name func is a callable for that signal
def set_child_value(self, name, value): """Set the text value of the (nameless) plain-text child of a named child node.""" return XMLElement(lib.lsl_set_child_value(self.e, str.encode(name), str.encode(value)))
Set the text value of the (nameless) plain-text child of a named child node.
def predict(self, X): """Predict target values for X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted target value. """ K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma) return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0)
Predict target values for X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted target value.
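What predict() computes, spelled out, is a Nadaraya-Watson kernel-weighted average: y_hat(x) = sum_i K(x_i, x) * y_i / sum_i K(x_i, x). A self-contained sketch of the same arithmetic on toy data:

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

# Toy data: a noiseless sine curve.
X_train = np.linspace(0, 10, 50).reshape(-1, 1)
y_train = np.sin(X_train).ravel()
X_test = np.array([[5.0]])

# Same computation as predict() above, with an RBF kernel.
K = pairwise_kernels(X_train, X_test, metric='rbf', gamma=1.0)
y_pred = (K * y_train[:, None]).sum(axis=0) / K.sum(axis=0)
print(y_pred)  # close to sin(5.0)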
def put(self, key): """Put and return the only unique identifier possible, its path """ self.client.write(self._key_path(key['name']), **key) return self._key_path(key['name'])
Put and return the only unique identifier possible, its path
def vtrees(self): """ Get list of VTrees from ScaleIO cluster :return: List of VTree objects - Can be empty of no VTrees exist :rtype: VTree object """ self.connection._check_login() response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/VTree/instances")).json() all_vtrees = [] for vtree in response: all_vtrees.append( SIO_Vtree.from_dict(vtree) ) return all_vtrees
Get list of VTrees from ScaleIO cluster :return: List of VTree objects - Can be empty of no VTrees exist :rtype: VTree object
def nodes(self): """ Returns an (n,2) list of nodes, or vertices on the path. Note that this generic class function assumes that all of the reference points are on the path which is true for lines and three point arcs. If you were to define another class where that wasn't the case (for example, the control points of a bezier curve), you would need to implement an entity- specific version of this function. The purpose of having a list of nodes is so that they can then be added as edges to a graph so we can use functions to check connectivity, extract paths, etc. The slicing on this function is essentially just tiling points so the first and last vertices aren't repeated. Example: self.points = [0,1,2] returns: [[0,1], [1,2]] """ return np.column_stack((self.points, self.points)).reshape( -1)[1:-1].reshape((-1, 2))
Returns an (n,2) list of nodes, or vertices on the path. Note that this generic class function assumes that all of the reference points are on the path which is true for lines and three point arcs. If you were to define another class where that wasn't the case (for example, the control points of a bezier curve), you would need to implement an entity- specific version of this function. The purpose of having a list of nodes is so that they can then be added as edges to a graph so we can use functions to check connectivity, extract paths, etc. The slicing on this function is essentially just tiling points so the first and last vertices aren't repeated. Example: self.points = [0,1,2] returns: [[0,1], [1,2]]
def line_segment(X0, X1): r""" Calculate the voxel coordinates of a straight line between the two given end points Parameters ---------- X0 and X1 : array_like The [x, y] or [x, y, z] coordinates of the start and end points of the line. Returns ------- coords : list of lists A list of lists containing the X, Y, and Z coordinates of all voxels that should be drawn between the start and end points to create a solid line. """ X0 = sp.around(X0).astype(int) X1 = sp.around(X1).astype(int) if len(X0) == 3: L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]], [X1[2]-X0[2]]])) + 1 x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int) y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int) z = sp.rint(sp.linspace(X0[2], X1[2], L)).astype(int) return [x, y, z] else: L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]]])) + 1 x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int) y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int) return [x, y]
r""" Calculate the voxel coordinates of a straight line between the two given end points Parameters ---------- X0 and X1 : array_like The [x, y] or [x, y, z] coordinates of the start and end points of the line. Returns ------- coords : list of lists A list of lists containing the X, Y, and Z coordinates of all voxels that should be drawn between the start and end points to create a solid line.
def selenol_params(**kwargs): """Decorate request parameters to transform them into Selenol objects.""" def params_decorator(func): """Param decorator. :param f: Function to decorate, typically on_request. """ def service_function_wrapper(service, message): """Wrap function call. :param service: SelenolService object. :param message: SelenolMessage request. """ params = {k: f(service, message) for k, f in kwargs.items()} return func(service, **params) return service_function_wrapper return params_decorator
Decorate request parameters to transform them into Selenol objects.
def load_grammar(self, path):
    """Load a grammar file (python file containing grammar definitions) by
    file path. When loaded, the global variable ``GRAMFUZZER`` will be set
    within the module. This is not always needed, but can be useful.

    :param str path: The path to the grammar file
    """
    if not os.path.exists(path):
        raise Exception("path does not exist: {!r}".format(path))

    # this will let grammars reference eachother with relative
    # imports.
    #
    # E.g.:
    #     grams/
    #         gram1.py
    #         gram2.py
    #
    # gram1.py can do "import gram2" to require rules in gram2.py to
    # be loaded
    grammar_path = os.path.dirname(path)
    if grammar_path not in sys.path:
        sys.path.append(grammar_path)

    with open(path, "r") as f:
        data = f.read()
    code = compile(data, path, "exec")

    locals_ = {"GRAMFUZZER": self, "__file__": path}
    # exec(code, locals_) is valid in both Python 2 and 3, unlike the
    # Python-2-only "exec code in locals_" statement form
    exec(code, locals_)

    if "TOP_CAT" in locals_:
        cat_group = os.path.basename(path).replace(".py", "")
        self.set_cat_group_top_level_cat(cat_group, locals_["TOP_CAT"])
Load a grammar file (python file containing grammar definitions) by file path. When loaded, the global variable ``GRAMFUZZER`` will be set within the module. This is not always needed, but can be useful. :param str path: The path to the grammar file
def get_text(node, strategy): """ Get the most confident text results, either those with @index = 1 or the first text results or empty string. """ textEquivs = node.get_TextEquiv() if not textEquivs: log.debug("No text results on %s %s", node, node.id) return '' # elif strategy == 'index1': else: if len(textEquivs) > 1: index1 = [x for x in textEquivs if x.index == 1] if index1: return index1[0].get_Unicode().strip() return textEquivs[0].get_Unicode().strip()
Get the most confident text results, either those with @index = 1 or the first text results or empty string.
def get_file_to_path(self, share_name, directory_name, file_name, file_path, open_mode='wb', start_range=None, end_range=None, range_get_content_md5=None, progress_callback=None, max_connections=1, max_retries=5, retry_wait=1.0, timeout=None): ''' Downloads a file to a file path, with automatic chunking and progress notifications. Returns an instance of File with properties and metadata. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param str file_path: Path of file to write to. :param str open_mode: Mode to use when opening the file. :param int start_range: Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param bool range_get_content_md5: When this header is set to True and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the file if known. :type progress_callback: callback function in format of func(current, total) :param int max_connections: Set to 1 to download the file sequentially. Set to 2 or greater if you want to download a file larger than 64MB in chunks. If the file size does not exceed 64MB it will be downloaded in one chunk. :param int max_retries: Number of times to retry download of file chunk if an error occurs. :param int retry_wait: Sleep time in secs between retries. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :return: A File with properties and metadata. :rtype: :class:`~azure.storage.file.models.File` ''' _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('file_path', file_path) _validate_not_none('open_mode', open_mode) with open(file_path, open_mode) as stream: file = self.get_file_to_stream( share_name, directory_name, file_name, stream, start_range, end_range, range_get_content_md5, progress_callback, max_connections, max_retries, retry_wait, timeout) return file
Downloads a file to a file path, with automatic chunking and progress notifications. Returns an instance of File with properties and metadata. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param str file_path: Path of file to write to. :param str open_mode: Mode to use when opening the file. :param int start_range: Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param bool range_get_content_md5: When this header is set to True and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. :param progress_callback: Callback for progress with signature function(current, total) where current is the number of bytes transfered so far, and total is the size of the file if known. :type progress_callback: callback function in format of func(current, total) :param int max_connections: Set to 1 to download the file sequentially. Set to 2 or greater if you want to download a file larger than 64MB in chunks. If the file size does not exceed 64MB it will be downloaded in one chunk. :param int max_retries: Number of times to retry download of file chunk if an error occurs. :param int retry_wait: Sleep time in secs between retries. :param int timeout: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. :return: A File with properties and metadata. :rtype: :class:`~azure.storage.file.models.File`
def do_prune(self):
    """
    Return True if prune_table, prune_column, and prune_date are implemented.
    If only a subset of prune variables are overridden, an exception is
    raised to remind the user to implement all or none. Prune (data newer
    than prune_date deleted) before copying new data in.
    """
    if self.prune_table and self.prune_column and self.prune_date:
        return True
    elif self.prune_table or self.prune_column or self.prune_date:
        raise Exception('override zero or all prune variables')
    else:
        return False
Return True if prune_table, prune_column, and prune_date are implemented.
If only a subset of prune variables are overridden, an exception is
raised to remind the user to implement all or none. Prune (data newer
than prune_date deleted) before copying new data in.
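A hedged sketch of the all-or-none contract: a task that overrides every prune variable passes the check, while a partial override raises. The base class name and column values here are hypothetical:

# Hypothetical task mixing in do_prune(); the base class is a placeholder.
class WeeklyEventsCopy(CopyToTable):
    prune_table = 'events'
    prune_column = 'event_date'
    prune_date = "date '2019-01-01'"  # rows newer than this are deleted first

assert WeeklyEventsCopy().do_prune() is True
# Overriding only prune_table (leaving the other two falsy) would raise:
# Exception('override zero or all prune variables')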
async def get_source_list(self, scheme: str = "") -> List[Source]:
    """Return available sources for playback."""
    res = await self.services["avContent"]["getSourceList"](scheme=scheme)
    return [Source.make(**x) for x in res]
Return available sources for playback.
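Since this is a coroutine it has to be awaited; a minimal driver, assuming 'device' is an already-connected client instance exposing the method and that "extInput" is a valid scheme for it:

import asyncio

async def list_sources(device):
    # 'device' is assumed to be a connected client exposing get_source_list().
    for source in await device.get_source_list(scheme="extInput"):
        print(source)

# asyncio.run(list_sources(device))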
def normalize_docroot(app, root):
    """Creates a package-list URL and a link base from a docroot element.

    Args:
        app: the global app object
        root: the docroot element [string or dictionary]
    """
    srcdir = app.env.srcdir
    default_version = app.config.javalink_default_version

    if isinstance(root, str):
        (url, base) = _parse_docroot_str(srcdir, root)
        return {'root': url, 'base': base, 'version': default_version}
    else:
        normalized = {}
        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]

        if 'base' in root:
            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]
        else:
            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]

        if 'version' in root:
            normalized['version'] = root['version']
        else:
            normalized['version'] = default_version

        return normalized
Creates a package-list URL and a link base from a docroot element.

Args:
    app: the global app object
    root: the docroot element [string or dictionary]
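An illustrative call with stub objects standing in for the Sphinx app; it assumes the module-private _parse_docroot_str helper is importable alongside normalize_docroot:

# Stubs standing in for the Sphinx app; _parse_docroot_str must be importable.
class _Stub(object):
    pass

app = _Stub()
app.env = _Stub()
app.env.srcdir = '/path/to/docs'
app.config = _Stub()
app.config.javalink_default_version = 8

# Dictionary form: explicit 'version' is kept, 'base' falls back to 'root'.
docroot = {'root': 'https://docs.oracle.com/javase/8/docs/api/', 'version': 8}
normalized = normalize_docroot(app, docroot)
assert set(normalized) == {'root', 'base', 'version'}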
def lock(self):
    """
    This method sets a cache variable to mark the current job as
    "already running".
    """
    if self.cache.get(self.lock_name):
        return False
    else:
        self.cache.set(self.lock_name, timezone.now(), self.timeout)
        return True
This method sets a cache variable to mark the current job as "already running".
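The intended acquire/skip/release pattern, sketched with a hypothetical job subclass that supplies the cache, lock_name, and timeout attributes the method relies on:

# Hypothetical job providing cache, lock_name, and timeout attributes.
job = NightlyReportJob()
if job.lock():
    try:
        job.run()
    finally:
        # Release the lock so the next scheduled run can acquire it.
        job.cache.delete(job.lock_name)
else:
    print('another worker is already running this job; skipping')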
def _eval_target_jumptable(state, ip, limit):
    """
    A *very* fast method to evaluate symbolic jump targets if they are a) concrete
    targets, or b) targets coming from jump tables.

    :param state: A SimState instance.
    :param ip:    The AST of the instruction pointer to evaluate.
    :param limit: The maximum number of concrete IPs.
    :return:      A list of conditions and the corresponding concrete IPs, or None
                  which indicates fallback is necessary.
    :rtype:       list or None
    """

    if ip.symbolic is False:
        return [(claripy.ast.bool.true, ip)]  # concrete

    # Detect whether ip is in the form of "if a == 1 then addr_0 else if a == 2 then addr_1 else ..."
    cond_and_targets = []  # tuple of (condition, target)

    ip_ = ip
    # Handle the outer Reverse
    outer_reverse = False
    if ip_.op == "Reverse":
        ip_ = ip_.args[0]
        outer_reverse = True

    fallback = False
    target_variable = None
    concretes = set()
    reached_sentinel = False

    for cond, target in claripy.reverse_ite_cases(ip_):
        # We must fully unpack the entire AST to make sure it indeed complies with the form above
        if reached_sentinel:
            # We should not have any other value beyond the sentinel - maybe one of the
            # possible targets happens to be the same as the sentinel value?
            fallback = True
            break

        if target.symbolic is False and state.solver.eval(target) == DUMMY_SYMBOLIC_READ_VALUE:
            # Ignore the dummy value, which acts as the sentinel of this ITE tree
            reached_sentinel = True
            continue

        if cond.op != "__eq__":
            # We only support equivalence right now. Fallback
            fallback = True
            break

        if cond.args[0].symbolic is True and cond.args[1].symbolic is False:
            variable, value = cond.args
        elif cond.args[0].symbolic is False and cond.args[1].symbolic is True:
            value, variable = cond.args
        else:
            # Cannot determine variable and value. Fallback
            fallback = True
            break

        if target_variable is None:
            target_variable = variable
        elif target_variable is not variable:
            # it's checking a different variable. Fallback
            fallback = True
            break

        # Make sure the conditions are mutually exclusive
        value_concrete = state.solver.eval(value)
        if value_concrete in concretes:
            # oops... the conditions are not mutually exclusive
            fallback = True
            break
        concretes.add(value_concrete)

        if target.symbolic is True:
            # Cannot handle symbolic targets. Fallback
            fallback = True
            break

        cond_and_targets.append((cond, target if not outer_reverse else state.solver.Reverse(target)))

    if reached_sentinel is False:
        # huh?
        fallback = True

    if fallback:
        return None
    else:
        return cond_and_targets[:limit]
A *very* fast method to evaluate symbolic jump targets if they are a) concrete
targets, or b) targets coming from jump tables.

:param state: A SimState instance.
:param ip:    The AST of the instruction pointer to evaluate.
:param limit: The maximum number of concrete IPs.
:return:      A list of conditions and the corresponding concrete IPs, or None
              which indicates fallback is necessary.
:rtype:       list or None
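The ITE shape the fast path accepts can be built directly with claripy; a sketch, assuming DUMMY_SYMBOLIC_READ_VALUE is importable from the same module as the sentinel constant:

import claripy

idx = claripy.BVS('jump_index', 64)
sentinel = claripy.BVV(DUMMY_SYMBOLIC_READ_VALUE, 64)  # module-level sentinel
ip = claripy.If(idx == 1, claripy.BVV(0x400100, 64),
                claripy.If(idx == 2, claripy.BVV(0x400200, 64), sentinel))
# For such a tree, _eval_target_jumptable(state, ip, limit=16) returns
# [(idx == 1, <0x400100>), (idx == 2, <0x400200>)]; anything that breaks the
# "one variable, mutually exclusive equalities" shape yields None instead.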
def hook_point(self, hook_name, handle=None):
    """Used to call module function that may define a hook function for hook_name

    Available hook points:
    - `tick`, called on each daemon loop turn
    - `save_retention`; called by the scheduler when live state saving is to be done
    - `load_retention`; called by the scheduler when live state restoring is necessary (on restart)
    - `get_new_actions`; called by the scheduler before adding the actions to be executed
    - `early_configuration`; called by the arbiter when it begins parsing the configuration
    - `read_configuration`; called by the arbiter when it reads the configuration
    - `late_configuration`; called by the arbiter when it finishes parsing the configuration

    As a default, the `handle` parameter provided to the hooked function is the
    caller Daemon object. The scheduler will provide its own instance when it
    calls this function.

    :param hook_name: function name we may hook in module
    :type hook_name: str
    :param handle: parameter to provide to the hook function
    :type handle: alignak.Satellite
    :return: None
    """
    full_hook_name = 'hook_' + hook_name
    for module in self.modules_manager.instances:
        _ts = time.time()
        if not hasattr(module, full_hook_name):
            continue

        fun = getattr(module, full_hook_name)
        try:
            fun(handle if handle is not None else self)
        # pylint: disable=broad-except
        except Exception as exp:  # pragma: no cover, never happen during unit tests...
            logger.warning('The instance %s raised an exception %s. I disabled it,'
                           ' and set it to restart later', module.name, str(exp))
            logger.exception('Exception %s', exp)
            self.modules_manager.set_to_restart(module)
        else:
            statsmgr.timer('hook.%s.%s' % (hook_name, module.name), time.time() - _ts)
Used to call module function that may define a hook function for hook_name

Available hook points:
- `tick`, called on each daemon loop turn
- `save_retention`; called by the scheduler when live state saving is to be done
- `load_retention`; called by the scheduler when live state restoring is necessary (on restart)
- `get_new_actions`; called by the scheduler before adding the actions to be executed
- `early_configuration`; called by the arbiter when it begins parsing the configuration
- `read_configuration`; called by the arbiter when it reads the configuration
- `late_configuration`; called by the arbiter when it finishes parsing the configuration

As a default, the `handle` parameter provided to the hooked function is the
caller Daemon object. The scheduler will provide its own instance when it
calls this function.

:param hook_name: function name we may hook in module
:type hook_name: str
:param handle: parameter to provide to the hook function
:type handle: alignak.Satellite
:return: None
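A module only needs to expose a hook_<name> method to be picked up; a hedged sketch of a module instance the loop above would discover:

# Hypothetical module instance; only the method name matters to hook_point().
class RetentionModule(object):
    name = 'retention'

    def hook_save_retention(self, scheduler):
        # Called with the scheduler as handle when live state saving is due.
        pass

# Somewhere in the scheduler: self.hook_point('save_retention', handle=self)
# resolves 'hook_save_retention' on every loaded module and times each call.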
def eval(self, code, mode='single'):
    """Evaluate code in the context of the frame."""
    if isinstance(code, str):
        code = UTF8_COOKIE + code.encode('utf-8')
        code = compile(code, '<interactive>', mode)
    if mode != 'exec':
        return eval(code, self.globals, self.locals)
    exec(code, self.globals, self.locals)
Evaluate code in the context of the frame.
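Typical use against a frame wrapper, e.g. from an interactive debugger console; the 'frame' variable here is hypothetical:

# 'frame' is a hypothetical wrapper exposing this eval() around a traceback frame.
frame.eval('x = 21 * 2')                   # 'single' mode: execute a statement
result = frame.eval('x + 1', mode='eval')  # 'eval' mode: returns a value
frame.eval('print(x)', mode='exec')        # 'exec' mode: no return value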
def imported_targets(self):
    """
    :returns: target instances for specs referenced by imported_target_specs.
    :rtype: list of Target
    """
    libs = []
    for spec in self.imported_target_specs(payload=self.payload):
        resolved_target = self._build_graph.get_target_from_spec(
            spec, relative_to=self.address.spec_path)
        if not resolved_target:
            raise self.UnresolvedImportError(
                'Could not find target {spec} referenced from {relative_to}'
                .format(spec=spec, relative_to=self.address.spec))
        try:
            libs.append(self.expected_target_constraint.validate_satisfied_by(resolved_target))
        except TypeConstraintError as e:
            raise self.WrongTargetTypeError(
                'Wrong target type {spec} referenced from remote sources target {relative_to}: {err}'
                .format(spec=spec, relative_to=self.address.spec, err=str(e)),
                e)
    return libs
:returns: target instances for specs referenced by imported_target_specs.
:rtype: list of Target
def add_fields(self, field_dict):
    """Add a mapping of field names to PayloadField instances.

    :API: public
    """
    for key, field in field_dict.items():
        self.add_field(key, field)
Add a mapping of field names to PayloadField instances.

:API: public
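Usage is a thin convenience over repeated add_field calls; the payload and field objects below are hypothetical PayloadField instances:

# 'payload' and the field objects are hypothetical PayloadField instances.
payload.add_fields({
    'sources': sources_field,
    'dependencies': dependencies_field,
})
# Equivalent to:
# payload.add_field('sources', sources_field)
# payload.add_field('dependencies', dependencies_field)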
def opens_platforms(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of the platforms used to open your emails.
    This is only recorded when open tracking is enabled for that email.
    """
    return self.call(
        "GET", "/stats/outbound/opens/platforms",
        tag=tag, fromdate=fromdate, todate=todate
    )
Gets an overview of the platforms used to open your emails. This is only recorded when open tracking is enabled for that email.
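A hedged call sketch; the client construction and the exact response keys depend on the surrounding API wrapper and the service's JSON body:

# 'client' is assumed to be a configured instance of the wrapper above.
stats = client.opens_platforms(tag='newsletter',
                               fromdate='2019-01-01', todate='2019-01-31')
# 'stats' mirrors the service's response, e.g. platform names mapped to
# open counts for emails with open tracking enabled.
print(stats)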
def ps_ball(radius):
    r"""
    Creates spherical ball structuring element for morphological operations

    Parameters
    ----------
    radius : float or int
        The desired radius of the structuring element

    Returns
    -------
    strel : 3D-array
        A 3D numpy array of the structuring element
    """
    rad = int(sp.ceil(radius))
    other = sp.ones((2 * rad + 1, 2 * rad + 1, 2 * rad + 1), dtype=bool)
    other[rad, rad, rad] = False
    ball = spim.distance_transform_edt(other) < radius
    return ball
r""" Creates spherical ball structuring element for morphological operations Parameters ---------- radius : float or int The desired radius of the structuring element Returns ------- strel : 3D-array A 3D numpy array of the structuring element
def global_config(cls, key, *args):
    '''
    This reads or sets the global settings stored in class.settings.
    '''
    if args:
        cls.settings[key] = args[0]
    else:
        return cls.settings[key]
This reads or sets the global settings stored in class.settings.
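Read/write usage of the class-level settings store; 'Job' is a placeholder for whatever class defines this classmethod and its settings dict:

# 'Job' stands in for the class defining global_config() and .settings.
Job.global_config('verbose', True)       # with a value: write to Job.settings
verbose = Job.global_config('verbose')   # without: read back the stored value
assert verbose is True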
def decode_base64(data):
    """
    Decodes a base64 string, with padding being optional

    Args:
        data: A base64 encoded string

    Returns:
        bytes: The decoded bytes
    """
    data = bytes(data, encoding="ascii")
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += b'=' * (4 - missing_padding)
    return base64.b64decode(data)
Decodes a base64 string, with padding being optional

Args:
    data: A base64 encoded string

Returns:
    bytes: The decoded bytes
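The padding repair in action, using the classic Wikipedia sample strings (lengths 24 and 26, so the second needs two '=' restored before decoding):

print(decode_base64('YW55IGNhcm5hbCBwbGVhc3Vy'))    # b'any carnal pleasur'
print(decode_base64('YW55IGNhcm5hbCBwbGVhc3VyZQ'))  # b'any carnal pleasure'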
def get_teams():
    """
    :return: all known teams
    """
    LOGGER.debug("TeamService.get_teams")
    args = {'http_operation': 'GET', 'operation_path': ''}
    response = TeamService.requester.call(args)
    ret = None
    if response.rc == 0:
        ret = []
        for team in response.response_content['teams']:
            ret.append(Team.json_2_team(team))
    elif response.rc != 404:
        err_msg = 'TeamService.get_teams - Problem while getting teams. ' \
                  'Reason: ' + str(response.response_content) + '-' + \
                  str(response.error_message) + " (" + str(response.rc) + ")"
        LOGGER.warning(err_msg)
    return ret
:return: all known teams
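Call sketch: get_teams() returns a list of Team objects on success, and None when the service reports an error or the resource is not found:

teams = TeamService.get_teams()
if teams is not None:
    for team in teams:
        print(team)  # Team instances built via Team.json_2_team(...)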
def CountFlowLogEntries(self, client_id, flow_id):
    """Returns number of flow log entries of a given flow."""
    return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))
Returns number of flow log entries of a given flow.