def list_networks(full_ids=False):
    """
    Lists networks on the Docker remote host, similar to ``docker network ls``.

    :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters.
    :type full_ids: bool
    """
    networks = docker_fabric().networks()
    _format_output_table(networks, NETWORK_COLUMNS, full_ids)
def get_ssl_context(private_key, certificate):
    """Get an SSL context from private key and certificate paths.

    The return value is used when calling Flask,
    e.g. app.run(ssl_context=get_ssl_context(...))
    """
    if (
        certificate and os.path.isfile(certificate)
        and private_key and os.path.isfile(private_key)
    ):
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        context.load_cert_chain(certificate, private_key)
        return context
    return None
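A minimal usage sketch for the helper above, assuming a Flask app and that the key/certificate files exist on disk; the filenames are illustrative, not part of the original code.

from flask import Flask

app = Flask(__name__)

# Hypothetical paths -- substitute the real key/certificate locations.
context = get_ssl_context("server.key", "server.crt")

if context is not None:
    app.run(ssl_context=context)   # serve over HTTPS (TLS 1.2)
else:
    app.run()                      # fall back to plain HTTP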
def stop(self):
    """
    Stop the interface

    :rtype: None
    """
    should_sleep = self._is_running
    super(Sensor, self).stop()
    if should_sleep:
        # Make sure everything has enough time to exit
        time.sleep(max(self._select_timeout, self._retransmit_timeout) + 1)
    if self._listen_socket is not None:
        self._shutdown_listen_socket()
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
    """
    Check that ``pfeed.frequencies`` follows the ProtoFeed spec.
    Return a list of problems of the form described in
    :func:`gt.check_table`; the list will be empty if no problems are found.
    """
    table = 'frequencies'
    problems = []

    # Preliminary checks
    if pfeed.frequencies is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.frequencies.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check route_short_name and route_long_name
    for column in ['route_short_name', 'route_long_name']:
        problems = gt.check_column(problems, table, f, column, gt.valid_str,
                                   column_required=False)

    cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
    problems = gt.check_table(problems, table, f, cond,
                              'route_short_name and route_long_name both empty')

    # Check route_type
    v = lambda x: x in range(8)
    problems = gt.check_column(problems, table, f, 'route_type', v)

    # Check service window ID
    problems = gt.check_column_linked_id(problems, table, f, 'service_window_id',
                                         pfeed.service_windows)

    # Check direction
    v = lambda x: x in range(3)
    problems = gt.check_column(problems, table, f, 'direction', v)

    # Check frequency
    v = lambda x: isinstance(x, int)
    problems = gt.check_column(problems, table, f, 'frequency', v)

    # Check speed
    problems = gt.check_column(problems, table, f, 'speed', valid_speed,
                               column_required=False)

    # Check shape ID
    problems = gt.check_column_linked_id(problems, table, f, 'shape_id', pfeed.shapes)

    return gt.format_problems(problems, as_df=as_df)
def get_changesets(self, start=None, end=None, start_date=None,
                   end_date=None, branch_name=None, reverse=False):
    """
    Returns iterator of ``MercurialChangeset`` objects from start to end
    (both are inclusive)

    :param start: None, str, int or mercurial lookup format
    :param end: None, str, int or mercurial lookup format
    :param start_date:
    :param end_date:
    :param branch_name:
    :param reverse: return changesets in reversed order
    """
    start_raw_id = self._get_revision(start)
    start_pos = self.revisions.index(start_raw_id) if start else None
    end_raw_id = self._get_revision(end)
    end_pos = self.revisions.index(end_raw_id) if end else None

    if None not in [start, end] and start_pos > end_pos:
        raise RepositoryError("Start revision '%s' cannot be "
                              "after end revision '%s'" % (start, end))

    if branch_name and branch_name not in self.allbranches.keys():
        raise BranchDoesNotExistError('Branch %s not found in'
                                      ' this repository' % branch_name)
    if end_pos is not None:
        end_pos += 1

    # filter branches
    filter_ = []
    if branch_name:
        filter_.append('branch("%s")' % (branch_name))
    if start_date:
        filter_.append('date(">%s")' % start_date)
    if end_date:
        filter_.append('date("<%s")' % end_date)
    if filter_:
        revisions = scmutil.revrange(self._repo, [" and ".join(filter_)])
    else:
        revisions = self.revisions

    revs = revisions[start_pos:end_pos]
    if reverse:
        revs = reversed(revs)

    return CollectionGenerator(self, revs)
def _prepare_hiveconf(d):
    """
    This function prepares a list of hiveconf params
    from a dictionary of key value pairs.

    :param d:
    :type d: dict

    >>> hh = HiveCliHook()
    >>> hive_conf = {"hive.exec.dynamic.partition": "true",
    ... "hive.exec.dynamic.partition.mode": "nonstrict"}
    >>> hh._prepare_hiveconf(hive_conf)
    ["-hiveconf", "hive.exec.dynamic.partition=true",\
 "-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
    """
    if not d:
        return []
    return as_flattened_list(
        zip(["-hiveconf"] * len(d),
            ["{}={}".format(k, v) for k, v in d.items()])
    )
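The flattening step above relies on Airflow's `as_flattened_list` helper. The standalone sketch below reproduces the same interleaving with `itertools.chain` so the behaviour can be checked outside a `HiveCliHook`; the function name `prepare_hiveconf` is mine, not Airflow's.

import itertools

def prepare_hiveconf(d):
    # Interleave "-hiveconf" with "key=value" pairs, e.g.
    # {"a": "1"} -> ["-hiveconf", "a=1"]
    if not d:
        return []
    pairs = zip(["-hiveconf"] * len(d),
                ["{}={}".format(k, v) for k, v in d.items()])
    return list(itertools.chain.from_iterable(pairs))

print(prepare_hiveconf({"hive.exec.dynamic.partition": "true"}))
# ['-hiveconf', 'hive.exec.dynamic.partition=true']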
def main():
    """
    NAME
        vgp_di.py
    DESCRIPTION
        converts site latitude, longitude and pole latitude, longitude to declination, inclination

    SYNTAX
        vgp_di.py [-h] [-i] [-f FILE] [< filename]

    OPTIONS
        -h prints help message and quits
        -i interactive data entry
        -f FILE to specify file name on the command line

    INPUT
      for file entry:
        PLAT PLON SLAT SLON
      where:
        PLAT: pole latitude
        PLON: pole longitude (positive east)
        SLAT: site latitude (positive north)
        SLON: site longitude (positive east)

    OUTPUT
        D I
      where:
        D: declination
        I: inclination
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-i' in sys.argv:  # if one is -i
        while 1:
            try:
                ans = input("Input Pole Latitude [positive north]: <cntrl-D to quit> ")
                plat = float(ans)  # assign input to plat, after conversion to floating point
                ans = input("Input Pole Longitude [positive east]: ")
                plon = float(ans)
                ans = input("Input Site Latitude: ")
                slat = float(ans)
                ans = input("Input Site Longitude: ")
                slong = float(ans)
                dec, inc = pmag.vgp_di(plat, plon, slat, slong)  # call vgp_di function from pmag module
                print('%7.1f %7.1f' % (dec, inc))  # print out returned stuff
            except EOFError:
                print("\n Good-bye\n")
                sys.exit()
    elif '-f' in sys.argv:  # manual input of file name
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        f = open(file, 'r')
        inp = f.readlines()  # read from the input file
        for line in inp:  # read in the data (as string variable), line by line
            dec, inc = spitout(line)
    else:
        inp = sys.stdin.readlines()  # read from standard input
        for line in inp:  # read in the data (as string variable), line by line
            spitout(line)
def get_load(jid):
    '''
    Return the load data that marks a specified jid
    '''
    jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
    load_fn = os.path.join(jid_dir, LOAD_P)
    if not os.path.exists(jid_dir) or not os.path.exists(load_fn):
        return {}
    serial = salt.payload.Serial(__opts__)
    ret = {}
    load_p = os.path.join(jid_dir, LOAD_P)
    num_tries = 5
    for index in range(1, num_tries + 1):
        with salt.utils.files.fopen(load_p, 'rb') as rfh:
            try:
                ret = serial.load(rfh)
                break
            except Exception as exc:
                if index == num_tries:
                    time.sleep(0.25)
                else:
                    log.critical('Failed to unpack %s', load_p)
                    raise exc
    if ret is None:
        ret = {}
    minions_cache = [os.path.join(jid_dir, MINIONS_P)]
    minions_cache.extend(
        glob.glob(os.path.join(jid_dir, SYNDIC_MINIONS_P.format('*')))
    )
    all_minions = set()
    for minions_path in minions_cache:
        log.debug('Reading minion list from %s', minions_path)
        try:
            with salt.utils.files.fopen(minions_path, 'rb') as rfh:
                all_minions.update(serial.load(rfh))
        except IOError as exc:
            salt.utils.files.process_read_exception(exc, minions_path)
    if all_minions:
        ret['Minions'] = sorted(all_minions)
    return ret
def get_timeout(self):
    "setup a timeout for waiting for a proposal"
    if self.timeout_time is not None or self.proposal:
        return
    now = self.cm.chainservice.now
    round_timeout = ConsensusManager.round_timeout
    round_timeout_factor = ConsensusManager.round_timeout_factor
    delay = round_timeout * round_timeout_factor ** self.round
    self.timeout_time = now + delay
    return delay
def write_temp_file(self, content, filename=None, mode='w'):
    """Write content to a temporary file.

    Args:
        content (bytes|str): The file content. If passing binary data the mode needs to be set to 'wb'.
        filename (str, optional): The filename to use when writing the file.
        mode (str, optional): The file write mode which could be either 'w' or 'wb'.

    Returns:
        str: Fully qualified path name for the file.
    """
    if filename is None:
        filename = str(uuid.uuid4())
    fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
    with open(fqpn, mode) as fh:
        fh.write(content)
    return fqpn
def all_notebook_jobs(self):
    """
    Similar to notebook_jobs, but uses the default manager to return archived experiments as well.
    """
    from db.models.notebooks import NotebookJob

    return NotebookJob.all.filter(project=self)
def simxGetArrayParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    paramValues = (ct.c_float * 3)()
    ret = c_GetArrayParameter(clientID, paramIdentifier, paramValues, operationMode)
    arr = []
    for i in range(3):
        arr.append(paramValues[i])
    return ret, arr
def get_notifications(self, startDate, endDate, loadBalancerID, loadBalancerRuleID):
    """
    Get the load balancer notifications for a specific rule within a specified time window

    :type startDate: datetime
    :type endDate: datetime
    :type loadBalancerID: int
    :type loadBalancerRuleID: int
    :param startDate: From Date
    :param endDate: To Date
    :param loadBalancerID: ID of the Load Balancer
    :param loadBalancerRuleID: ID of the Load Balancer Rule
    """
    return self._call(GetLoadBalancerNotifications,
                      startDate=startDate,
                      endDate=endDate,
                      loadBalancerID=loadBalancerID,
                      loadBalancerRuleID=loadBalancerRuleID)
def merge_with(self, other, multiset_op, other_op=None):
    '''Merge this feature collection with another.

    Merges two feature collections using the given ``multiset_op``
    on each corresponding multiset and returns a new
    :class:`FeatureCollection`. The contents of the two original
    feature collections are not modified.

    For each feature name in both feature sets, if either feature
    collection being merged has a :class:`collections.Counter`
    instance as its value, then the two values are merged by
    calling `multiset_op` with both values as parameters. If
    either feature collection has something other than a
    :class:`collections.Counter`, and `other_op` is not
    :const:`None`, then `other_op` is called with both values to
    merge them. If `other_op` is :const:`None` and a feature is
    not present in either feature collection with a counter value,
    then the feature will not be present in the result.

    :param other: The feature collection to merge into ``self``.
    :type other: :class:`FeatureCollection`
    :param multiset_op: Function to merge two counters
    :type multiset_op: fun(Counter, Counter) -> Counter
    :param other_op: Function to merge two non-counters
    :type other_op: fun(object, object) -> object
    :rtype: :class:`FeatureCollection`
    '''
    result = FeatureCollection()
    for ms_name in set(self._counters()) | set(other._counters()):
        c1 = self.get(ms_name, None)
        c2 = other.get(ms_name, None)
        if c1 is None and c2 is not None:
            c1 = c2.__class__()
        if c2 is None and c1 is not None:
            c2 = c1.__class__()
        result[ms_name] = multiset_op(c1, c2)
    if other_op is not None:
        for o_name in (set(self._not_counters()) |
                       set(other._not_counters())):
            v = other_op(self.get(o_name, None), other.get(o_name, None))
            if v is not None:
                result[o_name] = v
    return result
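A short usage sketch of the merge above. It assumes a FeatureCollection can be built from a dict of feature name to collections.Counter, which may differ from the real constructor; the feature names and counts are made up.

from collections import Counter

# Hypothetical feature collections built from Counters.
fc1 = FeatureCollection({'tokens': Counter({'apple': 2, 'pear': 1})})
fc2 = FeatureCollection({'tokens': Counter({'apple': 1, 'plum': 4})})

merged = fc1.merge_with(fc2, lambda a, b: a + b)
# merged['tokens'] -> Counter({'plum': 4, 'apple': 3, 'pear': 1})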
def convergence_from_grid(self, grid):
    """Calculate the projected convergence at a given set of arc-second gridded coordinates.

    Parameters
    ----------
    grid : grids.RegularGrid
        The grid of (y,x) arc-second coordinates the surface density is computed on.
    """
    surface_density_grid = np.zeros(grid.shape[0])

    grid_eta = self.grid_to_elliptical_radii(grid)

    for i in range(grid.shape[0]):
        surface_density_grid[i] = self.convergence_func(grid_eta[i])

    return surface_density_grid
def load_isd_daily_temp_data(
    self, start, end, read_from_cache=True, write_to_cache=True
):
    """Load resampled daily ISD temperature data from start date to end date (inclusive).

    This is the primary convenience method for loading resampled daily ISD temperature data.

    Parameters
    ----------
    start : datetime.datetime
        The earliest date from which to load data.
    end : datetime.datetime
        The latest date until which to load data.
    read_from_cache : bool
        Whether or not to load data from cache.
    write_to_cache : bool
        Whether or not to write newly loaded data to cache.
    """
    return load_isd_daily_temp_data(
        self.usaf_id,
        start,
        end,
        read_from_cache=read_from_cache,
        write_to_cache=write_to_cache,
    )
def export_to_dicts(table, *args, **kwargs):
    """Export a `rows.Table` to a list of dicts"""
    field_names = table.field_names
    return [{key: getattr(row, key) for key in field_names} for row in table]
def handle_selected_page(self):
    """
    Open the subscription and submission pages subwindows, but close the
    current page if any other type of page is selected.
    """
    if not self.selected_page:
        pass
    elif self.selected_page.name in ('subscription', 'submission'):
        # Launch page in a subwindow
        self.selected_page = self.selected_page.loop()
    elif self.selected_page.name in ('subreddit', 'inbox'):
        # Replace the current page
        self.active = False
    else:
        raise RuntimeError(self.selected_page.name)
def delete_ec2_nodes(
        instance_id_list,
        client=None
):
    """This deletes EC2 nodes and terminates the instances.

    Parameters
    ----------
    instance_id_list : list of str
        A list of EC2 instance IDs to terminate.

    client : boto3.Client or None
        If None, this function will instantiate a new `boto3.Client` object to
        use in its operations. Alternatively, pass in an existing `boto3.Client`
        instance to re-use it here.

    Returns
    -------
    dict
        The response returned by the `terminate_instances` call.
    """
    if not client:
        client = boto3.client('ec2')

    resp = client.terminate_instances(
        InstanceIds=instance_id_list
    )

    return resp
def _make_repr_table_from_sframe(X):
    """
    Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table.
    """
    assert isinstance(X, _SFrame)

    column_names = X.column_names()

    out_data = [[None] * len(column_names) for i in range(X.num_rows())]

    column_sizes = [len(s) for s in column_names]

    for i, c in enumerate(column_names):
        for j, e in enumerate(X[c]):
            out_data[j][i] = str(e)
            column_sizes[i] = max(column_sizes[i], len(e))

    # now, go through and pad everything.
    out_data = ([[cn.ljust(k, ' ') for cn, k in zip(column_names, column_sizes)],
                 ["-" * k for k in column_sizes]]
                + [[e.ljust(k, ' ') for e, k in zip(row, column_sizes)]
                   for row in out_data])

    return [' '.join(row) for row in out_data]
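To see the padding logic in isolation, here is a dependency-free sketch that applies the same idea to an ordinary dict of columns instead of an SFrame; the helper name and sample data are mine.

def make_repr_table_from_columns(columns):
    # Render {column_name: list_of_values} as padded text rows.
    names = list(columns)
    n_rows = len(next(iter(columns.values()), []))
    out = [[str(columns[c][j]) for c in names] for j in range(n_rows)]
    widths = [max([len(c)] + [len(row[i]) for row in out])
              for i, c in enumerate(names)]
    header = [c.ljust(w) for c, w in zip(names, widths)]
    rule = ['-' * w for w in widths]
    body = [[cell.ljust(w) for cell, w in zip(row, widths)] for row in out]
    return [' '.join(r) for r in [header, rule] + body]

for line in make_repr_table_from_columns({'name': ['ann', 'bob'], 'score': [1, 25]}):
    print(line)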
def state(self):
    """Get the Document's state"""
    state = self._resource.get('state', self.default_state)
    if state in State:
        return state
    else:
        return getattr(State, state)
def _check_connection(self):
    """Check if connection is alive every reconnect_timeout seconds."""
    try:
        super()._check_connection()
    except OSError as exc:
        _LOGGER.error(exc)
        self.protocol.transport.close()
        self.protocol.conn_lost_callback()
        return

    task = self.loop.call_later(
        self.reconnect_timeout + 0.1, self._check_connection)
    self.cancel_check_conn = task.cancel
def __create_coordinates(self, lat, lon, elev):
    """
    GeoJSON standard:
    Use to determine 2-point or 4-point coordinates

    :param list lat:
    :param list lon:
    :return dict:
    """
    # Sort lat and lon in numerical order
    lat.sort()
    lon.sort()
    geo_dict = {}

    # 4 coordinate values
    if len(lat) == 2 and len(lon) == 2:
        # Matching coordinate pairs. Not 4 unique values.
        if lat[0] == lat[1] and lon[0] == lon[1]:
            logger_noaa_lpd.info("coordinates found: {}".format("2"))
            lat.pop()
            lon.pop()
            geo_dict = self.__geo_point(lat, lon, elev)
        # 4 unique coordinates
        else:
            logger_noaa_lpd.info("coordinates found: {}".format("4"))
            geo_dict = self.__geo_multipoint(lat, lon, elev)
    # 2 coordinate values
    elif len(lat) == 1 and len(lon) == 1:
        logger_noaa_lpd.info("coordinates found: {}".format("2"))
        geo_dict = self.__geo_point(lat, lon, elev)
    # 0 coordinate values
    elif not lat and not lon:
        logger_noaa_lpd.info("coordinates found: {}".format("0"))
    else:
        geo_dict = {}
        logger_noaa_lpd.info("coordinates found: {}".format("too many"))

    return geo_dict
def predict(self):
    """ predict the value of the fixed effect """
    RV = np.zeros((self.N, self.P))
    for term_i in range(self.n_terms):
        RV += np.dot(self.Fstar()[term_i],
                     np.dot(self.B()[term_i], self.Astar()[term_i]))
    return RV
def relative_datetime(self):
    """Return human-readable relative time string."""
    now = datetime.now(timezone.utc)

    tense = "from now" if self.created_at > now else "ago"

    return "{0} {1}".format(humanize.naturaldelta(now - self.created_at), tense)
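A small standalone check of the same idea, assuming the `humanize` package is installed; the timestamp is made up.

from datetime import datetime, timedelta, timezone

import humanize

now = datetime.now(timezone.utc)
created_at = now - timedelta(hours=3)   # pretend the object was created 3 hours ago

tense = "from now" if created_at > now else "ago"
print("{0} {1}".format(humanize.naturaldelta(now - created_at), tense))
# e.g. "3 hours ago"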
def plot_evec(fignum, Vs, symsize, title):
    """
    plots eigenvector directions of S vectors

    Parameters
    ----------
    fignum : matplotlib figure number
    Vs : nested list of eigenvectors
    symsize : size in pts for symbol
    title : title for plot
    """
    #
    plt.figure(num=fignum)
    plt.text(-1.1, 1.15, title)
    # plot V1s as squares, V2s as triangles and V3s as circles
    symb, symkey = ['s', 'v', 'o'], 0
    col = ['r', 'b', 'k']  # plot V1s red, V2s blue, V3s black
    for VEC in range(3):
        X, Y = [], []
        for Vdirs in Vs:
            #
            # plot the V1 data first
            #
            XY = pmag.dimap(Vdirs[VEC][0], Vdirs[VEC][1])
            X.append(XY[0])
            Y.append(XY[1])
        plt.scatter(X, Y, s=symsize,
                    marker=symb[VEC], c=col[VEC], edgecolors='none')
    plt.axis("equal")
def generate_ppi_network(
        ppi_graph_path: str,
        dge_list: List[Gene],
        max_adj_p: float,
        max_log2_fold_change: float,
        min_log2_fold_change: float,
        ppi_edge_min_confidence: Optional[float] = None,
        current_disease_ids_path: Optional[str] = None,
        disease_associations_path: Optional[str] = None,
) -> Network:
    """Generate the protein-protein interaction network.

    :return Network: Protein-protein interaction network with information on differential expression.
    """
    # Compilation of a protein-protein interaction (PPI) graph (HIPPIE)
    protein_interactions = parsers.parse_ppi_graph(ppi_graph_path, ppi_edge_min_confidence)
    protein_interactions = protein_interactions.simplify()

    if disease_associations_path is not None and current_disease_ids_path is not None:
        current_disease_ids = parsers.parse_disease_ids(current_disease_ids_path)
        disease_associations = parsers.parse_disease_associations(disease_associations_path,
                                                                  current_disease_ids)
    else:
        disease_associations = None

    # Build an undirected weighted graph with the remaining interactions based on Entrez gene IDs
    network = Network(
        protein_interactions,
        max_adj_p=max_adj_p,
        max_l2fc=max_log2_fold_change,
        min_l2fc=min_log2_fold_change,
    )
    network.set_up_network(dge_list, disease_associations=disease_associations)

    return network
def get_authorization_user(self, **kwargs):
    """Gets the user the authorization object is for."""
    if self.authorization_user is not None:
        return self.authorization_user
    self.authorization_user = self.request.user
    return self.request.user
def load_schema(schema_path):
    """Load the JSON schema at the given path as a Python object.

    Args:
        schema_path: A filename for a JSON schema.

    Returns:
        A Python object representation of the schema.
    """
    try:
        with open(schema_path) as schema_file:
            schema = json.load(schema_file)
    except ValueError as e:
        raise SchemaInvalidError('Invalid JSON in schema or included schema: '
                                 '%s\n%s' % (schema_file.name, str(e)))
    return schema
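A quick usage sketch, assuming `load_schema` is importable from the surrounding module; the schema content and temporary file are made up for illustration.

import json
import tempfile

# Write a tiny schema to a temporary file so load_schema has something to read.
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump({"type": "object", "required": ["id"]}, tmp)

schema = load_schema(tmp.name)
print(schema["required"])   # ['id']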
def _gzip_open_filename(handle):
    """Hide Python 2 vs. 3 differences in gzip.open()"""
    import gzip

    if sys.version_info[0] > 2:
        handle = gzip.open(handle, mode='rt', encoding="UTF-8")
    else:
        handle = gzip.open(handle)
    return handle
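A small round-trip check of the helper, assuming it is importable; the file name here is illustrative only.

import gzip

# Write a small gzipped text file, then read it back through the helper.
with gzip.open("example.txt.gz", mode="wt", encoding="UTF-8") as out:
    out.write("hello\nworld\n")

handle = _gzip_open_filename("example.txt.gz")
for line in handle:
    print(line.rstrip())
handle.close()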
def get_as_bytes(self, s3_path):
    """
    Get the contents of an object stored in S3 as bytes

    :param s3_path: URL for target S3 location
    :return: File contents as pure bytes
    """
    (bucket, key) = self._path_to_bucket_and_key(s3_path)
    obj = self.s3.Object(bucket, key)
    contents = obj.get()['Body'].read()
    return contents
def _set_url(self, url):
    """
    Set a new URL for the data server. If we're unable to contact the given
    url, then the original url is kept.
    """
    original_url = self._url
    try:
        self._update_index(url)
    except:
        self._url = original_url
        raise
def playURI(self, uri):
    """Play a Spotify uri, for example spotify:track:5Yn8WCB4Dqm8snemB5Mu4K

    :param uri: Playlist, Artist, Album, or Song Uri
    """
    url: str = get_url("/remote/play.json")
    params = {
        "oauth": self._oauth_token,
        "csrf": self._csrf_token,
        "uri": uri,
        "context": uri,
    }
    r = self._request(url=url, params=params)
    return r.json()
def _op_msg_no_header(flags, command, identifier, docs, check_keys, opts):
    """Get a OP_MSG message.

    Note: this method handles multiple documents in a type one payload but
    it does not perform batch splitting and the total message size is
    only checked *after* generating the entire message.
    """
    # Encode the command document in payload 0 without checking keys.
    encoded = _dict_to_bson(command, False, opts)
    flags_type = _pack_op_msg_flags_type(flags, 0)
    total_size = len(encoded)
    max_doc_size = 0
    if identifier:
        type_one = _pack_byte(1)
        cstring = _make_c_string(identifier)
        encoded_docs = [_dict_to_bson(doc, check_keys, opts) for doc in docs]
        size = len(cstring) + sum(len(doc) for doc in encoded_docs) + 4
        encoded_size = _pack_int(size)
        total_size += size
        max_doc_size = max(len(doc) for doc in encoded_docs)
        data = ([flags_type, encoded, type_one, encoded_size, cstring] +
                encoded_docs)
    else:
        data = [flags_type, encoded]
    return b''.join(data), total_size, max_doc_size
def get_item_with_id(self, uid):
    """
    Returns item for defined UID.

    >>> book.get_item_with_id('image_001')

    :Args:
      - uid: UID for the item

    :Returns:
      Returns item object. Returns None if nothing was found.
    """
    for item in self.get_items():
        if item.id == uid:
            return item

    return None
def compare_and_set(self, expect, update):
    '''
    Atomically sets the value to `update` if the current value is equal to
    `expect`.

    :param expect: The expected current value.
    :param update: The value to set if and only if `expect` equals the current value.
    '''
    with self._reference.get_lock():
        if self._reference.value == expect:
            self._reference.value = update
            return True
        return False
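A typical pattern built on a compare-and-set primitive is the retry loop below. It assumes an atomic integer object exposing `get()` alongside the `compare_and_set()` shown above; the helper name `atomic_increment` is mine.

def atomic_increment(counter, delta=1):
    # Classic CAS retry loop: re-read and retry until no other
    # process/thread changed the value between get() and the swap.
    while True:
        current = counter.get()
        if counter.compare_and_set(current, current + delta):
            return current + delta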
def convert_sequence_to_motor_units(cycles, unit_converter):
    """ Converts a move sequence to motor units.

    Converts a move sequence to motor units using the provided converter.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another. See
        ``compile_sequence`` for format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units.

    Returns
    -------
    motor_cycles : list of dicts
        A deep copy of `cycles` with all units converted to motor units.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter

    """
    # Make a deep copy of cycles so that the conversions don't damage
    # the original one.
    cv_cycles = copy.deepcopy(cycles)
    # Go through each cycle and do the conversions.
    for cycle in cv_cycles:
        # Go through each of the moves and do the conversions.
        for move in cycle['moves']:
            move['A'] = unit_converter.to_motor_velocity_acceleration(
                move['A'])
            move['AD'] = unit_converter.to_motor_velocity_acceleration(
                move['AD'])
            move['V'] = unit_converter.to_motor_velocity_acceleration(
                move['V'])
            move['D'] = int(unit_converter.to_motor_distance(move['D']))

    # Now return the converted move sequence.
    return cv_cycles
def run_step(context):
    """Wipe the entire context.

    Args:
        Context is a dictionary or dictionary-like.
        Does not require any specific keys in context.
    """
    logger.debug("started")

    context.clear()

    logger.info(f"Context wiped. New context size: {len(context)}")

    logger.debug("done")
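A minimal sketch of calling a step like this outside the pipeline runner, assuming a plain dict stands in for the real dict-like Context class, so `clear()` and `len()` behave the same way.

# Assumes run_step has been imported from its step module, which defines
# its own module-level logger.
context = {"key1": "value1", "key2": "value2"}
run_step(context)
print(context)   # {} -- everything was wiped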
def hash(self):
    """
    Return a value that's used to uniquely identify an entry in a date so we
    can regroup all entries that share the same hash.
    """
    return u''.join([
        self.alias,
        self.description,
        str(self.ignored),
        str(self.flags),
    ])
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    # pylint: disable=too-many-arguments
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for specification of input and result values.

    Implements the correction factor for the upper crust, equation (12) on
    p. 484:

    ``P' = P x Correction_factor``
    """
    ln_mean, [ln_stddev] = super().get_mean_and_stddevs(
        sites, rup, dists, imt, stddev_types)
    # compute site corrections, equation (9)
    coeffs = self.COEFFS_UPPER[imt]
    ln_mean += np.log(coeffs['correction'])

    return ln_mean, [ln_stddev]
def _truncated_power_method(self, A, x0, k, max_iter=10000, thresh=1e-8):
    '''
    given a matrix A, an initial guess x0, and a maximum cardinality k,
    find the best k-sparse approximation to its dominant eigenvector

    References
    ----------
    [1] Yuan, X-T. and Zhang, T. "Truncated Power Method for Sparse
    Eigenvalue Problems." Journal of Machine Learning Research. Vol. 14.
    2013. http://www.jmlr.org/papers/volume14/yuan13a/yuan13a.pdf
    '''
    xts = [x0]
    for t in range(max_iter):
        xts.append(self._normalize(self._truncate(np.dot(A, xts[-1]), k)))
        if np.linalg.norm(xts[-1] - xts[-2]) < thresh:
            break
    return xts[-1]
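The method above leans on two small helpers, `_truncate` and `_normalize`, that are not shown in this entry. Below is a plausible standalone sketch of both (my reconstruction of what they likely do, not the library's exact code).

import numpy as np

def _truncate(x, k):
    # Keep only the k largest-magnitude entries of x; zero out the rest.
    out = np.zeros_like(x)
    idx = np.argsort(np.abs(x))[-k:]
    out[idx] = x[idx]
    return out

def _normalize(x):
    # Scale x to unit Euclidean norm (guarding against the zero vector).
    norm = np.linalg.norm(x)
    return x if norm == 0 else x / norm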
def api_request(*args, **kwargs):
    """
    Wrapper which converts a requests.Response into our custom APIResponse object

    :param args:
    :param kwargs:
    :return:
    """
    r = requests.request(*args, **kwargs)
    return APIResponse(r)
def reftrack_element_data(rt, role):
    """Return the data for the element (e.g. the Asset or Shot)

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the element
    :rtype: depending on role
    :raises: None
    """
    element = rt.get_element()
    if element is None:
        return
    if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
        return element.name
def list_tables(self):
    """
    Runs the ``\\dt`` command and returns a list of column values with
    information about all tables in the database.
    """
    lines = output_lines(self.exec_psql('\\dt'))
    return [line.split('|') for line in lines]
def fetch_next(self):
    """A Future used with `gen.coroutine`_ to asynchronously retrieve the
    next document in the result set, fetching a batch of documents from the
    server if necessary. Resolves to ``False`` if there are no more
    documents, otherwise :meth:`next_object` is guaranteed to return a
    document.

    .. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html

    .. doctest:: fetch_next
      :hide:

      >>> _ = MongoClient().test.test_collection.delete_many({})
      >>> collection = MotorClient().test.test_collection

    .. doctest:: fetch_next

      >>> @gen.coroutine
      ... def f():
      ...     yield collection.insert_many([{'_id': i} for i in range(5)])
      ...     cursor = collection.find().sort([('_id', 1)])
      ...     while (yield cursor.fetch_next):
      ...         doc = cursor.next_object()
      ...         sys.stdout.write(str(doc['_id']) + ', ')
      ...     print('done')
      ...
      >>> IOLoop.current().run_sync(f)
      0, 1, 2, 3, 4, done

    While it appears that fetch_next retrieves each document from the
    server individually, the cursor actually fetches documents
    efficiently in `large batches`_.

    In Python 3.5 and newer, cursors can be iterated elegantly and very
    efficiently in native coroutines with `async for`:

    .. doctest:: fetch_next

      >>> async def f():
      ...     async for doc in collection.find():
      ...         sys.stdout.write(str(doc['_id']) + ', ')
      ...     print('done')
      ...
      >>> IOLoop.current().run_sync(f)
      0, 1, 2, 3, 4, done

    .. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches
    """
    if not self._buffer_size() and self.alive:
        # Return the Future, which resolves to number of docs fetched or 0.
        return self._get_more()
    elif self._buffer_size():
        future = self._framework.get_future(self.get_io_loop())
        future.set_result(True)
        return future
    else:
        # Dead
        future = self._framework.get_future(self.get_io_loop())
        future.set_result(False)
        return future
def attach_event_handler(canvas, handler=close_on_esc_or_middlemouse):
    """
    Attach a handler function to the ProcessedEvent slot, defaulting to
    closing when middle mouse is clicked or escape is pressed

    Note that escape only works if the pad has focus, which in ROOT-land means
    the mouse has to be over the canvas area.
    """
    if getattr(canvas, "_py_event_dispatcher_attached", None):
        return

    event_dispatcher = C.TPyDispatcherProcessedEvent(handler)
    canvas.Connect("ProcessedEvent(int,int,int,TObject*)",
                   "TPyDispatcherProcessedEvent", event_dispatcher,
                   "Dispatch(int,int,int,TObject*)")

    # Attach a handler only once to each canvas, and keep the dispatcher alive
    canvas._py_event_dispatcher_attached = event_dispatcher
def visit_AsyncFunctionDef(self, node):
    """Visit an async function node."""
    node = self.get_function_node(node)
    if node is not None:
        node._async = True
def _load_isd_station_metadata(download_path):
    """ Collect metadata for US isd stations.
    """
    from shapely.geometry import Point

    # load ISD history which contains metadata
    isd_history = pd.read_csv(
        os.path.join(download_path, "isd-history.csv"),
        dtype=str,
        parse_dates=["BEGIN", "END"],
    )

    hasGEO = (
        isd_history.LAT.notnull() & isd_history.LON.notnull() & (isd_history.LAT != 0)
    )
    isUS = (
        ((isd_history.CTRY == "US") & (isd_history.STATE.notnull()))
        # AQ = American Samoa, GQ = Guam, RQ = Puerto Rico, VQ = Virgin Islands
        | (isd_history.CTRY.str[1] == "Q")
    )
    hasUSAF = isd_history.USAF != "999999"

    metadata = {}
    for usaf_station, group in isd_history[hasGEO & isUS & hasUSAF].groupby("USAF"):
        # find most recent
        recent = group.loc[group.END.idxmax()]
        wban_stations = list(group.WBAN)
        metadata[usaf_station] = {
            "usaf_id": usaf_station,
            "wban_ids": wban_stations,
            "recent_wban_id": recent.WBAN,
            "name": recent["STATION NAME"],
            "icao_code": recent.ICAO,
            "latitude": recent.LAT if recent.LAT not in ("+00.000",) else None,
            "longitude": recent.LON if recent.LON not in ("+000.000",) else None,
            "point": Point(float(recent.LON), float(recent.LAT)),
            "elevation": recent["ELEV(M)"]
            if not str(float(recent["ELEV(M)"])).startswith("-999")
            else None,
            "state": recent.STATE,
        }

    return metadata
def QA_SU_save_stock_list(client=DATABASE, ui_log=None, ui_progress=None):
    """save stock_list

    Keyword Arguments:
        client {[type]} -- [description] (default: {DATABASE})
    """
    client.drop_collection('stock_list')
    coll = client.stock_list
    coll.create_index('code')

    try:
        # 🛠 TODO: this should really be the first task (JOB01) -- update the stock list first!!
        QA_util_log_info(
            '##JOB08 Now Saving STOCK_LIST ====',
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=5000
        )
        stock_list_from_tdx = QA_fetch_get_stock_list()
        pandas_data = QA_util_to_json_from_pandas(stock_list_from_tdx)
        coll.insert_many(pandas_data)
        QA_util_log_info(
            "完成股票列表获取",
            ui_log=ui_log,
            ui_progress=ui_progress,
            ui_progress_int_value=10000
        )
    except Exception as e:
        QA_util_log_info(e, ui_log=ui_log)
        print(" Error save_tdx.QA_SU_save_stock_list exception!")
        pass
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.

    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need
        to be set during creation.

    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override
        any values provided in the defaults when inserting the object.

    :param kwargs: These values provide the arguments used when checking for the existence
        of the object. They are used in a similar manner to Django's get_or_create function.

    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)

    Examples:

    .. code-block:: python

        # Upsert a test model with an int value of 1. Use default values that will be given to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # Do an upsert on that same model with different default fields. Since it already exists, the defaults
        # are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0

        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    defaults = defaults or {}
    # Override any defaults with updates
    defaults.update(updates or {})

    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)

    # Update any necessary fields
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        obj.save(update_fields=updates)

    return obj, created
def printPre(self, *args):
    """
    get/set the str_pre string.
    """
    if len(args):
        self.b_printPre = args[0]
    else:
        return self.b_printPre
def to_volume(self):
    """Return a 3D volume of the data"""
    if hasattr(self.header.definitions, "Lattice"):
        X, Y, Z = self.header.definitions.Lattice
    else:
        raise ValueError("Unable to determine data size")
    volume = self.decoded_data.reshape(Z, Y, X)
    return volume
def get(self, key, value):
    """Gets an element from an index under a given key-value pair

    @params key: Index key string
    @params value: Index value string

    @returns A generator of Vertex or Edge objects"""
    for element in self.neoindex[key][value]:
        if self.indexClass == "vertex":
            yield Vertex(element)
        elif self.indexClass == "edge":
            yield Edge(element)
        else:
            raise TypeError(self.indexClass)
def sized_imap(func, iterable, strict=False):
    '''
    Return an iterable whose elements are the result of applying the callable `func`
    to each element of `iterable`. If `iterable` has a `len()`, then the iterable
    returned by this function will have the same `len()`. Otherwise calling `len()`
    on the returned iterable will raise `TypeError`.

    :param func: The function to apply to each element of `iterable`.
    :param iterable: An iterable whose objects will be mapped.
    :param bool strict: If `True` and `iterable` does not support `len()`, raise an
        exception immediately instead of returning an iterable that does not support `len()`.
    '''
    try:
        length = len(iterable)
    except TypeError:
        if strict:
            raise
        else:
            return imap(func, iterable)
    return SizedGenerator(lambda: imap(func, iterable), length=length)
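The return value depends on a `SizedGenerator` wrapper (and an `imap` alias, presumably the builtin `map` on Python 3) that are not shown in this entry. Below is a minimal sketch of what such a wrapper could look like, followed by the behaviour `sized_imap` promises; this is an assumption about its shape, not the library's definition.

class SizedGenerator(object):
    """Iterable that re-invokes a generator factory and reports a fixed len()."""

    def __init__(self, gen_func, length):
        self._gen_func = gen_func
        self._length = length

    def __iter__(self):
        return iter(self._gen_func())

    def __len__(self):
        return self._length

squares = sized_imap(lambda x: x * x, [1, 2, 3])
print(len(squares), list(squares))   # 3 [1, 4, 9]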
def ingest_user(self):
    '''Username responsible for ingesting this object into the repository,
    as recorded in the :attr:`audit_trail`, if available.'''
    # if there is an audit trail and it has records and the first
    # action is ingest, return the user
    if self.audit_trail and self.audit_trail.records \
            and self.audit_trail.records[0].action == 'ingest':
        return self.audit_trail.records[0].user
def XYZ100_to_CIECAM02(self, XYZ100, on_negative_A="raise"):
    """Computes CIECAM02 appearance correlates for the given tristimulus
    value(s) XYZ (normalized to be on the 0-100 scale).

    Example: ``vc.XYZ100_to_CIECAM02([30.0, 45.5, 21.0])``

    :param XYZ100: An array-like of tristimulus values. These should be
      given on the 0-100 scale, not the 0-1 scale. The array-like should
      have shape ``(..., 3)``; e.g., you can use a simple 3-item list
      (shape = ``(3,)``), or to efficiently perform multiple computations
      at once, you could pass a higher-dimensional array, e.g. an image.
    :arg on_negative_A: A known infelicity of the CIECAM02 model is that
      for some inputs, the achromatic signal :math:`A` can be negative,
      which makes it impossible to compute :math:`J`, :math:`C`,
      :math:`Q`, :math:`M`, or :math:`s` -- only :math:`h`: and :math:`H`
      are spared. (See, e.g., section 2.6.4.1 of :cite:`Luo-CIECAM02` for
      discussion.) This argument allows you to specify a strategy for
      handling such points. Options are:

      * ``"raise"``: throws a :class:`NegativeAError` (a subclass of
        :class:`ValueError`)
      * ``"nan"``: return not-a-number values for the affected
        elements. (This may be particularly useful if converting a
        large number of points at once.)

    :returns: A named tuple of type :class:`JChQMsH`, with attributes
      ``J``, ``C``, ``h``, ``Q``, ``M``, ``s``, and ``H`` containing the
      CIECAM02 appearance correlates.
    """

    #### Argument checking

    XYZ100 = np.asarray(XYZ100, dtype=float)
    if XYZ100.shape[-1] != 3:
        raise ValueError("XYZ100 shape must be (..., 3)")

    #### Step 1

    RGB = broadcasting_matvec(M_CAT02, XYZ100)

    #### Step 2

    RGB_C = self.D_RGB * RGB

    #### Step 3

    RGBprime = broadcasting_matvec(M_HPE_M_CAT02_inv, RGB_C)

    #### Step 4

    RGBprime_signs = np.sign(RGBprime)
    tmp = (self.F_L * RGBprime_signs * RGBprime / 100) ** 0.42
    RGBprime_a = RGBprime_signs * 400 * (tmp / (tmp + 27.13)) + 0.1

    #### Step 5

    a = broadcasting_matvec([1, -12. / 11, 1. / 11], RGBprime_a)
    b = broadcasting_matvec([1. / 9, 1. / 9, -2. / 9], RGBprime_a)
    h_rad = np.arctan2(b, a)
    h = np.rad2deg(h_rad) % 360

    # #### Step 6

    # hprime = h, unless h < 20.14, in which case hprime = h + 360.
    hprime = np.select([h < h_i[0], True], [h + 360, h])
    # we use 0-based indexing, so our i is one less than the reference
    # formulas' i.
    i = np.searchsorted(h_i, hprime, side="right") - 1
    tmp = (hprime - h_i[i]) / e_i[i]
    H = H_i[i] + ((100 * tmp) / (tmp + (h_i[i + 1] - hprime) / e_i[i + 1]))

    #### Step 7

    A = ((broadcasting_matvec([2, 1, 1. / 20], RGBprime_a) - 0.305)
         * self.N_bb)
    if on_negative_A == "raise":
        if np.any(A < 0):
            raise NegativeAError("attempted to convert a tristimulus "
                                 "value whose achromatic signal was "
                                 "negative, and on_negative_A=\"raise\"")
    elif on_negative_A == "nan":
        A = np.select([A < 0, True], [np.nan, A])
    else:
        raise ValueError("Invalid on_negative_A argument: got %r, "
                         "expected \"raise\" or \"nan\""
                         % (on_negative_A,))

    #### Step 8

    J = 100 * (A / self.A_w) ** (self.c * self.z)

    #### Step 9

    Q = self._J_to_Q(J)

    #### Step 10

    e = (12500. / 13) * self.N_c * self.N_cb * (np.cos(h_rad + 2) + 3.8)
    t = (e * np.sqrt(a ** 2 + b ** 2)
         / broadcasting_matvec([1, 1, 21. / 20], RGBprime_a))
    C = t**0.9 * (J / 100)**0.5 * (1.64 - 0.29**self.n)**0.73

    M = C * self.F_L**0.25
    s = 100 * (M / Q)**0.5

    return JChQMsH(J, C, h, Q, M, s, H)
def _iterate_uniqueness_keys(self, field):
    """Iterates over the keys marked as "unique" in the specified field.

    Arguments:
        field:
            The field whose keys to iterate over.
    """
    uniqueness = getattr(field, 'uniqueness', None)
    if not uniqueness:
        return

    for keys in uniqueness:
        composed_keys = self._compose_keys(keys)
        yield composed_keys
def get_homes(self, query=None, gps_lat=None, gps_lng=None, offset=0, items_per_grid=8):
    """
    Search listings with
        * Query (e.g. query="Lisbon, Portugal") or
        * Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
    """
    params = {
        'is_guided_search': 'true',
        'version': '1.3.9',
        'section_offset': '0',
        'items_offset': str(offset),
        'adults': '0',
        'screen_size': 'small',
        'source': 'explore_tabs',
        'items_per_grid': str(items_per_grid),
        '_format': 'for_explore_search_native',
        'metadata_only': 'false',
        'refinement_paths[]': '/homes',
        'timezone': 'Europe/Lisbon',
        'satori_version': '1.0.7'
    }

    if not query and not (gps_lat and gps_lng):
        raise MissingParameterError("Missing query or gps coordinates")

    if query:
        params['query'] = query

    if gps_lat and gps_lng:
        params['gps_lat'] = gps_lat
        params['gps_lng'] = gps_lng

    r = self._session.get(API_URL + '/explore_tabs', params=params)
    r.raise_for_status()

    return r.json()
def require_perms(view_func, required):
    """Enforces permission-based access controls.

    :param list required: A tuple of permission names, all of which the request
                          user must possess in order to access the decorated view.

    Example usage::

        from horizon.decorators import require_perms

        @require_perms(['foo.admin', 'foo.member'])
        def my_view(request):
            ...

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    requirements are not met.
    """
    from horizon.exceptions import NotAuthorized
    # We only need to check each permission once for a view, so we'll use a set
    current_perms = getattr(view_func, '_required_perms', set([]))
    view_func._required_perms = current_perms | set(required)

    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if request.user.is_authenticated:
            if request.user.has_perms(view_func._required_perms):
                return view_func(request, *args, **kwargs)
        raise NotAuthorized(_("You are not authorized to access %s")
                            % request.path)

    # If we don't have any permissions, just return the original view.
    if required:
        return dec
    else:
        return view_func
def case_comments(self):
    """Return only comments made on the case."""
    comments = (comment for comment in self.comments
                if comment.variant_id is None)
    return comments
def export_olx(self, tarball, root_path):
    """if sequestered, only export the assets"""
    def append_asset_to_soup_and_export(asset_):
        if isinstance(asset_, Item):
            try:
                unique_url = asset_.export_olx(tarball, root_path)
            except AttributeError:
                pass
            else:
                unique_name = get_file_name_without_extension(unique_url)
                asset_type = asset_.genus_type.identifier
                asset_tag = my_soup.new_tag(asset_type)
                asset_tag['url_name'] = unique_name
                getattr(my_soup, my_tag).append(asset_tag)
        else:
            try:
                unique_urls = asset_.export_olx(tarball, root_path)
            except AttributeError:
                pass
            else:
                for index, ac in enumerate(asset_.get_asset_contents()):
                    asset_type = ac.genus_type.identifier
                    unique_url = unique_urls[index]
                    unique_name = get_file_name_without_extension(unique_url)
                    asset_tag = my_soup.new_tag(asset_type)
                    asset_tag['url_name'] = unique_name
                    getattr(my_soup, my_tag).append(asset_tag)

    def get_file_name_without_extension(filepath):
        return filepath.split('/')[-1].replace('.xml', '')

    my_path = None
    if self.my_osid_object.is_sequestered():
        # just export assets
        for asset in self.assets:
            try:
                asset.export_olx(tarball, root_path)
            except AttributeError:
                pass
    else:
        # also add to the /<tag>/ folder
        my_tag = self.my_osid_object.genus_type.identifier
        expected_name = self.get_unique_name(tarball, self.url, my_tag, root_path)
        my_path = '{0}{1}/{2}.xml'.format(root_path, my_tag, expected_name)
        my_soup = BeautifulSoup('<' + my_tag + '/>', 'xml')
        getattr(my_soup, my_tag)['display_name'] = self.my_osid_object.display_name.text
        if my_tag == 'split_test':
            getattr(my_soup, my_tag)['group_id_to_child'] = self.my_osid_object.group_id_to_child
            getattr(my_soup, my_tag)['user_partition_id'] = self.my_osid_object.user_partition_id.text

        rm = self.my_osid_object._get_provider_manager('REPOSITORY')
        if self.my_osid_object._proxy is None:
            cls = rm.get_composition_lookup_session()
        else:
            cls = rm.get_composition_lookup_session(proxy=self.my_osid_object._proxy)
        cls.use_federated_repository_view()
        cls.use_unsequestered_composition_view()
        for child_id in self.my_osid_object.get_child_ids():
            child = cls.get_composition(child_id)
            if child.is_sequestered():
                # append its assets here
                for asset in child.assets:
                    append_asset_to_soup_and_export(asset)
            else:
                child_type = child.genus_type.identifier
                child_tag = my_soup.new_tag(child_type)
                child_path = child.export_olx(tarball, root_path)
                if child_path is not None:
                    child_tag['url_name'] = get_file_name_without_extension(child_path)
                getattr(my_soup, my_tag).append(child_tag)

        for asset in self.assets:
            append_asset_to_soup_and_export(asset)

        self.write_to_tarfile(tarball, my_path, my_soup)

    return my_path
if sequestered, only export the assets
def getDescendants(self, all_descendants=False): """Returns the descendant Analysis Requests :param all_descendants: recursively include all descendants """ # N.B. full objects returned here from # `Products.Archetypes.Referenceable.getBRefs` # -> don't add this method into Metadata children = self.getBackReferences( "AnalysisRequestParentAnalysisRequest") descendants = [] # recursively include all children if all_descendants: for child in children: descendants.append(child) descendants += child.getDescendants(all_descendants=True) else: descendants = children return descendants
Returns the descendant Analysis Requests :param all_descendants: recursively include all descendants
def write_oplog_progress(self): """ Writes oplog progress to the file provided by the user """ if self.oplog_checkpoint is None: return None with self.oplog_progress as oplog_prog: oplog_dict = oplog_prog.get_dict() items = [[name, util.bson_ts_to_long(oplog_dict[name])] for name in oplog_dict] if not items: return # write to temp file backup_file = self.oplog_checkpoint + ".backup" os.rename(self.oplog_checkpoint, backup_file) # for each of the threads write to file with open(self.oplog_checkpoint, "w") as dest: if len(items) == 1: # Write 1-dimensional array, as in previous versions. json_str = json.dumps(items[0]) else: # Write a 2d array to support sharded clusters. json_str = json.dumps(items) try: dest.write(json_str) except IOError: # Basically wipe the file, copy from backup. # copyfileobj operates on the open file objects here; # copyfile would require path strings and fail. dest.truncate() with open(backup_file, "r") as backup: shutil.copyfileobj(backup, dest) os.remove(backup_file)
Writes oplog progress to the file provided by the user
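A quick sketch of what lands in the checkpoint file under the two branches above; the thread names and timestamp values are made up, and util.bson_ts_to_long is assumed to yield plain integers:

import json

# One oplog thread: legacy flat form, a single [name, timestamp] pair
print(json.dumps(["rs0", 6754412314871201793]))

# Several threads (sharded cluster): a 2d array of [name, timestamp] pairs
print(json.dumps([["shard-a", 6754412314871201793],
                  ["shard-b", 6754412314871201795]]))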
def execute(self, sources, target): """ :type sources: list[DFAdapter] :type target: DFAdapter """ fields = self._get_fields_list_from_eps(sources) ret_fields = fields[0] if self.clear_feature: ret_fields = list(self._remove_field_roles(ret_fields, set(six.iterkeys(self.field_mapping)), FieldRole.FEATURE)) target._ml_fields = list(self._set_singleton_role(ret_fields, self.field_mapping))
:type sources: list[DFAdapter] :type target: DFAdapter
def to_excess_returns(returns, rf, nperiods=None): """ Given a series of returns, it will return the excess returns over rf. Args: * returns (Series, DataFrame): Returns * rf (float, Series): `Risk-Free rate(s) <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ expressed in annualized term or return series * nperiods (int): Optional. If provided, will convert rf to different frequency using deannualize only if rf is a float Returns: * excess_returns (Series, DataFrame): Returns - rf """ if type(rf) is float and nperiods is not None: _rf = deannualize(rf, nperiods) else: _rf = rf return returns - _rf
Given a series of returns, it will return the excess returns over rf. Args: * returns (Series, DataFrame): Returns * rf (float, Series): `Risk-Free rate(s) <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ expressed in annualized term or return series * nperiods (int): Optional. If provided, will convert rf to different frequency using deannualize only if rf is a float Returns: * excess_returns (Series, DataFrame): Returns - rf
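A minimal usage sketch, assuming to_excess_returns and its deannualize helper are importable from this module; the return values are made up:

import pandas as pd

# Three hypothetical monthly returns
monthly = pd.Series([0.010, -0.005, 0.020])

# rf is an annual 2%; nperiods=12 tells the helper to deannualize it to a
# monthly rate before subtracting it from each return.
excess = to_excess_returns(monthly, rf=0.02, nperiods=12)

# With a return series as rf, it is subtracted element-wise and nperiods is ignored.
rf_series = pd.Series([0.001, 0.001, 0.001])
excess2 = to_excess_returns(monthly, rf=rf_series)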
def create_environment_vip(self): """Get an instance of environment_vip services facade.""" return EnvironmentVIP( self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of environment_vip services facade.
def list(self, all_my_agents=False, limit=500, offset=0): """List `all` the things created by this client on this or all your agents Returns QAPI list function payload Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `all_my_agents` (optional) (boolean) If `False` limit search to just this agent, if `True` return list of things belonging to all agents you own. `limit` (optional) (integer) Return this many Point details `offset` (optional) (integer) Return Point details starting at this offset """ logger.info("list(all_my_agents=%s, limit=%s, offset=%s)", all_my_agents, limit, offset) if all_my_agents: evt = self._request_entity_list_all(limit=limit, offset=offset) else: evt = self._request_entity_list(limit=limit, offset=offset) self._wait_and_except_if_failed(evt) return evt.payload['entities']
List `all` the things created by this client on this or all your agents Returns QAPI list function payload Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `all_my_agents` (optional) (boolean) If `False` limit search to just this agent, if `True` return list of things belonging to all agents you own. `limit` (optional) (integer) Return this many Point details `offset` (optional) (integer) Return Point details starting at this offset
def method(self, returns, **parameter_types): """Syntactic sugar for registering a method Example: >>> registry = Registry() >>> @registry.method(returns=int, x=int, y=int) ... def add(x, y): ... return x + y :param returns: The method's return type :type returns: type :param parameter_types: The types of the method's parameters :type parameter_types: dict[str, type] .. versionadded:: 0.1.0 """ @wrapt.decorator def type_check_wrapper(method, instance, args, kwargs): """Wraps a method so that it is type-checked. :param method: The method to wrap :type method: (T) -> U :return: The result of calling the method with the given parameters :rtype: U """ if instance is not None: raise Exception("Instance shouldn't be set.") parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method defaults = inspect.getargspec(method).defaults # pylint: disable=deprecated-method parameters = self._collect_parameters(parameter_names, args, kwargs, defaults) parameter_checker.check_types(parameters, parameter_types, self._strict_floats) result = method(*args, **kwargs) parameter_checker.check_return_type(result, returns, self._strict_floats) return result def register_method(method): """Registers a method with its fully qualified name. :param method: The method to register :type method: function :return: The original method wrapped into a type-checker :rtype: function """ parameter_names = inspect.getargspec(method).args # pylint: disable=deprecated-method parameter_checker.check_type_declaration(parameter_names, parameter_types) wrapped_method = type_check_wrapper(method, None, None, None) fully_qualified_name = "{}.{}".format(method.__module__, method.__name__) self.register(fully_qualified_name, wrapped_method, MethodSignature.create(parameter_names, parameter_types, returns)) return wrapped_method return register_method
Syntactic sugar for registering a method Example: >>> registry = Registry() >>> @registry.method(returns=int, x=int, y=int) ... def add(x, y): ... return x + y :param returns: The method's return type :type returns: type :param parameter_types: The types of the method's parameters :type parameter_types: dict[str, type] .. versionadded:: 0.1.0
def register_callback_reassigned(self, func, serialised=True): """ Register a callback for resource reassignment. This will be called when any resource is reassigned to or from your agent. If `serialised` is not set, the callbacks might arrive in a different order to they were requested. The payload passed to your callback is an OrderedDict with the following keys #!python r : R_ENTITY, R_FEED, etc # the type of resource reassigned lid : <name> # the local name of the resource epId : <GUID> # the global Id of the agent the # resource has been reassigned *to* id : <GUID> # the global Id of the resource `Note` resource types are defined [here](../Core/Const.m.html) `Note` You can check whether this is an assign "in" or "out" by comparing the epId with your current agent id, using the `IOT.Client.agent_id` property. If it's the same it's a reassign to you. `Example` #!python def reassigned_callback(args): print(args) ... client.register_callback_reassigned(reassigned_callback) This would print out something like the following on assignment of an R_ENTITY to #!python OrderedDict([(u'lid', u'moved_thing'), (u'r', 1), (u'epId', u'5a8d603ee757133d66d99875d0584c72'), (u'id', u'4448993b44738411de5fe2a6cf32d957')]) """ self.__client.register_callback_reassigned(partial(self.__callback_payload_only, func), serialised)
Register a callback for resource reassignment. This will be called when any resource is reassigned to or from your agent. If `serialised` is not set, the callbacks might arrive in a different order than they were requested. The payload passed to your callback is an OrderedDict with the following keys #!python r : R_ENTITY, R_FEED, etc # the type of resource reassigned lid : <name> # the local name of the resource epId : <GUID> # the global Id of the agent the # resource has been reassigned *to* id : <GUID> # the global Id of the resource `Note` resource types are defined [here](../Core/Const.m.html) `Note` You can check whether this is an assign "in" or "out" by comparing the epId with your current agent id, using the `IOT.Client.agent_id` property. If it's the same it's a reassign to you. `Example` #!python def reassigned_callback(args): print(args) ... client.register_callback_reassigned(reassigned_callback) This would print out something like the following on assignment of an R_ENTITY to #!python OrderedDict([(u'lid', u'moved_thing'), (u'r', 1), (u'epId', u'5a8d603ee757133d66d99875d0584c72'), (u'id', u'4448993b44738411de5fe2a6cf32d957')])
def get_reset_data(self, data): """ Returns the user info to reset """ error = False reset = None msg = "" user = self.database.users.find_one({"reset": data["reset"]}) if user is None: error = True msg = "Invalid reset hash." else: reset = {"hash": data["reset"], "username": user["username"], "realname": user["realname"]} return msg, error, reset
Returns the user info to reset
def i2s_frameid(x): """ Get representation name of a pnio frame ID :param x: a key of the PNIO_FRAME_IDS dictionary :returns: str """ try: return PNIO_FRAME_IDS[x] except KeyError: pass if 0x0100 <= x < 0x1000: return "RT_CLASS_3 (%4x)" % x if 0x8000 <= x < 0xC000: return "RT_CLASS_1 (%4x)" % x if 0xC000 <= x < 0xFC00: return "RT_CLASS_UDP (%4x)" % x if 0xFF80 <= x < 0xFF90: return "FragmentationFrameID (%4x)" % x return x
Get representation name of a pnio frame ID :param x: a key of the PNIO_FRAME_IDS dictionary :returns: str, or the frame ID unchanged when it cannot be classified
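For illustration, two IDs that fall outside the PNIO_FRAME_IDS dictionary (assumed not to contain these keys) are resolved purely by the range checks:

print(i2s_frameid(0x8001))   # falls in 0x8000-0xBFFF -> 'RT_CLASS_1 (8001)'
print(i2s_frameid(0x0123))   # falls in 0x0100-0x0FFF -> 'RT_CLASS_3 ( 123)'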
def do_work_spec(self, args): '''dump the contents of an existing work spec''' work_spec_name = self._get_work_spec_name(args) spec = self.task_master.get_work_spec(work_spec_name) if args.json: self.stdout.write(json.dumps(spec, indent=4, sort_keys=True) + '\n') else: yaml.safe_dump(spec, self.stdout)
dump the contents of an existing work spec
def start_capture(self, adapter_number, output_file): """ Starts a packet capture. :param adapter_number: adapter number :param output_file: PCAP destination file for the capture """ try: adapter = self._ethernet_adapters[adapter_number] except KeyError: raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name, adapter_number=adapter_number)) nio = adapter.get_nio(0) if not nio: raise VirtualBoxError("Adapter {} is not connected".format(adapter_number)) if nio.capturing: raise VirtualBoxError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number)) nio.startPacketCapture(output_file) if self.ubridge: yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number), output_file=output_file)) log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number))
Starts a packet capture. :param adapter_number: adapter number :param output_file: PCAP destination file for the capture
def _set_get_stp_brief_info(self, v, load=False): """ Setter method for get_stp_brief_info, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_stp_brief_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_stp_brief_info() directly. YANG Description: RPC to return spanning tree information similar to the CLI 'show spanning-tree'. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_stp_brief_info.get_stp_brief_info, is_leaf=True, yang_name="get-stp-brief-info", rest_name="get-stp-brief-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show-spanning-tree-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_stp_brief_info must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_stp_brief_info.get_stp_brief_info, is_leaf=True, yang_name="get-stp-brief-info", rest_name="get-stp-brief-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show-spanning-tree-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-xstp-ext', defining_module='brocade-xstp-ext', yang_type='rpc', is_config=True)""", }) self.__get_stp_brief_info = t if hasattr(self, '_set'): self._set()
Setter method for get_stp_brief_info, mapped from YANG variable /brocade_xstp_ext_rpc/get_stp_brief_info (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_stp_brief_info is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_stp_brief_info() directly. YANG Description: RPC to return spanning tree information similar to the CLI 'show spanning-tree'.
def getScriptLocation(): """Helper function to get the location of a Python file.""" location = os.path.abspath("./") if __file__.rfind("/") != -1: location = __file__[:__file__.rfind("/")] return location
Helper function to get the location of a Python file.
def _expand_prefix_spec(self, spec, prefix = ''): """ Expand prefix specification to SQL. """ # sanity checks if type(spec) is not dict: raise NipapInputError('invalid prefix specification') for key in spec.keys(): if key not in _prefix_spec: raise NipapExtraneousInputError("Key '" + key + "' not allowed in prefix spec.") where = "" params = {} # if we have id, no other input is needed if 'id' in spec: if spec != {'id': spec['id']}: raise NipapExtraneousInputError("If 'id' specified, no other keys are allowed.") family = None if 'family' in spec: family = spec['family'] del(spec['family']) # rename prefix columns spec2 = {} for k in spec: spec2[prefix + k] = spec[k] spec = spec2 # handle keys which refer to external keys if prefix + 'vrf_id' in spec: # "translate" vrf id None to id = 0 if spec[prefix + 'vrf_id'] is None: spec[prefix + 'vrf_id'] = 0 if prefix + 'vrf_name' in spec: spec['vrf.name'] = spec[prefix + 'vrf_name'] del(spec[prefix + 'vrf_name']) if prefix + 'vrf_rt' in spec: spec['vrf.rt'] = spec[prefix + 'vrf_rt'] del(spec[prefix + 'vrf_rt']) if prefix + 'pool_name' in spec: spec['pool.name'] = spec[prefix + 'pool_name'] del(spec[prefix + 'pool_name']) where, params = self._sql_expand_where(spec) # prefix family needs to be handled separately as it's not stored # explicitly in the database if family: if len(params) == 0: where = "family(" + prefix + "prefix) = %(family)s" else: where += " AND family(" + prefix + "prefix) = %(family)s" params['family'] = family self._logger.debug("_expand_prefix_spec; where: %s params: %s" % (where, unicode(params))) return where, params
Expand prefix specification to SQL.
def build(self, endpoint, values=None, method=None, force_external=False, append_unknown=True): """Building URLs works pretty much the other way round. Instead of `match` you call `build` and pass it the endpoint and a dict of arguments for the placeholders. The `build` function also accepts an argument called `force_external` which, if you set it to `True` will force external URLs. Per default external URLs (include the server name) will only be used if the target URL is on a different subdomain. >>> m = Map([ ... Rule('/', endpoint='index'), ... Rule('/downloads/', endpoint='downloads/index'), ... Rule('/downloads/<int:id>', endpoint='downloads/show') ... ]) >>> urls = m.bind("example.com", "/") >>> urls.build("index", {}) '/' >>> urls.build("downloads/show", {'id': 42}) '/downloads/42' >>> urls.build("downloads/show", {'id': 42}, force_external=True) 'http://example.com/downloads/42' Because URLs cannot contain non ASCII data you will always get bytestrings back. Non ASCII characters are urlencoded with the charset defined on the map instance. Additional values are converted to unicode and appended to the URL as URL querystring parameters: >>> urls.build("index", {'q': 'My Searchstring'}) '/?q=My+Searchstring' If a rule does not exist when building a `BuildError` exception is raised. The build method accepts an argument called `method` which allows you to specify the method you want to have an URL built for if you have different methods for the same endpoint specified. .. versionadded:: 0.6 the `append_unknown` parameter was added. :param endpoint: the endpoint of the URL to build. :param values: the values for the URL to build. Unhandled values are appended to the URL as query parameters. :param method: the HTTP method for the rule if there are different URLs for different methods on the same endpoint. :param force_external: enforce full canonical external URLs. :param append_unknown: unknown parameters are appended to the generated URL as query string argument. Disable this if you want the builder to ignore those. """ self.map.update() if values: if isinstance(values, MultiDict): valueiter = values.iteritems(multi=True) else: valueiter = iteritems(values) values = dict((k, v) for k, v in valueiter if v is not None) else: values = {} rv = self._partial_build(endpoint, values, method, append_unknown) if rv is None: raise BuildError(endpoint, values, method) domain_part, path = rv host = self.get_host(domain_part) # shortcut this. if not force_external and ( (self.map.host_matching and host == self.server_name) or (not self.map.host_matching and domain_part == self.subdomain)): return str(urljoin(self.script_name, './' + path.lstrip('/'))) return str('%s://%s%s/%s' % ( self.url_scheme, host, self.script_name[:-1], path.lstrip('/') ))
Building URLs works pretty much the other way round. Instead of `match` you call `build` and pass it the endpoint and a dict of arguments for the placeholders. The `build` function also accepts an argument called `force_external` which, if you set it to `True` will force external URLs. Per default external URLs (include the server name) will only be used if the target URL is on a different subdomain. >>> m = Map([ ... Rule('/', endpoint='index'), ... Rule('/downloads/', endpoint='downloads/index'), ... Rule('/downloads/<int:id>', endpoint='downloads/show') ... ]) >>> urls = m.bind("example.com", "/") >>> urls.build("index", {}) '/' >>> urls.build("downloads/show", {'id': 42}) '/downloads/42' >>> urls.build("downloads/show", {'id': 42}, force_external=True) 'http://example.com/downloads/42' Because URLs cannot contain non ASCII data you will always get bytestrings back. Non ASCII characters are urlencoded with the charset defined on the map instance. Additional values are converted to unicode and appended to the URL as URL querystring parameters: >>> urls.build("index", {'q': 'My Searchstring'}) '/?q=My+Searchstring' If a rule does not exist when building a `BuildError` exception is raised. The build method accepts an argument called `method` which allows you to specify the method you want to have an URL built for if you have different methods for the same endpoint specified. .. versionadded:: 0.6 the `append_unknown` parameter was added. :param endpoint: the endpoint of the URL to build. :param values: the values for the URL to build. Unhandled values are appended to the URL as query parameters. :param method: the HTTP method for the rule if there are different URLs for different methods on the same endpoint. :param force_external: enforce full canonical external URLs. :param append_unknown: unknown parameters are appended to the generated URL as query string argument. Disable this if you want the builder to ignore those.
def notify(self, msg, color='green', notify='true', message_format='text'): """Send notification to specified HipChat room""" self.message_dict = { 'message': msg, 'color': color, 'notify': notify, 'message_format': message_format, } if not self.debug: return requests.post( self.notification_url, json.dumps(self.message_dict), headers=self.headers ) else: print('HipChat message: <{}>'.format(msg)) return []
Send notification to specified HipChat room
def possible_version_evaluation(self): """Evaluate the possible range of versions for each target, yielding the output analysis.""" only_broken = self.get_options().only_broken ranges = self._ranges yield 'Allowable JVM platform ranges (* = anything):' for target in sorted(filter(self._is_relevant, self.jvm_targets)): min_version = ranges.min_allowed_version.get(target) max_version = ranges.max_allowed_version.get(target) current_valid = True if min_version and self.jvm_version(target) < min_version: current_valid = False if max_version and self.jvm_version(target) > max_version: current_valid = False current_text = str(self.jvm_version(target)) if not current_valid: current_text = self._format_error(current_text) elif only_broken: continue if min_version and max_version: range_text = '{} to {}'.format(min_version, max_version) if min_version > max_version: range_text = self._format_error(range_text) elif min_version: range_text = '{}+'.format(min_version) elif max_version: range_text = '<={}'.format(max_version) else: range_text = '*' yield '{address}: {range} (is {current})'.format(address=target.address.spec, range=range_text, current=current_text,) if self.get_options().detailed or not current_valid: if min_version: min_because = [t for t in ranges.target_dependencies[target] if self.jvm_version(t) == min_version] yield ' min={} because of dependencies:'.format(min_version) for dep in sorted(min_because): yield ' {}'.format(dep.address.spec) if max_version: max_because = [t for t in ranges.target_dependees[target] if self.jvm_version(t) == max_version] yield ' max={} because of dependees:'.format(max_version) for dep in sorted(max_because): yield ' {}'.format(dep.address.spec) yield ''
Evaluate the possible range of versions for each target, yielding the output analysis.
def load_schema(schema): """ Load a schema file with path +schema+ into the database. Assumes that there exists an active database connection. """ with repo.Repo.db: repo.Repo.db.executescript(schema)
Load a schema file with path +schema+ into the database. Assumes that there exists an active database connection.
def dead(self): """Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false. """ if not self._weak: return False cb = self._callback() if cb is None: return True return False
Whether the callback no longer exists. If the callback is maintained via a weak reference, and that weak reference has been collected, this will be true instead of false.
async def _process_latching(self, key, latching_entry): """ This is a private utility method. This method process latching events and either returns them via callback or stores them in the latch map :param key: Encoded pin :param latching_entry: a latch table entry :returns: Callback or store data in latch map """ if latching_entry[Constants.LATCH_CALLBACK]: # auto clear entry and execute the callback if latching_entry[Constants.LATCH_CALLBACK_TYPE]: await latching_entry[Constants.LATCH_CALLBACK] \ ([key, latching_entry[Constants.LATCHED_DATA], time.time()]) # noinspection PyPep8 else: latching_entry[Constants.LATCH_CALLBACK] \ ([key, latching_entry[Constants.LATCHED_DATA], time.time()]) self.latch_map[key] = [0, 0, 0, 0, 0, None] else: updated_latch_entry = latching_entry updated_latch_entry[Constants.LATCH_STATE] = \ Constants.LATCH_LATCHED updated_latch_entry[Constants.LATCHED_DATA] = \ latching_entry[Constants.LATCHED_DATA] # time stamp it updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time() self.latch_map[key] = updated_latch_entry
This is a private utility method. This method processes latching events and either returns them via callback or stores them in the latch map :param key: Encoded pin :param latching_entry: a latch table entry :returns: Callback or store data in latch map
def compute_laplacian(self, lap_type='combinatorial'): r"""Compute a graph Laplacian. For undirected graphs, the combinatorial Laplacian is defined as .. math:: L = D - W, where :math:`W` is the weighted adjacency matrix and :math:`D` the weighted degree matrix. The normalized Laplacian is defined as .. math:: L = I - D^{-1/2} W D^{-1/2}, where :math:`I` is the identity matrix. For directed graphs, the Laplacians are built from a symmetrized version of the weighted adjacency matrix that is the average of the weighted adjacency matrix and its transpose. As the Laplacian is defined as the divergence of the gradient, it is not affected by the orientation of the edges. For both Laplacians, the diagonal entries corresponding to disconnected nodes (i.e., nodes with degree zero) are set to zero. Once computed, the Laplacian is accessible by the attribute :attr:`L`. Parameters ---------- lap_type : {'combinatorial', 'normalized'} The kind of Laplacian to compute. Default is combinatorial. Examples -------- Combinatorial and normalized Laplacians of an undirected graph. >>> graph = graphs.Graph([ ... [0, 2, 0], ... [2, 0, 1], ... [0, 1, 0], ... ]) >>> graph.compute_laplacian('combinatorial') >>> graph.L.toarray() array([[ 2., -2., 0.], [-2., 3., -1.], [ 0., -1., 1.]]) >>> graph.compute_laplacian('normalized') >>> graph.L.toarray() array([[ 1. , -0.81649658, 0. ], [-0.81649658, 1. , -0.57735027], [ 0. , -0.57735027, 1. ]]) Combinatorial and normalized Laplacians of a directed graph. >>> graph = graphs.Graph([ ... [0, 2, 0], ... [2, 0, 1], ... [0, 0, 0], ... ]) >>> graph.compute_laplacian('combinatorial') >>> graph.L.toarray() array([[ 2. , -2. , 0. ], [-2. , 2.5, -0.5], [ 0. , -0.5, 0.5]]) >>> graph.compute_laplacian('normalized') >>> graph.L.toarray() array([[ 1. , -0.89442719, 0. ], [-0.89442719, 1. , -0.4472136 ], [ 0. , -0.4472136 , 1. ]]) The Laplacian is defined as the divergence of the gradient. See :meth:`compute_differential_operator` for details. >>> graph = graphs.Path(20) >>> graph.compute_differential_operator() >>> L = graph.D.dot(graph.D.T) >>> np.all(L.toarray() == graph.L.toarray()) True The Laplacians have a bounded spectrum. >>> G = graphs.Sensor(50) >>> G.compute_laplacian('combinatorial') >>> G.compute_fourier_basis() >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2*np.max(G.dw) True >>> G.compute_laplacian('normalized') >>> G.compute_fourier_basis() >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2 True """ if lap_type != self.lap_type: # Those attributes are invalidated when the Laplacian is changed. # Alternative: don't allow the user to change the Laplacian. self._lmax = None self._U = None self._e = None self._coherence = None self._D = None self.lap_type = lap_type if not self.is_directed(): W = self.W else: W = utils.symmetrize(self.W, method='average') if lap_type == 'combinatorial': D = sparse.diags(self.dw) self.L = D - W elif lap_type == 'normalized': d = np.zeros(self.n_vertices) disconnected = (self.dw == 0) np.power(self.dw, -0.5, where=~disconnected, out=d) D = sparse.diags(d) self.L = sparse.identity(self.n_vertices) - D * W * D self.L[disconnected, disconnected] = 0 self.L.eliminate_zeros() else: raise ValueError('Unknown Laplacian type {}'.format(lap_type))
r"""Compute a graph Laplacian. For undirected graphs, the combinatorial Laplacian is defined as .. math:: L = D - W, where :math:`W` is the weighted adjacency matrix and :math:`D` the weighted degree matrix. The normalized Laplacian is defined as .. math:: L = I - D^{-1/2} W D^{-1/2}, where :math:`I` is the identity matrix. For directed graphs, the Laplacians are built from a symmetrized version of the weighted adjacency matrix that is the average of the weighted adjacency matrix and its transpose. As the Laplacian is defined as the divergence of the gradient, it is not affected by the orientation of the edges. For both Laplacians, the diagonal entries corresponding to disconnected nodes (i.e., nodes with degree zero) are set to zero. Once computed, the Laplacian is accessible by the attribute :attr:`L`. Parameters ---------- lap_type : {'combinatorial', 'normalized'} The kind of Laplacian to compute. Default is combinatorial. Examples -------- Combinatorial and normalized Laplacians of an undirected graph. >>> graph = graphs.Graph([ ... [0, 2, 0], ... [2, 0, 1], ... [0, 1, 0], ... ]) >>> graph.compute_laplacian('combinatorial') >>> graph.L.toarray() array([[ 2., -2., 0.], [-2., 3., -1.], [ 0., -1., 1.]]) >>> graph.compute_laplacian('normalized') >>> graph.L.toarray() array([[ 1. , -0.81649658, 0. ], [-0.81649658, 1. , -0.57735027], [ 0. , -0.57735027, 1. ]]) Combinatorial and normalized Laplacians of a directed graph. >>> graph = graphs.Graph([ ... [0, 2, 0], ... [2, 0, 1], ... [0, 0, 0], ... ]) >>> graph.compute_laplacian('combinatorial') >>> graph.L.toarray() array([[ 2. , -2. , 0. ], [-2. , 2.5, -0.5], [ 0. , -0.5, 0.5]]) >>> graph.compute_laplacian('normalized') >>> graph.L.toarray() array([[ 1. , -0.89442719, 0. ], [-0.89442719, 1. , -0.4472136 ], [ 0. , -0.4472136 , 1. ]]) The Laplacian is defined as the divergence of the gradient. See :meth:`compute_differential_operator` for details. >>> graph = graphs.Path(20) >>> graph.compute_differential_operator() >>> L = graph.D.dot(graph.D.T) >>> np.all(L.toarray() == graph.L.toarray()) True The Laplacians have a bounded spectrum. >>> G = graphs.Sensor(50) >>> G.compute_laplacian('combinatorial') >>> G.compute_fourier_basis() >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2*np.max(G.dw) True >>> G.compute_laplacian('normalized') >>> G.compute_fourier_basis() >>> -1e-10 < G.e[0] < 1e-10 < G.e[-1] < 2 True
def register_languages(): """Register all supported languages to ensure compatibility.""" for language in set(SUPPORTED_LANGUAGES) - {"en"}: language_stemmer = partial(nltk_stemmer, get_language_stemmer(language)) Pipeline.register_function(language_stemmer, "stemmer-{}".format(language))
Register all supported languages to ensure compatibility.
def _process_image_msg(self, msg): """ Process an image message and return a numpy array with the image data Returns ------- :obj:`numpy.ndarray` containing the image in the image message Raises ------ CvBridgeError If the bridge is not able to convert the image """ encoding = msg.encoding try: image = self._bridge.imgmsg_to_cv2(msg, encoding) except CvBridgeError as e: rospy.logerr(e) # Re-raise so the documented CvBridgeError propagates instead of # falling through to `return image` with `image` never assigned. raise return image
Process an image message and return a numpy array with the image data Returns ------- :obj:`numpy.ndarray` containing the image in the image message Raises ------ CvBridgeError If the bridge is not able to convert the image
def gen_unordered(self): """Generate batches of operations, batched by type of operation, in arbitrary order. """ operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] for idx, (op_type, operation) in enumerate(self.ops): operations[op_type].add(idx, operation) for run in operations: if run.ops: yield run
Generate batches of operations, batched by type of operation, in arbitrary order.
def calculate_request_digest(method, partial_digest, digest_response=None, uri=None, nonce=None, nonce_count=None, client_nonce=None): ''' Calculates a value for the 'response' value of the client authentication request. Requires the 'partial_digest' calculated from the realm, username, and password. Either call it with a digest_response to use the values from an authentication request, or pass the individual parameters (i.e. to generate an authentication request). ''' if digest_response: if uri or nonce or nonce_count or client_nonce: raise Exception("Both digest_response and one or more " "individual parameters were sent.") uri = digest_response.uri nonce = digest_response.nonce nonce_count = digest_response.nc client_nonce=digest_response.cnonce elif not (uri and nonce and (nonce_count != None) and client_nonce): raise Exception("Neither digest_response nor all individual parameters were sent.") ha2 = md5.md5("%s:%s" % (method, uri)).hexdigest() data = "%s:%s:%s:%s:%s" % (nonce, "%08x" % nonce_count, client_nonce, 'auth', ha2) kd = md5.md5("%s:%s" % (partial_digest, data)).hexdigest() return kd
Calculates a value for the 'response' value of the client authentication request. Requires the 'partial_digest' calculated from the realm, username, and password. Either call it with a digest_response to use the values from an authentication request, or pass the individual parameters (i.e. to generate an authentication request).
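A hedged example of building a response digest from the individual parameters, using the familiar RFC 2617 sample values; the partial digest is HA1 = MD5("username:realm:password"), assumed to be produced elsewhere in this module:

import hashlib

# HA1 for user "Mufasa" in realm "testrealm@host.com" with password "Circle Of Life"
partial = hashlib.md5(b"Mufasa:testrealm@host.com:Circle Of Life").hexdigest()

response = calculate_request_digest(
    "GET", partial,
    uri="/dir/index.html",
    nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
    nonce_count=1,          # rendered as "00000001" by the helper
    client_nonce="0a4f113b",
)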
def load(cls, path_to_file): """ Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance """ import mimetypes mimetypes.init() mime = mimetypes.guess_type('file://%s' % path_to_file)[0] img_type = ImageTypeEnum.lookup_by_mime_type(mime) with open(path_to_file, 'rb') as f: data = f.read() return Image(data, image_type=img_type)
Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance
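Usage is a one-liner; the path below is hypothetical, and the image type is inferred from the '.png' extension via mimetypes:

img = Image.load('/tmp/radar_snapshot.png')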
def run_preassembly_related(preassembler, beliefengine, **kwargs): """Run related stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance which already has a set of unique statements internally. beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements. """ logger.info('Combining related on %d statements...' % len(preassembler.unique_stmts)) return_toplevel = kwargs.get('return_toplevel', True) poolsize = kwargs.get('poolsize', None) size_cutoff = kwargs.get('size_cutoff', 100) stmts_out = preassembler.combine_related(return_toplevel=False, poolsize=poolsize, size_cutoff=size_cutoff) # Calculate beliefs beliefengine.set_hierarchy_probs(stmts_out) # Flatten evidence if needed do_flatten_evidence = kwargs.get('flatten_evidence', False) if do_flatten_evidence: flatten_evidences_collect_from = \ kwargs.get('flatten_evidence_collect_from', 'supported_by') stmts_out = flatten_evidence(stmts_out, flatten_evidences_collect_from) # Filter to top if needed stmts_top = filter_top_level(stmts_out) if return_toplevel: stmts_out = stmts_top logger.info('%d top-level statements' % len(stmts_out)) else: logger.info('%d statements out of which %d are top-level' % (len(stmts_out), len(stmts_top))) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
Run related stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance which already has a set of unique statements internally. beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. return_toplevel : Optional[bool] If True, only the top-level statements are returned. If False, all statements are returned irrespective of level of specificity. Default: True poolsize : Optional[int] The number of worker processes to use to parallelize the comparisons performed by the function. If None (default), no parallelization is performed. NOTE: Parallelization is only available on Python 3.4 and above. size_cutoff : Optional[int] Groups with size_cutoff or more statements are sent to worker processes, while smaller groups are compared in the parent process. Default value is 100. Not relevant when parallelization is not used. flatten_evidence : Optional[bool] If True, evidences are collected and flattened via supports/supported_by links. Default: False flatten_evidence_collect_from : Optional[str] String indicating whether to collect and flatten evidence from the `supports` attribute of each statement or the `supported_by` attribute. If not set, defaults to 'supported_by'. Only relevant when flatten_evidence is True. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of preassembled top-level statements.
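A sketch of how this stage is typically driven; hedged in that `pa` is a Preassembler whose unique statements have already been computed, `be` is a BeliefEngine, and the keyword values are illustrative:

top_level = run_preassembly_related(
    pa, be,
    return_toplevel=True,
    poolsize=4,              # parallel comparison workers (Python 3.4+)
    flatten_evidence=True,   # collect evidence via supported_by links (the default source)
    save='preassembled.pkl', # optional pickle of the output statements
)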
def visible(self, visible): """When visible changed, do setup or unwatch and call visible_callback""" self._visible = visible if visible and len(self.panel.objects) == 0: self.setup() self.select.visible = True self.control_panel.extend(self.controls) self.panel.extend(self.children) elif not visible and len(self.panel.objects) > 0: self.unwatch() # do children self.select.visible = False self.control_panel.clear() self.search.visible = False self.add.visible = False self.panel.clear() if self.visible_callback: self.visible_callback(visible)
When visible is changed, do setup or unwatch and call visible_callback
def eval(self, expression, args=None, *, timeout=-1.0, push_subscribe=False) -> _MethodRet: """ Eval request coroutine. Examples: .. code-block:: pycon >>> await conn.eval('return 42') <Response sync=3 rowcount=1 data=[42]> >>> await conn.eval('return box.info.version') <Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']> :param expression: expression to execute :param args: arguments to pass to the function, that will execute your expression (list object) :param timeout: Request timeout :param push_subscribe: Subscribe to push messages :returns: :class:`asynctnt.Response` instance """ return self._db.eval(expression, args, timeout=timeout, push_subscribe=push_subscribe)
Eval request coroutine. Examples: .. code-block:: pycon >>> await conn.eval('return 42') <Response sync=3 rowcount=1 data=[42]> >>> await conn.eval('return box.info.version') <Response sync=3 rowcount=1 data=['2.1.1-7-gd381a45b6']> :param expression: expression to execute :param args: arguments to pass to the function, that will execute your expression (list object) :param timeout: Request timeout :param push_subscribe: Subscribe to push messages :returns: :class:`asynctnt.Response` instance
def run(configobj, wcsmap=None): """ Initial example by Nadia ran MD with configobj EPAR using: It can be run in one of two ways: from stsci.tools import teal 1. Passing a config object to teal teal.teal('drizzlepac/pars/astrodrizzle.cfg') 2. Passing a task name: teal.teal('astrodrizzle') The example config files are in drizzlepac/pars """ # turn on logging, redirecting stdout/stderr messages to a log file # while also printing them out to stdout as well # also, initialize timing of processing steps # # We need to define a default logfile name from the user's parameters input_list, output, ivmlist, odict = \ processInput.processFilenames(configobj['input']) if output is not None: def_logname = output elif len(input_list) > 0: def_logname = input_list[0] else: print(textutil.textbox( "ERROR:\nNo valid input files found! Please restart the task " "and check the value for the 'input' parameter."), file=sys.stderr) def_logname = None return clean = configobj['STATE OF INPUT FILES']['clean'] procSteps = util.ProcSteps() print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n" .format(__version__, __version_date__, util._ptime()[0])) util.print_pkg_versions(log=log) log.debug('') log.debug( "==== AstroDrizzle was invoked with the following parameters: ====" ) log.debug('') util.print_cfg(configobj, log.debug) try: # Define list of imageObject instances and output WCSObject instance # based on input paramters imgObjList = None procSteps.addStep('Initialization') imgObjList, outwcs = processInput.setCommonInput(configobj) procSteps.endStep('Initialization') if imgObjList is None or not imgObjList: errmsg = "No valid images found for processing!\n" errmsg += "Check log file for full details.\n" errmsg += "Exiting AstroDrizzle now..." print(textutil.textbox(errmsg, width=65)) print(textutil.textbox( 'ERROR:\nAstroDrizzle Version {:s} encountered a problem! ' 'Processing terminated at {:s}.' .format(__version__, util._ptime()[0])), file=sys.stderr) return log.info("USER INPUT PARAMETERS common to all Processing Steps:") util.printParams(configobj, log=log) # Call rest of MD steps... #create static masks for each image staticMask.createStaticMask(imgObjList, configobj, procSteps=procSteps) #subtract the sky sky.subtractSky(imgObjList, configobj, procSteps=procSteps) # _dbg_dump_virtual_outputs(imgObjList) #drizzle to separate images adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap, procSteps=procSteps) # _dbg_dump_virtual_outputs(imgObjList) #create the median images from the driz sep images createMedian.createMedian(imgObjList, configobj, procSteps=procSteps) #blot the images back to the original reference frame ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap, procSteps=procSteps) #look for cosmic rays drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps) #Make your final drizzled image adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap, procSteps=procSteps) print() print("AstroDrizzle Version {:s} is finished processing at {:s}.\n" .format(__version__, util._ptime()[0])) except: clean = False print(textutil.textbox( "ERROR:\nAstroDrizzle Version {:s} encountered a problem! " "Processing terminated at {:s}." .format(__version__, util._ptime()[0])), file=sys.stderr) raise finally: procSteps.reportTimes() if imgObjList: for image in imgObjList: if clean: image.clean() image.close() del imgObjList del outwcs
Initial example by Nadia ran MD with configobj EPAR. It can be run in one of two ways (after `from stsci.tools import teal`): 1. Passing a config object to teal: teal.teal('drizzlepac/pars/astrodrizzle.cfg') 2. Passing a task name: teal.teal('astrodrizzle') The example config files are in drizzlepac/pars.
def _check_minions_directories(pki_dir): ''' Return the minion keys directory paths. This function is a copy of salt.key.Key._check_minions_directories. ''' minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC) minions_pre = os.path.join(pki_dir, salt.key.Key.PEND) minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ) minions_denied = os.path.join(pki_dir, salt.key.Key.DEN) return minions_accepted, minions_pre, minions_rejected, minions_denied
Return the minion keys directory paths. This function is a copy of salt.key.Key._check_minions_directories.
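A short usage sketch; the pki directory below is the conventional master location but is only an assumption here:

accepted, pre, rejected, denied = _check_minions_directories('/etc/salt/pki/master')
# e.g. accepted -> '/etc/salt/pki/master/minions' (exact names come from salt.key.Key.ACC etc.)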
def write(self, filename, type_='obo'): #FIXME this is bugged """ Write file, will not overwrite files with the same name outputs to obo by default but can also output to ttl if passed type_='ttl' when called. """ if os.path.exists(filename): name, ext = filename.rsplit('.',1) try: prefix, num = name.rsplit('_',1) n = int(num) n += 1 filename = prefix + '_' + str(n) + '.' + ext except ValueError: filename = name + '_1.' + ext print('file exists, renaming to %s' % filename) self.write(filename, type_) else: with open(filename, 'wt', encoding='utf-8') as f: if type_ == 'obo': f.write(str(self)) # FIXME this is incredibly slow for big files :/ elif type_ == 'ttl': f.write(self.__ttl__()) else: raise TypeError('No exporter for file type %s!' % type_)
Write file; will not overwrite files with the same name. Outputs to obo by default, but can also output to ttl if passed type_='ttl' when called.
def parsebool(el): """ Parse a ``BeautifulSoup`` element as a bool """ txt = text(el) up = txt.upper() if up == "OUI": return True if up == "NON": return False return bool(parseint(el))
Parse a ``BeautifulSoup`` element as a bool
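A quick illustration with BeautifulSoup, assuming the module's own text() and parseint() helpers are in scope:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<root><actif>OUI</actif><nb>0</nb></root>', 'html.parser')
parsebool(soup.actif)   # 'OUI' matches the French "yes" -> True
parsebool(soup.nb)      # neither OUI nor NON, falls back to bool(parseint(el)) -> False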
def namedb_history_save( cur, opcode, history_id, creator_address, value_hash, block_id, vtxindex, txid, accepted_rec, history_snapshot=False ): """ Insert data into the state engine's history. It must be for a never-before-seen (txid,block_id,vtxindex) set. @history_id is either the name or namespace ID Return True on success Raise an Exception on error """ assert 'op' in accepted_rec, "Malformed record at ({},{}): missing op".format(block_id, accepted_rec['vtxindex']) op = accepted_rec['op'] record_data = op_canonicalize(opcode, accepted_rec) record_txt = json.dumps(record_data, sort_keys=True) history_insert = { "txid": txid, "history_id": history_id, "creator_address": creator_address, "block_id": block_id, "vtxindex": vtxindex, "op": op, "opcode": opcode, "history_data": record_txt, 'value_hash': value_hash } try: query, values = namedb_insert_prepare( cur, history_insert, "history" ) except Exception, e: log.exception(e) log.error("FATAL: failed to append history record for '%s' at (%s, %s)" % (history_id, block_id, vtxindex)) os.abort() namedb_query_execute( cur, query, values ) return True
Insert data into the state engine's history. It must be for a never-before-seen (txid,block_id,vtxindex) set. @history_id is either the name or namespace ID Return True on success Raise an Exception on error
def record_serializer(self): """Github record serializer.""" imp = current_app.config['GITHUB_RECORD_SERIALIZER'] if isinstance(imp, string_types): return import_string(imp) return imp
Github record serializer.
def run(self): ''' Execute the salt command line ''' import salt.client self.parse_args() if self.config['log_level'] not in ('quiet', ): # Setup file logging! self.setup_logfile_logger() verify_log(self.config) try: # We don't need to bail on config file permission errors # if the CLI process is run with the -a flag skip_perm_errors = self.options.eauth != '' self.local_client = salt.client.get_local_client( self.get_config_file_path(), skip_perm_errors=skip_perm_errors, auto_reconnect=True) except SaltClientError as exc: self.exit(2, '{0}\n'.format(exc)) return if self.options.batch or self.options.static: # _run_batch() will handle all output and # exit with the appropriate error condition # Execution will not continue past this point # in batch mode. self._run_batch() return if self.options.preview_target: minion_list = self._preview_target() self._output_ret(minion_list, self.config.get('output', 'nested')) return if self.options.timeout <= 0: self.options.timeout = self.local_client.opts['timeout'] kwargs = { 'tgt': self.config['tgt'], 'fun': self.config['fun'], 'arg': self.config['arg'], 'timeout': self.options.timeout, 'show_timeout': self.options.show_timeout, 'show_jid': self.options.show_jid} if 'token' in self.config: import salt.utils.files try: with salt.utils.files.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_: kwargs['key'] = fp_.readline() except IOError: kwargs['token'] = self.config['token'] kwargs['delimiter'] = self.options.delimiter if self.selected_target_option: kwargs['tgt_type'] = self.selected_target_option else: kwargs['tgt_type'] = 'glob' # If batch_safe_limit is set, check minions matching target and # potentially switch to batch execution if self.options.batch_safe_limit > 1: if len(self._preview_target()) >= self.options.batch_safe_limit: salt.utils.stringutils.print_cli('\nNOTICE: Too many minions targeted, switching to batch execution.') self.options.batch = self.options.batch_safe_size self._run_batch() return if getattr(self.options, 'return'): kwargs['ret'] = getattr(self.options, 'return') if getattr(self.options, 'return_config'): kwargs['ret_config'] = getattr(self.options, 'return_config') if getattr(self.options, 'return_kwargs'): kwargs['ret_kwargs'] = yamlify_arg( getattr(self.options, 'return_kwargs')) if getattr(self.options, 'module_executors'): kwargs['module_executors'] = yamlify_arg(getattr(self.options, 'module_executors')) if getattr(self.options, 'executor_opts'): kwargs['executor_opts'] = yamlify_arg(getattr(self.options, 'executor_opts')) if getattr(self.options, 'metadata'): kwargs['metadata'] = yamlify_arg( getattr(self.options, 'metadata')) # If using eauth and a token hasn't already been loaded into # kwargs, prompt the user to enter auth credentials if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth: # This is expensive. Don't do it unless we need to. 
import salt.auth resolver = salt.auth.Resolver(self.config) res = resolver.cli(self.options.eauth) if self.options.mktoken and res: tok = resolver.token_cli( self.options.eauth, res ) if tok: kwargs['token'] = tok.get('token', '') if not res: sys.stderr.write('ERROR: Authentication failed\n') sys.exit(2) kwargs.update(res) kwargs['eauth'] = self.options.eauth if self.config['async']: jid = self.local_client.cmd_async(**kwargs) salt.utils.stringutils.print_cli('Executed command with job ID: {0}'.format(jid)) return # local will be None when there was an error if not self.local_client: return retcodes = [] errors = [] try: if self.options.subset: cmd_func = self.local_client.cmd_subset kwargs['sub'] = self.options.subset kwargs['cli'] = True else: cmd_func = self.local_client.cmd_cli if self.options.progress: kwargs['progress'] = True self.config['progress'] = True ret = {} for progress in cmd_func(**kwargs): out = 'progress' try: self._progress_ret(progress, out) except LoaderError as exc: raise SaltSystemExit(exc) if 'return_count' not in progress: ret.update(progress) self._progress_end(out) self._print_returns_summary(ret) elif self.config['fun'] == 'sys.doc': ret = {} out = '' for full_ret in self.local_client.cmd_cli(**kwargs): ret_, out, retcode = self._format_ret(full_ret) ret.update(ret_) self._output_ret(ret, out, retcode=retcode) else: if self.options.verbose: kwargs['verbose'] = True ret = {} for full_ret in cmd_func(**kwargs): try: ret_, out, retcode = self._format_ret(full_ret) retcodes.append(retcode) self._output_ret(ret_, out, retcode=retcode) ret.update(full_ret) except KeyError: errors.append(full_ret) # Returns summary if self.config['cli_summary'] is True: if self.config['fun'] != 'sys.doc': if self.options.output is None: self._print_returns_summary(ret) self._print_errors_summary(errors) # NOTE: Return code is set here based on if all minions # returned 'ok' with a retcode of 0. # This is the final point before the 'salt' cmd returns, # which is why we set the retcode here. if not all(exit_code == salt.defaults.exitcodes.EX_OK for exit_code in retcodes): sys.stderr.write('ERROR: Minions returned with non-zero exit code\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC) except (AuthenticationError, AuthorizationError, SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc: ret = six.text_type(exc) self._output_ret(ret, '', retcode=1)
Execute the salt command line
def fig16(): """The network shown in Figure 5B of the 2014 IIT 3.0 paper.""" tpm = np.array([ [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [1, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 0, 0, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1, 0], [1, 0, 0, 0, 0, 1, 0], [1, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 1, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 1, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1, 0], [1, 0, 0, 0, 0, 1, 0], [1, 0, 1, 0, 1, 1, 0], [1, 0, 1, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 1, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 0, 0, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1, 0], [1, 1, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1], [0, 0, 1, 0, 1, 1, 1], [1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1, 1], [1, 0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1, 1, 1], [1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1, 1] ]) cm = np.array([ [0, 1, 1, 0, 1, 0, 0], [1, 0, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 1, 1] ]) return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
The network shown in Figure 5B of the 2014 IIT 3.0 paper.
def sanitize_type(raw_type): """Sanitize the raw type string.""" cleaned = get_printable(raw_type).strip() for bad in [ r'__drv_aliasesMem', r'__drv_freesMem', r'__drv_strictTypeMatch\(\w+\)', r'__out_data_source\(\w+\)', r'_In_NLS_string_\(\w+\)', r'_Frees_ptr_', r'_Frees_ptr_opt_', r'opt_', r'\(Mem\) ' ]: cleaned = re.sub(bad, '', cleaned).strip() if cleaned in ['_EXCEPTION_RECORD *', '_EXCEPTION_POINTERS *']: cleaned = cleaned.strip('_') cleaned = cleaned.replace('[]', '*') return cleaned
Sanitize the raw type string.
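Two worked inputs, assuming get_printable simply returns the string it is given:

print(sanitize_type('_Frees_ptr_opt_ LPWSTR'))   # SAL annotation stripped -> 'LPWSTR'
print(sanitize_type('HANDLE []'))                # array syntax rewritten  -> 'HANDLE *'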