def _refresh(self, session, stopping=False):
    '''Get this task's current state.

    This must be called under the registry's lock.  It updates the
    :attr:`finished` and :attr:`failed` flags and the :attr:`data`
    dictionary based on the current state in the registry.  In the
    normal case, nothing will change and this function will return
    normally.  If it turns out that the work unit is already finished,
    the state of this object will change before
    :exc:`rejester.exceptions.LostLease` is raised.

    :param session: locked registry session
    :param stopping: don't raise if the work unit is finished
    :raises rejester.exceptions.LostLease: if this worker is no longer
      doing this work unit

    '''
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _FINISHED, self.key)
    if data is not None:
        self.finished = True
        self.data = data
        if not stopping:
            raise LostLease('work unit is already finished')
        return
    self.finished = False
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _FAILED, self.key)
    if data is not None:
        self.failed = True
        self.data = data
        if not stopping:
            raise LostLease('work unit has already failed')
        return
    self.failed = False
    # (You need a pretty specific sequence of events to get here)
    data = session.get(
        WORK_UNITS_ + self.work_spec_name + _BLOCKED, self.key)
    if data is not None:
        self.data = data
        raise LostLease('work unit now blocked by others')
    worker_id = session.get(
        WORK_UNITS_ + self.work_spec_name + '_locks', self.key)
    if worker_id != self.worker_id:
        # interpolate explicitly; exceptions do not lazy-format like log calls
        raise LostLease('work unit claimed by %r' % (worker_id,))
    # NB: We could check the priority here, but don't.
    # If at this point we're technically overtime but nobody
    # else has started doing work yet, since we're under the
    # global lock, we can get away with finishing whatever
    # transition we were going to try to do.
    data = session.get(
        WORK_UNITS_ + self.work_spec_name, self.key)
    if data is None:
        raise NoSuchWorkUnitError('work unit is gone')
    # Since we should still own the work unit, any changes
    # in data should be on our end; do not touch it
    return
Get this task's current state. This must be called under the registry's lock. It updates the :attr:`finished` and :attr:`failed` flags and the :attr:`data` dictionary based on the current state in the registry. In the normal case, nothing will change and this function will return normally. If it turns out that the work unit is already finished, the state of this object will change before :exc:`rejester.exceptions.LostLease` is raised. :param session: locked registry session :param stopping: don't raise if the work unit is finished :raises rejester.exceptions.LostLease: if this worker is no longer doing this work unit
def this_week_day(base_date, weekday):
    """
    Finds coming weekday
    """
    day_of_week = base_date.weekday()
    # If today is Tuesday and the query is `this monday`
    # We should output the next_week monday
    if day_of_week > weekday:
        return next_week_day(base_date, weekday)
    start_of_this_week = base_date - timedelta(days=day_of_week + 1)
    day = start_of_this_week + timedelta(days=1)
    while day.weekday() != weekday:
        day = day + timedelta(days=1)
    return day
Finds coming weekday
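The date arithmetic is easier to see in a standalone sketch (this does not use the helper above or its `next_week_day` fallback; it simply computes the same "coming weekday" directly):

from datetime import date, timedelta

def coming_weekday(base_date, weekday):
    # Days to add until the requested weekday (Monday == 0); 0 means today.
    delta = (weekday - base_date.weekday()) % 7
    return base_date + timedelta(days=delta)

# Tuesday 2024-01-02, asking for Monday (0), rolls over to next week's Monday.
print(coming_weekday(date(2024, 1, 2), 0))  # 2024-01-08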
def clear_vdp_vsi(self, port_uuid):
    """Clears the vNIC specific info stored for VDP refresh.

    :param port_uuid: vNIC UUID
    """
    try:
        LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
                  {'mac': self.vdp_vif_map[port_uuid].get('mac'),
                   'uuid': self.vdp_vif_map[port_uuid].get('vsiid')})
        del self.vdp_vif_map[port_uuid]
    except Exception:
        LOG.error("VSI does not exist")
    self.clear_oui(port_uuid)
Clears the vNIC specific info stored for VDP refresh. :param port_uuid: vNIC UUID
def verify_jwt_in_request_optional():
    """
    Optionally check if this request has a valid access token.  If an access
    token is present in the request,
    :func:`~flask_jwt_extended.get_jwt_identity` will return the identity of
    the access token. If no access token is present in the request, this simply
    returns, and :func:`~flask_jwt_extended.get_jwt_identity` will return
    `None` instead.

    If there is an invalid access token in the request (expired, tampered with,
    etc), this will still raise the appropriate exception.
    """
    try:
        if request.method not in config.exempt_methods:
            jwt_data = _decode_jwt_from_request(request_type='access')
            ctx_stack.top.jwt = jwt_data
            verify_token_claims(jwt_data)
            _load_user(jwt_data[config.identity_claim_key])
    except (NoAuthorizationError, InvalidHeaderError):
        pass
Optionally check if this request has a valid access token. If an access token is present in the request, :func:`~flask_jwt_extended.get_jwt_identity` will return the identity of the access token. If no access token is present in the request, this simply returns, and :func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead. If there is an invalid access token in the request (expired, tampered with, etc), this will still raise the appropriate exception.
def info(self, category_id, store_view=None, attributes=None):
    """
    Retrieve Category details

    :param category_id: ID of category to retrieve
    :param store_view: Store view ID or code
    :param attributes: Return the fields specified
    :return: Dictionary of data
    """
    return self.call(
        'catalog_category.info', [category_id, store_view, attributes]
    )
Retrieve Category details :param category_id: ID of category to retrieve :param store_view: Store view ID or code :param attributes: Return the fields specified :return: Dictionary of data
def process_delivery(message, notification):
    """Function to process a delivery notification"""
    mail = message['mail']
    delivery = message['delivery']

    if 'timestamp' in delivery:
        delivered_datetime = clean_time(delivery['timestamp'])
    else:
        delivered_datetime = None

    deliveries = []
    for eachrecipient in delivery['recipients']:
        # Create each delivery
        deliveries += [Delivery.objects.create(
            sns_topic=notification['TopicArn'],
            sns_messageid=notification['MessageId'],
            mail_timestamp=clean_time(mail['timestamp']),
            mail_id=mail['messageId'],
            mail_from=mail['source'],
            address=eachrecipient,
            # delivery
            delivered_time=delivered_datetime,
            processing_time=int(delivery['processingTimeMillis']),
            smtp_response=delivery['smtpResponse']
        )]

    # Send signals for each delivery.
    for eachdelivery in deliveries:
        signals.feedback.send(
            sender=Delivery,
            instance=eachdelivery,
            message=message,
            notification=notification
        )

    logger.info('Logged %s Delivery(s)', str(len(deliveries)))
    return HttpResponse('Delivery Processed')
Function to process a delivery notification
def _ns_query(self, session):
    """
    Return a SQLAlchemy query that is already namespaced by the app and
    namespace given to this backend during initialization.

    Returns: a SQLAlchemy query object
    """
    return session.query(ORMJob).filter(ORMJob.app == self.app,
                                        ORMJob.namespace == self.namespace)
Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend during initialization. Returns: a SQLAlchemy query object
def parse_alert(output):
    """
    Parses the supplied output and yields any alerts.

    Example alert format:
    01/28/14-22:26:04.885446  [**] [1:1917:11] INDICATOR-SCAN UPnP service discover attempt [**] [Classification: Detection of a Network Scan] [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900

    :param output: A string containing the output of running snort
    :returns: Generator of snort alert dicts
    """
    for x in output.splitlines():
        match = ALERT_PATTERN.match(x)
        if match:
            rec = {'timestamp': datetime.strptime(match.group('timestamp'),
                                                  '%m/%d/%y-%H:%M:%S.%f'),
                   'sid': int(match.group('sid')),
                   'revision': int(match.group('revision')),
                   'priority': int(match.group('priority')),
                   'message': match.group('message'),
                   'source': match.group('src'),
                   'destination': match.group('dest'),
                   'protocol': match.group('protocol'),
                   }
            if match.group('classtype'):
                rec['classtype'] = match.group('classtype')
            yield rec
Parses the supplied output and yields any alerts. Example alert format: 01/28/14-22:26:04.885446 [**] [1:1917:11] INDICATOR-SCAN UPnP service discover attempt [**] [Classification: Detection of a Network Scan] [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900 :param output: A string containing the output of running snort :returns: Generator of snort alert dicts
def ping(self):
    """
    Check server is alive over HTTP
    """
    status, _, body = self._request('GET', self.ping_path())
    return (status is not None) and (bytes_to_str(body) == 'OK')
Check server is alive over HTTP
def CompleteHuntIfExpirationTimeReached(hunt_obj):
    """Marks the hunt as complete if it's past its expiry time."""
    # TODO(hanuszczak): This should not set the hunt state to `COMPLETED` but
    # we should have a separate `EXPIRED` state instead and set that.
    if (hunt_obj.hunt_state not in [
            rdf_hunt_objects.Hunt.HuntState.STOPPED,
            rdf_hunt_objects.Hunt.HuntState.COMPLETED
    ] and hunt_obj.expired):
        StopHunt(hunt_obj.hunt_id, reason="Hunt completed.")

        data_store.REL_DB.UpdateHuntObject(
            hunt_obj.hunt_id, hunt_state=hunt_obj.HuntState.COMPLETED)
        return data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id)

    return hunt_obj
Marks the hunt as complete if it's past its expiry time.
def parse(self, data):
    """
    Converts a CNML structure to a NetworkX Graph object
    which is then returned.
    """
    graph = self._init_graph()
    # loop over links and create networkx graph
    # Add only working nodes with working links
    for link in data.get_inner_links():
        if link.status != libcnml.libcnml.Status.WORKING:
            continue
        interface_a, interface_b = link.getLinkedInterfaces()
        source = interface_a.ipv4
        dest = interface_b.ipv4
        # add link to Graph
        graph.add_edge(source, dest, weight=1)
    return graph
Converts a CNML structure to a NetworkX Graph object which is then returned.
def authorize(self, email, permission_type='read', cloud=None, api_key=None,
              version=None, **kwargs):
    """
    This API endpoint allows you to authorize another user to access your
    model in a read or write capacity. Before calling authorize, you must
    first make sure your model has been registered.

    Inputs:
    email - String: The email of the user you would like to share access with.
    permission_type (optional) - String: One of ['read', 'write']. Users with
      read permissions can only call `predict`. Users with `write` permissions
      can add new input examples and train models.
    api_key (optional) - String: Your API key, required only if the key has
      not been declared elsewhere. This allows the API to recognize a request
      as yours and automatically route it to the appropriate destination.
    cloud (optional) - String: Your private cloud domain, required only if the
      key has not been declared elsewhere. This allows the API to recognize a
      request as yours and automatically route it to the appropriate
      destination.
    """
    kwargs['permission_type'] = permission_type
    kwargs['email'] = email
    url_params = {"batch": False, "api_key": api_key, "version": version,
                  "method": "authorize"}
    return self._api_handler(None, cloud=cloud, api="custom",
                             url_params=url_params, **kwargs)
This API endpoint allows you to authorize another user to access your model in a read or write capacity. Before calling authorize, you must first make sure your model has been registered. Inputs: email - String: The email of the user you would like to share access with. permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`. Users with `write` permissions can add new input examples and train models. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
def keyEvent(self, key, down=1):
    """For most ordinary keys, the "keysym" is the same as the corresponding
    ASCII value.  Other common keys are shown in the KEY_ constants."""
    self.transport.write(pack("!BBxxI", 4, down, key))
For most ordinary keys, the "keysym" is the same as the corresponding ASCII value. Other common keys are shown in the KEY_ constants.
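For reference, a standalone sketch of the 8-byte message that pack call produces (the message-type byte 4 suggests the RFB/VNC KeyEvent layout):

from struct import pack

# "!BBxxI": network byte order, message-type byte, down-flag byte,
# two padding bytes, then the 32-bit keysym.
msg = pack("!BBxxI", 4, 1, ord("A"))
print(len(msg), msg.hex())  # 8 0401000000000041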
def roc_curve(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
              xlabel="Probability of False Detection",
              ylabel="Probability of Detection",
              title="ROC Curve", ticks=np.arange(0, 1.1, 0.1), dpi=300,
              legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5),
              label_fontsize=14, title_fontsize=16, tick_fontsize=12):
    """
    Plots a set of receiver/relative operating characteristic (ROC) curves
    from DistributedROC objects.

    The ROC curve shows how well a forecast discriminates between two outcomes
    over a series of thresholds. It features Probability of Detection (True
    Positive Rate) on the y-axis and Probability of False Detection (False
    Alarm Rate) on the x-axis. This plotting function allows one to customize
    the colors and markers of the ROC curves as well as the parameters of the
    legend and the title.

    Args:
        roc_objs (list): DistributedROC objects being plotted.
        obj_labels (list): Label describing the forecast associated with a
            DistributedROC object.
        colors (list): List of matplotlib-readable colors (names or
            hex-values) for each curve.
        markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
        filename (str): Name of figure file being saved.
        figsize (tuple): (Width, height) of the figure in inches.
        xlabel (str): Label for the x-axis.
        ylabel (str): Label for the y-axis.
        title (str): The title of the figure.
        ticks (numpy.ndarray): Values shown on the x and y axes.
        dpi (int): Figure resolution in dots per inch.
        legend_params (None, dict): Keyword arguments for the formatting of
            the figure legend.
        bootstrap_sets (list): List of lists of DistributedROC objects that
            were bootstrap resampled for each model.
        ci (tuple of 2 floats): Quantiles of the edges of the bootstrap
            confidence intervals ranging from 0 to 100.
        label_fontsize (int): Font size of the x and y axis labels.
        title_fontsize (int): Font size of the title.
        tick_fontsize (int): Font size of the x and y tick labels.

    Examples:

        >>> from hagelslag.evaluation import DistributedROC
        >>> import numpy as np
        >>> forecasts = np.random.random(1000)
        >>> obs = np.random.random_integers(0, 1, 1000)
        >>> roc = DistributedROC()
        >>> roc.update(forecasts, obs)
        >>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png")
    """
    if legend_params is None:
        legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True)
    plt.figure(figsize=figsize, dpi=dpi)
    plt.plot(ticks, ticks, "k--")
    if bootstrap_sets is not None:
        for b, b_set in enumerate(bootstrap_sets):
            broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in b_set])
            pod_range = np.percentile(broc_curves[:, 0], ci, axis=1)
            pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1)
            pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
            pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1]))
            pod_poly[np.isnan(pod_poly)] = 0
            pofd_poly[np.isnan(pofd_poly)] = 0
            plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b])
    for r, roc_obj in enumerate(roc_objs):
        roc_data = roc_obj.roc_curve()
        plt.plot(roc_data["POFD"], roc_data["POD"], marker=markers[r],
                 color=colors[r], label=obj_labels[r])
    plt.xlabel(xlabel, fontsize=label_fontsize)
    plt.ylabel(ylabel, fontsize=label_fontsize)
    plt.xticks(ticks, fontsize=tick_fontsize)
    plt.yticks(ticks, fontsize=tick_fontsize)
    plt.title(title, fontsize=title_fontsize)
    plt.legend(**legend_params)
    plt.savefig(filename, dpi=dpi, bbox_inches="tight")
    plt.close()
Plots a set receiver/relative operating characteristic (ROC) curves from DistributedROC objects. The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection (False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the ROC curves as well as the parameters of the legend and the title. Args: roc_objs (list): DistributedROC objects being plotted. obj_labels (list): Label describing the forecast associated with a DistributedROC object. colors (list): List of matplotlib-readable colors (names or hex-values) for each curve. markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve. filename (str): Name of figure file being saved. figsize (tuple): (Width, height) of the figure in inches. xlabel (str): Label for the x-axis. ylabel (str): Label for the y-axis. title (str): The title of the figure. ticks (numpy.ndarray): Values shown on the x and y axes. dpi (int): Figure resolution in dots per inch. legend_params (None, dict): Keyword arguments for the formatting of the figure legend. bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model. ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100. label_fontsize (int): Font size of the x and y axis labels. title_fontsize (int): Font size of the title. tick_fontsize (int): Font size of the x and y tick labels. Examples: >>> from hagelslag.evaluation import DistributedROC >>> import numpy as np >>> forecasts = np.random.random(1000) >>> obs = np.random.random_integers(0, 1, 1000) >>> roc = DistributedROC() >>> roc.update(forecasts, obs) >>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png")
def should_expand(self, tag):
    """Return whether the specified tag should be expanded."""
    return self.indentation is not None and tag and (
        not self.previous_indent or (
            tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound')
        ) or (
            tag.serializer == 'compound'
        )
    )
Return whether the specified tag should be expanded.
def get_undefined_namespace_names(graph: BELGraph, namespace: str) -> Set[str]:
    """Get the names from a namespace that wasn't actually defined.

    :return: The set of all names from the undefined namespace
    """
    return {
        exc.name
        for _, exc, _ in graph.warnings
        if isinstance(exc, UndefinedNamespaceWarning) and exc.namespace == namespace
    }
Get the names from a namespace that wasn't actually defined. :return: The set of all names from the undefined namespace
def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
                 X_feat_valid, X_seq_valid, y_valid,
                 graph, var, other_var,
                 early_stop_patience=None, n_cores=3):
    """
    Train the actual model.

    Updates weights / variables, computes and returns the training and
    validation accuracy.
    """
    tic = time.time()
    # take out the parameters for convenience
    n_epochs = self._param["n_epochs"]
    print_every = self._param["print_every"]
    step_size = self._param["step_size"]
    num_steps = n_epochs
    print('Number of epochs:', n_epochs)
    # print("Number of steps per epoch:", num_steps)
    # print("Number of total steps:", num_steps * n_epochs)

    # move into the graph and start the model
    loss_history = []
    train_acc_vec = []
    valid_acc_vec = []
    step_history = []
    with tf.Session(graph=graph, config=tf.ConfigProto(
            use_per_session_threads=True,
            inter_op_parallelism_threads=n_cores,
            intra_op_parallelism_threads=n_cores)) as sess:
        sess.run(other_var["init"])
        best_performance = None
        best_performance_epoch = 0
        for step in range(n_epochs):
            # run the model (sess.run)
            # compute the optimizer, loss and train_prediction in the graph
            # save the last two as l and predictions
            # put the data into TF form:
            feed_dict = {other_var["tf_X_seq"]: X_seq_train,
                         other_var["tf_y"]: y_train,
                         other_var["tf_X_feat"]: X_feat_train,
                         other_var["tf_step_size"]: step_size}
            # run the optimizer
            # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/opt/python/training/external_optimizer.py#L115
            other_var["optimizer"].minimize(sess, feed_dict=feed_dict)
            l = sess.run(other_var["loss"], feed_dict=feed_dict)
            loss_history.append(l)  # keep storing the full loss history

            # sometimes print the actual training prediction (l)
            if (step % print_every == 0):
                train_accuracy = self._accuracy_in_session(sess, other_var,
                                                           X_feat_train, X_seq_train, y_train)
                valid_accuracy = self._accuracy_in_session(sess, other_var,
                                                           X_feat_valid, X_seq_valid, y_valid)
                # append the prediction accuracies
                train_acc_vec.append(train_accuracy)
                valid_acc_vec.append(valid_accuracy)
                step_history.append(step / num_steps)
                print('Step %4d: loss %f, train mse: %f, validation mse: %f' %
                      (step, l, train_accuracy, valid_accuracy))

                # check if this is the best accuracy
                if best_performance is None or valid_accuracy <= best_performance:
                    best_performance = valid_accuracy
                    best_performance_epoch = step

                if early_stop_patience is not None and \
                        step > best_performance_epoch + early_stop_patience:
                    print("Early stopping.\nbest_performance_epoch: %d, best_performance: %f" %
                          (best_performance_epoch, best_performance))
                    break

        # get the test accuracies
        train_accuracy_final = self._accuracy_in_session(sess, other_var,
                                                         X_feat_train, X_seq_train, y_train)
        valid_accuracy_final = self._accuracy_in_session(sess, other_var,
                                                         X_feat_valid, X_seq_valid, y_valid)
        print('Validation accuracy final: %f' % valid_accuracy_final)

        # store the fitted weights
        var_res = self._get_var_res_sess(sess, var)

        # store also the quasi splines fit
        if self._param["n_splines"] is not None:
            self._splines["quasi_X"] = [self._predict_in_session(sess, other_var,
                                                                 X_feat_train[i:(i + 1)],
                                                                 X_seq_train[i:(i + 1)],
                                                                 variable="spline_quasi_X")
                                        for i in range(X_feat_train.shape[0])]
            # transform into the appropriate form
            self._splines["quasi_X"] = np.concatenate([x[0][np.newaxis]
                                                       for x in self._splines["quasi_X"]])

    accuracy = {
        "loss_history": np.array(loss_history),
        "step_history": np.array(step_history),
        "train_acc_history": np.array(train_acc_vec),
        "val_acc_history": np.array(valid_acc_vec),
        "train_acc_final": train_accuracy_final,
        "val_acc_final": valid_accuracy_final,
        "best_val_acc": best_performance,
        "best_val_acc_epoch": best_performance_epoch,
        "test_acc_final": None,     # test_accuracy_final,
        "y_test": None,             # y_test,
        "y_test_prediction": None,  # test_prediction.eval(),
        "id_vec_test": None         # id_vec_test
    }
    self._accuracy = accuracy

    toc = time.time()
    exec_time = toc - tic
    self._exec_time = exec_time
    print('That took %fs' % exec_time)
    # weights = {"motif_base_weights": motif_base_weights,
    #            "motif_weights": motif_weights,
    #            "motif_bias": motif_bias,
    #            "final_bias": final_bias,
    #            "feature_weights": feature_weights,
    #            "spline_pred": spline_pred}
    return var_res
Train the actual model. Updates weights / variables, computes and returns the training and validation accuracy
def get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range=None,
               id_list=[], no_cache=None, access_mode=CacheMode.no_cache, **kwargs):
    """Get a cutout from the volume service.

    Note that access_mode=no_cache is desirable when reading large amounts of
    data at once.  In these cases, the data is not first read into the cache,
    but instead, is sent directly from the data store to the requester.

    Args:
        resource (intern.resource.boss.resource.ChannelResource | str): Channel
            or layer Resource.  If a string is provided instead,
            BossRemote.parse_bossURI is called instead on a URI-formatted
            string of the form `bossdb://collection/experiment/channel`.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
        y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
        z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
        time_range (optional [list[int]]): time range such as [30, 40] which
            means t>=30 and t<40.
        id_list (optional [list[int]]): list of object ids to filter the cutout by.
        no_cache (optional [boolean or None]): Deprecated way to specify the use
            of cache to be True or False.  access_mode should be used instead
        access_mode (optional [Enum]): Identifies one of three cache access options:
            cache = Will check both cache and for dirty keys
            no_cache = Will skip cache check but check for dirty keys
            raw = Will skip both the cache and dirty keys check
            TODO: Add mode to documentation

    Returns:
        (numpy.array): A 3D or 4D (time) numpy matrix in (time)ZYX order.

    Raises:
        requests.HTTPError on error.
    """
    if no_cache is not None:
        warnings.warn("The no-cache option has been deprecated and will not be used in future versions of intern.")
        warnings.warn("Please from intern.service.boss.volume import CacheMode and use access_mode=CacheMode.[cache,no-cache,raw] instead.")

    if no_cache and access_mode != CacheMode.no_cache:
        warnings.warn("Both no_cache and access_mode were used, please use access_mode only. As no_cache has been deprecated. ")
        warnings.warn("Your request will be made using the default mode no_cache.")
        access_mode = CacheMode.no_cache

    if no_cache:
        access_mode = CacheMode.no_cache
    elif no_cache == False:
        access_mode = CacheMode.cache

    return self._volume.get_cutout(resource, resolution, x_range, y_range, z_range,
                                   time_range, id_list, access_mode, **kwargs)
Get a cutout from the volume service. Note that access_mode=no_cache is desirable when reading large amounts of data at once. In these cases, the data is not first read into the cache, but instead, is sent directly from the data store to the requester. Args: resource (intern.resource.boss.resource.ChannelResource | str): Channel or layer Resource. If a string is provided instead, BossRemote.parse_bossURI is called instead on a URI-formatted string of the form `bossdb://collection/experiment/channel`. resolution (int): 0 indicates native resolution. x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20. y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20. z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20. time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40. id_list (optional [list[int]]): list of object ids to filter the cutout by. no_cache (optional [boolean or None]): Deprecated way to specify the use of cache to be True or False. access_mode should be used instead access_mode (optional [Enum]): Identifies one of three cache access options: cache = Will check both cache and for dirty keys no_cache = Will skip cache check but check for dirty keys raw = Will skip both the cache and dirty keys check TODO: Add mode to documentation Returns: (numpy.array): A 3D or 4D (time) numpy matrix in (time)ZYX order. Raises: requests.HTTPError on error.
def label_position(self):
    '''
    Find the largest region and position the label in that.
    '''
    reg_sizes = [(r.size(), r) for r in self.pieces]
    reg_sizes.sort()
    return reg_sizes[-1][1].label_position()
Find the largest region and position the label in that.
def get_function_for_cognito_trigger(self, trigger):
    """
    Get the associated function to execute for a cognito trigger
    """
    print("get_function_for_cognito_trigger",
          self.settings.COGNITO_TRIGGER_MAPPING, trigger,
          self.settings.COGNITO_TRIGGER_MAPPING.get(trigger))
    return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)
Get the associated function to execute for a cognito trigger
def add_job(self, idx):
    """Called after self.targets[idx] just got the job with header.
    Override with subclasses.  The default ordering is simple LRU.
    The default loads are the number of outstanding jobs."""
    self.loads[idx] += 1
    for lis in (self.targets, self.loads):
        lis.append(lis.pop(idx))
Called after self.targets[idx] just got the job with header. Override with subclasses. The default ordering is simple LRU. The default loads are the number of outstanding jobs.
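A standalone sketch of the LRU rotation used above: the target that just received a job is rotated to the end of both parallel lists so it is picked last next time.

targets = ["engine-a", "engine-b", "engine-c"]
loads = [0, 0, 0]

idx = 1                      # engine-b just got a job
loads[idx] += 1
for lis in (targets, loads):
    lis.append(lis.pop(idx))

print(targets)  # ['engine-a', 'engine-c', 'engine-b']
print(loads)    # [0, 0, 1]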
def read(self, source=None, **options):
    '''
    Reads and optionally parses a single message.

    :Parameters:
     - `source` - optional data buffer to be read, if not specified data is
       read from the wrapped stream

    :Options:
     - `raw` (`boolean`) - indicates whether read data should be parsed or
       returned in raw byte form
     - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
       backed by raw q representation (:class:`.QTemporalList`,
       :class:`.QTemporal`) instances, otherwise are represented as
       `numpy datetime64`/`timedelta64` arrays and atoms,
       **Default**: ``False``

    :returns: :class:`.QMessage` - read data (parsed or raw byte form) along
              with meta information
    '''
    message = self.read_header(source)
    message.data = self.read_data(message.size, message.is_compressed, **options)
    return message
Reads and optionally parses a single message. :Parameters: - `source` - optional data buffer to be read, if not specified data is read from the wrapped stream :Options: - `raw` (`boolean`) - indicates whether read data should be parsed or returned in raw byte form - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` :returns: :class:`.QMessage` - read data (parsed or raw byte form) along with meta information
def dbg_print_irsb(self, irsb_addr, project=None):
    """
    Pretty-print an IRSB with whitelist information
    """
    if project is None:
        project = self._project

    if project is None:
        raise Exception("Dict addr_to_run is empty. " +
                        "Give me a project, and I'll recreate the IRSBs for you.")
    else:
        vex_block = project.factory.block(irsb_addr).vex

    statements = vex_block.statements
    whitelist = self.get_whitelisted_statements(irsb_addr)
    for i in range(0, len(statements)):
        if whitelist is True or i in whitelist:
            line = "+"
        else:
            line = "-"
        line += "[% 3d] " % i
        # We cannot get data returned by pp(). WTF?
        print(line, end='')
        statements[i].pp()
Pretty-print an IRSB with whitelist information
def pass_to_pipeline_if_article(
        self, response, source_domain, original_url, rss_title=None
):
    """
    Responsible for passing a NewscrawlerItem to the pipeline if the
    response contains an article.

    :param obj response: the scrapy response to work on
    :param str source_domain: the response's domain as set for the crawler
    :param str original_url: the url set in the json file
    :param str rss_title: the title extracted by an rssCrawler
    :return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
    """
    if self.helper.heuristics.is_article(response, original_url):
        return self.pass_to_pipeline(
            response, source_domain, rss_title=None)
Responsible for passing a NewscrawlerItem to the pipeline if the response contains an article. :param obj response: the scrapy response to work on :param str source_domain: the response's domain as set for the crawler :param str original_url: the url set in the json file :param str rss_title: the title extracted by an rssCrawler :return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
    """Plots orthogonal expression patterns.

    In the default mode, plots orthogonal gene expression patterns. A
    specific correlated group of genes can be specified to plot gene
    expression patterns within that group.

    Parameters
    ----------
    group - int, optional, default None
        If specified, display the genes within the desired correlated group.
        Otherwise, display the top ranked gene within each distinct correlated
        group.

    n_genes - int, optional, default 5
        The number of top ranked genes to display within a correlated group
        if 'group' is specified.

    **kwargs -
        All keyword arguments in 'show_gene_expression' and 'scatter'
        are eligible.
    """
    geneID_groups = self.adata.uns['gene_groups']
    if group is None:
        for i in range(len(geneID_groups)):
            self.show_gene_expression(geneID_groups[i][0], **kwargs)
    else:
        for i in range(n_genes):
            self.show_gene_expression(geneID_groups[group][i], **kwargs)
Plots orthogonal expression patterns. In the default mode, plots orthogonal gene expression patterns. A specific correlated group of genes can be specified to plot gene expression patterns within that group. Parameters ---------- group - int, optional, default None If specified, display the genes within the desired correlated group. Otherwise, display the top ranked gene within each distinct correlated group. n_genes - int, optional, default 5 The number of top ranked genes to display within a correlated group if 'group' is specified. **kwargs - All keyword arguments in 'show_gene_expression' and 'scatter' are eligible.
def _should_retry(exc):
    """Predicate for determining when to retry.

    We retry if and only if the 'reason' is 'backendError'
    or 'rateLimitExceeded'.
    """
    if not hasattr(exc, "errors"):
        return False
    if len(exc.errors) == 0:
        # Check for unstructured error returns, e.g. from GFE
        return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
    reason = exc.errors[0]["reason"]
    return reason in _RETRYABLE_REASONS
Predicate for determining when to retry. We retry if and only if the 'reason' is 'backendError' or 'rateLimitExceeded'.
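A minimal check of the predicate's behaviour using stand-in exception objects (simplified sketch: it drops the unstructured-error branch, and `_RETRYABLE_REASONS` simply mirrors the reasons named in the docstring):

class FakeApiError(Exception):
    def __init__(self, errors):
        super().__init__("api error")
        self.errors = errors

_RETRYABLE_REASONS = frozenset(["backendError", "rateLimitExceeded"])

def should_retry(exc):
    errors = getattr(exc, "errors", None)
    if not errors:
        return False
    return errors[0]["reason"] in _RETRYABLE_REASONS

print(should_retry(FakeApiError([{"reason": "backendError"}])))  # True
print(should_retry(FakeApiError([{"reason": "notFound"}])))      # False
print(should_retry(ValueError("no errors attribute")))           # False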
def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Use scp or sftp to copy a file to a server
    '''
    file_to_upload = None
    try:
        if contents is not None:
            try:
                tmpfd, file_to_upload = tempfile.mkstemp()
                os.write(tmpfd, contents)
            finally:
                try:
                    os.close(tmpfd)
                except OSError as exc:
                    if exc.errno != errno.EBADF:
                        raise exc

        log.debug('Uploading %s to %s', dest_path, kwargs['hostname'])

        ssh_args = [
            # Don't add new hosts to the host key database
            '-oStrictHostKeyChecking=no',
            # Set hosts key database path to /dev/null, i.e., non-existing
            '-oUserKnownHostsFile=/dev/null',
            # Don't re-use the SSH connection. Less failures.
            '-oControlPath=none'
        ]

        if local_file is not None:
            file_to_upload = local_file
            if os.path.isdir(local_file):
                ssh_args.append('-r')

        if 'key_filename' in kwargs:
            # There should never be both a password and an ssh key passed in, so
            ssh_args.extend([
                # tell SSH to skip password authentication
                '-oPasswordAuthentication=no',
                '-oChallengeResponseAuthentication=no',
                # Make sure public key authentication is enabled
                '-oPubkeyAuthentication=yes',
                # do only use the provided identity file
                '-oIdentitiesOnly=yes',
                # No Keyboard interaction!
                '-oKbdInteractiveAuthentication=no',
                # Also, specify the location of the key file
                '-i {0}'.format(kwargs['key_filename'])
            ])

        if 'port' in kwargs:
            ssh_args.append('-oPort={0}'.format(kwargs['port']))

        ssh_args.append(__ssh_gateway_arguments(kwargs))

        try:
            if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
                ipaddr = '[{0}]'.format(kwargs['hostname'])
            else:
                ipaddr = kwargs['hostname']
        except socket.error:
            ipaddr = kwargs['hostname']

        if file_to_upload is None:
            log.warning(
                'No source file to upload. Please make sure that either file '
                'contents or the path to a local file are provided.'
            )
        cmd = (
            'scp {0} {1} {2[username]}@{4}:{3} || '
            'echo "put {1} {3}" | sftp {0} {2[username]}@{4} || '
            'rsync -avz -e "ssh {0}" {1} {2[username]}@{2[hostname]}:{3}'.format(
                ' '.join(ssh_args), file_to_upload, kwargs, dest_path, ipaddr
            )
        )
        log.debug('SCP command: \'%s\'', cmd)
        retcode = _exec_ssh_cmd(cmd,
                                error_msg='Failed to upload file \'{0}\': {1}\n{2}',
                                password_retries=3,
                                **kwargs)
    finally:
        if contents is not None:
            try:
                os.remove(file_to_upload)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise exc
    return retcode
Use scp or sftp to copy a file to a server
def iter_multi_items(mapping):
    """Iterates over the items of a mapping yielding keys and values
    without dropping any from more complex structures.
    """
    if isinstance(mapping, MultiDict):
        for item in iteritems(mapping, multi=True):
            yield item
    elif isinstance(mapping, dict):
        for key, value in iteritems(mapping):
            if isinstance(value, (tuple, list)):
                for value in value:
                    yield key, value
            else:
                yield key, value
    else:
        for item in mapping:
            yield item
Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures.
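With the function above in scope, a plain dict whose values are lists is flattened into repeated (key, value) pairs:

form = {"tags": ["a", "b"], "name": "demo"}
print(list(iter_multi_items(form)))
# [('tags', 'a'), ('tags', 'b'), ('name', 'demo')]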
def save(self, fname, compression='blosc'):
    """
    Save method for the Egg object

    The data will be saved as a 'egg' file, which is a dictionary containing
    the elements of a Egg saved in the hd5 format using `deepdish`.

    Parameters
    ----------
    fname : str
        A name for the file.  If the file extension (.egg) is not specified,
        it will be appended.

    compression : str
        The kind of compression to use.  See the deepdish documentation for
        options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
    """
    # put egg vars into a dict
    egg = {
        'pres': df2list(self.pres),
        'rec': df2list(self.rec),
        'dist_funcs': self.dist_funcs,
        'subjgroup': self.subjgroup,
        'subjname': self.subjname,
        'listgroup': self.listgroup,
        'listname': self.listname,
        'date_created': self.date_created,
        'meta': self.meta
    }

    # if extension wasn't included, add it
    if fname[-4:] != '.egg':
        fname += '.egg'

    # save
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        dd.io.save(fname, egg, compression=compression)
Save method for the Egg object The data will be saved as a 'egg' file, which is a dictionary containing the elements of a Egg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
def build(self, construct):
    """Build a single construct in CLIPS.

    The Python equivalent of the CLIPS build command.
    """
    if lib.EnvBuild(self._env, construct.encode()) != 1:
        raise CLIPSError(self._env)
Build a single construct in CLIPS. The Python equivalent of the CLIPS build command.
def rename(self, newpath):
    "Move folder to a new name, possibly a whole new path"
    # POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
    #url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
    params = {'mvDir': '/%s%s' % (self.jfs.username, newpath)}
    r = self.jfs.post(self.path,
                      extra_headers={'Content-Type': 'application/octet-stream'},
                      params=params)
    return r
Move folder to a new name, possibly a whole new path
def make_energy_funnel_data(self, cores=1):
    """Compares models created during the minimisation to the best model.

    Returns
    -------
    energy_rmsd_gen: [(float, float, int)]
        A list of triples containing the BUFF score, RMSD to the top model
        and generation of a model generated during the minimisation.
    """
    if not self.parameter_log:
        raise AttributeError(
            'No parameter log data to make funnel, have you run the '
            'optimiser?')
    model_cls = self._params['specification']
    gen_tagged = []
    for gen, models in enumerate(self.parameter_log):
        for model in models:
            gen_tagged.append((model[0], model[1], gen))
    sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
    top_result = sorted_pps[0]
    top_result_model = model_cls(*top_result[0])
    if (cores == 1) or (sys.platform == 'win32'):
        energy_rmsd_gen = map(
            self.funnel_rebuild,
            [(x, top_result_model, self._params['specification'])
             for x in sorted_pps[1:]])
    else:
        with futures.ProcessPoolExecutor(
                max_workers=self._params['processors']) as executor:
            energy_rmsd_gen = executor.map(
                self.funnel_rebuild,
                [(x, top_result_model, self._params['specification'])
                 for x in sorted_pps[1:]])
    return list(energy_rmsd_gen)
Compares models created during the minimisation to the best model. Returns ------- energy_rmsd_gen: [(float, float, int)] A list of triples containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation.
def precmd(self, line):
    """Handle alias expansion and ';;' separator."""
    if not line.strip():
        return line
    args = line.split()
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        ii = 1
        for tmpArg in args[1:]:
            line = line.replace("%" + str(ii), tmpArg)
            ii += 1
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # split into ';;' separated commands
    # unless it's an alias command
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # queue up everything after marker
            next = line[marker + 2:].lstrip()
            self.cmdqueue.append(next)
            line = line[:marker].rstrip()
    return line
Handle alias expansion and ';;' separator.
def getCandScoresMap(self, profile):
    """
    Returns a dictionary that associates integer representations of each
    candidate with their Copeland score.

    :ivar Profile profile: A Profile object that represents an election profile.
    """
    # Currently, we expect the profile to contain complete ordering over
    # candidates. Ties are allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported election type")
        exit()

    # Initialize each Copeland score as 0.0.
    copelandScores = dict()
    for cand in profile.candMap.keys():
        copelandScores[cand] = 0.0

    preferenceCounts = profile.getPreferenceCounts()

    # For each pair of candidates, calculate the number of votes in which one
    # beat the other.
    wmgMap = profile.getWmg()
    for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
        if cand2 in wmgMap[cand1].keys():
            if wmgMap[cand1][cand2] > 0:
                copelandScores[cand1] += 1.0
            elif wmgMap[cand1][cand2] < 0:
                copelandScores[cand2] += 1.0
            # If a pair of candidates is tied, we add alpha to their score for
            # each vote.
            else:
                copelandScores[cand1] += self.alpha
                copelandScores[cand2] += self.alpha

    return copelandScores
Returns a dictionary that associates integer representations of each candidate with their Copeland score. :ivar Profile profile: A Profile object that represents an election profile.
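The pairwise Copeland logic in the loop above can be checked on a toy weighted majority graph (a standalone sketch; `wmg[a][b] > 0` means candidate a beats candidate b by that margin, and alpha is the tie credit):

import itertools

wmg = {1: {2: 3, 3: -1}, 2: {1: -3, 3: 0}, 3: {1: 1, 2: 0}}
alpha = 0.5
scores = {cand: 0.0 for cand in wmg}
for c1, c2 in itertools.combinations(wmg, 2):
    margin = wmg[c1].get(c2, 0)
    if margin > 0:
        scores[c1] += 1.0
    elif margin < 0:
        scores[c2] += 1.0
    else:                      # tie: both candidates get alpha
        scores[c1] += alpha
        scores[c2] += alpha
print(scores)  # {1: 1.0, 2: 0.5, 3: 1.5}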
def part_sum(x, i=0):
    """All subsetsums from x[i:]

    :param x: table of values
    :param int i: index defining suffix of x to be considered
    :iterates: over all values, in arbitrary order
    :complexity: :math:`O(2^{len(x)-i})`
    """
    if i == len(x):
        yield 0
    else:
        for s in part_sum(x, i + 1):
            yield s
            yield s + x[i]
All subsetsums from x[i:] :param x: table of values :param int i: index defining suffix of x to be considered :iterates: over all values, in arbitrary order :complexity: :math:`O(2^{len(x)-i})`
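With part_sum in scope, every subset of the input contributes exactly one sum:

# 2**3 subsets of [1, 2, 4], one sum per subset
print(sorted(part_sum([1, 2, 4])))  # [0, 1, 2, 3, 4, 5, 6, 7]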
def iter_auth_hashes(user, purpose, minutes_valid):
    """
    Generate auth tokens tied to user and specified purpose.

    The hash expires at midnight on the minute of now + minutes_valid, such
    that when minutes_valid=1 you get *at least* 1 minute to use the token.
    """
    now = timezone.now().replace(microsecond=0, second=0)
    for minute in range(minutes_valid + 1):
        yield hashlib.sha1(
            '%s:%s:%s:%s:%s' % (
                now - datetime.timedelta(minutes=minute),
                user.password,
                purpose,
                user.pk,
                settings.SECRET_KEY,
            ),
        ).hexdigest()
Generate auth tokens tied to user and specified purpose. The hash expires at midnight on the minute of now + minutes_valid, such that when minutes_valid=1 you get *at least* 1 minute to use the token.
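A typical way to verify a submitted token against the rolling hashes (a hedged sketch; `check_auth_hash` is a hypothetical helper built on the generator above, not part of the original module):

import hmac

def check_auth_hash(candidate, user, purpose, minutes_valid):
    # Compare against every hash still inside the validity window,
    # using a constant-time comparison.
    return any(
        hmac.compare_digest(candidate, expected)
        for expected in iter_auth_hashes(user, purpose, minutes_valid)
    )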
def remove(self, safe=None):
    """Removes the document itself from database.

    The optional ``safe`` argument is a boolean that specifies if the
    remove method should wait for the operation to complete.
    """
    # pass the caller's ``safe`` flag through to the session
    self._session.remove(self, safe=safe)
    self._session.flush()
Removes the document itself from database. The optional ``safe`` argument is a boolean that specifies if the remove method should wait for the operation to complete.
def images(self):
    '''
    a method to list the local docker images

    :return: list of dictionaries with available image fields

        [{
            'CREATED': '7 days ago',
            'TAG': 'latest',
            'IMAGE ID': '2298fbaac143',
            'VIRTUAL SIZE': '302.7 MB',
            'REPOSITORY': 'test1'
        }]
    '''
    sys_command = 'docker images'
    sys_output = self.command(sys_command)
    image_list = self._images(sys_output)
    return image_list
a method to list the local docker images :return: list of dictionaries with available image fields [ { 'CREATED': '7 days ago', 'TAG': 'latest', 'IMAGE ID': '2298fbaac143', 'VIRTUAL SIZE': '302.7 MB', 'REPOSITORY': 'test1' } ]
def start_notebook(self, name, context: dict, fg=False):
    """Start new IPython Notebook daemon.

    :param name: The owner of the Notebook will be *name*. He/she gets a new
        Notebook content folder created where all files are placed.

    :param context: Extra context information passed to the started Notebook.
        This must contain {context_hash:int} parameter used to identify the
        launch parameters for the notebook
    """
    assert context
    assert type(context) == dict
    assert "context_hash" in context
    assert type(context["context_hash"]) == int

    http_port = self.pick_port()
    assert http_port

    context = context.copy()
    context["http_port"] = http_port

    # We can't proxy websocket URLs, so let them go directly through localhost
    # or have front end server to do proxying (nginx)
    if "websocket_url" not in context:
        context["websocket_url"] = "ws://localhost:{port}".format(port=http_port)

    if "{port}" in context["websocket_url"]:
        # Do port substitution for the websocket URL
        context["websocket_url"] = context["websocket_url"].format(port=http_port)

    pid = self.get_pid(name)
    assert "terminated" not in context
    comm.set_context(pid, context)

    if fg:
        self.exec_notebook_daemon_command(name, "fg", port=http_port)
    else:
        self.exec_notebook_daemon_command(name, "start", port=http_port)
Start new IPython Notebook daemon. :param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed. :param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook
def dafopw(fname):
    """
    Open a DAF for subsequent write requests.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafopw_c.html

    :param fname: Name of DAF to be opened.
    :type fname: str
    :return: Handle assigned to DAF.
    :rtype: int
    """
    fname = stypes.stringToCharP(fname)
    handle = ctypes.c_int()
    libspice.dafopw_c(fname, ctypes.byref(handle))
    return handle.value
Open a DAF for subsequent write requests. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafopw_c.html :param fname: Name of DAF to be opened. :type fname: str :return: Handle assigned to DAF. :rtype: int
def visit_attribute(self, node):
    """check if the getattr is an access to a class member
    if so, register it.  Also check for access to protected
    class member from outside its class (but ignore __special__ methods)
    """
    # Check self
    if self._uses_mandatory_method_param(node):
        self._accessed.set_accessed(node)
        return
    if not self.linter.is_message_enabled("protected-access"):
        return

    self._check_protected_attribute_access(node)
check if the getattr is an access to a class member if so, register it. Also check for access to protected class member from outside its class (but ignore __special__ methods)
def get_logger(name=None, level=logging.DEBUG, stream=None):
    """returns a colorized logger.

    This function can be used just like :py:func:`logging.getLogger` except
    you can set the level right away."""
    logger = logging.getLogger(name)
    colored = colorize_logger(logger, stream=stream, level=level)
    return colored
returns a colorized logger. This function can be used just like :py:func:`logging.getLogger` except you can set the level right away.
def __reorganize_chron_header(line):
    """
    Reorganize the list of variables. If there are units given, log them.

    :param str line:
    :return dict: key: variable, val: units (optional)
    """
    d = {}
    # Header variables should be tab-delimited. Use regex to split by tabs
    m = re.split(re_tab_split, line)
    # If there was an output match from the line, then keep going
    if m:
        # Loop once for each variable in the line
        for s in m:
            # Match the variable to the 'variable (units)' regex to look for units
            m2 = re.match(re_var_w_units, s)
            # If there was a match
            if m2:
                # If no units were found, set to blank
                if m2.group(2) is None:
                    d[m2.group(1)] = ""
                # Units were found
                else:
                    # Set both values
                    d[m2.group(1)] = m2.group(2)
    return d
Reorganize the list of variables. If there are units given, log them. :param str line: :return dict: key: variable, val: units (optional)
def is_special_string(obj):
    """Is special string."""
    import bs4
    return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData,
                            bs4.ProcessingInstruction))
Is special string.
def _set_row_label(self, value):
    "Set the row label format string (empty to hide)"
    if not value:
        self.wx_obj.SetRowLabelSize(0)
    else:
        self.wx_obj._table._row_label = value
Set the row label format string (empty to hide)
def get_body(self):
    """Get the body value from the response.

    :return: a future contains the deserialized value of body
    """
    raw_body = yield get_arg(self, 2)
    if not self.serializer:
        raise tornado.gen.Return(raw_body)
    else:
        body = self.serializer.deserialize_body(raw_body)
        raise tornado.gen.Return(body)
Get the body value from the response. :return: a future contains the deserialized value of body
def potential_cloud_layer(self, pcp, water, tlow, land_cloud_prob, land_threshold,
                          water_cloud_prob, water_threshold=0.5):
    """Final step of determining potential cloud layer

    Equation 18 (Zhu and Woodcock, 2012)

    Saturation (green or red) test is not in the algorithm

    Parameters
    ----------
    pcp: ndarray
        potential cloud pixels
    water: ndarray
        water mask
    tlow: float
        low percentile of land temperature
    land_cloud_prob: ndarray
        probability of cloud over land
    land_threshold: float
        cutoff for cloud over land
    water_cloud_prob: ndarray
        probability of cloud over water
    water_threshold: float
        cutoff for cloud over water

    Output
    ------
    ndarray:
        potential cloud layer, boolean
    """
    # Using pcp and water as mask todo
    # change water threshold to dynamic, line 132 in Zhu, 2015 todo
    part1 = (pcp & water & (water_cloud_prob > water_threshold))
    part2 = (pcp & ~water & (land_cloud_prob > land_threshold))
    temptest = self.tirs1 < (tlow - 35)  # 35 degrees C colder

    if self.sat in ['LT5', 'LE7']:
        saturation = self.blue_saturated | self.green_saturated | self.red_saturated
        return part1 | part2 | temptest | saturation
    else:
        return part1 | part2 | temptest
Final step of determining potential cloud layer Equation 18 (Zhu and Woodcock, 2012) Saturation (green or red) test is not in the algorithm Parameters ---------- pcps: ndarray potential cloud pixels water: ndarray water mask tirs1: ndarray tlow: float low percentile of land temperature land_cloud_prob: ndarray probability of cloud over land land_threshold: float cutoff for cloud over land water_cloud_prob: ndarray probability of cloud over water water_threshold: float cutoff for cloud over water Output ------ ndarray: potential cloud layer, boolean
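The mask algebra in part1/part2 is plain NumPy boolean logic; a standalone toy example with four pixels:

import numpy as np

pcp = np.array([True, True, False, True])     # potential cloud pixels
water = np.array([True, False, False, True])  # water mask
land_prob = np.array([0.9, 0.1, 0.8, 0.3])
water_prob = np.array([0.2, 0.9, 0.4, 0.7])

part1 = pcp & water & (water_prob > 0.5)      # cloudy over water
part2 = pcp & ~water & (land_prob > 0.5)      # cloudy over land
print(part1 | part2)                          # [False False False  True]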
def highlight_multiline_block(self, block, start_pattern, end_pattern, state, format):
    """
    Highlights given multiline text block.

    :param block: Text block.
    :type block: QString
    :param start_pattern: Start regex pattern.
    :type start_pattern: QRegExp
    :param end_pattern: End regex pattern.
    :type end_pattern: QRegExp
    :param state: Block state.
    :type state: int
    :param format: Format.
    :type format: QTextCharFormat
    :return: Current block matching state.
    :rtype: bool
    """
    if self.previousBlockState() == state:
        start = 0
        extend = 0
    else:
        start = start_pattern.indexIn(block)
        extend = start_pattern.matchedLength()

    while start >= 0:
        end = end_pattern.indexIn(block, start + extend)
        if end >= extend:
            length = end - start + extend + end_pattern.matchedLength()
            self.setCurrentBlockState(0)
        else:
            self.setCurrentBlockState(state)
            length = block.length() - start + extend
        self.setFormat(start, length, format)
        start = start_pattern.indexIn(block, start + length)

    if self.currentBlockState() == state:
        return True
    else:
        return False
Highlights given multiline text block. :param block: Text block. :type block: QString :param pattern: Start regex pattern. :type pattern: QRegExp :param pattern: End regex pattern. :type pattern: QRegExp :param format: Format. :type format: QTextCharFormat :param state: Block state. :type state: int :return: Current block matching state. :rtype: bool
def _set_default_configuration_options(app):
    """
    Sets the default configuration options used by this extension
    """
    # Where to look for the JWT. Available options are cookies or headers
    app.config.setdefault('JWT_TOKEN_LOCATION', ('headers',))

    # Options for JWTs when the TOKEN_LOCATION is headers
    app.config.setdefault('JWT_HEADER_NAME', 'Authorization')
    app.config.setdefault('JWT_HEADER_TYPE', 'Bearer')

    # Options for JWTs when the TOKEN_LOCATION is query_string
    app.config.setdefault('JWT_QUERY_STRING_NAME', 'jwt')

    # Option for JWTs when the TOKEN_LOCATION is cookies
    app.config.setdefault('JWT_ACCESS_COOKIE_NAME', 'access_token_cookie')
    app.config.setdefault('JWT_REFRESH_COOKIE_NAME', 'refresh_token_cookie')
    app.config.setdefault('JWT_ACCESS_COOKIE_PATH', '/')
    app.config.setdefault('JWT_REFRESH_COOKIE_PATH', '/')
    app.config.setdefault('JWT_COOKIE_SECURE', False)
    app.config.setdefault('JWT_COOKIE_DOMAIN', None)
    app.config.setdefault('JWT_SESSION_COOKIE', True)
    app.config.setdefault('JWT_COOKIE_SAMESITE', None)

    # Option for JWTs when the TOKEN_LOCATION is json
    app.config.setdefault('JWT_JSON_KEY', 'access_token')
    app.config.setdefault('JWT_REFRESH_JSON_KEY', 'refresh_token')

    # Options for using double submit csrf protection
    app.config.setdefault('JWT_COOKIE_CSRF_PROTECT', True)
    app.config.setdefault('JWT_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE'])
    app.config.setdefault('JWT_ACCESS_CSRF_HEADER_NAME', 'X-CSRF-TOKEN')
    app.config.setdefault('JWT_REFRESH_CSRF_HEADER_NAME', 'X-CSRF-TOKEN')
    app.config.setdefault('JWT_CSRF_IN_COOKIES', True)
    app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_NAME', 'csrf_access_token')
    app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_NAME', 'csrf_refresh_token')
    app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_PATH', '/')
    app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_PATH', '/')

    # How long a token will live before it expires.
    app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15))
    app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30))

    # What algorithm to use to sign the token. See here for a list of options:
    # https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py
    app.config.setdefault('JWT_ALGORITHM', 'HS256')

    # Secret key to sign JWTs with. Only used if a symmetric algorithm is
    # used (such as the HS* algorithms). We will use the app secret key
    # if this is not set.
    app.config.setdefault('JWT_SECRET_KEY', None)

    # Keys to sign JWTs with when using an asymmetric
    # (public/private key) algorithm, such as RS* or EC*
    app.config.setdefault('JWT_PRIVATE_KEY', None)
    app.config.setdefault('JWT_PUBLIC_KEY', None)

    # Options for blacklisting/revoking tokens
    app.config.setdefault('JWT_BLACKLIST_ENABLED', False)
    app.config.setdefault('JWT_BLACKLIST_TOKEN_CHECKS', ('access', 'refresh'))

    app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity')
    app.config.setdefault('JWT_USER_CLAIMS', 'user_claims')

    app.config.setdefault('JWT_DECODE_AUDIENCE', None)
    app.config.setdefault('JWT_DECODE_LEEWAY', 0)
    app.config.setdefault('JWT_CLAIMS_IN_REFRESH_TOKEN', False)

    app.config.setdefault('JWT_ERROR_MESSAGE_KEY', 'msg')
Sets the default configuration options used by this extension
def get_registration(self, path):
    """
    Returns registration item for specified path.

    If an email template is not registered, this will raise NotRegistered.
    """
    if not self.is_registered(path):
        raise NotRegistered("Email template not registered")
    return self._registry[path]
Returns registration item for specified path. If an email template is not registered, this will raise NotRegistered.
def get_pixel(framebuf, x, y):
    """Get the color of a given pixel"""
    index = (y >> 3) * framebuf.stride + x
    offset = y & 0x07
    return (framebuf.buf[index] >> offset) & 0x01
Get the color of a given pixel
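The index/offset math assumes a vertically byte-packed monochrome buffer (one byte covers 8 rows of a column, SSD1306-style). A standalone sketch with a plain bytearray instead of the framebuf object:

stride = 8                           # width of the toy 8x16 buffer
buf = bytearray(stride * 2)          # two 8-row pages

def set_pixel(buf, x, y):
    index = (y >> 3) * stride + x
    buf[index] |= 1 << (y & 0x07)

def get_pixel(buf, x, y):
    index = (y >> 3) * stride + x
    return (buf[index] >> (y & 0x07)) & 0x01

set_pixel(buf, 3, 10)
print(get_pixel(buf, 3, 10), get_pixel(buf, 3, 9))  # 1 0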
def create_CreateProcessWarnMultiproc(original_name):
    """
    CreateProcess(*args, **kwargs)
    """
    def new_CreateProcess(*args):
        try:
            import _subprocess
        except ImportError:
            import _winapi as _subprocess
        warn_multiproc()
        return getattr(_subprocess, original_name)(*args)
    return new_CreateProcess
CreateProcess(*args, **kwargs)
def length_range(string, minimum, maximum):
    """
    Requires values' length to be in a certain range.

    :param string: Value to validate
    :param minimum: Minimum length to accept
    :param maximum: Maximum length to accept
    :type string: str
    :type minimum: int
    :type maximum: int
    """
    int_range(len(string), minimum, maximum)
    return string
Requires values' length to be in a certain range. :param string: Value to validate :param minimum: Minimum length to accept :param maximum: Maximum length to accept :type string: str :type minimum: int :type maximum: int
def check_oscntab(oscntab, ccdamp, xsize, ysize, leading, trailing):
    """Check if the supplied parameters are in the ``OSCNTAB`` reference file.

    .. note:: Even if an entry does not exist in ``OSCNTAB``, as long as the
              subarray does not have any overscan, it should not be a problem
              for CALACS.

    .. note:: This function does not check the virtual bias rows.

    Parameters
    ----------
    oscntab : str
        Path to the ``OSCNTAB`` reference file being checked against.

    ccdamp : str
        Amplifier(s) used to read out the CCDs.

    xsize : int
        Number of columns in the readout.

    ysize : int
        Number of rows in the readout.

    leading : int
        Number of columns in the bias section ("TRIMX1" to be trimmed off by
        ``BLEVCORR``) on the A/C amplifiers side of the CCDs.

    trailing : int
        Number of columns in the bias section ("TRIMX2" to be trimmed off by
        ``BLEVCORR``) on the B/D amplifiers side of the CCDs.

    Returns
    -------
    supported : bool
        Result of test if input parameters are in ``OSCNTAB``.
    """
    tab = Table.read(oscntab)
    ccdamp = ccdamp.lower().rstrip()

    for row in tab:
        if (row['CCDAMP'].lower().rstrip() in ccdamp and
                row['NX'] == xsize and row['NY'] == ysize and
                row['TRIMX1'] == leading and row['TRIMX2'] == trailing):
            return True

    return False
Check if the supplied parameters are in the ``OSCNTAB`` reference file. .. note:: Even if an entry does not exist in ``OSCNTAB``, as long as the subarray does not have any overscan, it should not be a problem for CALACS. .. note:: This function does not check the virtual bias rows. Parameters ---------- oscntab : str Path to the ``OSCNTAB`` reference file being checked against. ccdamp : str Amplifier(s) used to read out the CCDs. xsize : int Number of columns in the readout. ysize : int Number of rows in the readout. leading : int Number of columns in the bias section ("TRIMX1" to be trimmed off by ``BLEVCORR``) on the A/C amplifiers side of the CCDs. trailing : int Number of columns in the bias section ("TRIMX2" to be trimmed off by ``BLEVCORR``) on the B/D amplifiers side of the CCDs. Returns ------- supported : bool Result of test if input parameters are in ``OSCNTAB``.
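A hedged usage sketch: build a tiny OSCNTAB-like table with astropy, write it to a scratch FITS file, and test a made-up readout geometry against it. The column names follow the code above; the values and file name are illustrative only.

from astropy.table import Table

oscn = Table({'CCDAMP': ['A'], 'NX': [2048], 'NY': [1024],
              'TRIMX1': [24], 'TRIMX2': [24]})
oscn.write('oscn_example.fits', overwrite=True)

print(check_oscntab('oscn_example.fits', 'A', 2048, 1024, 24, 24))  # True
print(check_oscntab('oscn_example.fits', 'A', 2048, 1024, 20, 24))  # False, TRIMX1 differs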
def get_playlists(self, search, start=0, max_items=100): """Search for playlists. See get_music_service_information for details on the arguments. Note: Un-intuitively this method returns MSAlbumList items. See note in class doc string for details. """ return self.get_music_service_information('playlists', search, start, max_items)
Search for playlists. See get_music_service_information for details on the arguments. Note: Un-intuitively this method returns MSAlbumList items. See note in class doc string for details.
def filter_and_save(raw, symbol_ids, destination_path):
    """
    Parameters
    ----------
    raw : dict with key 'handwriting_datasets'
    symbol_ids : dict
        Maps LaTeX to write-math.com id
    destination_path : str
        Path where the filtered dict 'raw' will be saved
    """
    logging.info('Start filtering...')
    new_hw_ds = []
    for el in raw['handwriting_datasets']:
        if el['formula_id'] in symbol_ids:
            # Look up the mapped id once before overwriting 'formula_id',
            # so both fields receive the same mapped value.
            mapped_id = symbol_ids[el['formula_id']]
            el['formula_id'] = mapped_id
            el['handwriting'].formula_id = mapped_id
            new_hw_ds.append(el)
    raw['handwriting_datasets'] = new_hw_ds

    # pickle
    logging.info('Start dumping %i recordings...', len(new_hw_ds))
    pickle.dump(raw, open(destination_path, "wb"), 2)
Parameters ---------- raw : dict with key 'handwriting_datasets' symbol_ids : dict Maps LaTeX to write-math.com id destination_path : str Path where the filtered dict 'raw' will be saved
def get_source(self, source_id=None, source_name=None):
    """Returns a dict with keys ['id', 'name', 'description'] for the matching source.

    The ``id`` field is guaranteed to be an int that exists in table source.

    Requires exactly one of ``source_id`` or ``source_name``; a ValueError is
    raised if neither or both are given, or if no matching source exists.
    """
    if not (bool(source_id) ^ bool(source_name)):
        raise ValueError('exactly one of source_id or source_name is required')
    if source_id:
        try:
            source_id = int(source_id)
        except (ValueError, TypeError):
            raise ValueError(
                'source_id must be an int or a string representing one')
        sel = select([self.source], self.source.c.id == source_id).execute()
    else:
        sel = select([self.source], self.source.c.name == source_name).execute()
    result = sel.fetchone()
    if not result:
        raise ValueError(
            'there is no source with id {} or name {}'.format(
                source_id, source_name))
    return dict(list(zip(list(sel.keys()), result)))
Returns a dict with keys ['id', 'name', 'description'] for the matching source. The ``id`` field is guaranteed to be an int that exists in table source. Requires exactly one of ``source_id`` or ``source_name``; a ValueError is raised if neither or both are given, or if no matching source exists.
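The "exactly one of" guard above relies on XOR of truthiness, which is worth seeing in isolation; note that it treats falsy values such as 0 or an empty string the same as "not given", which is why a source_id of 0 would be rejected. A standalone illustration:

def exactly_one(a, b):
    return bool(a) ^ bool(b)

print(exactly_one(42, None))       # True  - only the id was given
print(exactly_one(None, 'name'))   # True  - only the name was given
print(exactly_one(42, 'name'))     # False - both given
print(exactly_one(None, None))     # False - neither given
print(exactly_one(0, None))        # False - 0 is falsy, so it counts as missing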
def remove_all_timers(self): """Remove all waiting timers and terminate any blocking threads.""" with self.lock: if self.rtimer is not None: self.rtimer.cancel() self.timers = {} self.heap = [] self.rtimer = None self.expiring = False
Remove all waiting timers and terminate any blocking threads.
def conversations_setTopic( self, *, channel: str, topic: str, **kwargs ) -> SlackResponse: """Sets the topic for a conversation. Args: channel (str): The channel id. e.g. 'C1234567890' topic (str): The new topic for the channel. e.g. 'My Topic' """ kwargs.update({"channel": channel, "topic": topic}) return self.api_call("conversations.setTopic", json=kwargs)
Sets the topic for a conversation. Args: channel (str): The channel id. e.g. 'C1234567890' topic (str): The new topic for the channel. e.g. 'My Topic'
def rm_eltorito(self): # type: () -> None ''' Remove the El Torito boot record (and Boot Catalog) from the ISO. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if self.eltorito_boot_catalog is None: raise pycdlibexception.PyCdlibInvalidInput('This ISO does not have an El Torito Boot Record') for brindex, br in enumerate(self.brs): if br.boot_system_identifier == b'EL TORITO SPECIFICATION'.ljust(32, b'\x00'): eltorito_index = brindex break else: # There was a boot catalog, but no corresponding boot record. This # should never happen. raise pycdlibexception.PyCdlibInternalError('El Torito boot catalog found with no corresponding boot record') del self.brs[eltorito_index] num_bytes_to_remove = 0 # On a UDF ISO, removing the Boot Record doesn't actually decrease # the size, since there are a bunch of gaps at the beginning. if not self._has_udf: num_bytes_to_remove += self.pvd.logical_block_size() # Remove all of the DirectoryRecord/UDFFileEntries associated with # the Boot Catalog for rec in self.eltorito_boot_catalog.dirrecords: if isinstance(rec, dr.DirectoryRecord): num_bytes_to_remove += self._rm_dr_link(rec) elif isinstance(rec, udfmod.UDFFileEntry): num_bytes_to_remove += self._rm_udf_link(rec) else: # This should never happen raise pycdlibexception.PyCdlibInternalError('Saw an El Torito record that was neither ISO nor UDF') # Remove the linkage from the El Torito Entries to the inodes entries_to_remove = [self.eltorito_boot_catalog.initial_entry] for sec in self.eltorito_boot_catalog.sections: for entry in sec.section_entries: entries_to_remove.append(entry) for entry in entries_to_remove: if entry.inode is not None: new_list = [] for linkrec in entry.inode.linked_records: if id(linkrec) != id(entry): new_list.append(linkrec) entry.inode.linked_records = new_list num_bytes_to_remove += len(self.eltorito_boot_catalog.record()) self.eltorito_boot_catalog = None self._finish_remove(num_bytes_to_remove, True)
Remove the El Torito boot record (and Boot Catalog) from the ISO. Parameters: None. Returns: Nothing.
def ping(self, endpoint=''): """ Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code) """ r = requests.get(self.url() + "/" + endpoint) return r.status_code
Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code)
def respond_to_contact_info(self, message): """contact info: Show everyone's emergency contact info.""" contacts = self.load("contact_info", {}) context = { "contacts": contacts, } contact_html = rendered_template("contact_info.html", context) self.say(contact_html, message=message)
contact info: Show everyone's emergency contact info.
def path(self): """The path attribute returns a stringified, concise representation of the MultiFieldSelector. It can be reversed by the ``from_path`` constructor. """ if len(self.heads) == 1: return _fmt_mfs_path(self.heads.keys()[0], self.heads.values()[0]) else: return "(" + "|".join( _fmt_mfs_path(k, v) for (k, v) in self.heads.items() ) + ")"
The path attribute returns a stringified, concise representation of the MultiFieldSelector. It can be reversed by the ``from_path`` constructor.
def detrend(x, deg=1): """ remove polynomial from data. used by autocorr_noise_id() Parameters ---------- x: numpy.array time-series deg: int degree of polynomial to remove from x Returns ------- x_detrended: numpy.array detrended time-series """ t=range(len(x)) p = np.polyfit(t, x, deg) residual = x - np.polyval(p, t) return residual
remove polynomial from data. used by autocorr_noise_id() Parameters ---------- x: numpy.array time-series deg: int degree of polynomial to remove from x Returns ------- x_detrended: numpy.array detrended time-series
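A quick sanity check of the helper above: a purely linear series should come back as (numerically) zero residuals once a degree-1 polynomial is removed. Assumes numpy is available and detrend is importable as defined here.

import numpy as np

x = 0.5 * np.arange(10) + 3.0      # perfectly linear time-series
residual = detrend(x, deg=1)
print(np.allclose(residual, 0.0))  # True, up to floating-point error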
def _find_executables(self):
    """Finds the list of executables that pass the requirements necessary to have
    a wrapper created for them.
    """
    if len(self.needs) > 0:
        return

    for execname, executable in list(self.module.executables.items()):
        skip = False
        #At the moment we can't handle executables that use special derived types.
        if execname not in self.module.publics or not executable.primitive:
            msg.info("Skipping {}.{} because it is not public.".format(self.module.name, execname))
            skip = True

        #Check that all the parameters have intent specified, otherwise we won't handle them well.
        if any([p.direction == "" for p in executable.ordered_parameters]):
            msg.warn("Some parameters in {}.{} have no intent".format(self.module.name, execname) +
                     " specified. Can't wrap that executable.")
            skip = True

        if not skip:
            self.uses.append(execname)
            for depmod in executable.search_dependencies():
                if depmod not in self.needs:
                    self.needs.append(depmod)
Finds the list of executables that pass the requirements necessary to have a wrapper created for them.
def _setup_chassis(self): """ Sets up the router with the corresponding chassis (create slots and insert default adapters). """ self._create_slots(2) self._slots[0] = self.integrated_adapters[self._chassis]()
Sets up the router with the corresponding chassis (create slots and insert default adapters).
async def get_chat(self): """ Returns `chat`, but will make an API call to find the chat unless it's already cached. """ # See `get_sender` for information about 'min'. if (self._chat is None or getattr(self._chat, 'min', None))\ and await self.get_input_chat(): try: self._chat =\ await self._client.get_entity(self._input_chat) except ValueError: await self._refetch_chat() return self._chat
Returns `chat`, but will make an API call to find the chat unless it's already cached.
def unindent(self): """ Performs an un-indentation """ if self.tab_always_indent: cursor = self.editor.textCursor() if not cursor.hasSelection(): cursor.select(cursor.LineUnderCursor) self.unindent_selection(cursor) else: super(PyIndenterMode, self).unindent()
Performs an un-indentation
def addContainer(self, query): """ Creates a new query container widget object and slides it into the frame. :return <XOrbQueryContainer> """ self.setUpdatesEnabled(False) self.blockSignals(True) container = XOrbQueryContainer(self) # setup properties container.setShowBack(self.count() > 0) # create connections container.enterCompoundRequested.connect(self.enterContainer) container.exitCompoundRequested.connect(self.exitContainer) # show the widget self.addWidget(container) self.setUpdatesEnabled(True) self.blockSignals(False) container.setQuery(query) self.slideInNext() return container
Creates a new query container widget object and slides it into the frame. :return <XOrbQueryContainer>
def dlopen(ffi, *names): """Try various names for the same library, for different platforms.""" for name in names: for lib_name in (name, 'lib' + name): try: path = ctypes.util.find_library(lib_name) lib = ffi.dlopen(path or lib_name) if lib: return lib except OSError: pass raise OSError("dlopen() failed to load a library: %s" % ' / '.join(names))
Try various names for the same library, for different platforms.
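A hedged usage sketch with cffi, trying a few platform-dependent names for zlib; the library names are illustrative, and the snippet assumes cffi is installed and that some variant of zlib exists on the system.

import ctypes.util  # dlopen above relies on ctypes.util.find_library
from cffi import FFI

ffi = FFI()
libz = dlopen(ffi, 'z', 'zlib', 'zlib1')  # e.g. 'libz.so' on Linux, 'zlib1.dll' on Windows
print(libz)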
def autocommit(data_access): """Make statements autocommit. :param data_access: a DataAccess instance """ if not data_access.autocommit: data_access.commit() old_autocommit = data_access.autocommit data_access.autocommit = True try: yield data_access finally: data_access.autocommit = old_autocommit
Make statements autocommit. :param data_access: a DataAccess instance
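The generator above is presumably wrapped with contextlib.contextmanager in its module so it can be used in a with-statement; the sketch below makes that assumption explicit and uses a minimal stand-in for DataAccess exposing only the .autocommit flag and .commit() method the code touches.

import contextlib

class FakeDataAccess:
    def __init__(self):
        self.autocommit = False
    def commit(self):
        print('flushing pending statements')

autocommit_cm = contextlib.contextmanager(autocommit)  # assumed wrapping

da = FakeDataAccess()
with autocommit_cm(da):
    print(da.autocommit)   # True: statements inside the block autocommit
print(da.autocommit)       # False: the previous setting is restored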
def match_replace(cls, ops, kwargs): """Match and replace a full operand specification to a function that provides a replacement for the whole expression or raises a :exc:`.CannotSimplify` exception. E.g. First define an operation:: >>> class Invert(Operation): ... _rules = OrderedDict() ... simplifications = [match_replace, ] Then some _rules:: >>> A = wc("A") >>> A_float = wc("A", head=float) >>> Invert_A = pattern(Invert, A) >>> Invert._rules.update([ ... ('r1', (pattern_head(Invert_A), lambda A: A)), ... ('r2', (pattern_head(A_float), lambda A: 1./A)), ... ]) Check rule application:: >>> print(srepr(Invert.create("hallo"))) # matches no rule Invert('hallo') >>> Invert.create(Invert("hallo")) # matches first rule 'hallo' >>> Invert.create(.2) # matches second rule 5.0 A pattern can also have the same wildcard appear twice:: >>> class X(Operation): ... _rules = { ... 'r1': (pattern_head(A, A), lambda A: A), ... } ... simplifications = [match_replace, ] >>> X.create(1,2) X(1, 2) >>> X.create(1,1) 1 """ expr = ProtoExpr(ops, kwargs) if LOG: logger = logging.getLogger('QNET.create') for key, rule in cls._rules.items(): pat, replacement = rule match_dict = match_pattern(pat, expr) if match_dict: try: replaced = replacement(**match_dict) if LOG: logger.debug( "%sRule %s.%s: (%s, %s) -> %s", (" " * (LEVEL)), cls.__name__, key, expr.args, expr.kwargs, replaced) return replaced except CannotSimplify: if LOG_NO_MATCH: logger.debug( "%sRule %s.%s: no match: CannotSimplify", (" " * (LEVEL)), cls.__name__, key) continue else: if LOG_NO_MATCH: logger.debug( "%sRule %s.%s: no match: %s", (" " * (LEVEL)), cls.__name__, key, match_dict.reason) # No matching rules return ops, kwargs
Match and replace a full operand specification to a function that provides a replacement for the whole expression or raises a :exc:`.CannotSimplify` exception. E.g. First define an operation:: >>> class Invert(Operation): ... _rules = OrderedDict() ... simplifications = [match_replace, ] Then some _rules:: >>> A = wc("A") >>> A_float = wc("A", head=float) >>> Invert_A = pattern(Invert, A) >>> Invert._rules.update([ ... ('r1', (pattern_head(Invert_A), lambda A: A)), ... ('r2', (pattern_head(A_float), lambda A: 1./A)), ... ]) Check rule application:: >>> print(srepr(Invert.create("hallo"))) # matches no rule Invert('hallo') >>> Invert.create(Invert("hallo")) # matches first rule 'hallo' >>> Invert.create(.2) # matches second rule 5.0 A pattern can also have the same wildcard appear twice:: >>> class X(Operation): ... _rules = { ... 'r1': (pattern_head(A, A), lambda A: A), ... } ... simplifications = [match_replace, ] >>> X.create(1,2) X(1, 2) >>> X.create(1,1) 1
def handle_modifier_up(self, modifier): """ Updates the state of the given modifier key to 'released'. """ _logger.debug("%s released", modifier) # Caps and num lock are handled on key down only if modifier not in (Key.CAPSLOCK, Key.NUMLOCK): self.modifiers[modifier] = False
Updates the state of the given modifier key to 'released'.
def estimate(self, X, **params): """ Parameters ---------- X : tuple of (ttrajs, dtrajs, btrajs) Simulation trajectories. ttrajs contain the indices of the thermodynamic state, dtrajs contains the indices of the configurational states and btrajs contain the biases. ttrajs : list of numpy.ndarray(X_i, dtype=int) Every element is a trajectory (time series). ttrajs[i][t] is the index of the thermodynamic state visited in trajectory i at time step t. dtrajs : list of numpy.ndarray(X_i, dtype=int) dtrajs[i][t] is the index of the configurational state (Markov state) visited in trajectory i at time step t. btrajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64) For every simulation frame seen in trajectory i and time step t, btrajs[i][t,k] is the bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at the k'th Umbrella/Hamiltonian/temperature). """ return super(TRAM, self).estimate(X, **params)
Parameters ---------- X : tuple of (ttrajs, dtrajs, btrajs) Simulation trajectories. ttrajs contain the indices of the thermodynamic state, dtrajs contains the indices of the configurational states and btrajs contain the biases. ttrajs : list of numpy.ndarray(X_i, dtype=int) Every element is a trajectory (time series). ttrajs[i][t] is the index of the thermodynamic state visited in trajectory i at time step t. dtrajs : list of numpy.ndarray(X_i, dtype=int) dtrajs[i][t] is the index of the configurational state (Markov state) visited in trajectory i at time step t. btrajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64) For every simulation frame seen in trajectory i and time step t, btrajs[i][t,k] is the bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at the k'th Umbrella/Hamiltonian/temperature).
def edge_tuple(self, vertex0_id, vertex1_id): """To avoid duplicate edges where the vertex ids are reversed, we maintain that the vertex ids are ordered so that the corresponding pathway names are alphabetical. Parameters ----------- vertex0_id : int one vertex in the edge vertex1_id : int the other vertex in the edge Returns ----------- tup(int, int)|None, the edge id or None if the vertices do not exist in the network or they map to the same pathway (there should not be any self-loops in the network) """ pw0 = self.__getitem__(vertex0_id) pw1 = self.__getitem__(vertex1_id) if not pw0 or not pw1: return None if pw0 < pw1: return (vertex0_id, vertex1_id) elif pw0 > pw1: return (vertex1_id, vertex0_id) else: return None
To avoid duplicate edges where the vertex ids are reversed, we maintain that the vertex ids are ordered so that the corresponding pathway names are alphabetical. Parameters ----------- vertex0_id : int one vertex in the edge vertex1_id : int the other vertex in the edge Returns ----------- tup(int, int)|None, the edge id or None if the vertices do not exist in the network or they map to the same pathway (there should not be any self-loops in the network)
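The ordering rule can be seen on plain data: the endpoint whose pathway name sorts first always comes first, so (u, v) and (v, u) collapse to the same tuple. The ids and pathway names below are hypothetical.

pathway_of = {3: 'wnt signaling', 7: 'apoptosis'}

def canonical_edge(u, v):
    if pathway_of[u] == pathway_of[v]:
        return None                      # no self-loops
    return (u, v) if pathway_of[u] < pathway_of[v] else (v, u)

print(canonical_edge(3, 7))   # (7, 3): 'apoptosis' < 'wnt signaling'
print(canonical_edge(7, 3))   # (7, 3): same edge regardless of argument order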
def add(self, key, value):
    """Add an entry to a list preference

    Add `value` to the list of entries for the `key` preference.

    """
    if key not in self.prefs:
        self.prefs[key] = []
    self.prefs[key].append(value)
Add an entry to a list preference Add `value` to the list of entries for the `key` preference.
def interpret(self, config_dict): """ Converts the config_parser output into the proper type, supplies defaults if available and needed, and checks for some errors. """ value = config_dict.get(self.name) if value is None: if self.default is None: raise RuntimeError('Missing configuration item: ' + self.name) else: warnings.warn("Using default {!r} for '{!s}'".format(self.default, self.name), DeprecationWarning) if (str != self.value_type) and isinstance(self.default, self.value_type): return self.default else: value = self.default try: if str == self.value_type: return str(value) if int == self.value_type: return int(value) if bool == self.value_type: if value.lower() == "true": return True elif value.lower() == "false": return False else: raise RuntimeError(self.name + " must be True or False") if float == self.value_type: return float(value) if list == self.value_type: return value.split(" ") except Exception: raise RuntimeError("Error interpreting config item '{}' with value {!r} and type {}".format( self.name, value, self.value_type)) raise RuntimeError("Unexpected configuration type: " + repr(self.value_type))
Converts the config_parser output into the proper type, supplies defaults if available and needed, and checks for some errors.
def all(user, groupby='week', summary='default', network=False, split_week=False, split_day=False, filter_empty=True, attributes=True, flatten=False): """ Returns a dictionary containing all bandicoot indicators for the user, as well as reporting variables. Relevant indicators are defined in the 'individual', and 'spatial' modules. =================================== ======================================================================= Reporting variables Description =================================== ======================================================================= antennas_path path of the CSV file containing antennas locations attributes_path directory where attributes were loaded version bandicoot version groupby grouping method ('week' or None) split_week whether or not indicators are also computed for weekday and weekend split_day whether or not indicators are also computed for day and night start_time time of the first record end_time time of the last record night_start, night_end start and end time to define nights weekend days used to define the weekend (``[6, 7]`` by default, where 1 is Monday) bins number of weeks if the record are grouped has_call whether or not records include calls has_text whether or not records include texts has_home whether or not a :meth:`home location <bandicoot.core.User.recompute_home>` has been found has_network whether or not correspondents where loaded percent_records_missing_location percentage of records without location antennas_missing_locations number of antennas missing a location percent_outofnetwork_calls percentage of calls, received or emitted, made with a correspondant not loaded in the network percent_outofnetwork_texts percentage of texts with contacts not loaded in the network percent_outofnetwork_contacts percentage of contacts not loaded in the network percent_outofnetwork_call_durations percentage of minutes of calls where the contact was not loaded in the network number_of_records total number of records number_of_weeks number of weeks with records =================================== ======================================================================= We also include a last set of reporting variables, for the records ignored at load-time. Values can be ignored due to missing or inconsistent fields (e.g., not including a valid 'datetime' value). .. code-block:: python { 'all': 0, 'interaction': 0, 'direction': 0, 'correspondent_id': 0, 'datetime': 0, 'call_duration': 0 } with the total number of records ignored (key ``'all'``), as well as the number of records with faulty values for each columns. 
""" scalar_type = 'distribution_scalar' if groupby is not None else 'scalar' summary_type = 'distribution_summarystats' if groupby is not None else 'summarystats' number_of_interactions_in = partial(bc.individual.number_of_interactions, direction='in') number_of_interactions_in.__name__ = 'number_of_interaction_in' number_of_interactions_out = partial(bc.individual.number_of_interactions, direction='out') number_of_interactions_out.__name__ = 'number_of_interaction_out' functions = [ (bc.individual.active_days, scalar_type), (bc.individual.number_of_contacts, scalar_type), (bc.individual.call_duration, summary_type), (bc.individual.percent_nocturnal, scalar_type), (bc.individual.percent_initiated_conversations, scalar_type), (bc.individual.percent_initiated_interactions, scalar_type), (bc.individual.response_delay_text, summary_type), (bc.individual.response_rate_text, scalar_type), (bc.individual.entropy_of_contacts, scalar_type), (bc.individual.balance_of_contacts, summary_type), (bc.individual.interactions_per_contact, summary_type), (bc.individual.interevent_time, summary_type), (bc.individual.percent_pareto_interactions, scalar_type), (bc.individual.percent_pareto_durations, scalar_type), (bc.individual.number_of_interactions, scalar_type), (number_of_interactions_in, scalar_type), (number_of_interactions_out, scalar_type), (bc.spatial.number_of_antennas, scalar_type), (bc.spatial.entropy_of_antennas, scalar_type), (bc.spatial.percent_at_home, scalar_type), (bc.spatial.radius_of_gyration, scalar_type), (bc.spatial.frequent_antennas, scalar_type), (bc.spatial.churn_rate, scalar_type) ] if user.has_recharges: functions += [ (bc.recharge.amount_recharges, summary_type), (bc.recharge.interevent_time_recharges, summary_type), (bc.recharge.percent_pareto_recharges, scalar_type), (bc.recharge.number_of_recharges, scalar_type), (bc.recharge.average_balance_recharges, scalar_type) ] network_functions = [ bc.network.clustering_coefficient_unweighted, bc.network.clustering_coefficient_weighted, bc.network.assortativity_attributes, bc.network.assortativity_indicators ] groups = list(group_records(user.records, groupby=groupby)) bins_with_data = len(groups) groups = list(group_records_with_padding(user.records, groupby=groupby)) bins = len(groups) bins_without_data = bins - bins_with_data reporting = OrderedDict([ ('antennas_path', user.antennas_path), ('attributes_path', user.attributes_path), ('recharges_path', user.attributes_path), ('version', bc.__version__), ('code_signature', bc.helper.tools.bandicoot_code_signature()), ('groupby', groupby), ('split_week', split_week), ('split_day', split_day), ('start_time', user.start_time and str(user.start_time)), ('end_time', user.end_time and str(user.end_time)), ('night_start', str(user.night_start)), ('night_end', str(user.night_end)), ('weekend', user.weekend), ('number_of_records', len(user.records)), ('number_of_antennas', len(user.antennas)), ('number_of_recharges', len(user.recharges)), ('bins', bins), ('bins_with_data', bins_with_data), ('bins_without_data', bins_without_data), ('has_call', user.has_call), ('has_text', user.has_text), ('has_home', user.has_home), ('has_recharges', user.has_recharges), ('has_attributes', user.has_attributes), ('has_network', user.has_network), ('percent_records_missing_location', bc.helper.tools.percent_records_missing_location(user)), ('antennas_missing_locations', bc.helper.tools.antennas_missing_locations(user)), ('percent_outofnetwork_calls', user.percent_outofnetwork_calls), ('percent_outofnetwork_texts', 
user.percent_outofnetwork_texts), ('percent_outofnetwork_contacts', user.percent_outofnetwork_contacts), ('percent_outofnetwork_call_durations', user.percent_outofnetwork_call_durations), ]) if user.ignored_records is not None: reporting['ignored_records'] = OrderedDict(user.ignored_records) returned = OrderedDict([ ('name', user.name), ('reporting', reporting) ]) for fun, datatype in functions: try: metric = fun(user, groupby=groupby, summary=summary, datatype=datatype, filter_empty=filter_empty, split_week=split_week, split_day=split_day) except ValueError: metric = fun(user, groupby=groupby, datatype=datatype, split_week=split_week, filter_empty=filter_empty, split_day=split_day) returned[fun.__name__] = metric if network and user.has_network: for fun in network_functions: returned[fun.__name__] = fun(user) if attributes and user.attributes != {}: returned['attributes'] = OrderedDict(user.attributes) if flatten is True: return globals()['flatten'](returned) return returned
Returns a dictionary containing all bandicoot indicators for the user, as well as reporting variables. Relevant indicators are defined in the 'individual', and 'spatial' modules. =================================== ======================================================================= Reporting variables Description =================================== ======================================================================= antennas_path path of the CSV file containing antennas locations attributes_path directory where attributes were loaded version bandicoot version groupby grouping method ('week' or None) split_week whether or not indicators are also computed for weekday and weekend split_day whether or not indicators are also computed for day and night start_time time of the first record end_time time of the last record night_start, night_end start and end time to define nights weekend days used to define the weekend (``[6, 7]`` by default, where 1 is Monday) bins number of weeks if the record are grouped has_call whether or not records include calls has_text whether or not records include texts has_home whether or not a :meth:`home location <bandicoot.core.User.recompute_home>` has been found has_network whether or not correspondents where loaded percent_records_missing_location percentage of records without location antennas_missing_locations number of antennas missing a location percent_outofnetwork_calls percentage of calls, received or emitted, made with a correspondant not loaded in the network percent_outofnetwork_texts percentage of texts with contacts not loaded in the network percent_outofnetwork_contacts percentage of contacts not loaded in the network percent_outofnetwork_call_durations percentage of minutes of calls where the contact was not loaded in the network number_of_records total number of records number_of_weeks number of weeks with records =================================== ======================================================================= We also include a last set of reporting variables, for the records ignored at load-time. Values can be ignored due to missing or inconsistent fields (e.g., not including a valid 'datetime' value). .. code-block:: python { 'all': 0, 'interaction': 0, 'direction': 0, 'correspondent_id': 0, 'datetime': 0, 'call_duration': 0 } with the total number of records ignored (key ``'all'``), as well as the number of records with faulty values for each columns.
def comments(self): """ Iterator of :class:`stravalib.model.ActivityComment` objects for this activity. """ if self._comments is None: self.assert_bind_client() if self.comment_count > 0: self._comments = self.bind_client.get_activity_comments(self.id) else: # Shortcut if we know there aren't any self._comments = [] return self._comments
Iterator of :class:`stravalib.model.ActivityComment` objects for this activity.
def fftlog(fEM, time, freq, ftarg): r"""Fourier Transform using FFTLog. FFTLog is the logarithmic analogue to the Fast Fourier Transform FFT. FFTLog was presented in Appendix B of [Hami00]_ and published at <http://casa.colorado.edu/~ajsh/FFTLog>. This function uses a simplified version of ``pyfftlog``, which is a python-version of ``FFTLog``. For more details regarding ``pyfftlog`` see <https://github.com/prisae/pyfftlog>. Not the full flexibility of ``FFTLog`` is available here: Only the logarithmic FFT (``fftl`` in ``FFTLog``), not the Hankel transform (``fht`` in ``FFTLog``). Furthermore, the following parameters are fixed: - ``kr`` = 1 (initial value) - ``kropt`` = 1 (silently adjusts ``kr``) - ``dir`` = 1 (forward) Furthermore, ``q`` is restricted to -1 <= q <= 1. The function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a description of the input and output parameters. Returns ------- tEM : array Returns time-domain EM response of ``fEM`` for given ``time``. conv : bool Only relevant for QWE/QUAD. """ # Get tcalc, dlnr, kr, rk, q; a and n _, _, q, mu, tcalc, dlnr, kr, rk = ftarg if mu > 0: # Sine a = -fEM.imag else: # Cosine a = fEM.real n = a.size # 1. Amplitude and Argument of kr^(-2 i y) U_mu(q + 2 i y) ln2kr = np.log(2.0/kr) d = np.pi/(n*dlnr) m = np.arange(1, (n+1)/2) y = m*d # y = m*pi/(n*dlnr) if q == 0: # unbiased case (q = 0) zp = special.loggamma((mu + 1)/2.0 + 1j*y) arg = 2.0*(ln2kr*y + zp.imag) else: # biased case (q != 0) xp = (mu + 1.0 + q)/2.0 xm = (mu + 1.0 - q)/2.0 zp = special.loggamma(xp + 0j) zm = special.loggamma(xm + 0j) # Amplitude and Argument of U_mu(q) amp = np.exp(np.log(2.0)*q + zp.real - zm.real) # note +Im(zm) to get conjugate value below real axis arg = zp.imag + zm.imag # first element: cos(arg) = ±1, sin(arg) = 0 argcos1 = amp*np.cos(arg) # remaining elements zp = special.loggamma(xp + 1j*y) zm = special.loggamma(xm + 1j*y) argamp = np.exp(np.log(2.0)*q + zp.real - zm.real) arg = 2*ln2kr*y + zp.imag + zm.imag argcos = np.cos(arg) argsin = np.sin(arg) # 2. Centre point of array jc = np.array((n + 1)/2.0) j = np.arange(n)+1 # 3. a(r) = A(r) (r/rc)^[-dir*(q-.5)] a *= np.exp(-(q - 0.5)*(j - jc)*dlnr) # 4. transform a(r) -> ã(k) # 4.a normal FFT a = fftpack.rfft(a) # 4.b m = np.arange(1, n/2, dtype=int) # index variable if q == 0: # unbiased (q = 0) transform # multiply by (kr)^[- i 2 m pi/(n dlnr)] U_mu[i 2 m pi/(n dlnr)] ar = a[2*m-1] ai = a[2*m] a[2*m-1] = ar*argcos[:-1] - ai*argsin[:-1] a[2*m] = ar*argsin[:-1] + ai*argcos[:-1] # problematical last element, for even n if np.mod(n, 2) == 0: ar = argcos[-1] a[-1] *= ar else: # biased (q != 0) transform # multiply by (kr)^[- i 2 m pi/(n dlnr)] U_mu[q + i 2 m pi/(n dlnr)] # phase ar = a[2*m-1] ai = a[2*m] a[2*m-1] = ar*argcos[:-1] - ai*argsin[:-1] a[2*m] = ar*argsin[:-1] + ai*argcos[:-1] a[0] *= argcos1 a[2*m-1] *= argamp[:-1] a[2*m] *= argamp[:-1] # problematical last element, for even n if np.mod(n, 2) == 0: m = int(n/2)-3 ar = argcos[m-1]*argamp[m-1] a[-1] *= ar # 4.c normal FFT back a = fftpack.irfft(a) # Ã(k) = ã(k) k^[-dir*(q+.5)] rc^[-dir*(q-.5)] # = ã(k) (k/kc)^[-dir*(q+.5)] (kc rc)^(-dir*q) (rc/kc)^(dir*.5) a = a[::-1]*np.exp(-((q + 0.5)*(j - jc)*dlnr + q*np.log(kr) - np.log(rk)/2.0)) # Interpolate for the desired times ttEM = iuSpline(np.log(tcalc), a) tEM = ttEM(np.log(time)) # (Second argument is only for QWE) return tEM, True
r"""Fourier Transform using FFTLog. FFTLog is the logarithmic analogue to the Fast Fourier Transform FFT. FFTLog was presented in Appendix B of [Hami00]_ and published at <http://casa.colorado.edu/~ajsh/FFTLog>. This function uses a simplified version of ``pyfftlog``, which is a python-version of ``FFTLog``. For more details regarding ``pyfftlog`` see <https://github.com/prisae/pyfftlog>. Not the full flexibility of ``FFTLog`` is available here: Only the logarithmic FFT (``fftl`` in ``FFTLog``), not the Hankel transform (``fht`` in ``FFTLog``). Furthermore, the following parameters are fixed: - ``kr`` = 1 (initial value) - ``kropt`` = 1 (silently adjusts ``kr``) - ``dir`` = 1 (forward) Furthermore, ``q`` is restricted to -1 <= q <= 1. The function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a description of the input and output parameters. Returns ------- tEM : array Returns time-domain EM response of ``fEM`` for given ``time``. conv : bool Only relevant for QWE/QUAD.
def update_service(self, stack, service, args):
    """Update a service

    Update the configuration (container image and other parameters) of the
    service with the given name; the changes take effect once the containers
    are redeployed.
    If the manualUpdate parameter is specified, an additional call to the
    deploy-service API with the appropriate parameters is required to perform
    the deployment; a service in manual-upgrade mode may not perform any
    other modification operations.
    If manualUpdate is not specified, the platform completes the deployment
    automatically.

    Args:
        - stack: name of the stack (service group) the service belongs to
        - service: name of the service
        - args: request parameters (JSON) describing the service, see
          http://kirk-docs.qiniu.com/apidocs/

    Returns:
        A tuple (<result>, <ResponseInfo>)
        - result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
        - ResponseInfo: response information for the request
    """
    url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service)
    return self.__post(url, args)
Update a service

Update the configuration (container image and other parameters) of the service with the given name; the changes take effect once the containers are redeployed.
If the manualUpdate parameter is specified, an additional call to the deploy-service API with the appropriate parameters is required to perform the deployment; a service in manual-upgrade mode may not perform any other modification operations.
If manualUpdate is not specified, the platform completes the deployment automatically.

Args:
    - stack: name of the stack (service group) the service belongs to
    - service: name of the service
    - args: request parameters (JSON) describing the service, see http://kirk-docs.qiniu.com/apidocs/

Returns:
    A tuple (<result>, <ResponseInfo>)
    - result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
    - ResponseInfo: response information for the request
def insert_instance(self, block):
    """Insert a fetched instance into embed block."""
    embed_type = block.get('type', None)
    data = block.get('data', {})
    serializer = self.serializers.get(embed_type, None)
    if serializer is None:
        return block

    try:
        instance_id = serializer.get_id(data)
        instance = self.instances[embed_type][instance_id]
        data[embed_type] = serializer.serialize(instance)
    except Exception:
        # Any lookup or serialization failure leaves the embed empty
        # rather than breaking the whole block.
        data[embed_type] = None
    block['data'] = data
    return block
Insert a fetched instance into embed block.
def radialvelocity(self, rf='', v0='0m/s', off=None):
    """Defines a radialvelocity measure. It has to specify a reference code,
    radialvelocity quantity value (see introduction for the action on a
    scalar quantity with either a vector or scalar value, and when a vector
    of quantities is given), and optionally it can specify an offset, which
    in itself has to be a radialvelocity.

    :param rf: reference code string; Allowable reference codes are:
               *LSRK LSRD BARY GEO TOPO GALACTO*
               Note that additional ones may become available. Check with::

                   dm.list_codes(dm.radialvelocity())

    :param v0: radial velocity as quantity or string
    :param off: an optional offset measure of same type

    """
    loc = {'type': "radialvelocity",
           'refer': rf,
           'm0': dq.quantity(v0)}
    if is_measure(off):
        if not off['type'] == "radialvelocity":
            raise TypeError('Illegal offset type specified.')
        loc["offset"] = off
    return self.measure(loc, rf)
Defines a radialvelocity measure. It has to specify a reference code, radialvelocity quantity value (see introduction for the action on a scalar quantity with either a vector or scalar value, and when a vector of quantities is given), and optionally it can specify an offset, which in itself has to be a radialvelocity. :param rf: reference code string; Allowable reference codes are: *LSRK LSRD BARY GEO TOPO GALACTO* Note that additional ones may become available. Check with:: dm.list_codes(dm.radialvelocity()) :param v0: radial velocity as quantity or string :param off: an optional offset measure of same type
def save(self): """Save this object to the database. Behaves very similarly to whatever collection.save(document) would, ie. does upserts on _id presence. If methods ``pre_save`` or ``post_save`` are defined, those are called. If there is a spec document, then the document is validated against it after the ``pre_save`` hook but before the save.""" if hasattr(self, 'pre_save'): self.pre_save() database, collection = self._collection_key.split('.') self.validate() _id = current()[database][collection].save(dict(self)) if _id: self._id = _id if hasattr(self, 'post_save'): self.post_save()
Save this object to the database. Behaves very similarly to whatever collection.save(document) would, ie. does upserts on _id presence. If methods ``pre_save`` or ``post_save`` are defined, those are called. If there is a spec document, then the document is validated against it after the ``pre_save`` hook but before the save.
def _construct_version(self, function, intrinsics_resolver):
    """Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
    Old versions will not be deleted without a direct reference from the CloudFormation template.

    :param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
    :param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
        references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
        template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
    :return: Lambda function Version resource
    """
    code_dict = function.Code
    if not code_dict:
        raise ValueError("Lambda function code must be a valid non-empty dictionary")

    if not intrinsics_resolver:
        raise ValueError("intrinsics_resolver is required for versions creation")

    # Resolve references to template parameters before creating hash. This will *not* resolve all intrinsics
    # because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
    # is good enough. Here is why:
    #
    # When using intrinsic functions there are two cases when the hash must change:
    #   - Value of the template parameter changes
    #   - (or) LogicalId of a referenced resource changes ie. !GetAtt NewResource.Arn
    #
    # The latter case will already change the hash because some value in the Code dictionary changes. We handle
    # the first case by resolving references to template parameters. It is okay even if these references are
    # present inside another intrinsic such as !Join. The resolver will replace the reference with the
    # parameter's value and keep all other parts of !Join identical. This will still trigger a change in
    # the hash.
    code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)

    # Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
    # to trigger creation of a new version every time code location changes. Since logicalId changes,
    # CloudFormation will drop the old version and create a new one for us. We set a DeletionPolicy on the
    # version resource to prevent CloudFormation from actually deleting the underlying version resource
    #
    # SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference
    # between the previous and next hashes. The chances that two subsequent hashes collide are fairly low.
    prefix = "{id}Version".format(id=self.logical_id)
    logical_id = logical_id_generator.LogicalIdGenerator(prefix, code_dict).gen()

    attributes = self.get_passthrough_resource_attributes()
    if attributes is None:
        attributes = {}
    attributes["DeletionPolicy"] = "Retain"

    lambda_version = LambdaVersion(logical_id=logical_id, attributes=attributes)
    lambda_version.FunctionName = function.get_runtime_attr('name')
    lambda_version.Description = self.VersionDescription

    return lambda_version
Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes. Old versions will not be deleted without a direct reference from the CloudFormation template. :param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version :param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a template parameter. Need to resolve the values otherwise we will never detect a change in Code dict :return: Lambda function Version resource
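The "new logical id whenever the code location changes" behaviour boils down to hashing the resolved Code dictionary and appending a short prefix of the digest; the sketch below illustrates that idea with plain hashlib and json, and is not the actual LogicalIdGenerator used by SAM.

import hashlib
import json

def version_logical_id(function_logical_id, code_dict):
    # Stable serialization, then a short digest suffix (illustrative only).
    digest = hashlib.sha1(
        json.dumps(code_dict, sort_keys=True).encode('utf-8')).hexdigest()
    return '{}Version{}'.format(function_logical_id, digest[:10])

print(version_logical_id('MyFunc', {'S3Bucket': 'bucket', 'S3Key': 'app-v1.zip'}))
print(version_logical_id('MyFunc', {'S3Bucket': 'bucket', 'S3Key': 'app-v2.zip'}))
# Different S3Key -> different logical id -> CloudFormation publishes a new version.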
def parse_prototype(prototype): '''Returns a :attr:`FunctionSpec` instance from the input. ''' val = ' '.join(prototype.splitlines()) f = match(func_pat, val) # match the whole function if f is None: raise Exception('Cannot parse function prototype "{}"'.format(val)) ftp, pointer, name, arg = [v.strip() for v in f.groups()] args = [] if arg.strip(): # split each arg into type, zero or more *, and name for item in split(arg_split_pat, arg): m = match(variable_pat, item.strip()) if m is None: raise Exception('Cannot parse function prototype "{}"'.format(val)) tp, star, nm, count = [v.strip() if v else '' for v in m.groups()] args.append(VariableSpec(tp, star, nm, count)) return FunctionSpec('FLYCAPTURE2_C_API', ftp, pointer, name, args)
Returns a :attr:`FunctionSpec` instance from the input.
def get_config():
    """
    Reads the music download filepath from scdl.cfg
    """
    global token
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.expanduser('~'), '.config/scdl/scdl.cfg'))
    try:
        token = config['scdl']['auth_token']
        path = config['scdl']['path']
    except (KeyError, configparser.Error):
        logger.error('Are you sure scdl.cfg is in $HOME/.config/scdl/ ?')
        logger.error('Are both "auth_token" and "path" defined there?')
        sys.exit()
    if os.path.exists(path):
        os.chdir(path)
    else:
        logger.error('Invalid path in scdl.cfg...')
        sys.exit()
Reads the music download filepath from scdl.cfg
def validate_lang(ctx, param, lang): """Validation callback for the <lang> option. Ensures <lang> is a supported language unless the <nocheck> flag is set """ if ctx.params['nocheck']: return lang try: if lang not in tts_langs(): raise click.UsageError( "'%s' not in list of supported languages.\n" "Use --all to list languages or " "add --nocheck to disable language check." % lang) else: # The language is valid. # No need to let gTTS re-validate. ctx.params['nocheck'] = True except RuntimeError as e: # Only case where the <nocheck> flag can be False # Non-fatal. gTTS will try to re-validate. log.debug(str(e), exc_info=True) return lang
Validation callback for the <lang> option. Ensures <lang> is a supported language unless the <nocheck> flag is set
def compute(cls, observation, prediction): """Compute a z-score from an observation and a prediction.""" assert isinstance(observation, dict) try: p_value = prediction['mean'] # Use the prediction's mean. except (TypeError, KeyError, IndexError): # If there isn't one... try: p_value = prediction['value'] # Use the prediction's value. except (TypeError, IndexError): # If there isn't one... p_value = prediction # Use the prediction (assume numeric). o_mean = observation['mean'] o_std = observation['std'] value = (p_value - o_mean)/o_std value = utils.assert_dimensionless(value) if np.isnan(value): score = InsufficientDataScore('One of the input values was NaN') else: score = ZScore(value) return score
Compute a z-score from an observation and a prediction.
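The arithmetic itself is the standard score, z = (prediction - mean) / std; a worked instance with plain numbers (no sciunit machinery) makes the fallback order above easier to follow: the prediction's mean is preferred, then its value, then the bare number.

observation = {'mean': 10.0, 'std': 2.0}
prediction = {'mean': 13.0}                 # 'mean' is preferred when present
z = (prediction['mean'] - observation['mean']) / observation['std']
print(z)   # 1.5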
def parse_raml(self): """ Parse RAML file """ if utils.is_url(self.ramlfile): raml = utils.download_file(self.ramlfile) else: with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f: raml = raml_f.read() loader = ramlfications.loads(raml) config = ramlfications.setup_config(self.ramlconfig) self.raml = ramlfications.parse_raml(loader, config)
Parse RAML file
def main(args): of = sys.stdout if args.output and args.output[-4:] == '.bam': cmd = 'samtools view -Sb - -o '+args.output pof = Popen(cmd.split(),stdin=PIPE) of = pof.stdin elif args.output: of = open(args.output,'w') """Use the valid input file to get the header information.""" header = None if args.HQ: cmd = 'samtools view -H '+args.HQ sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) if (not header) and args.HQCorrected: cmd = 'samtools view -H '+args.HQCorrected sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) if (not header) and args.AQ: cmd = 'samtools view -H '+args.AQ sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) if (not header) and args.AQCorrected: cmd = 'samtools view -H '+args.AQCorrected sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) if (not header) and args.subreads: cmd = 'samtools view -H '+args.subreads sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) if (not header) and args.subreadsCorrected: cmd = 'samtools view -H '+args.subreadsCorrected sys.stderr.write(cmd+"\n") header = Popen(cmd.split(),stdout=PIPE).communicate()[0] of.write(header) _nameprog = re.compile('^(\S+)') negative_filter = set() # remove these """ Next read throught he alignments THAT ALIGNED in order of priority""" negative_filter = get_best_set(negative_filter,'-F 4',of,args,True) """After traversing all the aligned reads we can do it all over again this time with the unaligned portion of reads""" """ Finally go through the reads that did NOT ALIGN to get anything left""" get_best_set(negative_filter,'-f 4',of,args,False) if args.output and args.output[-4:] == '.bam': pof.communicate() else: of.close()
Use the valid input file to get the header information.
def process_into(self, node, obj): """ Process a BeautifulSoup node and fill its elements into a pyth base object. """ if isinstance(node, BeautifulSoup.NavigableString): text = self.process_text(node) if text: obj.append(text) return if node.name == 'p': # add a new paragraph into the pyth object new_obj = document.Paragraph() obj.append(new_obj) obj = new_obj elif node.name == 'ul': # add a new list new_obj = document.List() obj.append(new_obj) obj = new_obj elif node.name == 'li': # add a new list entry new_obj = document.ListEntry() obj.append(new_obj) obj = new_obj for child in node: self.process_into(child, obj)
Process a BeautifulSoup node and fill its elements into a pyth base object.
def dump_image_data(dataset_dir, data_dir, dataset, color_array_info, root=None, compress=True): """Dump image data object to vtkjs""" if root is None: root = {} root['vtkClass'] = 'vtkImageData' container = root container['spacing'] = dataset.GetSpacing() container['origin'] = dataset.GetOrigin() container['extent'] = dataset.GetExtent() dump_all_arrays(dataset_dir, data_dir, dataset, container, compress) return root
Dump image data object to vtkjs
def afterglow(self, src=None, event=None, dst=None, **kargs): """Experimental clone attempt of http://sourceforge.net/projects/afterglow each datum is reduced as src -> event -> dst and the data are graphed. by default we have IP.src -> IP.dport -> IP.dst""" if src is None: src = lambda x: x['IP'].src if event is None: event = lambda x: x['IP'].dport if dst is None: dst = lambda x: x['IP'].dst sl = {} el = {} dl = {} for i in self.res: try: s, e, d = src(i), event(i), dst(i) if s in sl: n, lst = sl[s] n += 1 if e not in lst: lst.append(e) sl[s] = (n, lst) else: sl[s] = (1, [e]) if e in el: n, lst = el[e] n += 1 if d not in lst: lst.append(d) el[e] = (n, lst) else: el[e] = (1, [d]) dl[d] = dl.get(d, 0) + 1 except Exception: continue import math def normalize(n): return 2 + math.log(n) / 4.0 def minmax(x): m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])), ((a, a) for a in x)) if m == M: m = 0 if M == 0: M = 1 return m, M mins, maxs = minmax(x for x, _ in six.itervalues(sl)) mine, maxe = minmax(x for x, _ in six.itervalues(el)) mind, maxd = minmax(six.itervalues(dl)) gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n' gr += "# src nodes\n" for s in sl: n, _ = sl[s] n = 1 + float(n - mins) / (maxs - mins) gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s), repr(s), n, n) # noqa: E501 gr += "# event nodes\n" for e in el: n, _ = el[e] n = n = 1 + float(n - mine) / (maxe - mine) gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e), repr(e), n, n) # noqa: E501 for d in dl: n = dl[d] n = n = 1 + float(n - mind) / (maxd - mind) gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d), repr(d), n, n) # noqa: E501 gr += "###\n" for s in sl: n, lst = sl[s] for e in lst: gr += ' "src.%s" -> "evt.%s";\n' % (repr(s), repr(e)) for e in el: n, lst = el[e] for d in lst: gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e), repr(d)) gr += "}" return do_graph(gr, **kargs)
Experimental clone attempt of http://sourceforge.net/projects/afterglow each datum is reduced as src -> event -> dst and the data are graphed. by default we have IP.src -> IP.dport -> IP.dst
def get_stp_mst_detail_output_cist_migrate_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") migrate_time = ET.SubElement(cist, "migrate-time") migrate_time.text = kwargs.pop('migrate_time') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def get_ccle_mutations(): """Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines """ if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) gene_list = body.get('gene_list') cell_lines = body.get('cell_lines') mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines) res = {'mutations': mutations} return res
Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines
def get_mean(self, col, row): """ Returns the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the mean :rtype: float """ return javabridge.call(self.jobject, "getMean", "(II)D", col, row)
Returns the mean at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the mean :rtype: float
def gen_textfiles_from_filenames( filenames: Iterable[str]) -> Generator[TextIO, None, None]: """ Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object """ for filename in filenames: with open(filename) as f: yield f
Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object
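Typical use is streaming lines from several files while holding at most one handle open at a time; the file names below are hypothetical.

for f in gen_textfiles_from_filenames(['notes_a.txt', 'notes_b.txt']):
    for line in f:
        print(line.rstrip())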
def subdomain(self, index, value=None): """ Return a subdomain or set a new value and return a new :class:`URL` instance. :param integer index: 0-indexed subdomain :param string value: New subdomain """ if value is not None: subdomains = self.subdomains() subdomains[index] = value return URL._mutate(self, host='.'.join(subdomains)) return self.subdomains()[index]
Return a subdomain or set a new value and return a new :class:`URL` instance. :param integer index: 0-indexed subdomain :param string value: New subdomain
def find_stack_elements(self, module, module_name="", _visited_modules=None): """ This function goes through the given container and returns the stack elements. Each stack element is represented by a tuple: ( container_name, element_name, stack_element) The tuples are returned in an array """ from types import ModuleType if _visited_modules is None: _visited_modules = [] _visited_modules.append(module) # elements = [] for el_name in dir(module): the_el = module.__getattribute__(el_name) if isinstance(the_el, ModuleType): # Recursively go into the module if the_el in _visited_modules: continue elements = elements + self.find_stack_elements(the_el, module_name + el_name + ".", _visited_modules) elif isinstance(the_el, StackElement): # Add to list elements.append((module_name, el_name, the_el)) return elements
This function goes through the given container and returns the stack elements. Each stack element is represented by a tuple: ( container_name, element_name, stack_element) The tuples are returned in an array