Dataset columns: code (string, 75 to 104k characters) · docstring (string, 1 to 46.9k characters)
def debug(self, message, *args, **kwargs):
    """Debug level to use and abuse when coding."""
    self._log(logging.DEBUG, message, *args, **kwargs)
Debug level to use and abuse when coding
def _set_fcoe_max_enode(self, v, load=False): """ Setter method for fcoe_max_enode, mapped from YANG variable /rbridge_id/fcoe_config/fcoe_max_enode (fcoe-max-enodes-per-rbridge-type) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_max_enode is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_max_enode() directly. YANG Description: This specifies the Number of the FCoE Enodes. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..1000']}), is_leaf=True, yang_name="fcoe-max-enode", rest_name="fcoe-enodes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcoe_enode_cp', u'cli-full-command': None, u'alt-name': u'fcoe-enodes', u'info': u'Configure the fcoe enodes for the FCoE Fabric-map', u'display-when': u'not (/fcoe-fsb/fcoe-fsb-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-max-enodes-per-rbridge-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fcoe_max_enode must be of a type compatible with fcoe-max-enodes-per-rbridge-type""", 'defined-type': "brocade-fcoe:fcoe-max-enodes-per-rbridge-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..1000']}), is_leaf=True, yang_name="fcoe-max-enode", rest_name="fcoe-enodes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcoe_enode_cp', u'cli-full-command': None, u'alt-name': u'fcoe-enodes', u'info': u'Configure the fcoe enodes for the FCoE Fabric-map', u'display-when': u'not (/fcoe-fsb/fcoe-fsb-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-max-enodes-per-rbridge-type', is_config=True)""", }) self.__fcoe_max_enode = t if hasattr(self, '_set'): self._set()
Setter method for fcoe_max_enode, mapped from YANG variable /rbridge_id/fcoe_config/fcoe_max_enode (fcoe-max-enodes-per-rbridge-type) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_max_enode is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_max_enode() directly. YANG Description: This specifies the Number of the FCoE Enodes.
def generate_html_documentation(self): """generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.""" methods = {} for method_name in self.system_listMethods(): if method_name in self.funcs: method = self.funcs[method_name] elif self.instance is not None: method_info = [None, None] # argspec, documentation if hasattr(self.instance, '_get_method_argstring'): method_info[0] = self.instance._get_method_argstring(method_name) if hasattr(self.instance, '_methodHelp'): method_info[1] = self.instance._methodHelp(method_name) method_info = tuple(method_info) if method_info != (None, None): method = method_info elif not hasattr(self.instance, '_dispatch'): try: method = resolve_dotted_attribute( self.instance, method_name ) except AttributeError: method = method_info else: method = method_info else: assert 0, "Could not find method in self.functions and no "\ "instance installed" methods[method_name] = method documenter = ServerHTMLDoc() documentation = documenter.docserver( self.server_name, self.server_documentation, methods ) return documenter.page(self.server_title, documentation)
generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.
def _parse_migrate_output(self, output):
    """
    Args:
        output: str, output of "manage.py migrate"

    Returns (succeeded: list(tuple), failed: tuple or None)
    Both tuples are of the form (app, migration)
    """
    failed = None
    succeeded = []
    # Mark migrations:
    #   - before exception migration as success
    #   - exception migration as failed
    for line in output.split('\n'):
        line = _remove_escape_characters(line).strip()
        line_match = self.migration_regex.match(line)
        if line_match:
            migration = (line_match.group('app_name'),
                         line_match.group('migration_name'))
            if line_match.group('success') == 'OK':
                # The migration succeeded
                succeeded.append(migration)
            else:
                # The migration failed
                failed = migration
                break
    return succeeded, failed
Args: output: str, output of "manage.py migrate" Returns (succeeded: list(tuple), failed: tuple or None) Both tuples are of the form (app, migration)
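A self-contained sketch of the same parsing idea, assuming migration lines of the form "Applying app.0001_initial... OK" (the actual regex used by the class above is not shown in this row, so the pattern below is illustrative only):

import re

MIGRATION_RE = re.compile(
    r"Applying (?P<app_name>\w+)\.(?P<migration_name>\w+)\.{3}\s*(?P<success>OK)?")

def parse_migrate_output(output):
    # Lines before the first failure are recorded as successes; the failing
    # migration (no trailing "OK") stops the scan.
    succeeded, failed = [], None
    for line in output.splitlines():
        match = MIGRATION_RE.search(line.strip())
        if match:
            migration = (match.group("app_name"), match.group("migration_name"))
            if match.group("success") == "OK":
                succeeded.append(migration)
            else:
                failed = migration
                break
    return succeeded, failed

sample = "Applying auth.0001_initial... OK\nApplying blog.0002_post..."
print(parse_migrate_output(sample))  # ([('auth', '0001_initial')], ('blog', '0002_post'))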
def get_directory(db, user_id, api_dirname, content):
    """
    Return the names of all files/directories that are direct children of
    api_dirname.

    If content is False, return a bare model containing just a
    database-style name.
    """
    db_dirname = from_api_dirname(api_dirname)
    if not _dir_exists(db, user_id, db_dirname):
        raise NoSuchDirectory(api_dirname)
    if content:
        files = files_in_directory(
            db,
            user_id,
            db_dirname,
        )
        subdirectories = directories_in_directory(
            db,
            user_id,
            db_dirname,
        )
    else:
        files, subdirectories = None, None
    # TODO: Consider using namedtuples for these return values.
    return {
        'name': db_dirname,
        'files': files,
        'subdirs': subdirectories,
    }
Return the names of all files/directories that are direct children of api_dirname. If content is False, return a bare model containing just a database-style name.
def _zforce(self, R, z, phi=0, t=0):
    """
    NAME:
       _zforce
    PURPOSE:
       evaluate the vertical force at (R,z, phi)
    INPUT:
       R - Cylindrical Galactocentric radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       vertical force at (R,z, phi)
    HISTORY:
       2016-12-26 - Written - Bovy (UofT/CCA)
    """
    r = numpy.sqrt(R**2. + z**2.)
    out = self._scf.zforce(R, z, phi=phi, use_physical=False)
    for a, s, ds, H, dH in zip(self._Sigma_amp, self._Sigma, self._dSigmadR,
                               self._Hz, self._dHzdz):
        out -= 4.*numpy.pi*a*(ds(r)*H(z)*z/r + s(r)*dH(z))
    return out
NAME: _zforce PURPOSE: evaluate the vertical force at (R,z, phi) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: vertical force at (R,z, phi) HISTORY: 2016-12-26 - Written - Bovy (UofT/CCA)
def __filterItems(self, terms, autoExpand=True, caseSensitive=False, parent=None, level=0): """ Filters the items in this tree based on the inputed keywords. :param terms | {<int> column: [<str> term, ..], ..} autoExpand | <bool> caseSensitive | <bool> parent | <QtGui.QTreeWidgetItem> || None :return <bool> | found """ # make sure we're within our mex level for filtering max_level = self.maximumFilterLevel() if max_level != None and level > max_level: return False found = False items = [] # collect the items to process if not parent: for i in range(self.topLevelItemCount()): items.append(self.topLevelItem(i)) else: for c in range(parent.childCount()): items.append(parent.child(c)) for item in items: # if there is no filter keywords, then all items will be visible if not any(terms.values()): found = True item.setHidden(False) if autoExpand: if item.parent() is not None or self.rootIsDecorated(): item.setExpanded(False) self.__filterItems(terms, autoExpand, caseSensitive, item, level + 1) else: # match all generic keywords generic = terms.get(-1, []) generic_found = dict((key, False) for key in generic) # match all specific keywords col_found = dict((col, False) for col in terms if col != -1) # look for any matches for any column mfound = False for column in self._filteredColumns: # determine the check text based on case sensitivity if caseSensitive: check = nativestring(item.text(column)) else: check = nativestring(item.text(column)).lower() specific = terms.get(column, []) # make sure all the keywords match for key in generic + specific: if not key: continue # look for exact keywords elif key.startswith('"') and key.endswith('"'): if key.strip('"') == check: if key in generic: generic_found[key] = True if key in specific: col_found[column] = True # look for ending keywords elif key.startswith('*') and not key.endswith('*'): if check.endswith(key.strip('*')): if key in generic: generic_found[key] = True if key in specific: col_found[column] = True # look for starting keywords elif key.endswith('*') and not key.startswith('*'): if check.startswith(key.strip('*')): if key in generic: generic_found[key] = True if key in specific: col_found[column] = True # look for generic keywords elif key.strip('*') in check: if key in generic: generic_found[key] = True if key in specific: col_found[column] = True mfound = all(col_found.values()) and \ all(generic_found.values()) if mfound: break # if this item is not found, then check all children if not mfound and (autoExpand or item.isExpanded()): mfound = self.__filterItems(terms, autoExpand, caseSensitive, item, level + 1) item.setHidden(not mfound) if mfound: found = True if mfound and autoExpand and item.childCount(): item.setExpanded(True) return found
Filters the items in this tree based on the input keywords. :param terms | {<int> column: [<str> term, ..], ..} autoExpand | <bool> caseSensitive | <bool> parent | <QtGui.QTreeWidgetItem> || None :return <bool> | found
def organismsKEGG(): """ Lists all organisms present in the KEGG database. :returns: a dataframe containing one organism per row. """ organisms=urlopen("http://rest.kegg.jp/list/organism").read() organisms=organisms.split("\n") #for o in organisms: # print o # sys.stdout.flush() organisms=[ s.split("\t") for s in organisms ] organisms=pd.DataFrame(organisms) return organisms
Lists all organisms present in the KEGG database. :returns: a dataframe containing one organism per row.
def sequential_connect(self): """ Sequential connect is designed to return a connection to the Rendezvous Server but it does so in a way that the local port ranges (both for the server and used for subsequent hole punching) are allocated sequentially and predictably. This is because Delta+1 type NATs only preserve the delta value when the source ports increase by one. """ # Connect to rendezvous server. try: mappings = sequential_bind(self.mapping_no + 1, self.interface) con = self.server_connect(mappings[0]["sock"]) except Exception as e: log.debug(e) log.debug("this err") return None # First mapping is used to talk to server. mappings.remove(mappings[0]) # Receive port mapping. msg = "SOURCE TCP %s" % (str(mappings[0]["source"])) con.send_line(msg) reply = con.recv_line(timeout=2) remote_port = self.parse_remote_port(reply) if not remote_port: return None # Generate port predictions. predictions = "" if self.nat_type != "random": mappings = self.predict_mappings(mappings) for mapping in mappings: predictions += str(mapping["remote"]) + " " predictions = predictions.rstrip() else: predictions = "1337" return [con, mappings, predictions]
Sequential connect is designed to return a connection to the Rendezvous Server but it does so in a way that the local port ranges (both for the server and used for subsequent hole punching) are allocated sequentially and predictably. This is because Delta+1 type NATs only preserve the delta value when the source ports increase by one.
def request(self, url, *, method='GET', headers=None, data=None, result_callback=None): """Perform request. :param str url: request URL. :param str method: request method. :param dict headers: request headers. :param object data: request data. :param object -> object result_callback: result callback. :rtype: dict :raise: APIError """ url = self._make_full_url(url) self._log.debug('Performing %s request to %s', method, url) return self._request(url, method=method, headers=headers, data=data, result_callback=result_callback)
Perform request. :param str url: request URL. :param str method: request method. :param dict headers: request headers. :param object data: request data. :param object -> object result_callback: result callback. :rtype: dict :raise: APIError
def _list_itemstrs(list_, **kwargs):
    """
    Create a string representation for each item in a list.
    """
    items = list(list_)
    kwargs['_return_info'] = True
    _tups = [repr2(item, **kwargs) for item in items]
    itemstrs = [t[0] for t in _tups]
    max_height = max([t[1]['max_height'] for t in _tups]) if _tups else 0
    _leaf_info = {
        'max_height': max_height + 1,
    }
    sort = kwargs.get('sort', None)
    if sort is None:
        # Force orderings on sets.
        sort = isinstance(list_, (set, frozenset))
    if sort:
        itemstrs = _sort_itemstrs(items, itemstrs)
    return itemstrs, _leaf_info
Create a string representation for each item in a list.
def design_expparams_field(self, guess, field, cost_scale_k=1.0, disp=False, maxiter=None, maxfun=None, store_guess=False, grad_h=None, cost_mult=False ): r""" Designs a new experiment by varying a single field of a shape ``(1,)`` record array and minimizing the objective function .. math:: O(\vec{e}) = r(\vec{e}) + k \$(\vec{e}), where :math:`r` is the Bayes risk as calculated by the updater, and where :math:`\$` is the cost function specified by the model. Here, :math:`k` is a parameter specified to relate the units of the risk and the cost. See :ref:`expdesign` for more details. :param guess: Either a record array with a single guess, or a callable function that generates guesses. :type guess: Instance of :class:`~Heuristic`, `callable` or :class:`~numpy.ndarray` of ``dtype`` :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` :param str field: The name of the ``expparams`` field to be optimized. All other fields of ``guess`` will be held constant. :param float cost_scale_k: A scale parameter :math:`k` relating the Bayes risk to the experiment cost. See :ref:`expdesign`. :param bool disp: If `True`, the optimization will print additional information as it proceeds. :param int maxiter: For those optimization algorithms which support it (currently, only CG and NELDER_MEAD), limits the number of optimization iterations used for each guess. :param int maxfun: For those optimization algorithms which support it (currently, only NCG and NELDER_MEAD), limits the number of objective calls that can be made. :param bool store_guess: If ``True``, will compare the outcome of this guess to previous guesses and then either store the optimization of this experiment, or the previous best-known experiment design. :param float grad_h: Step size to use in estimating gradients. Used only if ``opt_algo`` is NCG. :return: An array representing the best experiment design found so far for the current experiment. """ # Define some short names for commonly used properties. up = self._updater m = up.model # Generate a new guess or use a guess provided, depending on the # type of the guess argument. if isinstance(guess, Heuristic): raise NotImplementedError("Not yet implemented.") elif callable(guess): # Generate a new guess by calling the guess function provided. ep = guess( idx_exp=len(up.data_record), mean=up.est_mean(), cov=up.est_covariance_mtx() ) else: # Make a copy of the guess that we can manipulate, but otherwise # use it as-is. ep = np.copy(guess) # Define an objective function that wraps a vector of scalars into # an appropriate record array. if (cost_mult==False): def objective_function(x): """ Used internally by design_expparams_field. If you see this, something probably went wrong. """ ep[field] = x return up.bayes_risk(ep) + cost_scale_k * m.experiment_cost(ep) else: def objective_function(x): """ Used internally by design_expparams_field. If you see this, something probably went wrong. """ ep[field] = x return up.bayes_risk(ep)* m.experiment_cost(ep)**cost_scale_k # Some optimizers require gradients of the objective function. # Here, we create a FiniteDifference object to compute that for # us. d_dx_objective = FiniteDifference(objective_function, ep[field].size) # Allocate a variable to hold the local optimum value found. # This way, if an optimization algorithm doesn't support returning # the value as well as the location, we can find it manually. f_opt = None # Here's the core, where we break out and call the various optimization # routines provided by SciPy. 
if self._opt_algo == OptimizationAlgorithms.NULL: # This optimization algorithm does nothing locally, but only # exists to leverage the store_guess functionality below. x_opt = guess[0][field] elif self._opt_algo == OptimizationAlgorithms.CG: # Prepare any additional options. opt_options = {} if maxiter is not None: opt_options['maxiter'] = maxiter # Actually call fmin_cg, gathering all outputs we can. x_opt, f_opt, func_calls, grad_calls, warnflag = opt.fmin_cg( objective_function, guess[0][field], disp=disp, full_output=True, **opt_options ) elif self._opt_algo == OptimizationAlgorithms.NCG: # Prepare any additional options. opt_options = {} if maxfun is not None: opt_options['maxfun'] = maxfun if grad_h is not None: opt_options['epsilon'] = grad_h # Actually call fmin_tnc, gathering all outputs we can. # We use fmin_tnc in preference to fmin_ncg, as they implement the # same algorithm, but fmin_tnc seems better behaved with respect # to very flat gradient regions, due to respecting maxfun. # By contrast, fmin_ncg can get stuck in an infinite loop in # versions of SciPy < 0.11. # # Note that in some versions of SciPy, there was a bug in # fmin_ncg and fmin_tnc that can propagate outward if the gradient # is too flat. We catch it here and return the initial guess in that # case, since by hypothesis, it's too flat to make much difference # anyway. try: x_opt, f_opt, func_calls, grad_calls, h_calls, warnflag = opt.fmin_tnc( objective_function, guess[0][field], fprime=None, bounds=None, approx_grad=True, disp=disp, full_output=True, **opt_options ) except TypeError: warnings.warn( "Gradient function too flat for NCG.", RuntimeWarning) x_opt = guess[0][field] f_opt = None elif self._opt_algo == OptimizationAlgorithms.NELDER_MEAD: opt_options = {} if maxfun is not None: opt_options['maxfun'] = maxfun if maxiter is not None: opt_options['maxiter'] = maxiter x_opt, f_opt, iters, func_calls, warnflag = opt.fmin( objective_function, guess[0][field], disp=disp, full_output=True, **opt_options ) # Optionally compare the result to previous guesses. if store_guess: # Possibly compute the objective function value at the local optimum # if we don't already know it. if f_opt is None: guess_qual = objective_function(x_opt) # Compare to the known best cost so far. if self.__best_cost is None or (self.__best_cost > f_opt): # No known best yet, or we're better than the previous best, # so record this guess. ep[field] = x_opt self.__best_cost = f_opt self.__best_ep = ep else: ep = self.__best_ep # Guess is bad, return current best guess else: # We aren't using guess recording, so just pack the local optima # into ep for returning. ep[field] = x_opt # In any case, return the optimized guess. return ep
r""" Designs a new experiment by varying a single field of a shape ``(1,)`` record array and minimizing the objective function .. math:: O(\vec{e}) = r(\vec{e}) + k \$(\vec{e}), where :math:`r` is the Bayes risk as calculated by the updater, and where :math:`\$` is the cost function specified by the model. Here, :math:`k` is a parameter specified to relate the units of the risk and the cost. See :ref:`expdesign` for more details. :param guess: Either a record array with a single guess, or a callable function that generates guesses. :type guess: Instance of :class:`~Heuristic`, `callable` or :class:`~numpy.ndarray` of ``dtype`` :attr:`~qinfer.abstract_model.Simulatable.expparams_dtype` :param str field: The name of the ``expparams`` field to be optimized. All other fields of ``guess`` will be held constant. :param float cost_scale_k: A scale parameter :math:`k` relating the Bayes risk to the experiment cost. See :ref:`expdesign`. :param bool disp: If `True`, the optimization will print additional information as it proceeds. :param int maxiter: For those optimization algorithms which support it (currently, only CG and NELDER_MEAD), limits the number of optimization iterations used for each guess. :param int maxfun: For those optimization algorithms which support it (currently, only NCG and NELDER_MEAD), limits the number of objective calls that can be made. :param bool store_guess: If ``True``, will compare the outcome of this guess to previous guesses and then either store the optimization of this experiment, or the previous best-known experiment design. :param float grad_h: Step size to use in estimating gradients. Used only if ``opt_algo`` is NCG. :return: An array representing the best experiment design found so far for the current experiment.
def remove(self, child):
    """Remove a child element."""
    for i in range(len(self)):
        if self[i] == child:
            del self[i]
            # Stop after removing the match: continuing to index past the
            # now-shorter sequence would raise IndexError.
            return
Remove a child element.
def netconf_state_schemas_schema_identifier(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring") schemas = ET.SubElement(netconf_state, "schemas") schema = ET.SubElement(schemas, "schema") version_key = ET.SubElement(schema, "version") version_key.text = kwargs.pop('version') format_key = ET.SubElement(schema, "format") format_key.text = kwargs.pop('format') identifier = ET.SubElement(schema, "identifier") identifier.text = kwargs.pop('identifier') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def get_interface_detail_output_interface_ip_mtu(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') ip_mtu = ET.SubElement(interface, "ip-mtu") ip_mtu.text = kwargs.pop('ip_mtu') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _layer_norm_new_params(input_shape, rng, epsilon=1e-6):  # pylint: disable=invalid-name
    """Helper: create layer norm parameters."""
    del rng, epsilon
    features = input_shape[-1]
    scale = np.ones(features)
    bias = np.zeros(features)
    return (scale, bias)
Helper: create layer norm parameters.
def get_permissions(self):
    """
    Permissions of the user.

    Returns:
        List of Permission objects.
    """
    user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role
    return user_role.get_permissions()
Permissions of the user. Returns: List of Permission objects.
def feed_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the feed"""
    feed = Feed.objects.get(id=id)
    feed.fetch(force=True)
    # Translate the format string first, then interpolate, so the message
    # can actually be looked up in the translation catalog.
    msg = _("Fetched tweets for %s") % feed.name
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
Forcibly fetch tweets for the feed
def repeat_str(state):
    """Convert internal API repeat state to string."""
    if state == const.REPEAT_STATE_OFF:
        return 'Off'
    if state == const.REPEAT_STATE_TRACK:
        return 'Track'
    if state == const.REPEAT_STATE_ALL:
        return 'All'
    return 'Unsupported'
Convert internal API repeat state to string.
def is_threat(self, result=None, harmless_age=None, threat_score=None, threat_type=None):
    """
    Check if IP is a threat

    :param result: httpBL results; if None, then results from last check_ip() used (optional)
    :param harmless_age: harmless age for check if httpBL age is older (optional)
    :param threat_score: threat score for check if httpBL threat is lower (optional)
    :param threat_type: threat type, if not equal httpBL score type, then return False (optional)
    :return: True or False
    """
    harmless_age = harmless_age if harmless_age is not None else settings.CACHED_HTTPBL_HARMLESS_AGE
    threat_score = threat_score if threat_score is not None else settings.CACHED_HTTPBL_THREAT_SCORE
    threat_type = threat_type if threat_type is not None else -1
    result = result if result is not None else self._last_result

    threat = False
    if result is not None:
        if result['age'] < harmless_age and result['threat'] > threat_score:
            threat = True
        if threat_type > -1:
            if result['type'] & threat_type:
                threat = True
            else:
                threat = False
    return threat
Check if IP is a threat :param result: httpBL results; if None, then results from last check_ip() used (optional) :param harmless_age: harmless age for check if httpBL age is older (optional) :param threat_score: threat score for check if httpBL threat is lower (optional) :param threat_type: threat type, if not equal httpBL score type, then return False (optional) :return: True or False
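The threshold logic is easy to exercise in isolation. A minimal sketch with made-up thresholds and a hand-built result dict (the values below are illustrative, not the library's defaults):

def is_threat(result, harmless_age=14, threat_score=25, threat_type=-1):
    # Flag the IP if it was seen recently enough and its score is high enough,
    # then optionally require a specific threat-type bit to be set.
    threat = result["age"] < harmless_age and result["threat"] > threat_score
    if threat_type > -1:
        threat = bool(result["type"] & threat_type)
    return threat

print(is_threat({"age": 3, "threat": 40, "type": 4}))                 # True
print(is_threat({"age": 3, "threat": 40, "type": 4}, threat_type=2))  # False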
def quit(self, message=None):
    """Quit from the server."""
    if message is None:
        message = 'Quit'
    if self.connected:
        self.send('QUIT', params=[message])
Quit from the server.
def start_dashboard(self): """Start the dashboard.""" stdout_file, stderr_file = self.new_log_files("dashboard", True) self._webui_url, process_info = ray.services.start_dashboard( self.redis_address, self._temp_dir, stdout_file=stdout_file, stderr_file=stderr_file, redis_password=self._ray_params.redis_password) assert ray_constants.PROCESS_TYPE_DASHBOARD not in self.all_processes if process_info is not None: self.all_processes[ray_constants.PROCESS_TYPE_DASHBOARD] = [ process_info ] redis_client = self.create_redis_client() redis_client.hmset("webui", {"url": self._webui_url})
Start the dashboard.
def extract(self, package_name):
    """Extract given package."""
    for cmd in ['rpm2cpio', 'cpio']:
        if not Cmd.which(cmd):
            message = '{0} command not found. Install {0}.'.format(cmd)
            raise InstallError(message)
    pattern = '{0}*{1}.rpm'.format(package_name, self.arch)
    rpm_files = Cmd.find('.', pattern)
    if not rpm_files:
        raise InstallError('RPM file not found.')
    cmd = 'rpm2cpio {0} | cpio -idmv'.format(rpm_files[0])
    Cmd.sh_e(cmd)
Extract given package.
def audio_inputs(self):
    """
    :return: A list of audio input :class:`Ports`.
    """
    return self.client.get_ports(is_audio=True, is_physical=True, is_input=True)
:return: A list of audio input :class:`Ports`.
def _GenerateDescription(self): """Generates description into a MANIFEST file in the archive.""" manifest = { "description": self.description, "processed_files": len(self.processed_files), "archived_files": len(self.archived_files), "ignored_files": len(self.ignored_files), "failed_files": len(self.failed_files) } if self.ignored_files: manifest["ignored_files_list"] = [ _ClientPathToString(cp, prefix="aff4:") for cp in self.ignored_files ] if self.failed_files: manifest["failed_files_list"] = [ _ClientPathToString(cp, prefix="aff4:") for cp in self.failed_files ] manifest_fd = io.BytesIO() if self.total_files != len(self.archived_files): manifest_fd.write(self.FILES_SKIPPED_WARNING) manifest_fd.write(yaml.Dump(manifest).encode("utf-8")) manifest_fd.seek(0) st = os.stat_result( (0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0, 0, 0)) for chunk in self.archive_generator.WriteFromFD( manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st): yield chunk
Generates description into a MANIFEST file in the archive.
def gen_xlsx_table_info():
    '''
    Insert data into the table.
    '''
    XLSX_FILE = './database/esheet/20180811.xlsx'
    if not os.path.exists(XLSX_FILE):
        return
    RAW_LIST = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    FILTER_COLUMNS = RAW_LIST + ["A" + x for x in RAW_LIST] + \
        ["B" + x for x in RAW_LIST] + \
        ["C" + x for x in RAW_LIST] + \
        ["D" + x for x in RAW_LIST]
    tvalue = []
    file_d = open_workbook(XLSX_FILE)
    # Get the first worksheet object.
    x = 0
    for sheet_ranges in load_workbook(filename=XLSX_FILE):
        select_sheet = file_d.sheets()[x]
        # Get the total number of rows.
        rows_num = select_sheet.nrows + 1
        for row_num in range(6, rows_num):
            tvalue = []
            for xr in FILTER_COLUMNS:
                row1_val = sheet_ranges[xr + '1'].value
                row4_val = sheet_ranges[xr + '{0}'.format(row_num)].value
                if row1_val:
                    if row4_val is None:
                        row4_val = ''
                    tvalue.append(row4_val)
            insert_tab(tvalue)
        x = x + 1
    print("Successfully inserted " + str(rows_num - 6) + " rows of data")
Insert data into the table.
def _parse_response(self, xmlstr, response_cls, service, binding, outstanding_certs=None, **kwargs): """ Deal with a Response :param xmlstr: The response as a xml string :param response_cls: What type of response it is :param binding: What type of binding this message came through. :param outstanding_certs: Certificates that belongs to me that the IdP may have used to encrypt a response/assertion/.. :param kwargs: Extra key word arguments :return: None if the reply doesn't contain a valid SAML Response, otherwise the response. """ if self.config.accepted_time_diff: kwargs["timeslack"] = self.config.accepted_time_diff if "asynchop" not in kwargs: if binding in [BINDING_SOAP, BINDING_PAOS]: kwargs["asynchop"] = False else: kwargs["asynchop"] = True response = None if not xmlstr: return response if "return_addrs" not in kwargs: bindings = { BINDING_SOAP, BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, } if binding in bindings: # expected return address kwargs["return_addrs"] = self.config.endpoint( service, binding=binding, context=self.entity_type) try: response = response_cls(self.sec, **kwargs) except Exception as exc: logger.info("%s", exc) raise xmlstr = self.unravel(xmlstr, binding, response_cls.msgtype) if not xmlstr: # Not a valid reponse return None try: response_is_signed = False # Record the response signature requirement. require_response_signature = response.require_response_signature # Force the requirement that the response be signed in order to # force signature checking to happen so that we can know whether # or not the response is signed. The attribute on the response class # is reset to the recorded value in the finally clause below. response.require_response_signature = True response = response.loads(xmlstr, False, origxml=xmlstr) except SigverError as err: if require_response_signature: logger.error("Signature Error: %s", err) raise else: # The response is not signed but a signature is not required # so reset the attribute on the response class to the recorded # value and attempt to consume the unpacked XML again. response.require_response_signature = require_response_signature response = response.loads(xmlstr, False, origxml=xmlstr) except UnsolicitedResponse: logger.error("Unsolicited response") raise except Exception as err: if "not well-formed" in "%s" % err: logger.error("Not well-formed XML") raise else: response_is_signed = True finally: response.require_response_signature = require_response_signature logger.debug("XMLSTR: %s", xmlstr) if not response: return response keys = None if outstanding_certs: try: cert = outstanding_certs[response.in_response_to] except KeyError: keys = None else: if not isinstance(cert, list): cert = [cert] keys = [] for _cert in cert: keys.append(_cert["key"]) try: assertions_are_signed = False # Record the assertions signature requirement. require_signature = response.require_signature # Force the requirement that the assertions be signed in order to # force signature checking to happen so that we can know whether # or not the assertions are signed. The attribute on the response class # is reset to the recorded value in the finally clause below. response.require_signature = True # Verify that the assertion is syntactically correct and the # signature on the assertion is correct if present. 
response = response.verify(keys) except SignatureError as err: if require_signature: logger.error("Signature Error: %s", err) raise else: response.require_signature = require_signature response = response.verify(keys) else: assertions_are_signed = True finally: response.require_signature = require_signature # If so configured enforce that either the response is signed # or the assertions within it are signed. if response.require_signature_or_response_signature: if not response_is_signed and not assertions_are_signed: msg = "Neither the response nor the assertions are signed" logger.error(msg) raise SigverError(msg) return response
Deal with a Response :param xmlstr: The response as a xml string :param response_cls: What type of response it is :param binding: What type of binding this message came through. :param outstanding_certs: Certificates that belongs to me that the IdP may have used to encrypt a response/assertion/.. :param kwargs: Extra key word arguments :return: None if the reply doesn't contain a valid SAML Response, otherwise the response.
def field_to_dict(fields):
    """
    Build a dictionary of dependencies for each field related to "root"

    fields = ["toto", "toto__tata", "titi__tutu"]
    dico = {
        "toto": {
            EMPTY_DICT,
            "tata": EMPTY_DICT
        },
        "titi": {
            "tutu": EMPTY_DICT
        }
    }

    EMPTY_DICT is useful because we don't lose fields; without it
    dico["toto"] would only contain "tata"

    inspired from django.db.models.sql.add_select_related
    """
    field_dict = {}
    for field in fields:
        d_tmp = field_dict
        for part in field.split(LOOKUP_SEP)[:-1]:
            d_tmp = d_tmp.setdefault(part, {})
        d_tmp = d_tmp.setdefault(
            field.split(LOOKUP_SEP)[-1], deepcopy(EMPTY_DICT)
        ).update(deepcopy(EMPTY_DICT))
    return field_dict
Build a dictionary of dependencies for each field related to "root" fields = ["toto", "toto__tata", "titi__tutu"] dico = { "toto": { EMPTY_DICT, "tata": EMPTY_DICT }, "titi" : { "tutu": EMPTY_DICT } } EMPTY_DICT is useful because we don't lose fields; without it dico["toto"] would only contain "tata" inspired from django.db.models.sql.add_select_related
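The nesting described above is straightforward to reproduce. A minimal, self-contained sketch of the same idea, using a plain "__" separator and an empty dict in place of EMPTY_DICT (both assumptions made for illustration):

def fields_to_tree(fields, sep="__"):
    # Build a nested dict so every prefix of a lookup path gets its own node.
    tree = {}
    for field in fields:
        node = tree
        for part in field.split(sep):
            node = node.setdefault(part, {})
    return tree

# {'toto': {'tata': {}}, 'titi': {'tutu': {}}}
print(fields_to_tree(["toto", "toto__tata", "titi__tutu"]))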
def ReadDataFile(self):
    """
    Reads the contents of the Comtrade .dat file and store them in a
    private variable.
    For accessing a specific channel data, see methods getAnalogData and
    getDigitalData.
    """
    if os.path.isfile(self.filename[0:-4] + '.dat'):
        filename = self.filename[0:-4] + '.dat'
    elif os.path.isfile(self.filename[0:-4] + '.DAT'):
        filename = self.filename[0:-4] + '.DAT'
    else:
        print "Data file not found."
        return 0
    self.filehandler = open(filename, 'rb')
    self.DatFileContent = self.filehandler.read()  # END READING .dat FILE.
    self.filehandler.close()  # Close file.
    return 1
Reads the contents of the Comtrade .dat file and store them in a private variable. For accessing a specific channel data, see methods getAnalogData and getDigitalData.
def AgregarFusion(self, nro_ing_brutos, nro_actividad, **kwargs):
    "Buyer or seller data according to the settlement being adjusted (merger)."
    self.ajuste['ajusteBase']['fusion'] = {
        'nroIngBrutos': nro_ing_brutos,
        'nroActividad': nro_actividad,
    }
    return True
Buyer or seller data according to the settlement being adjusted (merger).
def rget(self, key, replica_index=None, quiet=None): """Get an item from a replica node :param string key: The key to fetch :param int replica_index: The replica index to fetch. If this is ``None`` then this method will return once any replica responds. Use :attr:`configured_replica_count` to figure out the upper bound for this parameter. The value for this parameter must be a number between 0 and the value of :attr:`configured_replica_count`-1. :param boolean quiet: Whether to suppress errors when the key is not found This method (if `replica_index` is not supplied) functions like the :meth:`get` method that has been passed the `replica` parameter:: c.get(key, replica=True) .. seealso:: :meth:`get` :meth:`rget_multi` """ if replica_index is not None: return _Base._rgetix(self, key, replica=replica_index, quiet=quiet) else: return _Base._rget(self, key, quiet=quiet)
Get an item from a replica node :param string key: The key to fetch :param int replica_index: The replica index to fetch. If this is ``None`` then this method will return once any replica responds. Use :attr:`configured_replica_count` to figure out the upper bound for this parameter. The value for this parameter must be a number between 0 and the value of :attr:`configured_replica_count`-1. :param boolean quiet: Whether to suppress errors when the key is not found This method (if `replica_index` is not supplied) functions like the :meth:`get` method that has been passed the `replica` parameter:: c.get(key, replica=True) .. seealso:: :meth:`get` :meth:`rget_multi`
def plot_site(fignum, SiteRec, data, key): """ deprecated (used in ipmag) """ print('Site mean data: ') print(' dec inc n_lines n_planes kappa R alpha_95 comp coord') print(SiteRec['site_dec'], SiteRec['site_inc'], SiteRec['site_n_lines'], SiteRec['site_n_planes'], SiteRec['site_k'], SiteRec['site_r'], SiteRec['site_alpha95'], SiteRec['site_comp_name'], SiteRec['site_tilt_correction']) print('sample/specimen, dec, inc, n_specs/a95,| method codes ') for i in range(len(data)): print('%s: %s %s %s / %s | %s' % (data[i]['er_' + key + '_name'], data[i][key + '_dec'], data[i] [key + '_inc'], data[i][key + '_n'], data[i][key + '_alpha95'], data[i]['magic_method_codes'])) plot_slnp(fignum, SiteRec, data, key) plot = input("s[a]ve plot, [q]uit or <return> to continue: ") if plot == 'q': print("CUL8R") sys.exit() if plot == 'a': files = {} for key in list(EQ.keys()): files[key] = site + '_' + key + '.' + fmt save_plots(EQ, files)
deprecated (used in ipmag)
def _generate_struct_class(self, ns, data_type): # type: (ApiNamespace, Struct) -> None """Defines a Python class that represents a struct in Stone.""" self.emit(self._class_declaration_for_type(ns, data_type)) with self.indent(): if data_type.has_documented_type_or_fields(): self.emit('"""') if data_type.doc: self.emit_wrapped_text( self.process_doc(data_type.doc, self._docf)) if data_type.has_documented_fields(): self.emit() for field in data_type.fields: if not field.doc: continue self.emit_wrapped_text(':ivar {}: {}'.format( fmt_namespaced_var(ns.name, data_type.name, field.name), self.process_doc(field.doc, self._docf)), subsequent_prefix=' ') self.emit('"""') self.emit() self._generate_struct_class_slots(data_type) self._generate_struct_class_has_required_fields(data_type) self._generate_struct_class_init(data_type) self._generate_struct_class_properties(ns, data_type) self._generate_struct_class_custom_annotations(ns, data_type) self._generate_struct_class_repr(data_type) if data_type.has_enumerated_subtypes(): validator = 'StructTree' else: validator = 'Struct' self.emit('{0}_validator = bv.{1}({0})'.format( class_name_for_data_type(data_type), validator, )) self.emit()
Defines a Python class that represents a struct in Stone.
def to_datetime(dt, tzinfo=None, format=None): """ Convert a date or time to datetime with tzinfo """ if not dt: return dt tz = pick_timezone(tzinfo, __timezone__) if isinstance(dt, (str, unicode)): if not format: formats = DEFAULT_DATETIME_INPUT_FORMATS else: formats = list(format) d = None for fmt in formats: try: d = datetime.strptime(dt, fmt) except ValueError: continue if not d: return None d = d.replace(tzinfo=tz) else: d = datetime(getattr(dt, 'year', 1970), getattr(dt, 'month', 1), getattr(dt, 'day', 1), getattr(dt, 'hour', 0), getattr(dt, 'minute', 0), getattr(dt, 'second', 0), getattr(dt, 'microsecond', 0)) if not getattr(dt, 'tzinfo', None): d = d.replace(tzinfo=tz) else: d = d.replace(tzinfo=dt.tzinfo) return to_timezone(d, tzinfo)
Convert a date or time to datetime with tzinfo
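The string branch above simply tries a list of known formats until one parses. A minimal, self-contained sketch of that pattern (the format list and UTC default below are illustrative, not the library's actual configuration):

from datetime import datetime, timezone

def parse_datetime(value, formats=("%Y-%m-%d %H:%M:%S", "%Y-%m-%d"), tz=timezone.utc):
    # Try each known format in order; return the first successful parse,
    # attaching the requested timezone, or None if nothing matches.
    for fmt in formats:
        try:
            return datetime.strptime(value, fmt).replace(tzinfo=tz)
        except ValueError:
            continue
    return None

print(parse_datetime("2020-05-01"))  # 2020-05-01 00:00:00+00:00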
def mutual_information( self, M_c, X_L_list, X_D_list, Q, seed, n_samples=1000): """Estimate mutual information for each pair of columns on Q given the set of samples. :param Q: List of tuples where each tuple contains the two column indexes to compare :type Q: list of two-tuples of ints :param n_samples: the number of simple predictive samples to use :type n_samples: int :returns: list of list -- where each sublist is a set of MIs and Linfoots from each crosscat sample. """ get_next_seed = make_get_next_seed(seed) return iu.mutual_information( M_c, X_L_list, X_D_list, Q, get_next_seed, n_samples)
Estimate mutual information for each pair of columns on Q given the set of samples. :param Q: List of tuples where each tuple contains the two column indexes to compare :type Q: list of two-tuples of ints :param n_samples: the number of simple predictive samples to use :type n_samples: int :returns: list of list -- where each sublist is a set of MIs and Linfoots from each crosscat sample.
def interrupt(self): """ Invoked on a write operation into the IR of the RendererDevice. """ if(self.device.read(9) & 0x01): self.handle_request() self.device.clear_IR()
Invoked on a write operation into the IR of the RendererDevice.
def pop(self, key, *args):
    'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
    try:
        return self.maps[0].pop(key, *args)
    except KeyError:
        raise KeyError('Key not found in the first mapping: {!r}'.format(key))
Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].
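For context, this is the usual ChainMap-style behaviour: only the first (writable) mapping is mutated. A minimal sketch:

class SimpleChain:
    def __init__(self, *maps):
        self.maps = list(maps) or [{}]

    def pop(self, key, *args):
        # Only the first mapping is affected by pop(); later maps are read-only here.
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

chain = SimpleChain({'a': 1}, {'b': 2})
print(chain.pop('a'))         # 1
print(chain.pop('b', None))   # None -- 'b' lives in a later map, so it is not popped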
def calculate_lyapunov(self):
    """
    Return the current Lyapunov Characteristic Number (LCN).
    Note that you need to call init_megno() before the start of the simulation.
    To get a timescale (the Lyapunov timescale), take the inverse of this quantity.
    """
    if self._calculate_megno == 0:
        raise RuntimeError("Lyapunov Characteristic Number cannot be calculated. Make sure to call init_megno() after adding all particles but before integrating the simulation.")
    clibrebound.reb_tools_calculate_lyapunov.restype = c_double
    return clibrebound.reb_tools_calculate_lyapunov(byref(self))
Return the current Lyapunov Characteristic Number (LCN). Note that you need to call init_megno() before the start of the simulation. To get a timescale (the Lyapunov timescale), take the inverse of this quantity.
def _get_nsamps_samples_n(res): """ Helper function for calculating the number of samples Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. Returns ------- nsamps: int The total number of samples samples_n: array Number of live points at a given iteration """ try: # Check if the number of live points explicitly changes. samples_n = res.samples_n nsamps = len(samples_n) except: # If the number of live points is constant, compute `samples_n`. niter = res.niter nlive = res.nlive nsamps = len(res.logvol) if nsamps == niter: samples_n = np.ones(niter, dtype='int') * nlive elif nsamps == (niter + nlive): samples_n = np.append(np.ones(niter, dtype='int') * nlive, np.arange(1, nlive + 1)[::-1]) else: raise ValueError("Final number of samples differs from number of " "iterations and number of live points.") return nsamps, samples_n
Helper function for calculating the number of samples Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. Returns ------- nsamps: int The total number of samples samples_n: array Number of live points at a given iteration
def is_gene_list(bed_file):
    """Check if the file is only a list of genes, not a BED
    """
    with utils.open_gzipsafe(bed_file) as in_handle:
        for line in in_handle:
            if not line.startswith("#"):
                if len(line.split()) == 1:
                    return True
                else:
                    return False
Check if the file is only a list of genes, not a BED
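A stripped-down version of the same check, using plain open() instead of the project's gzip-aware utils.open_gzipsafe helper (that substitution is an assumption for the sketch):

def is_gene_list(path):
    # A "gene list" has exactly one whitespace-separated token on its first
    # non-comment line; a real BED line has at least chrom/start/end columns.
    with open(path) as handle:
        for line in handle:
            if not line.startswith("#"):
                return len(line.split()) == 1
    return False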
def is_decorated_with_property(node): """Check if the function is decorated as a property. :param node: The node to check. :type node: astroid.nodes.FunctionDef :returns: True if the function is a property, False otherwise. :rtype: bool """ if not node.decorators: return False for decorator in node.decorators.nodes: if not isinstance(decorator, astroid.Name): continue try: if _is_property_decorator(decorator): return True except astroid.InferenceError: pass return False
Check if the function is decorated as a property. :param node: The node to check. :type node: astroid.nodes.FunctionDef :returns: True if the function is a property, False otherwise. :rtype: bool
def cpustats(): ''' Return the CPU stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX .. versionchanged:: 2018.3.0 Added support for OpenBSD CLI Example: .. code-block:: bash salt '*' status.cpustats ''' def linux_cpustats(): ''' linux specific implementation of cpustats ''' ret = {} try: with salt.utils.files.fopen('/proc/stat', 'r') as fp_: stats = salt.utils.stringutils.to_unicode(fp_.read()) except IOError: pass else: for line in stats.splitlines(): if not line: continue comps = line.split() if comps[0] == 'cpu': ret[comps[0]] = {'idle': _number(comps[4]), 'iowait': _number(comps[5]), 'irq': _number(comps[6]), 'nice': _number(comps[2]), 'softirq': _number(comps[7]), 'steal': _number(comps[8]), 'system': _number(comps[3]), 'user': _number(comps[1])} elif comps[0] == 'intr': ret[comps[0]] = {'total': _number(comps[1]), 'irqs': [_number(x) for x in comps[2:]]} elif comps[0] == 'softirq': ret[comps[0]] = {'total': _number(comps[1]), 'softirqs': [_number(x) for x in comps[2:]]} else: ret[comps[0]] = _number(comps[1]) return ret def freebsd_cpustats(): ''' freebsd specific implementation of cpustats ''' vmstat = __salt__['cmd.run']('vmstat -P').splitlines() vm0 = vmstat[0].split() cpu0loc = vm0.index('cpu0') vm1 = vmstat[1].split() usloc = vm1.index('us') vm2 = vmstat[2].split() cpuctr = 0 ret = {} for cpu in vm0[cpu0loc:]: ret[cpu] = {'us': _number(vm2[usloc + 3 * cpuctr]), 'sy': _number(vm2[usloc + 1 + 3 * cpuctr]), 'id': _number(vm2[usloc + 2 + 3 * cpuctr]), } cpuctr += 1 return ret def sunos_cpustats(): ''' sunos specific implementation of cpustats ''' mpstat = __salt__['cmd.run']('mpstat 1 2').splitlines() fields = mpstat[0].split() ret = {} for cpu in mpstat: if cpu.startswith('CPU'): continue cpu = cpu.split() ret[_number(cpu[0])] = {} for i in range(1, len(fields)-1): ret[_number(cpu[0])][fields[i]] = _number(cpu[i]) return ret def aix_cpustats(): ''' AIX specific implementation of cpustats ''' ret = {} ret['mpstat'] = [] procn = None fields = [] for line in __salt__['cmd.run']('mpstat -a').splitlines(): if not line: continue procn = len(ret['mpstat']) if line.startswith('System'): comps = line.split(':') ret['mpstat'].append({}) ret['mpstat'][procn]['system'] = {} cpu_comps = comps[1].split() for i in range(0, len(cpu_comps)): cpu_vals = cpu_comps[i].split('=') ret['mpstat'][procn]['system'][cpu_vals[0]] = cpu_vals[1] if line.startswith('cpu'): fields = line.split() continue if fields: cpustat = line.split() ret[_number(cpustat[0])] = {} for i in range(1, len(fields)-1): ret[_number(cpustat[0])][fields[i]] = _number(cpustat[i]) return ret def openbsd_cpustats(): ''' openbsd specific implementation of cpustats ''' systat = __salt__['cmd.run']('systat -s 2 -B cpu').splitlines() fields = systat[3].split() ret = {} for cpu in systat[4:]: cpu_line = cpu.split() cpu_idx = cpu_line[0] ret[cpu_idx] = {} for idx, field in enumerate(fields[1:]): ret[cpu_idx][field] = cpu_line[idx+1] return ret # dict that return a function that does the right thing per platform get_version = { 'Linux': linux_cpustats, 'FreeBSD': freebsd_cpustats, 'OpenBSD': openbsd_cpustats, 'SunOS': sunos_cpustats, 'AIX': aix_cpustats, } errmsg = 'This method is unsupported on the current operating system!' return get_version.get(__grains__['kernel'], lambda: errmsg)()
Return the CPU stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX .. versionchanged:: 2018.3.0 Added support for OpenBSD CLI Example: .. code-block:: bash salt '*' status.cpustats
def zDDEInit(self): """Initiates link with OpticStudio DDE server""" self.pyver = _get_python_version() # do this only one time or when there is no channel if _PyZDDE.liveCh==0: try: _PyZDDE.server = _dde.CreateServer() _PyZDDE.server.Create("ZCLIENT") except Exception as err: _sys.stderr.write("{}: DDE server may be in use!".format(str(err))) return -1 # Try to create individual conversations for each ZEMAX application. self.conversation = _dde.CreateConversation(_PyZDDE.server) try: self.conversation.ConnectTo(self.appName, " ") except Exception as err: _sys.stderr.write("{}.\nOpticStudio UI may not be running!\n".format(str(err))) # should close the DDE server if it exist self.zDDEClose() return -1 else: _PyZDDE.liveCh += 1 self.connection = True return 0
Initiates link with OpticStudio DDE server
def get(self, key): """ Get an entry from the cache by key. @raise KeyError: if the given key is not present in the cache. @raise CacheFault: (a L{KeyError} subclass) if the given key is present in the cache, but the value it points to is gone. """ o = self.data[key]() if o is None: # On CPython, the weakref callback will always(?) run before any # other code has a chance to observe that the weakref is broken; # and since the callback removes the item from the dict, this # branch of code should never run. However, on PyPy (and possibly # other Python implementations), the weakref callback does not run # immediately, thus we may be able to observe this intermediate # state. Should this occur, we remove the dict item ourselves, # and raise CacheFault (which is a KeyError subclass). del self.data[key] raise CacheFault( "FinalizingCache has %r but its value is no more." % (key,)) log.msg(interface=iaxiom.IStatEvent, stat_cache_hits=1, key=key) return o
Get an entry from the cache by key. @raise KeyError: if the given key is not present in the cache. @raise CacheFault: (a L{KeyError} subclass) if the given key is present in the cache, but the value it points to is gone.
def dict_to_env(d, pathsep=os.pathsep):
    '''
    Convert a python dict to a dict containing valid environment variable values.

    :param d: Dict to convert to an env dict
    :param pathsep: Path separator used to join lists(default os.pathsep)
    '''
    out_env = {}
    for k, v in d.iteritems():
        if isinstance(v, list):
            out_env[k] = pathsep.join(v)
        elif isinstance(v, string_types):
            out_env[k] = v
        else:
            raise TypeError('{} not a valid env var type'.format(type(v)))
    return out_env
Convert a python dict to a dict containing valid environment variable values. :param d: Dict to convert to an env dict :param pathsep: Path separator used to join lists(default os.pathsep)
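The original targets Python 2 (it uses iteritems and six-style string_types); a minimal Python 3 sketch of the same conversion:

import os

def dict_to_env(d, pathsep=os.pathsep):
    # Lists become pathsep-joined strings; strings pass through unchanged.
    out_env = {}
    for key, value in d.items():
        if isinstance(value, list):
            out_env[key] = pathsep.join(value)
        elif isinstance(value, str):
            out_env[key] = value
        else:
            raise TypeError('{} not a valid env var type'.format(type(value)))
    return out_env

print(dict_to_env({'PATH': ['/usr/bin', '/usr/local/bin'], 'LANG': 'C'}))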
def grid(script, size=1.0, x_segments=1, y_segments=1, center=False, color=None): """2D square/plane/grid created on XY plane x_segments # Number of segments in the X direction. y_segments # Number of segments in the Y direction. center="false" # If true square will be centered on origin; otherwise it is place in the positive XY quadrant. """ size = util.make_list(size, 2) filter_xml = ''.join([ ' <filter name="Grid Generator">\n', ' <Param name="absScaleX" ', 'value="{}" '.format(size[0]), 'description="x scale" ', 'type="RichFloat" ', '/>\n', ' <Param name="absScaleY" ', 'value="{}" '.format(size[1]), 'description="y scale" ', 'type="RichFloat" ', '/>\n', ' <Param name="numVertX" ', 'value="{:d}" '.format(x_segments + 1), 'description="num vertices on x" ', 'type="RichInt" ', '/>\n', ' <Param name="numVertY" ', 'value="{:d}" '.format(y_segments + 1), 'description="num vertices on y" ', 'type="RichInt" ', '/>\n', ' <Param name="center" ', 'value="false" ', 'description="centered on origin" ', 'type="RichBool" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) if isinstance(script, FilterScript): script.add_layer('Grid Generator', change_layer=True) """This is to work around a bug in MeshLab whereby the Grid Generator does not create zero values for z. Ref bug #458: https://sourceforge.net/p/meshlab/bugs/458/""" transform.vert_function(script, z_func='rint(z)') """Note that the "center" parameter in the mlx script does not actually center the square, not sure what it is doing. Instead this is set to false, which places the plane in the -X,+Y quadrant, and it is translated to the appropriate position after creation. """ if center: transform.translate(script, value=[size[0]/2, -size[1]/2, 0]) else: transform.translate(script, value=[size[0], 0, 0]) if color is not None: vert_color.function(script, color=color) return None
2D square/plane/grid created on XY plane x_segments # Number of segments in the X direction. y_segments # Number of segments in the Y direction. center="false" # If true square will be centered on origin; otherwise it is place in the positive XY quadrant.
def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False): """ m1 and m2, if given, are in seconds. returns [# EPSCs, # IPSCs] """ abf.setsweep(sweep) if m1 is None: m1=0 else: m1=m1*abf.pointsPerSec if m2 is None: m2=-1 else: m2=m2*abf.pointsPerSec # obtain X and Y Yorig=abf.sweepY[int(m1):int(m2)] X=np.arange(len(Yorig))/abf.pointsPerSec Ylpf=linear_gaussian(Yorig,sigmaSize=abf.pointsPerMs*300,forwardOnly=False) Yflat=Yorig-Ylpf EPSCs,IPSCs=[],[] if plotToo: plt.figure(figsize=(15,6)) ax1=plt.subplot(211) plt.title("%s sweep %d"%(abf.ID,sweep)) plt.grid() plt.plot(X,Yorig,alpha=.5) plt.plot(X,Ylpf,'k',alpha=.5,lw=2) plt.margins(0,.2) plt.subplot(212,sharex=ax1) plt.title("gaussian baseline subtraction") plt.grid() plt.plot(X,Yflat,alpha=.5) plt.axhline(0,color='k',lw=2,alpha=.5) plt.tight_layout() plt.show() # TEST GAUSS hist, bin_edges = np.histogram(Yflat, density=True, bins=200) peakPa=bin_edges[np.where(hist==max(hist))[0][0]+1] if plotToo: plt.figure() plt.grid() plt.plot(bin_edges[1:],hist,alpha=.5) plt.axvline(0,color='k') plt.axvline(peakPa,color='r',ls='--',lw=2,alpha=.5) plt.semilogy() plt.title("sweep data distribution") plt.ylabel("power") plt.xlabel("pA deviation") plt.show() return peakPa
m1 and m2, if given, are in seconds. returns [# EPSCs, # IPSCs]
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing the Rock Ridge Relocated Directory record.

    Parameters:
     None.
    Returns:
     String containing the Rock Ridge record.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('RE record not yet initialized!')
    return b'RE' + struct.pack('=BB', RRRERecord.length(), SU_ENTRY_VERSION)
Generate a string representing the Rock Ridge Relocated Directory record. Parameters: None. Returns: String containing the Rock Ridge record.
def FaultFromException(ex, inheader, tb=None, actor=None): '''Return a Fault object created from a Python exception. <SOAP-ENV:Fault> <faultcode>SOAP-ENV:Server</faultcode> <faultstring>Processing Failure</faultstring> <detail> <ZSI:FaultDetail> <ZSI:string></ZSI:string> <ZSI:trace></ZSI:trace> </ZSI:FaultDetail> </detail> </SOAP-ENV:Fault> ''' tracetext = None if tb: try: lines = '\n'.join(['%s:%d:%s' % (name, line, func) for name, line, func, text in traceback.extract_tb(tb)]) except: pass else: tracetext = lines exceptionName = "" try: exceptionName = ":".join([ex.__module__, ex.__class__.__name__]) except: pass elt = ZSIFaultDetail(string=exceptionName + "\n" + str(ex), trace=tracetext) if inheader: detail, headerdetail = None, elt else: detail, headerdetail = elt, None return Fault(Fault.Server, 'Processing Failure', actor, detail, headerdetail)
Return a Fault object created from a Python exception. <SOAP-ENV:Fault> <faultcode>SOAP-ENV:Server</faultcode> <faultstring>Processing Failure</faultstring> <detail> <ZSI:FaultDetail> <ZSI:string></ZSI:string> <ZSI:trace></ZSI:trace> </ZSI:FaultDetail> </detail> </SOAP-ENV:Fault>
def getoutputerror(cmd): """Return (standard output, standard error) of executing cmd in a shell. Accepts the same arguments as os.system(). Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- stdout : str stderr : str """ out_err = process_handler(cmd, lambda p: p.communicate()) if out_err is None: return '', '' out, err = out_err return py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
Return (standard output, standard error) of executing cmd in a shell. Accepts the same arguments as os.system(). Parameters ---------- cmd : str A command to be executed in the system shell. Returns ------- stdout : str stderr : str
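The same behaviour can be sketched directly with the standard library; the original routes through IPython's process_handler, so this simplified stand-in is only an illustration of the contract:

import subprocess

def getoutputerror(cmd):
    # Run cmd through the shell and capture both streams as text.
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    return proc.stdout, proc.stderr

out, err = getoutputerror("echo hello && echo oops 1>&2")
print(out.strip(), err.strip())  # hello oops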
def setMotorShutdown(self, value, device=DEFAULT_DEVICE_ID, message=True): """ Set the motor shutdown on error status stored on the hardware device. :Parameters: value : `int` An integer indicating the effect on the motors when an error occurs. A `1` will cause the cause the motors to stop on an error and a `0` will ignore errors keeping the motors running. :Keywords: device : `int` The device is the integer number of the hardware devices ID and is only used with the Pololu Protocol. Defaults to the hardware's default value. message : `bool` If set to `True` a text message will be returned, if set to `False` the integer stored in the Qik will be returned. :Returns: Text message indicating the status of the shutdown error. A text message or an int. See the `message` parameter above. :Exceptions: * `SerialException` IO error indicating there was a problem reading from the serial connection. """ return self._setMotorShutdown(value, device, message)
Set the motor shutdown on error status stored on the hardware device.

:Parameters:
  value : `int`
    An integer indicating the effect on the motors when an error occurs.
    A `1` will cause the motors to stop on an error and a `0` will ignore
    errors keeping the motors running.

:Keywords:
  device : `int`
    The device is the integer number of the hardware device's ID and is only
    used with the Pololu Protocol. Defaults to the hardware's default value.
  message : `bool`
    If set to `True` a text message will be returned, if set to `False` the
    integer stored in the Qik will be returned.

:Returns:
  Text message indicating the status of the shutdown error. A text message
  or an int. See the `message` parameter above.

:Exceptions:
  * `SerialException`
    IO error indicating there was a problem reading from the serial
    connection.
def xpathNextChild(self, cur): """Traversal function for the "child" direction The child axis contains the children of the context node in document order. """ if cur is None: cur__o = None else: cur__o = cur._o ret = libxml2mod.xmlXPathNextChild(self._o, cur__o) if ret is None:raise xpathError('xmlXPathNextChild() failed') __tmp = xmlNode(_obj=ret) return __tmp
Traversal function for the "child" direction The child axis contains the children of the context node in document order.
def to_json(value, pretty=False): """ Serializes the given value to JSON. :param value: the value to serialize :param pretty: whether or not to format the output in a more human-readable way; if not specified, defaults to ``False`` :type pretty: bool :rtype: str """ options = { 'sort_keys': False, 'cls': BasicJSONEncoder, } if pretty: options['indent'] = 2 options['separators'] = (',', ': ') return json.dumps(value, **options)
Serializes the given value to JSON. :param value: the value to serialize :param pretty: whether or not to format the output in a more human-readable way; if not specified, defaults to ``False`` :type pretty: bool :rtype: str
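As a rough illustration of what the pretty flag changes, the calls below mirror the json.dumps options assembled above; the custom BasicJSONEncoder is omitted here because it is not defined in this snippet.

    import json

    value = {'b': 1, 'a': [1, 2]}

    # pretty=False path: plain dump, keys left in insertion order.
    compact = json.dumps(value, sort_keys=False)

    # pretty=True path: two-space indent and explicit separators.
    pretty = json.dumps(value, sort_keys=False, indent=2, separators=(',', ': '))

    print(compact)
    print(pretty)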
def t_text(self, t): r':\s*<text>' t.lexer.text_start = t.lexer.lexpos - len('<text>') t.lexer.begin('text')
r':\s*<text>
def count(self, request, *args, **kwargs):
    """
    To get a count of events - run **GET** against */api/events/count/*
    as an authenticated user.

    The endpoint supports the same filters as the events list.

    Response example:

    .. code-block:: javascript

        {"count": 12321}
    """
    self.queryset = self.filter_queryset(self.get_queryset())
    return response.Response({'count': self.queryset.count()}, status=status.HTTP_200_OK)
To get a count of events - run **GET** against */api/events/count/* as an authenticated user. The endpoint supports the same filters as the events list. Response example: .. code-block:: javascript {"count": 12321}
def conn_gcp(cred, crid): """Establish connection to GCP.""" gcp_auth_type = cred.get('gcp_auth_type', "S") if gcp_auth_type == "A": # Application Auth gcp_crd_ia = CONFIG_DIR + ".gcp_libcloud_a_auth." + cred['gcp_proj_id'] gcp_crd = {'user_id': cred['gcp_client_id'], 'key': cred['gcp_client_sec'], 'project': cred['gcp_proj_id'], 'auth_type': "IA", 'credential_file': gcp_crd_ia} else: # Service Account Auth gcp_pem = CONFIG_DIR + cred['gcp_pem_file'] gcp_crd_sa = CONFIG_DIR + ".gcp_libcloud_s_auth." + cred['gcp_proj_id'] gcp_crd = {'user_id': cred['gcp_svc_acct_email'], 'key': gcp_pem, 'project': cred['gcp_proj_id'], 'credential_file': gcp_crd_sa} driver = get_driver(Provider.GCE) try: gcp_obj = driver(**gcp_crd) except SSLError as e: abort_err("\r SSL Error with GCP: {}".format(e)) except (InvalidCredsError, ValueError) as e: abort_err("\r Error with GCP Credentials: {}".format(e)) return {crid: gcp_obj}
Establish connection to GCP.
def create_symlinks(d): """Create new symbolic links in output directory.""" data = loadJson(d) outDir = prepare_output(d) unseen = data["pages"].keys() while len(unseen) > 0: latest = work = unseen[0] while work in unseen: unseen.remove(work) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] print("Latest page: %s" % (latest)) order = [] work = latest while work in data["pages"]: order.extend(data["pages"][work]["images"].values()) if "prev" in data["pages"][work]: work = data["pages"][work]["prev"] else: work = None order.reverse() for i, img in enumerate(order): os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
Create new symbolic links in output directory.
def list_slot_differences_slot( self, resource_group_name, name, slot, target_slot, preserve_vnet, custom_headers=None, raw=False, **operation_config): """Get the difference in configuration settings between two web app slots. Get the difference in configuration settings between two web app slots. :param resource_group_name: Name of the resource group to which the resource belongs. :type resource_group_name: str :param name: Name of the app. :type name: str :param slot: Name of the source slot. If a slot is not specified, the production slot is used as the source slot. :type slot: str :param target_slot: Destination deployment slot during swap operation. :type target_slot: str :param preserve_vnet: <code>true</code> to preserve Virtual Network to the slot during swap; otherwise, <code>false</code>. :type preserve_vnet: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of SlotDifference :rtype: ~azure.mgmt.web.models.SlotDifferencePaged[~azure.mgmt.web.models.SlotDifference] :raises: :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>` """ slot_swap_entity = models.CsmSlotEntity(target_slot=target_slot, preserve_vnet=preserve_vnet) def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list_slot_differences_slot.metadata['url'] path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'), 'name': self._serialize.url("name", name, 'str'), 'slot': self._serialize.url("slot", slot, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(slot_swap_entity, 'CsmSlotEntity') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.DefaultErrorResponseException(self._deserialize, response) return response # Deserialize response deserialized = models.SlotDifferencePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.SlotDifferencePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
Get the difference in configuration settings between two web app slots. Get the difference in configuration settings between two web app slots. :param resource_group_name: Name of the resource group to which the resource belongs. :type resource_group_name: str :param name: Name of the app. :type name: str :param slot: Name of the source slot. If a slot is not specified, the production slot is used as the source slot. :type slot: str :param target_slot: Destination deployment slot during swap operation. :type target_slot: str :param preserve_vnet: <code>true</code> to preserve Virtual Network to the slot during swap; otherwise, <code>false</code>. :type preserve_vnet: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of SlotDifference :rtype: ~azure.mgmt.web.models.SlotDifferencePaged[~azure.mgmt.web.models.SlotDifference] :raises: :class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
def _suppress_distutils_logs():
    """Hack to hide noise generated by `setup.py develop`.

    There isn't a good way to suppress them now, so let's monkey-patch.
    See https://bugs.python.org/issue25392.
    """
    f = distutils.log.Log._log

    def _log(log, level, msg, args):
        if level >= distutils.log.ERROR:
            f(log, level, msg, args)

    distutils.log.Log._log = _log
    yield
    distutils.log.Log._log = f
Hack to hide noise generated by `setup.py develop`. There isn't a good way to suppress them now, so let's monkey-patch. See https://bugs.python.org/issue25392.
def close(self): """Close the current session, if still open.""" if self.session is not None: self.session.cookies.clear() self.session.close() self.session = None
Close the current session, if still open.
def disable_ipython(self): """ Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook. """ from IPython.core.getipython import get_ipython self.ipython_enabled = False ip = get_ipython() formatter = ip.display_formatter.formatters['text/html'] formatter.type_printers.pop(Visualization, None) formatter.type_printers.pop(VisualizationLocal, None)
Disable plotting in the iPython notebook. After disabling, lightning plots will be produced in your lightning server, but will not appear in the notebook.
def api(self): """ Get or create an Api() instance using django settings. """ api = getattr(self, '_api', None) if api is None: self._api = mailjet.Api() return self._api
Get or create an Api() instance using django settings.
def estimate_from_ssr(histograms, readout_povm, channel_ops, settings): """ Estimate a density matrix from single shot histograms obtained by measuring bitstrings in the Z-eigenbasis after application of given channel operators. :param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`. :param DiagognalPOVM readout_povm: The POVM corresponding to the readout plus classifier. :param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s. :param TomographySettings settings: The solver and estimation settings. :return: The generated StateTomography object. :rtype: StateTomography """ nqc = len(channel_ops[0].dims[0]) pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc pi_basis = readout_povm.pi_basis if not histograms.shape[1] == pi_basis.dim: # pragma no coverage raise ValueError("Currently tomography is only implemented for two-level systems.") # prepare the log-likelihood function parameters, see documentation n_kj = np.asarray(histograms) c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops) rho_m = cvxpy.Variable(pauli_basis.dim) p_jk = c_jk_m * rho_m obj = -n_kj.ravel() * cvxpy.log(p_jk) p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order # Default constraints: # MLE must describe valid probability distribution # i.e., for each k, p_jk must sum to one and be element-wise non-negative: # 1. \sum_j p_jk == 1 for all k # 2. p_jk >= 0 for all j, k # where p_jk = \sum_m c_jk_m rho_m constraints = [ p_jk >= 0, np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1, ] rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm) for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0) if POSITIVE in settings.constraints: if tomography._SDP_SOLVER.is_functional(): constraints.append(rho_m_real_imag >> 0) else: # pragma no coverage _log.warning("No convex solver capable of semi-definite problems installed.\n" "Dropping the positivity constraint on the density matrix.") if UNIT_TRACE in settings.constraints: # this assumes that the first element of the Pauli basis is always proportional to # the identity constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real) prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) _log.info("Starting convex solver") prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs) if prob.status != cvxpy.OPTIMAL: # pragma no coverage _log.warning("Problem did not converge to optimal solution. " "Solver settings: {}".format(settings.solver_kwargs)) return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings)
Estimate a density matrix from single shot histograms obtained by measuring bitstrings in the Z-eigenbasis after application of given channel operators.

:param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`.
:param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s.
:param TomographySettings settings: The solver and estimation settings.
:return: The generated StateTomography object.
:rtype: StateTomography
def get_resource_area(self, area_id, enterprise_name=None, organization_name=None): """GetResourceArea. [Preview API] :param str area_id: :param str enterprise_name: :param str organization_name: :rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>` """ route_values = {} if area_id is not None: route_values['areaId'] = self._serialize.url('area_id', area_id, 'str') query_parameters = {} if enterprise_name is not None: query_parameters['enterpriseName'] = self._serialize.query('enterprise_name', enterprise_name, 'str') if organization_name is not None: query_parameters['organizationName'] = self._serialize.query('organization_name', organization_name, 'str') response = self._send(http_method='GET', location_id='e81700f7-3be2-46de-8624-2eb35882fcaa', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ResourceAreaInfo', response)
GetResourceArea. [Preview API] :param str area_id: :param str enterprise_name: :param str organization_name: :rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
def _setBitOn(x, bitNum): """Set bit 'bitNum' to True. Args: * x (int): The value before. * bitNum (int): The bit number that should be set to True. Returns: The value after setting the bit. This is an integer. For example: For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec). """ _checkInt(x, minvalue=0, description='input value') _checkInt(bitNum, minvalue=0, description='bitnumber') return x | (1 << bitNum)
Set bit 'bitNum' to True. Args: * x (int): The value before. * bitNum (int): The bit number that should be set to True. Returns: The value after setting the bit. This is an integer. For example: For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
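The worked example from the docstring, written as a self-contained check of the underlying bit arithmetic (x | (1 << bitNum)):

    x, bit_num = 4, 0            # 4 == 0b100
    result = x | (1 << bit_num)  # the same expression the function returns
    assert result == 5           # 0b101, matching the docstring example

    # Setting a bit that is already set leaves the value unchanged.
    assert 5 | (1 << 0) == 5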
def post(self): '''This handles POST requests. Saves the changes made by the user on the frontend back to the current checkplot-list.json file. ''' # if self.readonly is set, then don't accept any changes # return immediately with a 400 if self.readonly: msg = "checkplotserver is in readonly mode. no updates allowed." resultdict = {'status':'error', 'message':msg, 'readonly':self.readonly, 'result':None} self.write(resultdict) raise tornado.web.Finish() objectid = self.get_argument('objectid', None) changes = self.get_argument('changes',None) # if either of the above is invalid, return nothing if not objectid or not changes: msg = ("could not parse changes to the checkplot filelist " "from the frontend") LOGGER.error(msg) resultdict = {'status':'error', 'message':msg, 'readonly':self.readonly, 'result':None} self.write(resultdict) raise tornado.web.Finish() # otherwise, update the checkplot list JSON objectid = xhtml_escape(objectid) changes = json.loads(changes) # update the dictionary if 'reviewed' not in self.currentproject: self.currentproject['reviewed'] = {} self.currentproject['reviewed'][objectid] = changes # update the JSON file with open(self.cplistfile,'w') as outfd: json.dump(self.currentproject, outfd) # return status msg = ("wrote all changes to the checkplot filelist " "from the frontend for object: %s" % objectid) LOGGER.info(msg) resultdict = {'status':'success', 'message':msg, 'readonly':self.readonly, 'result':{'objectid':objectid, 'changes':changes}} self.write(resultdict) self.finish()
This handles POST requests. Saves the changes made by the user on the frontend back to the current checkplot-list.json file.
def handle_webhook_event(self, environ, url, params): """ Webhook handler - each handler for the webhook event takes an initial pattern argument for matching the URL requested. Here we match the URL to the pattern for each webhook handler, and bail out if it returns a response. """ for handler in self.events["webhook"]: urlpattern = handler.event.args["urlpattern"] if not urlpattern or match(urlpattern, url): response = handler(self, environ, url, params) if response: return response
Webhook handler - each handler for the webhook event takes an initial pattern argument for matching the URL requested. Here we match the URL to the pattern for each webhook handler, and bail out if it returns a response.
def _prepare_executor(self, data, executor): """Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str) """ logger.debug(__("Preparing executor for Data with id {}", data.id)) # Both of these imports are here only to get the packages' paths. import resolwe.flow.executors as executor_package exec_dir = os.path.dirname(inspect.getsourcefile(executor_package)) dest_dir = self._get_per_data_dir('RUNTIME_DIR', data.location.subpath) dest_package_dir = os.path.join(dest_dir, 'executors') shutil.copytree(exec_dir, dest_package_dir) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('RUNTIME_DIR_MODE', 0o755) os.chmod(dest_dir, dir_mode) class_name = executor.rpartition('.executors.')[-1] return '.{}'.format(class_name), dest_dir
Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str)
def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]: """ Parses an indexing expression (for a single tensor). Checks uniqueness of names, checks usage of '...' (allowed only once) Returns set of all used identifiers and a list of axis groups """ identifiers = set() composite_axes = [] if '.' in expression: if '...' not in expression: raise EinopsError('Expression may contain dots only inside ellipsis (...)') if str.count(expression, '...') != 1 or str.count(expression, '.') != 3: raise EinopsError('Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ') expression = expression.replace('...', _ellipsis) bracket_group = None def add_axis_name(x): if x is not None: if x in identifiers: raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x)) identifiers.add(x) if bracket_group is None: composite_axes.append([x]) else: bracket_group.append(x) current_identifier = None for char in expression: if char in '() ' + _ellipsis: add_axis_name(current_identifier) current_identifier = None if char == _ellipsis: if bracket_group is not None: raise EinopsError("Ellipsis can't be used inside the composite axis (inside brackets)") composite_axes.append(_ellipsis) identifiers.add(_ellipsis) elif char == '(': if bracket_group is not None: raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)") bracket_group = [] elif char == ')': if bracket_group is None: raise EinopsError('Brackets are not balanced') composite_axes.append(bracket_group) bracket_group = None elif '0' <= char <= '9': if current_identifier is None: raise EinopsError("Axis name can't start with a digit") current_identifier += char elif 'a' <= char <= 'z': if current_identifier is None: current_identifier = char else: current_identifier += char else: if 'A' <= char <= 'Z': raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char)) raise EinopsError("Unknown character '{}'".format(char)) if bracket_group is not None: raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression)) add_axis_name(current_identifier) return identifiers, composite_axes
Parses an indexing expression (for a single tensor). Checks uniqueness of names, checks usage of '...' (allowed only once) Returns set of all used identifiers and a list of axis groups
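Tracing the parser above on a typical einops-style pattern gives the identifiers and axis groups below; this sketch assumes parse_expression is in scope (it is internal to the module shown).

    identifiers, composite_axes = parse_expression('b (h w) c')
    assert identifiers == {'b', 'h', 'w', 'c'}
    assert composite_axes == [['b'], ['h', 'w'], ['c']]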
def apply_mutation(module_path, operator, occurrence): """Apply a specific mutation to a file on disk. Args: module_path: The path to the module to mutate. operator: The `operator` instance to use. occurrence: The occurrence of the operator to apply. Returns: A `(unmutated-code, mutated-code)` tuple to the with-block. If there was no mutation performed, the `mutated-code` is `None`. """ module_ast = get_ast(module_path, python_version=operator.python_version) original_code = module_ast.get_code() visitor = MutationVisitor(occurrence, operator) mutated_ast = visitor.walk(module_ast) mutated_code = None if visitor.mutation_applied: mutated_code = mutated_ast.get_code() with module_path.open(mode='wt', encoding='utf-8') as handle: handle.write(mutated_code) handle.flush() return original_code, mutated_code
Apply a specific mutation to a file on disk. Args: module_path: The path to the module to mutate. operator: The `operator` instance to use. occurrence: The occurrence of the operator to apply. Returns: A `(unmutated-code, mutated-code)` tuple to the with-block. If there was no mutation performed, the `mutated-code` is `None`.
def parse_rcfile(rcfile): """ Parses rcfile Invalid lines are ignored with a warning """ def parse_bool(value): """Parse boolean string""" value = value.lower() if value in ['yes', 'true']: return True elif value in ['no', 'false']: return False else: raise ValueError('''Can't parse {}'''.format(value)) valid_keys = { 'size': int, 'comment': str, 'template': str, 'reverse': parse_bool, 'opposite': parse_bool, 'position': int, } params = {} for linenum, line in enumerate(rcfile): line = line.strip() if not line or line[0] == '#': continue pos = line.find(' ') key = line[:pos] value = line[pos:].strip() if key in valid_keys.keys(): try: params[key] = valid_keys[key](value) except ValueError: print('Ignoring line {} from rcfile'.format(linenum + 1), file=sys.stderr) return params
Parses rcfile Invalid lines are ignored with a warning
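Since the function only iterates over lines, any iterable of strings works as the rcfile argument; a sketch of the expected parsing, assuming parse_rcfile is in scope:

    lines = [
        '# comment lines and blank lines are skipped',
        'size 12',
        'reverse yes',
        'comment generated for testing',
    ]
    params = parse_rcfile(lines)
    assert params == {'size': 12, 'reverse': True, 'comment': 'generated for testing'}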
def dropped(self, param, event): """Adds the dropped parameter *param* into the protocol list. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.dropped>` """ if event.source() == self or isinstance(param, AddLabel): index = self.indexAt(event.pos()) self.model().insertRows(index.row(),1) if event.source() == self: self.model().setData(index, param) else: self.hintRequested.emit('Select Components in view to modify -- click to toggle membership of component in auto-parameter') row = index.row() # select rows doesn't work with -ve indexes if row == -1: row = self.model().rowCount() - 1 self.selectRow(row) self.parameterChanged.emit(self.model().selection(index)) self.dragActive.emit(False)
Adds the dropped parameter *param* into the protocol list. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.dropped>`
def call(self, command, *args): """ Sends call to the function, whose name is specified by command. Used by Script invocations and normalizes calls using standard Redis arguments to use the expected redis-py arguments. """ command = self._normalize_command_name(command) args = self._normalize_command_args(command, *args) redis_function = getattr(self, command) value = redis_function(*args) return self._normalize_command_response(command, value)
Sends call to the function, whose name is specified by command. Used by Script invocations and normalizes calls using standard Redis arguments to use the expected redis-py arguments.
def get_assessment_offered_query_session_for_bank(self, bank_id): """Gets the ``OsidSession`` associated with the assessment offered query service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentOfferedQuerySession) - an ``AssessmentOfferedQuerySession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_offered_query()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_offered_query()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_assessment_offered_query(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssessmentOfferedQuerySession(bank_id, runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment offered query service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentOfferedQuerySession) - an ``AssessmentOfferedQuerySession`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_offered_query()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_offered_query()`` and ``supports_visible_federation()`` are ``true``.*
def get_vocab(self, vocab_name, **kwargs): """ Returns data stream of an rdf vocabulary args: vocab_name: the name or uri of the vocab to return """ vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs) filepaths = list(set([os.path.join(self.cache_dir, vocab_dict['filename']), os.path.join(self.vocab_dir, vocab_dict['filename'])])) for path in filepaths: if os.path.exists(path): with open(path, 'rb') as f_obj: vocab_dict.update({"name": vocab_name, "data": f_obj.read(), "modified": os.path.getmtime(path)}) return vocab_dict download_locs = make_list(vocab_dict.get('download',[])) for loc in download_locs: loc_web = urllib.request.urlopen(loc) # loc_file_date = date_parse(loc_web.info()['Last-Modified']) urllib.request.urlretrieve(loc, filepaths[0]) with open(filepaths[0], 'rb') as f_obj: vocab_dict.update({"name": vocab_name, "data": f_obj.read(), "modified": os.path.getmtime(filepaths[0])}) return vocab_dict
Returns data stream of an rdf vocabulary args: vocab_name: the name or uri of the vocab to return
def produce(self, **kwargs):
    """Call the primitive function, or the predict method of the primitive.

    The given keyword arguments will be passed directly to the primitive,
    if it is a simple function, or to the `produce` method of the primitive
    instance specified in the JSON annotation, if it is a class.

    If any of the arguments expected by the produce method had been given
    during the MLBlock initialization, they will be passed as well.

    Returns:
        The output of the call to the primitive function or primitive
        produce method.
    """
    produce_args = self._produce_params.copy()
    produce_args.update(kwargs)

    if self._class:
        return getattr(self.instance, self.produce_method)(**produce_args)

    produce_args.update(self._hyperparameters)
    return self.primitive(**produce_args)
Call the primitive function, or the predict method of the primitive. The given keyword arguments will be passed directly to the primitive, if it is a simple function, or to the `produce` method of the primitive instance specified in the JSON annotation, if it is a class. If any of the arguments expected by the produce method had been given during the MLBlock initialization, they will be passed as well. Returns: The output of the call to the primitive function or primitive produce method.
def parse_sdk_name(name): """Returns a filename or URL for the SDK name. The name can be a version string, a remote URL or a local path. """ # Version like x.y.z, return as-is. if all(part.isdigit() for part in name.split('.', 2)): return DOWNLOAD_URL % name # A network location. url = urlparse.urlparse(name) if url.scheme: return name # Else must be a filename. return os.path.abspath(name)
Returns a filename or URL for the SDK name. The name can be a version string, a remote URL or a local path.
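A hedged sketch of the three branches; DOWNLOAD_URL is a module-level template not shown here, so the first case is only illustrated schematically.

    parse_sdk_name('1.9.40')                        # all-digit parts -> DOWNLOAD_URL % '1.9.40'
    parse_sdk_name('https://example.com/sdk.zip')   # has a URL scheme -> returned unchanged
    parse_sdk_name('downloads/sdk.zip')             # anything else -> absolute local path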
def set_properties(self, properties, recursive=True): """ Adds new or modifies existing properties listed in properties properties - is a dict which contains the property names and values to set. Property values can be a list or tuple to set multiple values for a key. recursive - on folders property attachment is recursive by default. It is possible to force recursive behavior. """ if not properties: return return self._accessor.set_properties(self, properties, recursive)
Adds new or modifies existing properties listed in properties properties - is a dict which contains the property names and values to set. Property values can be a list or tuple to set multiple values for a key. recursive - on folders property attachment is recursive by default. It is possible to force recursive behavior.
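A hypothetical usage sketch for the method above; 'folder' stands for an already-constructed repository path object exposing set_properties, and the property names are invented for illustration.

    folder.set_properties({'build.number': '42', 'tags': ['nightly', 'linux']})
    folder.set_properties({'qa.status': 'passed'}, recursive=False)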
def galleries(self): """Instance depends on the API version: * 2018-06-01: :class:`GalleriesOperations<azure.mgmt.compute.v2018_06_01.operations.GalleriesOperations>` * 2019-03-01: :class:`GalleriesOperations<azure.mgmt.compute.v2019_03_01.operations.GalleriesOperations>` """ api_version = self._get_api_version('galleries') if api_version == '2018-06-01': from .v2018_06_01.operations import GalleriesOperations as OperationClass elif api_version == '2019-03-01': from .v2019_03_01.operations import GalleriesOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2018-06-01: :class:`GalleriesOperations<azure.mgmt.compute.v2018_06_01.operations.GalleriesOperations>` * 2019-03-01: :class:`GalleriesOperations<azure.mgmt.compute.v2019_03_01.operations.GalleriesOperations>`
def qteBindKeyWidget(self, keysequence, macroName: str, widgetObj: QtGui.QWidget): """ Bind ``macroName`` to ``widgetObj`` and associate it with ``keysequence``. This method does not affect the key bindings of other applets and/or widgets and can be used to individualise the key bindings inside every applet instance and every widget inside that instance. Even multiple instances of the same applet type (eg. multiple text buffers) can all have individual key bindings. The ``keysequence`` can be specified either as a string (eg '<ctrl>+x <ctrl>+f'), or a list of tuples containing the constants from the ``QtCore.Qt`` name space (eg. [(ControlModifier, Key_X), (ControlModifier, Key_F)]), or as a ``QtmacsKeysequence`` object. |Args| * ``keysequence`` (**str**, **list** of **tuples**, **QtmacsKeysequence**): key sequence to activate ``macroName`` for specified ``widgetSignature``. * ``macroName`` (**str**): the macro to associated with ``keysequence``. * ``widgetObj`` (**QWidget**): determines which widgets signature to use. |Returns| * **bool**: whether or not at least one widget was successfully bound. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. * **QtmacsKeysequenceError** if the provided ``keysequence`` could not be parsed. * **QtmacsOtherError** if ``widgetObj`` was not added with ``qteAddWidget``. """ # Convert the key sequence into a QtmacsKeysequence object, or # raise an QtmacsKeysequenceError if the conversion is # impossible. keysequence = QtmacsKeysequence(keysequence) # Check type of input arguments. if not hasattr(widgetObj, '_qteAdmin'): msg = '<widgetObj> was probably not added with <qteAddWidget>' msg += ' method because it lacks the <_qteAdmin> attribute.' raise QtmacsOtherError(msg) # Verify that Qtmacs knows a macro named 'macroName'. if not self.qteIsMacroRegistered(macroName): msg = ('Cannot bind key to unknown macro <b>{}</b>.' .format(macroName)) self.qteLogger.error(msg, stack_info=True) return False # Associate 'keysequence' with 'macroName' for 'widgetObj'. try: widgetObj._qteAdmin.keyMap.qteInsertKey(keysequence, macroName) except AttributeError: msg = 'Received an invalid macro object.' self.qteLogger.error(msg, stack_info=True) return False return True
Bind ``macroName`` to ``widgetObj`` and associate it with ``keysequence``. This method does not affect the key bindings of other applets and/or widgets and can be used to individualise the key bindings inside every applet instance and every widget inside that instance. Even multiple instances of the same applet type (eg. multiple text buffers) can all have individual key bindings. The ``keysequence`` can be specified either as a string (eg '<ctrl>+x <ctrl>+f'), or a list of tuples containing the constants from the ``QtCore.Qt`` name space (eg. [(ControlModifier, Key_X), (ControlModifier, Key_F)]), or as a ``QtmacsKeysequence`` object. |Args| * ``keysequence`` (**str**, **list** of **tuples**, **QtmacsKeysequence**): key sequence to activate ``macroName`` for specified ``widgetSignature``. * ``macroName`` (**str**): the macro to associated with ``keysequence``. * ``widgetObj`` (**QWidget**): determines which widgets signature to use. |Returns| * **bool**: whether or not at least one widget was successfully bound. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type. * **QtmacsKeysequenceError** if the provided ``keysequence`` could not be parsed. * **QtmacsOtherError** if ``widgetObj`` was not added with ``qteAddWidget``.
def randomColor( self ): """ Generates a random color. :return <QColor> """ r = random.randint(120, 180) g = random.randint(120, 180) b = random.randint(120, 180) return QColor(r, g, b)
Generates a random color. :return <QColor>
def log_likelihood(self,x, K_extra=1): """ Estimate the log likelihood with samples from the model. Draw k_extra components which were not populated by the current model in order to create a truncated approximate mixture model. """ x = np.asarray(x) ks = self._get_occupied() K = len(ks) K_total = K + K_extra # Sample observation distributions given current labels obs_distns = [] for k in range(K): o = copy.deepcopy(self.obs_distn) o.resample(data=self._get_data_withlabel(k)) obs_distns.append(o) # Sample extra observation distributions from prior for k in range(K_extra): o = copy.deepcopy(self.obs_distn) o.resample() obs_distns.append(o) # Sample a set of weights weights = Categorical(alpha_0=self.alpha_0, K=K_total, weights=None) assert len(self.labels_list) == 1 weights.resample(data=self.labels_list[0].z) # Now compute the log likelihood vals = np.empty((x.shape[0],K_total)) for k in range(K_total): vals[:,k] = obs_distns[k].log_likelihood(x) vals += weights.log_likelihood(np.arange(K_total)) assert not np.isnan(vals).any() return logsumexp(vals,axis=1).sum()
Estimate the log likelihood with samples from the model. Draw k_extra components which were not populated by the current model in order to create a truncated approximate mixture model.
def DbUnExportServer(self, argin): """ Mark all devices belonging to a specified device server process as non exported :param argin: Device server name (executable/instance) :type: tango.DevString :return: :rtype: tango.DevVoid """ self._log.debug("In DbUnExportServer()") self.db.unexport_server(argin)
Mark all devices belonging to a specified device server process as non exported :param argin: Device server name (executable/instance) :type: tango.DevString :return: :rtype: tango.DevVoid
def convolve_spatial3(im, psfs, mode="constant", grid_dim=None, sub_blocks=None, pad_factor=2, plan=None, return_plan=False, verbose=False): """ GPU accelerated spatial varying convolution of an 3d image with a (Gz, Gy, Gx) grid of psfs assumed to be equally spaced within the image the input image im is subdivided into (Gz, Gy,Gx) blocks, each block is convolved with the corresponding psf and linearly interpolated to give the final result The psfs can be given either in A) Stackmode psfs.shape = (Gz, Gy, Gx, Hz, Hy, Hx) then psfs[k,j,i] is the psf at the center of each block (i,j,k) in the image B) Flatmode psfs.shape = im.shape then the psfs are assumed to be definied on the gridpoints of the images itself in this case grid_dim = (Gz,Gy,Gx) has to be given as of now each image dimension has to be divisible by the grid dim, i.e. :: Nx % Gx == 0 Ny % Gy == 0 Nz % Gz == 0 GPU Memory consumption is of order 8*Nx*Ny*Nz If not enough GPU memory is available, consider using sub_blocks = (n,m,l) then the operation is carried out in a tiled fashion reducing memory consumption to 8*Nx*Ny*(1/n+2/Gx)*(1/m+2/Gy)*(1/l+2/Gz) (so there is no much use if n>Gx/2...) Example ------- im = np.zeros((64,64,64)) im[::10,::10,::10] = 1. # Stackmode psfs = np.ones((8,8,8,4,4,4)) res = convolve_spatial3(im, psfs, mode = "wrap") # Flatmode _Xs = np.meshgrid(*(np.arange(64),)*2) psfs = np.prod([np.clip(np.sin(2*np.pi*_X/8),0,1) for _X in _Xs],axis=0) res = convolve_spatial2(im, psfs, grid_dim = (16,16,16)) Parameters ---------- im: ndarray the image to convolve psfs: ndarray the (Gx,Gy) psf grid, either of shape (Gx,Gy, Hy, Hx) or im.shape mode: string, optional padding mode, either "constant" or "wrap" grid_dim: tuple, optional the (Gy,Gx) grid dimension, has to be provided if psfs.shape = im.shape sub_blocks: tuple, optional tiling mode, give e.g. (2,2) to sequentially operate on quadratnts pad_factor: int the factor of its size each block get tiled, use pad_factor=2 if the psfs are well localized, use pad_factor = 3 if not (e.g. if you experience blocking)_ plan: fft_plan, optional when given use this as the fft plan return_plan: bool, optional return (res, plan) with plan being the fft plan for further use Returns ------- res: ndarray the convolved image """ ndim = im.ndim if ndim != 3: raise ValueError("wrong dimensions of input!") if grid_dim: if psfs.shape != im.shape: raise ValueError("if grid_dim is set, then im.shape = hs.shape !") else: if not psfs.ndim == 2 * ndim: raise ValueError("wrong dimensions of psf grid! 
should be (Gz,Gy,Gx,Nz,Ny,Nx)") if grid_dim: Gs = grid_dim else: Gs = psfs.shape[:ndim] if not np.all([n % g == 0 for n, g in zip(im.shape, Gs)]): raise NotImplementedError( "shape of image has to be divisible by Gx Gy = %s shape mismatch" % (str(psfs.shape[:2]))) if sub_blocks == None: return _convolve_spatial3(im, psfs, mode=mode, pad_factor=pad_factor, plan=plan, return_plan=return_plan, grid_dim=grid_dim) else: if not np.all([g % n == 0 for n, g in zip(sub_blocks, Gs)]): raise ValueError("psf grid dimension has to be divisible corresponding n_blocks") N_sub = [n // s for n, s in zip(im.shape, sub_blocks)] Nblocks = [n // g for n, g in zip(im.shape, Gs)] Npads = [n * (s > 1) for n, s in zip(Nblocks, sub_blocks)] grid_dim_sub = [g // s + 2 * (s > 1) for g, s in zip(Gs, sub_blocks)] if grid_dim: res = np.empty(im.shape, np.float32) plan = None for i, ((im_tile, im_s_src, im_s_dest), (hs_tile, hs_s_src, hs_s_dest)) \ in enumerate(zip(tile_iterator(im, blocksize=N_sub, padsize=Npads, mode=mode, verbose=verbose), \ tile_iterator(psfs, blocksize=N_sub, padsize=Npads, mode=mode, verbose=verbose ))): if verbose: print("convolve_spatial3 ... %s\t/ %s" % (i + 1, np.prod(sub_blocks))) res_tile, plan = _convolve_spatial3(im_tile.copy(), hs_tile.copy(), mode=mode, pad_factor=pad_factor, return_plan=True, plan=plan, grid_dim=grid_dim_sub) res[im_s_src] = res_tile[im_s_dest] return res else: raise NotImplementedError("sub_blocks only implemented for Flatmode")
GPU accelerated spatial varying convolution of an 3d image with a (Gz, Gy, Gx) grid of psfs assumed to be equally spaced within the image the input image im is subdivided into (Gz, Gy,Gx) blocks, each block is convolved with the corresponding psf and linearly interpolated to give the final result The psfs can be given either in A) Stackmode psfs.shape = (Gz, Gy, Gx, Hz, Hy, Hx) then psfs[k,j,i] is the psf at the center of each block (i,j,k) in the image B) Flatmode psfs.shape = im.shape then the psfs are assumed to be definied on the gridpoints of the images itself in this case grid_dim = (Gz,Gy,Gx) has to be given as of now each image dimension has to be divisible by the grid dim, i.e. :: Nx % Gx == 0 Ny % Gy == 0 Nz % Gz == 0 GPU Memory consumption is of order 8*Nx*Ny*Nz If not enough GPU memory is available, consider using sub_blocks = (n,m,l) then the operation is carried out in a tiled fashion reducing memory consumption to 8*Nx*Ny*(1/n+2/Gx)*(1/m+2/Gy)*(1/l+2/Gz) (so there is no much use if n>Gx/2...) Example ------- im = np.zeros((64,64,64)) im[::10,::10,::10] = 1. # Stackmode psfs = np.ones((8,8,8,4,4,4)) res = convolve_spatial3(im, psfs, mode = "wrap") # Flatmode _Xs = np.meshgrid(*(np.arange(64),)*2) psfs = np.prod([np.clip(np.sin(2*np.pi*_X/8),0,1) for _X in _Xs],axis=0) res = convolve_spatial2(im, psfs, grid_dim = (16,16,16)) Parameters ---------- im: ndarray the image to convolve psfs: ndarray the (Gx,Gy) psf grid, either of shape (Gx,Gy, Hy, Hx) or im.shape mode: string, optional padding mode, either "constant" or "wrap" grid_dim: tuple, optional the (Gy,Gx) grid dimension, has to be provided if psfs.shape = im.shape sub_blocks: tuple, optional tiling mode, give e.g. (2,2) to sequentially operate on quadratnts pad_factor: int the factor of its size each block get tiled, use pad_factor=2 if the psfs are well localized, use pad_factor = 3 if not (e.g. if you experience blocking)_ plan: fft_plan, optional when given use this as the fft plan return_plan: bool, optional return (res, plan) with plan being the fft plan for further use Returns ------- res: ndarray the convolved image
def slice_bounds_by_doubling(x_initial, target_log_prob, log_slice_heights, max_doublings, step_size, seed=None, name=None): """Returns the bounds of the slice at each stage of doubling procedure. Precomputes the x coordinates of the left (L) and right (R) endpoints of the interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note that we simultaneously compute all possible doubling values for each chain, for the reason that at small-medium densities, the gains from parallel evaluation might cause a speed-up, but this will be benchmarked against the while loop implementation. Args: x_initial: `tf.Tensor` of any shape and any real dtype consumable by `target_log_prob`. The initial points. target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as `x_initial` and returning a tensor of the same shape. The log density of the target distribution. log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the same dtype as returned by `target_log_prob`. The log of the height of the slice for each chain. The values must be bounded above by `target_log_prob(x_initial)`. max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of doublings to consider. step_size: `tf.Tensor` with same dtype as and shape compatible with `x_initial`. The size of the initial interval. seed: (Optional) positive int. The random seed. If None, no seed is set. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper bounds for each chain. lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower bounds for each chain. both_ok: A tensor of shape `x_initial` and boolean dtype. Indicates if both the chosen upper and lower bound lie outside of the slice. #### References [1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31, No. 3 , 705-767. https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461 """ with tf.compat.v1.name_scope( name, 'slice_bounds_by_doubling', [x_initial, log_slice_heights, max_doublings, step_size]): seed_gen = distributions.SeedStream(seed, salt='slice_bounds_by_doubling') x_initial = tf.convert_to_tensor(value=x_initial) batch_shape = tf.shape(input=x_initial) dtype = step_size.dtype.base_dtype left_endpoints = x_initial + step_size * tf.random.uniform( batch_shape, minval=-1.0, maxval=0.0, dtype=dtype, seed=seed_gen()) # Compute the increments by which we need to step the upper and lower bounds # part of the doubling procedure. left_increments, widths = _left_doubling_increments( batch_shape, max_doublings, step_size, seed=seed_gen()) # The left and right end points. Shape (max_doublings+1,) + batch_shape. left_endpoints -= left_increments right_endpoints = left_endpoints + widths # Test if these end points lie outside of the slice. # Checks if the end points of the slice are outside the graph of the pdf. left_ep_values = tf.map_fn(target_log_prob, left_endpoints) right_ep_values = tf.map_fn(target_log_prob, right_endpoints) left_ok = left_ep_values < log_slice_heights right_ok = right_ep_values < log_slice_heights both_ok = left_ok & right_ok both_ok_f = tf.reshape(both_ok, [max_doublings + 1, -1]) best_interval_idx = _find_best_interval_idx( tf.cast(both_ok_f, dtype=tf.int32)) # Formats the above index as required to use with gather_nd. 
point_index_gather = tf.stack( [best_interval_idx, tf.range(tf.size(input=best_interval_idx))], axis=1, name='point_index_gather') left_ep_f = tf.reshape(left_endpoints, [max_doublings + 1, -1]) right_ep_f = tf.reshape(right_endpoints, [max_doublings + 1, -1]) # The x values of the uppper and lower bounds of the slices for each chain. lower_bounds = tf.reshape(tf.gather_nd(left_ep_f, point_index_gather), batch_shape) upper_bounds = tf.reshape(tf.gather_nd(right_ep_f, point_index_gather), batch_shape) both_ok = tf.reduce_any(input_tensor=both_ok, axis=0) return upper_bounds, lower_bounds, both_ok
Returns the bounds of the slice at each stage of doubling procedure. Precomputes the x coordinates of the left (L) and right (R) endpoints of the interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note that we simultaneously compute all possible doubling values for each chain, for the reason that at small-medium densities, the gains from parallel evaluation might cause a speed-up, but this will be benchmarked against the while loop implementation. Args: x_initial: `tf.Tensor` of any shape and any real dtype consumable by `target_log_prob`. The initial points. target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as `x_initial` and returning a tensor of the same shape. The log density of the target distribution. log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the same dtype as returned by `target_log_prob`. The log of the height of the slice for each chain. The values must be bounded above by `target_log_prob(x_initial)`. max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of doublings to consider. step_size: `tf.Tensor` with same dtype as and shape compatible with `x_initial`. The size of the initial interval. seed: (Optional) positive int. The random seed. If None, no seed is set. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper bounds for each chain. lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower bounds for each chain. both_ok: A tensor of shape `x_initial` and boolean dtype. Indicates if both the chosen upper and lower bound lie outside of the slice. #### References [1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31, No. 3 , 705-767. https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461
def pretty_repr(instance): """ A function assignable to the ``__repr__`` dunder method, so that the ``prettyprinter`` definition for the type is used to provide repr output. Usage: .. code:: python from prettyprinter import pretty_repr class MyClass: __repr__ = pretty_repr """ instance_type = type(instance) if not is_registered( instance_type, check_superclasses=True, check_deferred=True, register_deferred=True ): warnings.warn( "pretty_repr is assigned as the __repr__ method of " "'{}'. However, no pretty printer is registered for that type, " "its superclasses or its subclasses. Falling back to the default " "repr implementation. To fix this warning, register a pretty " "printer using prettyprinter.register_pretty.".format( instance_type.__qualname__ ), UserWarning ) return object.__repr__(instance) return pformat(instance)
A function assignable to the ``__repr__`` dunder method, so that the ``prettyprinter`` definition for the type is used to provide repr output. Usage: .. code:: python from prettyprinter import pretty_repr class MyClass: __repr__ = pretty_repr
def _dpi(self, resolution_tag): """ Return the dpi value calculated for *resolution_tag*, which can be either TIFF_TAG.X_RESOLUTION or TIFF_TAG.Y_RESOLUTION. The calculation is based on the values of both that tag and the TIFF_TAG.RESOLUTION_UNIT tag in this parser's |_IfdEntries| instance. """ ifd_entries = self._ifd_entries if resolution_tag not in ifd_entries: return 72 # resolution unit defaults to inches (2) resolution_unit = ( ifd_entries[TIFF_TAG.RESOLUTION_UNIT] if TIFF_TAG.RESOLUTION_UNIT in ifd_entries else 2 ) if resolution_unit == 1: # aspect ratio only return 72 # resolution_unit == 2 for inches, 3 for centimeters units_per_inch = 1 if resolution_unit == 2 else 2.54 dots_per_unit = ifd_entries[resolution_tag] return int(round(dots_per_unit * units_per_inch))
Return the dpi value calculated for *resolution_tag*, which can be either TIFF_TAG.X_RESOLUTION or TIFF_TAG.Y_RESOLUTION. The calculation is based on the values of both that tag and the TIFF_TAG.RESOLUTION_UNIT tag in this parser's |_IfdEntries| instance.
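A small self-contained check of the arithmetic in the final branch: with the resolution unit set to centimeters (3) and 300 dots per unit, the computed dpi is 762.

    resolution_unit = 3          # 3 means centimeters
    dots_per_unit = 300
    units_per_inch = 1 if resolution_unit == 2 else 2.54
    assert int(round(dots_per_unit * units_per_inch)) == 762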
def get_purchase(self, purchase_id, purchase_key='sid'):
    """
    Retrieve information about a purchase using the system's unique ID or
    a client's ID

    @param purchase_id: a string that represents a unique_id or an extid.
    @param purchase_key: a string that is either 'sid' or 'extid'.
    """
    data = {'purchase_id': purchase_id, 'purchase_key': purchase_key}
    return self.api_get('purchase', data)
Retrieve information about a purchase using the system's unique ID or a client's ID @param purchase_id: a string that represents a unique_id or an extid. @param purchase_key: a string that is either 'sid' or 'extid'.
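A hypothetical usage sketch; 'client' stands for an already-authenticated API client instance and the identifiers are invented for illustration.

    purchase = client.get_purchase('tr-12345')                        # look up by system id ('sid')
    purchase = client.get_purchase('order-42', purchase_key='extid')  # look up by the caller's own id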
def setup_logger(debug, color): """Configure the logger.""" if debug: log_level = logging.DEBUG else: log_level = logging.INFO logger = logging.getLogger('exifread') stream = Handler(log_level, debug, color) logger.addHandler(stream) logger.setLevel(log_level)
Configure the logger.
def from_graph(cls, graph, linear_energy_ranges, quadratic_energy_ranges): """Create Theta from a graph and energy ranges. Args: graph (:obj:`networkx.Graph`): Provides the structure for Theta. linear_energy_ranges (dict): A dict of the form {v: (min, max), ...} where min and max are the range of values allowed to v. quadratic_energy_ranges (dict): A dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). Returns: :obj:`.Theta` """ get_env().enable_infix_notation = True # not sure why we need this here theta = cls.empty(dimod.SPIN) theta.add_offset(Symbol('offset', REAL)) def Linear(v): """Create a Symbol for the linear bias including the energy range constraints.""" bias = Symbol('h_{}'.format(v), REAL) min_, max_ = linear_energy_ranges[v] theta.assertions.add(LE(bias, limitReal(max_))) theta.assertions.add(GE(bias, limitReal(min_))) return bias def Quadratic(u, v): """Create a Symbol for the quadratic bias including the energy range constraints.""" bias = Symbol('J_{},{}'.format(u, v), REAL) if (v, u) in quadratic_energy_ranges: min_, max_ = quadratic_energy_ranges[(v, u)] else: min_, max_ = quadratic_energy_ranges[(u, v)] theta.assertions.add(LE(bias, limitReal(max_))) theta.assertions.add(GE(bias, limitReal(min_))) return bias for v in graph.nodes: theta.add_variable(v, Linear(v)) for u, v in graph.edges: theta.add_interaction(u, v, Quadratic(u, v)) return theta
Create Theta from a graph and energy ranges. Args: graph (:obj:`networkx.Graph`): Provides the structure for Theta. linear_energy_ranges (dict): A dict of the form {v: (min, max), ...} where min and max are the range of values allowed to v. quadratic_energy_ranges (dict): A dict of the form {(u, v): (min, max), ...} where min and max are the range of values allowed to (u, v). Returns: :obj:`.Theta`
def save(self): """ Create the writer & save """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, 'write'): msg = ("compression has no effect when passing file-like " "object as input.") warnings.warn(msg, RuntimeWarning, stacklevel=2) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, 'write') and self.compression == 'zip') if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) close = True try: writer_kwargs = dict(lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) if self.encoding == 'ascii': self.writer = csvlib.writer(f, **writer_kwargs) else: writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, 'write'): self.path_or_buf.write(buf) else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close()
Create the writer & save
def access_token(): """Token view handles exchange/refresh access tokens.""" client = Client.query.filter_by( client_id=request.form.get('client_id') ).first() if not client: abort(404) if not client.is_confidential and \ 'client_credentials' == request.form.get('grant_type'): error = InvalidClientError() response = jsonify(dict(error.twotuples)) response.status_code = error.status_code abort(response) # Return None or a dictionary. Dictionary will be merged with token # returned to the client requesting the access token. # Response is in application/json return None
Token view handles exchange/refresh access tokens.
def camelcase_underscore(name): """ Convert camelcase names to underscore """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
Convert camelcase names to underscore
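Because the two regular expressions are fully given above, the behaviour can be checked standalone:

    import re

    def camelcase_underscore(name):
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    assert camelcase_underscore('CamelCaseName') == 'camel_case_name'
    assert camelcase_underscore('HTTPResponseCode') == 'http_response_code'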
def pull(i): """ Input: { (path) - repo UOA (where to create entry) (type) - type (url) - URL or (data_uoa) - repo UOA (clone) - if 'yes', clone repo instead of update (current_repos) - if resolving dependencies on other repos, list of repos being updated (to avoid infinite recursion) (git) - if 'yes', use git protocol instead of https (ignore_pull) - useful just for switching to another branch (stable) - take stable version (highly experimental) (version) - checkout version (default - stable) (branch) - git branch (checkout) - git checkout } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ o=i.get('out','') xrecache=False pp=[] px=i.get('path','') t=i.get('type','') url=i.get('url','') stable=i.get('stable','') version=i.get('version','') if stable=='yes': version='stable' branch=i.get('branch','') checkout=i.get('checkout','') ip=i.get('ignore_pull','') cr=i.get('current_repos',[]) tt='pull' if i.get('clone','')=='yes': tt='clone' if px!='': pp.append({'path':px, 'type':t, 'url':url}) uoa=i.get('data_uoa','') cids=i.get('cids',[]) if len(cids)>0 and uoa=='': uoa=cids[0] # If url is not empty and uoa is empty, get name from URL: if url!='' and uoa=='' and px=='': ix=url.rfind('/') if ix>0: uoa=url[ix+1:] if uoa.endswith('.git'): uoa=uoa[:-4] i['data_uoa']=uoa if uoa=='' and len(pp)==0 and url=='': uoa='*' if uoa!='': if uoa.find('*')>=0 or uoa.find('?')>=0: r=ck.list_data({'module_uoa':work['self_module_uoa'], 'data_uoa':uoa}) if r['return']>0: return r lst=r['lst'] for q in lst: # Loading repo r=ck.access({'action':'load', 'module_uoa':work['self_module_uoa'], 'data_uoa':q['data_uoa'], 'common':'yes'}) if r['return']>0: return r d=r['dict'] t=d.get('shared','') duoa=r['data_uoa'] if d.get('recache','')=='yes': xrecache=True if t!='': p=d.get('path','') url=d.get('url','') checkouts=d.get('checkouts',{}) pp.append({'path':p, 'type':t, 'url':url, 'data_uoa':duoa, 'checkouts':checkouts}) else: # Loading repo r=ck.access({'action':'load', 'module_uoa':work['self_module_uoa'], 'data_uoa':uoa, 'common':'yes'}) if r['return']>0: if r['return']==16: # If not found, try to add from GIT i['action']='add' i['shared']='yes' x=i.get('quiet','') if x=='': x='yes' i['quiet']=x i['current_repos']=cr return add(i) else: return r d=r['dict'] duoa=r['data_uoa'] if d.get('recache','')=='yes': xrecache=True p=d['path'] t=d.get('shared','') url=d.get('url','') checkouts=d.get('checkouts',{}) pp.append({'path':p, 'type':t, 'url':url, 'data_uoa':duoa, 'checkouts':checkouts}) # Updating ... 
for q in pp: p=q.get('path','') duoa=q.get('data_uoa','') t=q.get('type','') url=q.get('url','') # Semi hack (useful for Anton) if i.get('git','')=='yes': url=url.replace('https://','git@') j=url.find('/') if j>0: url=url[:j]+':'+url[j+1:] url+='.git' if o=='con' and tt!='clone': ck.out('******************************************************************') ck.out('Updating repo "'+duoa+'" ...') ck.out('') ck.out(' Local path: '+p) ck.out(' URL: '+url) if t=='git': # Check if git is installed rq=ck.gen_tmp_file({}) if rq['return']>0: return rq xfn=rq['file_name'] os.system('git --version > '+xfn) rq=ck.load_text_file({'text_file':xfn, 'delete_after_read':'yes'}) xs='' if rq['return']==0: xs=rq['string'].strip() if xs.find(' version ')<0: return{'return':1, 'error':'git command line client is not found - please, install it or download repo as zip'} # Continue try: px=os.getcwd() except OSError: from os.path import expanduser px=expanduser("~") if not os.path.isdir(p): os.makedirs(p) if o=='con': ck.out('') ck.out(' cd '+p) os.chdir(p) r=0 if ip!='yes': s=ck.cfg['repo_types'][t][tt].replace('$#url#$', url).replace('$#path#$', p) if o=='con': ck.out(' '+s) ck.out('') r=os.system(s) if o=='con': ck.out('') os.chdir(px) # Restore path if r>0: if o=='con': ck.out('') ck.out(' WARNING: repository update likely failed OR IN A DIFFERENT BRANCH/CHECKOUT (git exit code: '+str(r)+')') ck.out('') rx=ck.inp({'text': 'Would you like to continue (Y/n)?: '}) x=rx['string'].lower() if x=='n' or x=='no': return {'return':1, 'error':'repository update likely failed - exit code '+str(r)} else: return {'return':1, 'error':'repository update likely failed - exit code '+str(r)} else: if o=='con': ck.out('CK warning: this repository is not shared!') # Check deps if tt!='clone': # clone is done in add ... if o=='con': ck.out(' ========================================') ck.out(' Checking dependencies on other repos ...') ck.out('') r=deps({'path':p, 'current_path':cr, 'how':'pull', 'version':version, 'branch':branch, 'checkout':checkout, 'out':o}) if r['return']>0: return r # Re-caching ... if xrecache: if o=='con': ck.out(' ==============================================') ck.out(' At least one repository requires recaching ...') ck.out('') r=recache({'out':o}) if r['return']>0: return r return {'return':0}
Input: { (path) - repo UOA (where to create entry) (type) - type (url) - URL or (data_uoa) - repo UOA (clone) - if 'yes', clone repo instead of update (current_repos) - if resolving dependencies on other repos, list of repos being updated (to avoid infinite recursion) (git) - if 'yes', use git protocol instead of https (ignore_pull) - useful just for switching to another branch (stable) - take stable version (highly experimental) (version) - checkout version (default - stable) (branch) - git branch (checkout) - git checkout } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
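In practice CK entry points such as pull are reached through ck.access rather than called directly. A hedged sketch of cloning a shared repository this way (the repository name and URL below are illustrative, not taken from the source):

import ck.kernel as ck

# Roughly equivalent to `ck pull repo --url=... --clone`; the repo name
# 'ck-env' and its URL are examples only.
r = ck.access({'action': 'pull',
               'module_uoa': 'repo',
               'data_uoa': 'ck-env',
               'url': 'https://github.com/ctuning/ck-env',
               'clone': 'yes',
               'out': 'con'})
if r['return'] > 0:
    print(r['error'])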
def register_action(self, name, **kwargs):
        """
        Registers given action name, optional arguments like a parent, icon, slot etc ... can be given.

        :param name: Action to register.
        :type name: unicode
        :param \*\*kwargs: Keywords arguments.
        :type \*\*kwargs: \*\*
        :return: Action.
        :rtype: QAction
        """

        settings = foundations.data_structures.Structure(**{"parent": None,
                                                            "text": None,
                                                            "icon": None,
                                                            "icon_text": None,
                                                            "checkable": None,
                                                            "checked": None,
                                                            "status_tip": None,
                                                            "whats_this": None,
                                                            "tool_tip": None,
                                                            "shortcut": None,
                                                            "shortcut_context": None,
                                                            "slot": None})
        settings.update(kwargs)

        name = self.__normalize_name(name)
        category = foundations.namespace.get_namespace(name)
        name = foundations.namespace.remove_namespace(name)

        action = QAction(name, settings.parent or self)
        self.add_to_category(category, name, action)

        settings.text and action.setText(settings.text)
        settings.icon and action.setIcon(settings.icon)
        settings.icon_text and action.setIconText(settings.icon_text)
        settings.checkable and action.setCheckable(settings.checkable)
        settings.checked and action.setChecked(settings.checked)
        settings.status_tip and action.setStatusTip(settings.status_tip)
        settings.whats_this and action.setWhatsThis(settings.whats_this)
        settings.tool_tip and action.setToolTip(settings.tool_tip)
        settings.shortcut and action.setShortcut(QKeySequence(settings.shortcut))
        settings.shortcut_context and action.setShortcutContext(settings.shortcut_context)

        if settings.slot:
            self.__actions_signals_slots[action] = settings.slot
            action.triggered.connect(settings.slot)

        return action
Registers given action name, optional arguments like a parent, icon, slot etc ... can be given. :param name: Action to register. :type name: unicode :param \*\*kwargs: Keywords arguments. :type \*\*kwargs: \*\* :return: Action. :rtype: QAction
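A minimal usage sketch, assuming an instance of this manager is available as actions_manager; the namespaced action name, shortcut and slot below are all illustrative:

# Hypothetical slot; only the keyword names mirror the 'settings'
# structure handled by register_action above.
def on_export_triggered(checked=False):
    print("export triggered")

action = actions_manager.register_action(
    "Actions|Custom|Export",
    shortcut="Ctrl+E",
    status_tip="Export the current document",
    slot=on_export_triggered)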
def _retrieveRegions(self): """ Retrieve and store Python region instances for each column """ self.sensors = [] self.coarseSensors = [] self.locationInputs = [] self.L4Columns = [] self.L2Columns = [] self.L5Columns = [] self.L6Columns = [] for i in xrange(self.numColumns): self.sensors.append( self.network.regions["sensorInput_" + str(i)].getSelf() ) self.coarseSensors.append( self.network.regions["coarseSensorInput_" + str(i)].getSelf() ) self.locationInputs.append( self.network.regions["locationInput_" + str(i)].getSelf() ) self.L4Columns.append( self.network.regions["L4Column_" + str(i)].getSelf() ) self.L2Columns.append( self.network.regions["L2Column_" + str(i)].getSelf() ) self.L5Columns.append( self.network.regions["L5Column_" + str(i)].getSelf() ) self.L6Columns.append( self.network.regions["L6Column_" + str(i)].getSelf() )
Retrieve and store Python region instances for each column
def load_all(self, group): """ Loads all plugins advertising entry points with the given group name. The specified plugin needs to be a callable that accepts the everest configurator as single argument. """ for ep in iter_entry_points(group=group): plugin = ep.load() plugin(self.__config)
Loads all plugins advertising entry points with the given group name. The specified plugin needs to be a callable that accepts the everest configurator as single argument.
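The plugin contract here is deliberately thin: whatever the entry point resolves to just has to be callable with the configurator. A hedged sketch of such a plugin and its entry-point declaration (module, group and distribution names are made up):

# my_everest_plugin.py -- the callable receives the everest configurator.
def register(config):
    # Perform any configuration here; the loader above only requires that
    # this callable accepts the configurator as its single argument.
    pass

# In the plugin's setup.py (the group name is illustrative):
# entry_points={
#     'everest.plugins': ['my-plugin = my_everest_plugin:register'],
# }
#
# Loading then becomes, e.g.: plugin_manager.load_all('everest.plugins')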
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
        """Saves the decoded log file.

        :param str destino: (Optional) Full path to the file where the log
            data should be saved. If not given, a temporary file will be
            created via :func:`tempfile.mkstemp`.

        :param str prefix: (Optional) Prefix for the file name. If not given,
            ``"tmp"`` will be used.

        :param str suffix: (Optional) Suffix for the file name. If not given,
            ``"-sat.log"`` will be used.

        :return: Returns the full path to the saved file.
        :rtype: str

        :raises IOError: If ``destino`` is given and the file already exists.

        """
        if destino:
            if os.path.exists(destino):
                raise IOError((errno.EEXIST, 'File exists', destino,))
            destino = os.path.abspath(destino)
            fd = os.open(destino, os.O_EXCL|os.O_CREAT|os.O_WRONLY)
        else:
            fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)

        os.write(fd, self.conteudo())
        os.fsync(fd)
        os.close(fd)

        return os.path.abspath(destino)
Saves the decoded log file. :param str destino: (Optional) Full path to the file where the log data should be saved. If not given, a temporary file will be created via :func:`tempfile.mkstemp`. :param str prefix: (Optional) Prefix for the file name. If not given, ``"tmp"`` will be used. :param str suffix: (Optional) Suffix for the file name. If not given, ``"-sat.log"`` will be used. :return: Returns the full path to the saved file. :rtype: str :raises IOError: If ``destino`` is given and the file already exists.
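A short usage sketch, assuming log is an instance of the log-decoding class that defines salvar (the class itself is not shown in this snippet):

# Save to a temporary file; the path comes from tempfile.mkstemp.
caminho = log.salvar()

# Or to an explicit destination; IOError is raised if the file already exists.
try:
    caminho = log.salvar(destino='/tmp/equipamento-sat.log')
except IOError:
    print('destination already exists')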
def port_str_arrange(ports): """ Gives a str in the format (always tcp listed first). T:<tcp ports/portrange comma separated>U:<udp ports comma separated> """ b_tcp = ports.find("T") b_udp = ports.find("U") if (b_udp != -1 and b_tcp != -1) and b_udp < b_tcp: return ports[b_tcp:] + ports[b_udp:b_tcp] return ports
Gives a str in the format (always tcp listed first). T:<tcp ports/portrange comma separated>U:<udp ports comma separated>
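A quick check of the reordering: when the UDP block comes first, the slice from the 'T' marker onward is moved to the front and everything before it (the UDP block, including its trailing separator) is appended after. Assuming port_str_arrange above is in scope:

# Already TCP-first, or only one protocol present: returned unchanged.
assert port_str_arrange('T:22,80,U:53') == 'T:22,80,U:53'
assert port_str_arrange('T:1-1024') == 'T:1-1024'

# UDP listed first: the TCP slice is moved in front of the UDP slice.
assert port_str_arrange('U:53,T:22,80') == 'T:22,80U:53,'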
def to_phase(self, time, component=None, t0='t0_supconj', **kwargs): """ Get the phase(s) of a time(s) for a given ephemeris :parameter time: time to convert to phases (should be in same system as t0s) :type time: float, list, or array :parameter t0: qualifier of the parameter to be used for t0 :type t0: str :parameter str component: component for which to get the ephemeris. If not given, component will default to the top-most level of the current hierarchy :parameter **kwargs: any value passed through kwargs will override the ephemeris retrieved by component (ie period, t0, dpdt). Note: be careful about units - input values will not be converted. :return: phase (float) or phases (array) """ if kwargs.get('shift', False): raise ValueError("support for phshift was removed as of 2.1. Please pass t0 instead.") ephem = self.get_ephemeris(component=component, t0=t0, **kwargs) if isinstance(time, list): time = np.array(time) elif isinstance(time, Parameter): time = time.get_value(u.d) elif isinstance(time, str): time = self.get_value(time, u.d) t0 = ephem.get('t0', 0.0) period = ephem.get('period', 1.0) dpdt = ephem.get('dpdt', 0.0) if dpdt != 0: phase = np.mod(1./dpdt * np.log(period + dpdt*(time-t0)), 1.0) else: phase = np.mod((time-t0)/period, 1.0) if isinstance(phase, float): if phase > 0.5: phase -= 1 else: # then should be an array phase[phase > 0.5] -= 1 return phase
Get the phase(s) of a time(s) for a given ephemeris :parameter time: time to convert to phases (should be in same system as t0s) :type time: float, list, or array :parameter t0: qualifier of the parameter to be used for t0 :type t0: str :parameter str component: component for which to get the ephemeris. If not given, component will default to the top-most level of the current hierarchy :parameter **kwargs: any value passed through kwargs will override the ephemeris retrieved by component (ie period, t0, dpdt). Note: be careful about units - input values will not be converted. :return: phase (float) or phases (array)
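With a constant period the fold reduces to ((time - t0) / period) mod 1, re-centred so phases fall in (-0.5, 0.5]. A standalone numeric sketch of just that formula, with made-up ephemeris values:

import numpy as np

t0, period = 2455000.0, 2.5          # illustrative ephemeris (days)
times = np.array([2455000.0, 2455001.0, 2455002.0])

phase = np.mod((times - t0) / period, 1.0)
phase[phase > 0.5] -= 1              # same (-0.5, 0.5] convention as above

# phase -> [ 0. ,  0.4, -0.2]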