text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def _expectation(p, mean, none1, none2, none3, nghp=None): """ Compute the expectation: <m(X)>_p(X) - m(x) :: Linear, Identity or Constant mean function :return: NxQ """ return mean(p.mu)
0.004545
def is_valid_regex(string): """ Checks whether the re module can compile the given regular expression. Parameters ---------- string: str Returns ------- boolean """ try: re.compile(string) is_valid = True except re.error: is_valid = False return is_valid
0.003049
def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT): """Test if a matrix is positive semidefinite""" if atol is None: atol = ATOL_DEFAULT if rtol is None: rtol = RTOL_DEFAULT if not is_hermitian_matrix(mat, rtol=rtol, atol=atol): return False # Check eigenvalues are all non-negative (within tolerance) vals = np.linalg.eigvalsh(mat) for v in vals: if v < -atol: return False return True
0.002141
def format_value(column_dict, value, key=None): """ Format a value coming from the database (for example, converts datetimes to strings) :param column_dict: The column data collected during inspection :param value: A value coming from the database :param key: The exportation key """ formatter = column_dict.get('formatter') prop = column_dict['__col__'] res = value if value in ('', None,): res = '' elif formatter is not None: res = formatter(value) else: if hasattr(prop, "columns"): sqla_column = prop.columns[0] column_type = getattr(sqla_column.type, 'impl', sqla_column.type) formatter = FORMATTERS_REGISTRY.get_formatter(column_type, key) if formatter is not None: res = formatter(value) return res
0.001168
def reload(self, d=None): """Reload model from given dict or database.""" if d: self.clear() self.update(d) elif self.id: new_dict = self.by_id(self._id) self.clear() self.update(new_dict) else: # should I raise an exception here? # Like "Model must be saved first." pass
0.005013
def unstash(self): """ Pops the last stash if EPAB made a stash before """ if not self.stashed: LOGGER.error('no stash') else: LOGGER.info('popping stash') self.repo.git.stash('pop') self.stashed = False
0.006873
def execute(self, input_data): # Spin up SWF class swf = SWF() # Get the raw_bytes raw_bytes = input_data['sample']['raw_bytes'] # Parse it swf.parse(StringIO(raw_bytes)) # Header info head = swf.header output = {'version':head.version,'file_length':head.file_length,'frame_count':head.frame_count, 'frame_rate':head.frame_rate,'frame_size':head.frame_size.__str__(),'compressed':head.compressed} # Loop through all the tags output['tags'] = [tag.__str__() for tag in swf.tags] # Add the meta data to the output output.update(input_data['meta']) return output ''' # Map all tag names to indexes tag_map = {tag.name:index for tag,index in enumerate(swf.tags)} # FileAttribute Info file_attr_tag = swf.tags[tag_map] ''' ''' # Build up return data structure output = {name:value for name,value in locals().iteritems() if name not in ['self', 'input_data','raw_bytes']} output.update(input_data['meta']) return output '''
0.016653
def int_to_bytes(i, minlen=1, order='big'): # pragma: no cover """convert integer to bytes""" blen = max(minlen, PGPObject.int_byte_len(i), 1) if six.PY2: r = iter(_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1))) return bytes(bytearray((i >> c) & 0xff for c in r)) return i.to_bytes(blen, order)
0.007792
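The snippet above depends on `PGPObject.int_byte_len` and `six`, which are not part of this row. As a rough, self-contained sketch of the same idea (assuming `int_byte_len` is simply the minimal byte length of the integer), the Python 3 branch reduces to `int.to_bytes`:

def int_to_bytes_demo(i, minlen=1, order='big'):
    # Minimal byte length of i, but never smaller than minlen (or 1).
    blen = max(minlen, (i.bit_length() + 7) // 8, 1)
    return i.to_bytes(blen, order)

assert int_to_bytes_demo(0x1234) == b'\x12\x34'
assert int_to_bytes_demo(0x1234, order='little') == b'\x34\x12'
assert int_to_bytes_demo(1, minlen=4) == b'\x00\x00\x00\x01'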
def dumped(text, level, indent=2): """Put curly brackets round an indented text""" return indented("{\n%s\n}" % (indented(text, level + 1, indent) or "None"), level, indent) + "\n"
0.010753
def getObject(self, url_or_requests_response, params=None): 'Take a url or some xml response from JottaCloud and wrap it up with the corresponding JFS* class' if isinstance(url_or_requests_response, requests.models.Response): # this is a raw xml response that we need to parse url = url_or_requests_response.url o = lxml.objectify.fromstring(url_or_requests_response.content) else: # this is an url that we need to fetch url = url_or_requests_response o = self.get(url, params=params) # (.get() will parse this for us) parent = os.path.dirname(url).replace('up.jottacloud.com', 'www.jottacloud.com') if o.tag == 'error': JFSError.raiseError(o, url) elif o.tag == 'device': return JFSDevice(o, jfs=self, parentpath=parent) elif o.tag == 'folder': return JFSFolder(o, jfs=self, parentpath=parent) elif o.tag == 'mountPoint': return JFSMountPoint(o, jfs=self, parentpath=parent) elif o.tag == 'restoredFiles': return JFSFile(o, jfs=self, parentpath=parent) elif o.tag == 'deleteFiles': return JFSFile(o, jfs=self, parentpath=parent) elif o.tag == 'file': return ProtoFile.factory(o, jfs=self, parentpath=parent) # try: # if o.latestRevision.state == 'INCOMPLETE': # return JFSIncompleteFile(o, jfs=self, parentpath=parent) # elif o.latestRevision.state == 'CORRUPT': # return JFSCorruptFile(o, jfs=self, parentpath=parent) # except AttributeError: # return JFSFile(o, jfs=self, parentpath=parent) elif o.tag == 'enableSharing': return JFSenableSharing(o, jfs=self) elif o.tag == 'user': self.fs = o return self.fs elif o.tag == 'filedirlist': return JFSFileDirList(o, jfs=self, parentpath=parent) elif o.tag == 'searchresult': return JFSsearchresult(o, jfs=self) raise JFSError("invalid object: %s <- %s" % (repr(o), url_or_requests_response))
0.009519
def _check_copy_conditions(self, src, dst): # type: (SyncCopy, blobxfer.models.azure.StorageEntity, # blobxfer.models.azure.StorageEntity) -> UploadAction """Check for synccopy conditions :param SyncCopy self: this :param blobxfer.models.azure.StorageEntity src: src :param blobxfer.models.azure.StorageEntity dst: dst :rtype: SynccopyAction :return: synccopy action """ # if remote file doesn't exist, copy if dst is None or dst.from_local: return SynccopyAction.Copy # check overwrite option if not self._spec.options.overwrite: logger.info( 'not overwriting remote file: {})'.format(dst.path)) return SynccopyAction.Skip # check skip on options, MD5 match takes priority src_md5 = blobxfer.models.metadata.get_md5_from_metadata(src) dst_md5 = blobxfer.models.metadata.get_md5_from_metadata(dst) if (self._spec.skip_on.md5_match and blobxfer.util.is_not_empty(src_md5)): if src_md5 == dst_md5: return SynccopyAction.Skip else: return SynccopyAction.Copy # if neither of the remaining skip on actions are activated, copy if (not self._spec.skip_on.filesize_match and not self._spec.skip_on.lmt_ge): return SynccopyAction.Copy # check skip on file size match ul_fs = None if self._spec.skip_on.filesize_match: if src.size == dst.size: ul_fs = False else: ul_fs = True # check skip on lmt ge ul_lmt = None if self._spec.skip_on.lmt_ge: if dst.lmt >= src.lmt: ul_lmt = False else: ul_lmt = True # upload if either skip on mismatch is True if ul_fs or ul_lmt: return SynccopyAction.Copy else: return SynccopyAction.Skip
0.001966
def splitext(path): """splitext for paths with directories that may contain dots. From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module""" li = [] path_without_extensions = os.path.join(os.path.dirname(path), os.path.basename(path).split(os.extsep)[0]) extensions = os.path.basename(path).split(os.extsep)[1:] li.append(path_without_extensions) # li.append(extensions) if you want extensions in another list inside the list that is returned. li.extend(extensions) return li
0.007055
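A small usage sketch of the behaviour described above, inlined so it runs without the surrounding module:

import os

path = '/tmp/archive.tar.gz'
# First element: the path with every extension stripped; then each extension in order.
base = os.path.join(os.path.dirname(path), os.path.basename(path).split(os.extsep)[0])
parts = [base] + os.path.basename(path).split(os.extsep)[1:]
print(parts)  # ['/tmp/archive', 'tar', 'gz']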
def add_custom_func(self, func, dim, *args, **kwargs): """ adds a user defined function to extract features Parameters ---------- func : function a user-defined function, which accepts mdtraj.Trajectory object as first parameter and as many optional and named arguments as desired. Has to return a numpy.ndarray ndim=2. dim : int output dimension of :py:obj:`function` description: str or None a message for the describe feature list. args : any number of positional arguments these have to be in the same order as :py:obj:`func` is expecting them kwargs : dictionary named arguments passed to func Notes ----- You can pass a description list to describe the output of your function by element, by passing a list of strings with the same lengths as dimensions. Alternatively a single element list or str will be expanded to match the output dimension. """ description = kwargs.pop('description', None) f = CustomFeature(func, dim=dim, description=description, fun_args=args, fun_kwargs=kwargs) self.add_custom_feature(f)
0.005632
def change_and_save(obj, update_only_changed_fields=False, save_kwargs=None, **changed_fields): """ Changes a given `changed_fields` on object, saves it and returns changed object. """ from chamber.models import SmartModel change(obj, **changed_fields) if update_only_changed_fields and not isinstance(obj, SmartModel): raise TypeError('update_only_changed_fields can be used only with SmartModel') save_kwargs = save_kwargs if save_kwargs is not None else {} if update_only_changed_fields: save_kwargs['update_only_changed_fields'] = True obj.save(**save_kwargs) return obj
0.006319
def save_data(self, session, exp_id, content): '''save data will obtain the current subid from the session, and save it depending on the database type. Currently we just support flat files''' subid = session.get('subid') # We only attempt save if there is a subject id, set at start data_file = None if subid is not None: data_base = "%s/%s" %(self.data_base, subid) # If not running in headless, ensure path exists if not self.headless and not os.path.exists(data_base): mkdir_p(data_base) # Conditions for saving: do_save = False # If headless with token pre-generated OR not headless if self.headless and os.path.exists(data_base) or not self.headless: do_save = True if data_base.endswith(('revoked','finished')): do_save = False # If headless with token pre-generated OR not headless if do_save is True: data_file = "%s/%s-results.json" %(data_base, exp_id) if os.path.exists(data_file): self.logger.warning('%s exists, and is being overwritten.' %data_file) write_json(content, data_file) return data_file
0.005738
def should_bypass_proxies(url): """ Returns whether we should bypass proxies or not. """ get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy = get_proxy('no_proxy') netloc = urlparse(url).netloc if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. no_proxy = no_proxy.replace(' ', '').split(',') ip = netloc.split(':')[0] if is_ipv4_address(ip): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(ip, proxy_ip): return True else: for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return True # If the system proxy settings indicate that this URL should be bypassed, # don't proxy. # The proxy_bypass function is incredibly buggy on OS X in early versions # of Python 2.6, so allow this call to fail. Only catch the specific # exceptions we've seen, though: this call failing in other ways can reveal # legitimate problems. try: bypass = proxy_bypass(netloc) except (TypeError, socket.gaierror): bypass = False if bypass: return True return False
0.00243
def setTextEdit( self, textEdit ): """ Sets the text edit that this find widget will use to search. :param textEdit | <QTextEdit> """ if ( self._textEdit ): self._textEdit.removeAction(self._findAction) self._textEdit = textEdit if ( textEdit ): textEdit.addAction(self._findAction)
0.025
def prepare(self, node): '''Gather analysis result required by this analysis''' if isinstance(node, ast.Module): self.ctx.module = node elif isinstance(node, ast.FunctionDef): self.ctx.function = node for D in self.deps: d = D() d.attach(self.passmanager, self.ctx) result = d.run(node) setattr(self, uncamel(D.__name__), result)
0.004608
def from_points(lons, lats): """ Compute the BoundingBox from a set of latitudes and longitudes :param lons: longitudes :param lats: latitudes :return: BoundingBox """ north, west = max(lats), min(lons) south, east = min(lats), max(lons) return BoundingBox(north=north, west=west, south=south, east=east)
0.005305
def is_pathlike(value, **kwargs): """Indicate whether ``value`` is a path-like object. :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator """ try: value = validators.path(value, **kwargs) except SyntaxError as error: raise error except Exception: return False return True
0.003478
def create_document_dictionary(self, document, document_key=None, owner_document=None): """ Given a document, generates a dictionary representation of the document. Includes the widget for each field in the document. """ doc_dict = self.create_doc_dict(document, document_key, owner_document) for doc_key, doc_field in doc_dict.items(): # Base fields should not be evaluated if doc_key.startswith("_"): continue if isinstance(doc_field, ListField): doc_dict[doc_key] = self.create_list_dict(document, doc_field, doc_key) elif isinstance(doc_field, EmbeddedDocumentField): doc_dict[doc_key] = self.create_document_dictionary(doc_dict[doc_key].document_type_obj, doc_key) else: doc_dict[doc_key] = {"_document": document, "_key": doc_key, "_document_field": doc_field, "_widget": get_widget(doc_dict[doc_key], getattr(doc_field, 'disabled', False))} return doc_dict
0.004658
def calc_columns_rows(n): """ Calculate the number of columns and rows required to divide an image into ``n`` parts. Return a tuple of integers in the format (num_columns, num_rows) """ num_columns = int(ceil(sqrt(n))) num_rows = int(ceil(n / float(num_columns))) return (num_columns, num_rows)
0.003058
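A quick self-contained check of the grid calculation above, restated with its imports so the expectations can actually be executed:

from math import ceil, sqrt

def calc_columns_rows(n):
    num_columns = int(ceil(sqrt(n)))
    num_rows = int(ceil(n / float(num_columns)))
    return (num_columns, num_rows)

# 5 tiles need a 3 x 2 grid; 9 tiles fit exactly into 3 x 3.
assert calc_columns_rows(5) == (3, 2)
assert calc_columns_rows(9) == (3, 3)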
def reassemble_options(payload): ''' Reassemble partial options to options, returns a list of dhcp_option DHCP options are basically `|tag|length|value|` structure. When an option is longer than 255 bytes, it can be splitted into multiple structures with the same tag. The splitted structures must be joined back to get the original option. `dhcp_option_partial` is used to present the splitted options, and `dhcp_option` is used for reassembled option. ''' options = [] option_indices = {} def process_option_list(partials): for p in partials: if p.tag == OPTION_END: break if p.tag == OPTION_PAD: continue if p.tag in option_indices: # Reassemble the data options[option_indices[p.tag]][1].append(p.data) else: options.append((p.tag, [p.data])) option_indices[p.tag] = len(options) - 1 # First process options field process_option_list(payload.options) if OPTION_OVERLOAD in option_indices: # There is an overload option data = b''.join(options[option_indices[OPTION_OVERLOAD]][1]) overload_option = dhcp_overload.create(data) if overload_option & OVERLOAD_FILE: process_option_list(dhcp_option_partial[0].create(payload.file)) if overload_option & OVERLOAD_SNAME: process_option_list(dhcp_option_partial[0].create(payload.sname)) def _create_dhcp_option(tag, data): opt = dhcp_option(tag = tag) opt._setextra(data) opt._autosubclass() return opt return [_create_dhcp_option(tag, b''.join(data)) for tag,data in options]
0.004577
def QA_indicator_DMA(DataFrame, M1=10, M2=50, M3=10): """ DMA: Difference of Moving Averages """ CLOSE = DataFrame.close DDD = MA(CLOSE, M1) - MA(CLOSE, M2) AMA = MA(DDD, M3) return pd.DataFrame({ 'DDD': DDD, 'AMA': AMA })
0.004237
def json_validate(schema, resp): """ Validate an RPC response. The response must either take the form of the given schema, or it must take the form of {'error': ...} Returns the resp on success Returns {'error': ...} on validation error """ # is this an error? try: json_validate_error(resp) except ValidationError: if json_is_exception(resp): # got a traceback if BLOCKSTACK_TEST: log.error('\n{}'.format(resp['traceback'])) return {'error': 'Blockstack Core encountered an exception. See `traceback` for details', 'traceback': resp['traceback'], 'http_status': 500} if 'error' in resp and 'http_status' not in resp: # bad error message raise # not an error. jsonschema.validate(resp, schema) return resp
0.004566
def domain_relationship(self): """ Returns a domain relationship equivalent with this resource relationship. """ if self.__domain_relationship is None: ent = self.relator.get_entity() self.__domain_relationship = \ self.descriptor.make_relationship(ent) return self.__domain_relationship
0.007916
def wrap_objective(f, *args, **kwds): """Decorator for creating Objective factories. Changes f from the closure: (args) => () => TF Tensor into an Objective factory: (args) => Objective while preserving function name, arg info, docs... for interactive python. """ objective_func = f(*args, **kwds) objective_name = f.__name__ args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]" description = objective_name.title() + args_str return Objective(objective_func, objective_name, description)
0.013208
def _parseIsComment(self): """ Detect whether the element is HTML comment or not. Result is saved to the :attr:`_iscomment` property. """ self._iscomment = ( self._element.startswith("<!--") and self._element.endswith("-->") )
0.006969
async def connect(self): """Create connection pool asynchronously. """ self.pool = await aiomysql.create_pool( loop=self.loop, db=self.database, connect_timeout=self.timeout, **self.connect_params)
0.007435
def new( name: str, arity: Arity, class_name: str=None, *, associative: bool=False, commutative: bool=False, one_identity: bool=False, infix: bool=False ) -> Type['Operation']: """Utility method to create a new operation type. Example: >>> Times = Operation.new('*', Arity.polyadic, 'Times', associative=True, commutative=True, one_identity=True) >>> Times Times['*', Arity(min_count=2, fixed_size=False), associative, commutative, one_identity] >>> str(Times(Symbol('a'), Symbol('b'))) '*(a, b)' Args: name: Name or symbol for the operator. Will be used as name for the new class if `class_name` is not specified. arity: The arity of the operator as explained in the documentation of `Operation`. class_name: Name for the new operation class to be used instead of name. This argument is required if `name` is not a valid python identifier. Keyword Args: associative: See :attr:`~Operation.associative`. commutative: See :attr:`~Operation.commutative`. one_identity: See :attr:`~Operation.one_identity`. infix: See :attr:`~Operation.infix`. Raises: ValueError: if the class name of the operation is not a valid class identifier. """ class_name = class_name or name if not class_name.isidentifier() or keyword.iskeyword(class_name): raise ValueError("Invalid identifier for new operator class.") return type( class_name, (Operation, ), { 'name': name, 'arity': arity, 'associative': associative, 'commutative': commutative, 'one_identity': one_identity, 'infix': infix } )
0.009183
def get_explorer_pid(self): """ Tries to find the process ID for "explorer.exe". @rtype: int or None @return: Returns the process ID, or C{None} on error. """ try: exp = win32.SHGetFolderPath(win32.CSIDL_WINDOWS) except Exception: exp = None if not exp: exp = os.getenv('SystemRoot') if exp: exp = os.path.join(exp, 'explorer.exe') exp_list = self.find_processes_by_filename(exp) if exp_list: return exp_list[0][0].get_pid() return None
0.0033
def getChemicalPotential(self, solution): """Call solver in order to calculate chemical potential. """ if isinstance(solution, Solution): solution = solution.getSolution() self.mu = self.solver.chemicalPotential(solution) return self.mu
0.00692
def get_hosts_retriever(s=None): """ Given the function name, looks up the method for dynamically retrieving host data. """ s = s or env.hosts_retriever # #assert s, 'No hosts retriever specified.' if not s: return env_hosts_retriever # module_name = '.'.join(s.split('.')[:-1]) # func_name = s.split('.')[-1] # retriever = getattr(importlib.import_module(module_name), func_name) # return retriever return str_to_callable(s) or env_hosts_retriever
0.004
def longest_increasing_subsequence(xs): '''Return a longest increasing subsequence of xs. (Note that there may be more than one such subsequence.) >>> longest_increasing_subsequence(range(3)) [0, 1, 2] >>> longest_increasing_subsequence([3, 1, 2, 0]) [1, 2] ''' # Patience sort xs, stacking (x, prev_ix) pairs on the piles. # Prev_ix indexes the element at the top of the previous pile, # which has a lower x value than the current x value. piles = [[]] # Create a dummy pile 0 for x, p in patience_sort(xs): if p + 1 == len(piles): piles.append([]) # backlink to the top of the previous pile piles[p + 1].append((x, len(piles[p]) - 1)) # Backtrack to find a longest increasing subsequence npiles = len(piles) - 1 prev = 0 lis = list() for pile in range(npiles, 0, -1): x, prev = piles[pile][prev] lis.append(x) lis.reverse() return lis
0.001035
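The function above relies on a `patience_sort` helper that is not included in this row. A minimal sketch consistent with how it is used (each element is yielded together with the zero-based index of the pile it lands on, with `bisect_left` keeping subsequences strictly increasing) could look like this; it is an assumption about the missing helper, not the library's actual code:

from bisect import bisect_left

def patience_sort(xs):
    # Yield (x, pile_index): x goes onto the leftmost pile whose top
    # element is >= x, so the pile tops stay sorted.
    tops = []
    for x in xs:
        p = bisect_left(tops, x)
        if p == len(tops):
            tops.append(x)   # start a new pile
        else:
            tops[p] = x      # place x on an existing pile
        yield x, p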
def format(self, full_info: bool = False): """ :param full_info: If True, adds more info about the chat. Please, note that this additional info requires to make up to THREE synchronous api calls. """ chat = self.api_object if full_info: self.__format_full(chat) else: self.__format_simple(chat)
0.007937
def use(plugin): """ Register plugin in grappa. `plugin` argument can be a function or a object that implement `register` method, which should accept one argument: `grappa.Engine` instance. Arguments: plugin (function|module): grappa plugin object to register. Raises: ValueError: if `plugin` is not a valid interface. Example:: import grappa class MyOperator(grappa.Operator): pass def my_plugin(engine): engine.register(MyOperator) grappa.use(my_plugin) """ log.debug('register new plugin: {}'.format(plugin)) if inspect.isfunction(plugin): return plugin(Engine) if plugin and hasattr(plugin, 'register'): return plugin.register(Engine) raise ValueError('invalid plugin: must be a function or ' 'implement register() method')
0.001117
def update_record(self, common_name, **fields): """Update fields in an existing record""" record = self.get_record(common_name) if fields is not None: for field, value in fields.items(): record[field] = value self.save() return record
0.00678
def parse_authorization_header(value, charset='utf-8'): '''Parse an HTTP basic/digest authorisation header. :param value: the authorisation header to parse. :return: either `None` if the header was invalid or not given, otherwise an :class:`Auth` object. ''' if not value: return try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except ValueError: return if auth_type == 'basic': try: up = b64decode(auth_info.encode(CHARSET)).decode(charset) username, password = up.split(':', 1) except Exception: return return BasicAuth(username, password) elif auth_type == 'digest': auth_map = parse_dict_header(auth_info) if not digest_parameters.difference(auth_map): return DigestAuth(auth_map.pop('username'), options=auth_map)
0.001094
def solvemdbi_cg(ah, rho, b, axisM, axisK, tol=1e-5, mit=1000, isn=None): r""" Solve a multiple diagonal block linear system with a scaled identity term using Conjugate Gradient (CG) via :func:`scipy.sparse.linalg.cg`. The solution is obtained by independently solving a set of linear systems of the form (see :cite:`wohlberg-2016-efficient`) .. math:: (\rho I + \mathbf{a}_0 \mathbf{a}_0^H + \mathbf{a}_1 \mathbf{a}_1^H + \; \ldots \; + \mathbf{a}_{K-1} \mathbf{a}_{K-1}^H) \; \mathbf{x} = \mathbf{b} where each :math:`\mathbf{a}_k` is an :math:`M`-vector. The inner products and matrix products in this equation are taken along the M and K axes of the corresponding multi-dimensional arrays; the solutions are independent over the other axes. Parameters ---------- ah : array_like Linear system component :math:`\mathbf{a}^H` rho : float Parameter rho b : array_like Linear system component :math:`\mathbf{b}` axisM : int Axis in input corresponding to index m in linear system axisK : int Axis in input corresponding to index k in linear system tol : float CG tolerance mit : int CG maximum iterations isn : array_like CG initial solution Returns ------- x : ndarray Linear system solution :math:`\mathbf{x}` cgit : int Number of CG iterations """ a = np.conj(ah) if isn is not None: isn = isn.ravel() Aop = lambda x: inner(ah, x, axis=axisM) AHop = lambda x: inner(a, x, axis=axisK) AHAop = lambda x: AHop(Aop(x)) vAHAoprI = lambda x: AHAop(x.reshape(b.shape)).ravel() + rho * x.ravel() lop = LinearOperator((b.size, b.size), matvec=vAHAoprI, dtype=b.dtype) vx, cgit = _cg_wrapper(lop, b.ravel(), isn, tol, mit) return vx.reshape(b.shape), cgit
0.002655
def _populate_reporting_tab(self): """Populate trees about layers.""" self.tree.clear() self.add_layer.setEnabled(False) self.remove_layer.setEnabled(False) self.move_up.setEnabled(False) self.move_down.setEnabled(False) self.tree.setColumnCount(1) self.tree.setRootIsDecorated(False) self.tree.setHeaderHidden(True) analysis_branch = QTreeWidgetItem( self.tree.invisibleRootItem(), [FROM_ANALYSIS['name']]) analysis_branch.setFont(0, bold_font) analysis_branch.setExpanded(True) analysis_branch.setFlags(Qt.ItemIsEnabled) if self._multi_exposure_if: expected = self._multi_exposure_if.output_layers_expected() for group, layers in list(expected.items()): group_branch = QTreeWidgetItem(analysis_branch, [group]) group_branch.setFont(0, bold_font) group_branch.setExpanded(True) group_branch.setFlags(Qt.ItemIsEnabled) for layer in layers: layer = definition(layer) if layer.get('allowed_geometries', None): item = QTreeWidgetItem( group_branch, [layer.get('name')]) item.setData( 0, LAYER_ORIGIN_ROLE, FROM_ANALYSIS['key']) item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, group) item.setData( 0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer['key']) item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) canvas_branch = QTreeWidgetItem( self.tree.invisibleRootItem(), [FROM_CANVAS['name']]) canvas_branch.setFont(0, bold_font) canvas_branch.setExpanded(True) canvas_branch.setFlags(Qt.ItemIsEnabled) # List layers from the canvas loaded_layers = list(QgsProject.instance().mapLayers().values()) canvas_layers = self.iface.mapCanvas().layers() flag = setting('visibleLayersOnlyFlag', expected_type=bool) for loaded_layer in loaded_layers: if flag and loaded_layer not in canvas_layers: continue title = loaded_layer.name() item = QTreeWidgetItem(canvas_branch, [title]) item.setData(0, LAYER_ORIGIN_ROLE, FROM_CANVAS['key']) item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, loaded_layer.id()) item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) self.tree.resizeColumnToContents(0)
0.000762
def helical_turbulent_fd_Srinivasan(Re, Di, Dc): r'''Calculates Darcy friction factor for a fluid flowing inside a curved pipe such as a helical coil under turbulent conditions, using the method of Srinivasan [1]_, as shown in [2]_ and [3]_. .. math:: f_d = \frac{0.336}{{\left[Re\sqrt{\frac{D_i}{D_c}}\right]^{0.2}}} Parameters ---------- Re : float Reynolds number with `D=Di`, [-] Di : float Inner diameter of the coil, [m] Dc : float Diameter of the helix/coil measured from the center of the tube on one side to the center of the tube on the other side, [m] Returns ------- fd : float Darcy friction factor for a curved pipe [-] Notes ----- Valid for 0.01 < Di/Dc < 0.15, with no Reynolds number criteria given in [2]_ or [3]_. [2]_ recommends this method, using the transition criteria of Srinivasan as well. [3]_ recommends using either this method or the Ito method. This method did not make it into the popular review articles on curved flow. Examples -------- >>> helical_turbulent_fd_Srinivasan(1E4, 0.01, .02) 0.0570745212117107 References ---------- .. [1] Srinivasan, PS, SS Nandapurkar, and FA Holland. "Friction Factors for Coils." TRANSACTIONS OF THE INSTITUTION OF CHEMICAL ENGINEERS AND THE CHEMICAL ENGINEER 48, no. 4-6 (1970): T156 .. [2] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.: Van Nostrand Reinhold Co., 1984. .. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. ''' De = Dean(Re=Re, Di=Di, D=Dc) return 0.336*De**-0.2
0.004553
def deregisterevent(self, event_name): """ Remove callback of registered event @param event_name: Event name in at-spi format. @type event_name: string @return: 1 if registration was successful, 0 if not. @rtype: integer """ if event_name in self._pollEvents._callback: del self._pollEvents._callback[event_name] return self._remote_deregisterevent(event_name)
0.004474
def topics(self): """Get the topics that this question belongs to. :return: topics of this question :rtype: Topic.Iterable """ from .topic import Topic for topic in self.soup.find_all('a', class_='zm-item-tag'): yield Topic(Zhihu_URL + topic['href'], topic.text.replace('\n', ''), session=self._session)
0.008929
def _codes_to_ints(self, codes): """ Transform combination(s) of uint64 in one uint64 (each), in a strictly monotonic way (i.e. respecting the lexicographic order of integer combinations): see BaseMultiIndexCodesEngine documentation. Parameters ---------- codes : 1- or 2-dimensional array of dtype uint64 Combinations of integers (one per row) Returns ------ int_keys : scalar or 1-dimensional array, of dtype uint64 Integer(s) representing one combination (each). """ # Shift the representation of each level by the pre-calculated number # of bits: codes <<= self.offsets # Now sum and OR are in fact interchangeable. This is a simple # composition of the (disjunct) significant bits of each level (i.e. # each column in "codes") in a single positive integer: if codes.ndim == 1: # Single key return np.bitwise_or.reduce(codes) # Multiple keys return np.bitwise_or.reduce(codes, axis=1)
0.001818
def keywords_special_characters(keywords): """ Confirms that the keywords don't contain special characters Args: keywords (str) Raises: django.forms.ValidationError """ invalid_chars = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n' if any(char in invalid_chars for char in keywords): raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS)
0.002611
def get_es(**overrides): """Return an elasticsearch Elasticsearch object using settings from ``settings.py``. :arg overrides: Allows you to override defaults to create the ElasticSearch object. You can override any of the arguments listed in :py:func:`elasticutils.get_es`. For example, if you wanted to create an ElasticSearch with a longer timeout to a different cluster, you'd do: >>> from elasticutils.contrib.django import get_es >>> es = get_es(urls=['http://some_other_cluster:9200'], timeout=30) """ defaults = { 'urls': settings.ES_URLS, 'timeout': getattr(settings, 'ES_TIMEOUT', 5) } defaults.update(overrides) return base_get_es(**defaults)
0.001353
def validate_txn_obj(obj_name, obj, key, validation_fun): """Validate value of `key` in `obj` using `validation_fun`. Args: obj_name (str): name for `obj` being validated. obj (dict): dictionary object. key (str): key to be validated in `obj`. validation_fun (function): function used to validate the value of `key`. Returns: None: indicates validation successful Raises: ValidationError: `validation_fun` will raise exception on failure """ backend = bigchaindb.config['database']['backend'] if backend == 'localmongodb': data = obj.get(key, {}) if isinstance(data, dict): validate_all_keys_in_obj(obj_name, data, validation_fun) elif isinstance(data, list): validate_all_items_in_list(obj_name, data, validation_fun)
0.001119
def prune(self): """Removes the node and all descendents without looping back past the root. Note this does not remove the associated data objects. :returns: list of :class:`BaseDataNode` subclassers associated with the removed ``Node`` objects. """ targets = self.descendents_root() try: targets.remove(self.graph.root) except ValueError: # root wasn't in the target list, no problem pass results = [n.data for n in targets] results.append(self.data) for node in targets: node.delete() for parent in self.parents.all(): parent.children.remove(self) self.delete() return results
0.002594
def file_object_supports_binary(fp): # type: (BinaryIO) -> bool ''' A function to check whether a file-like object supports binary mode. Parameters: fp - The file-like object to check for binary mode support. Returns: True if the file-like object supports binary mode, False otherwise. ''' if hasattr(fp, 'mode'): return 'b' in fp.mode # Python 3 if sys.version_info >= (3, 0): return isinstance(fp, (io.RawIOBase, io.BufferedIOBase)) # Python 2 return isinstance(fp, (cStringIO.OutputType, cStringIO.InputType, io.RawIOBase, io.BufferedIOBase))
0.003236
def check_cmake_exists(cmake_command): """ Check whether CMake is installed. If not, print informative error message and quits. """ from subprocess import Popen, PIPE p = Popen( '{0} --version'.format(cmake_command), shell=True, stdin=PIPE, stdout=PIPE) if not ('cmake version' in p.communicate()[0].decode('UTF-8')): sys.stderr.write(' This code is built using CMake\n\n') sys.stderr.write(' CMake is not found\n') sys.stderr.write(' get CMake at http://www.cmake.org/\n') sys.stderr.write(' on many clusters CMake is installed\n') sys.stderr.write(' but you have to load it first:\n') sys.stderr.write(' $ module load cmake\n') sys.exit(1)
0.001299
def decompress_messages(self, partitions_offmsgs): """ Decompress pre-defined compressed fields for each message. """ for pomsg in partitions_offmsgs: if pomsg['message']: pomsg['message'] = self.decompress_fun(pomsg['message']) yield pomsg
0.006734
def update_log_entry(self, log_entry_form): """Updates an existing log entry. arg: log_entry_form (osid.logging.LogEntryForm): the form containing the elements to be updated raise: IllegalState - ``log_entry_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``log_entry_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``log_entry_form`` did not originate from ``get_log_entry_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.update_resource_template collection = JSONClientValidated('logging', collection='LogEntry', runtime=self._runtime) if not isinstance(log_entry_form, ABCLogEntryForm): raise errors.InvalidArgument('argument type is not an LogEntryForm') if not log_entry_form.is_for_update(): raise errors.InvalidArgument('the LogEntryForm is for update only, not create') try: if self._forms[log_entry_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('log_entry_form already used in an update transaction') except KeyError: raise errors.Unsupported('log_entry_form did not originate from this session') if not log_entry_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(log_entry_form._my_map) self._forms[log_entry_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: return objects.LogEntry( osid_object_map=log_entry_form._my_map, runtime=self._runtime, proxy=self._proxy)
0.004215
def absent(name, auth=None, **kwargs): ''' Ensure a network does not exist name Name of the network ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = __utils__['args.clean_kwargs'](**kwargs) __salt__['neutronng.setup_clouds'](auth) kwargs['name'] = name network = __salt__['neutronng.network_get'](name=name) if network: if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': network.id} ret['comment'] = 'Network will be deleted.' return ret __salt__['neutronng.network_delete'](name=network) ret['changes']['id'] = network.id ret['comment'] = 'Deleted network' return ret
0.001256
def logsignalrate(self, s0, s1, slide, step): """Calculate the normalized log rate density of signals via lookup""" td = numpy.array(s0['end_time'] - s1['end_time'] - slide*step, ndmin=1) pd = numpy.array((s0['coa_phase'] - s1['coa_phase']) % \ (2. * numpy.pi), ndmin=1) rd = numpy.array((s0['sigmasq'] / s1['sigmasq']) ** 0.5, ndmin=1) sn0 = numpy.array(s0['snr'], ndmin=1) sn1 = numpy.array(s1['snr'], ndmin=1) snr0 = sn0 * 1 snr1 = sn1 * 1 snr0[rd > 1] = sn1[rd > 1] snr1[rd > 1] = sn0[rd > 1] rd[rd > 1] = 1. / rd[rd > 1] # Find which bin each coinc falls into tv = numpy.searchsorted(self.tbins, td) - 1 pv = numpy.searchsorted(self.pbins, pd) - 1 s0v = numpy.searchsorted(self.sbins, snr0) - 1 s1v = numpy.searchsorted(self.sbins, snr1) - 1 rv = numpy.searchsorted(self.rbins, rd) - 1 # Enforce that points fits into the bin boundaries: if a point lies # outside the boundaries it is pushed back to the nearest bin. tv[tv < 0] = 0 tv[tv >= len(self.tbins) - 1] = len(self.tbins) - 2 pv[pv < 0] = 0 pv[pv >= len(self.pbins) - 1] = len(self.pbins) - 2 s0v[s0v < 0] = 0 s0v[s0v >= len(self.sbins) - 1] = len(self.sbins) - 2 s1v[s1v < 0] = 0 s1v[s1v >= len(self.sbins) - 1] = len(self.sbins) - 2 rv[rv < 0] = 0 rv[rv >= len(self.rbins) - 1] = len(self.rbins) - 2 return self.hist[tv, pv, s0v, s1v, rv]
0.001907
def SNNL(x, y, temp, cos_distance): """Soft Nearest Neighbor Loss :param x: a matrix. :param y: a list of labels for each element of x. :param temp: Temperature. :cos_distance: Boolean for using cosine or Euclidean distance. :returns: A tensor for the Soft Nearest Neighbor Loss of the points in x with labels y. """ summed_masked_pick_prob = tf.reduce_sum( SNNLCrossEntropy.masked_pick_probability(x, y, temp, cos_distance), 1) return tf.reduce_mean( -tf.log(SNNLCrossEntropy.STABILITY_EPS + summed_masked_pick_prob))
0.001709
def load_data(filename): """Loads data from a file. Parameters ---------- filename : :obj:`str` The file to load the collection from. Returns ------- :obj:`numpy.ndarray` of float The data read from the file. Raises ------ ValueError If the file extension is not .npy or .npz. """ file_root, file_ext = os.path.splitext(filename) data = None if file_ext == '.npy': data = np.load(filename) elif file_ext == '.npz': data = np.load(filename)['arr_0'] else: raise ValueError('Extension %s not supported for point reads' %(file_ext)) return data
0.005319
def compare_operands(self, p_operand1, p_operand2): """ Returns True if conditional constructed from both operands and self.operator is valid. Returns False otherwise. """ if self.operator == '<': return p_operand1 < p_operand2 elif self.operator == '<=': return p_operand1 <= p_operand2 elif self.operator == '=': return p_operand1 == p_operand2 elif self.operator == '>=': return p_operand1 >= p_operand2 elif self.operator == '>': return p_operand1 > p_operand2 elif self.operator == '!': return p_operand1 != p_operand2 return False
0.002869
def entropy_calc(item, POP): """ Calculate reference and response likelihood. :param item : TOP or P :type item : dict :param POP: population :type POP : dict :return: reference or response likelihood as float """ try: result = 0 for i in item.keys(): likelihood = item[i] / POP[i] if likelihood != 0: result += likelihood * math.log(likelihood, 2) return -result except Exception: return "None"
0.001969
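A worked example of the calculation above, restated without the surrounding class so the numbers can be verified (Python 3 division assumed):

import math

item = {'a': 2, 'b': 2}   # e.g. per-class counts (TOP or P)
POP = {'a': 4, 'b': 4}    # population per class

# Each likelihood is 0.5, so the sum is 2 * 0.5 * log2(0.5) = -1
# and the negated result is 1.0.
result = -sum((item[i] / POP[i]) * math.log(item[i] / POP[i], 2)
              for i in item if item[i] / POP[i] != 0)
print(result)  # 1.0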
def _ResolveRelativeImport(name, package): """Resolves a relative import into an absolute path. This is mostly an adapted version of the logic found in the backported version of import_module in Python 2.7. https://github.com/python/cpython/blob/2.7/Lib/importlib/__init__.py Args: name: relative name imported, such as '.a' or '..b.c' package: absolute package path, such as 'a.b.c.d.e' Returns: The absolute path of the name to be imported, or None if it is invalid. Examples: _ResolveRelativeImport('.c', 'a.b') -> 'a.b.c' _ResolveRelativeImport('..c', 'a.b') -> 'a.c' _ResolveRelativeImport('...c', 'a.c') -> None """ level = sum(1 for c in itertools.takewhile(lambda c: c == '.', name)) if level == 1: return package + name else: parts = package.split('.')[:-(level - 1)] if not parts: return None parts.append(name[level:]) return '.'.join(parts)
0.006417
def run(self, args): """ Give the user with user_full_name the auth_role permissions on the remote project with project_name. :param args Namespace arguments parsed from the command line """ email = args.email # email of person to give permissions, will be None if username is specified username = args.username # username of person to give permissions, will be None if email is specified auth_role = args.auth_role # type of permission(project_admin) project = self.fetch_project(args, must_exist=True, include_children=False) user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username) self.remote_store.set_user_project_permission(project, user, auth_role) print(u'Gave user {} {} permissions for project {}.'.format(user.full_name, auth_role, project.name))
0.008762
def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret
0.002849
def find_module_defining_flag(self, flagname, default=None): """Return the name of the module defining this flag, or default. Args: flagname: str, name of the flag to lookup. default: Value to return if flagname is not defined. Defaults to None. Returns: The name of the module which registered the flag with this name. If no such module exists (i.e. no flag with this name exists), we return default. """ registered_flag = self._flags().get(flagname) if registered_flag is None: return default for module, flags in six.iteritems(self.flags_by_module_dict()): for flag in flags: # It must compare the flag with the one in _flags. This is because a # flag might be overridden only for its long name (or short name), # and only its short name (or long name) is considered registered. if (flag.name == registered_flag.name and flag.short_name == registered_flag.short_name): return module return default
0.004808
def nullity_sort(df, sort=None): """ Sorts a DataFrame according to its nullity, in either ascending or descending order. :param df: The DataFrame object being sorted. :param sort: The sorting method: either "ascending", "descending", or None (default). :return: The nullity-sorted DataFrame. """ if sort == 'ascending': return df.iloc[np.argsort(df.count(axis='columns').values), :] elif sort == 'descending': return df.iloc[np.flipud(np.argsort(df.count(axis='columns').values)), :] else: return df
0.00713
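A toy usage sketch of the 'descending' branch above, inlined so it runs on its own:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, np.nan, 6.0]})
# Most complete rows first: row 2 (no NaNs), then row 0 (one NaN), then row 1.
sorted_df = df.iloc[np.flipud(np.argsort(df.count(axis='columns').values)), :]
print(sorted_df.index.tolist())  # [2, 0, 1]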
def _set_fcoe_fsb(self, v, load=False): """ Setter method for fcoe_fsb, mapped from YANG variable /fcoe_fsb (container) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_fsb is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_fsb() directly. YANG Description: This CLI will disable/enable fsb mode """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=fcoe_fsb.fcoe_fsb, is_container='container', presence=False, yang_name="fcoe-fsb", rest_name="fsb", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcoe_fsb_cp', u'info': u'Enable/Disable the fsb mode', u'hidden': u'debug', u'alt-name': u'fsb', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fcoe_fsb must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=fcoe_fsb.fcoe_fsb, is_container='container', presence=False, yang_name="fcoe-fsb", rest_name="fsb", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'fcoe_fsb_cp', u'info': u'Enable/Disable the fsb mode', u'hidden': u'debug', u'alt-name': u'fsb', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""", }) self.__fcoe_fsb = t if hasattr(self, '_set'): self._set()
0.00494
def _set_redist_connected(self, v, load=False): """ Setter method for redist_connected, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_connected (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_connected is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_connected() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=redist_connected.redist_connected, is_container='container', presence=False, yang_name="redist-connected", rest_name="redist-connected", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-connected-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """redist_connected must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=redist_connected.redist_connected, is_container='container', presence=False, yang_name="redist-connected", rest_name="redist-connected", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-connected-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__redist_connected = t if hasattr(self, '_set'): self._set()
0.005482
def available_providers(request): "Adds the list of enabled providers to the context." if APPENGINE: # Note: AppEngine inequality queries are limited to one property. # See https://developers.google.com/appengine/docs/python/datastore/queries#Python_Restrictions_on_queries # Users have also noted that the exclusion queries don't work # See https://github.com/mlavin/django-all-access/pull/46 # So this is lazily-filtered in Python qs = SimpleLazyObject(lambda: _get_enabled()) else: qs = Provider.objects.filter(consumer_secret__isnull=False, consumer_key__isnull=False) return {'allaccess_providers': qs}
0.004418
def Write(packer_type, buf, head, n): """ Write encodes `n` at buf[head] using `packer_type`. """ packer_type.pack_into(buf, head, n)
0.007092
def to_hmtk(self, use_centroid=True): ''' Convert the content of the GCMT catalogue to a HMTK catalogue. ''' self._preallocate_data_dict() for iloc, gcmt in enumerate(self.catalogue.gcmts): self.catalogue.data['eventID'][iloc] = iloc if use_centroid: self.catalogue.data['year'][iloc] = \ gcmt.centroid.date.year self.catalogue.data['month'][iloc] = \ gcmt.centroid.date.month self.catalogue.data['day'][iloc] = \ gcmt.centroid.date.day self.catalogue.data['hour'][iloc] = \ gcmt.centroid.time.hour self.catalogue.data['minute'][iloc] = \ gcmt.centroid.time.minute self.catalogue.data['second'][iloc] = \ gcmt.centroid.time.second self.catalogue.data['longitude'][iloc] = \ gcmt.centroid.longitude self.catalogue.data['latitude'][iloc] = \ gcmt.centroid.latitude self.catalogue.data['depth'][iloc] = \ gcmt.centroid.depth else: self.catalogue.data['year'][iloc] = \ gcmt.hypocentre.date.year self.catalogue.data['month'][iloc] = \ gcmt.hypocentre.date.month self.catalogue.data['day'][iloc] = \ gcmt.hypocentre.date.day self.catalogue.data['hour'][iloc] = \ gcmt.hypocentre.time.hour self.catalogue.data['minute'][iloc] = \ gcmt.hypocentre.time.minute self.catalogue.data['second'][iloc] = \ gcmt.hypocentre.time.second self.catalogue.data['longitude'][iloc] = \ gcmt.hypocentre.longitude self.catalogue.data['latitude'][iloc] = \ gcmt.hypocentre.latitude self.catalogue.data['depth'][iloc] = \ gcmt.hypocentre.depth # Moment, magnitude and relative errors self.catalogue.data['moment'][iloc] = gcmt.moment self.catalogue.data['magnitude'][iloc] = gcmt.magnitude self.catalogue.data['f_clvd'][iloc] = gcmt.f_clvd self.catalogue.data['e_rel'][iloc] = gcmt.e_rel self.catalogue.data['centroidID'][iloc] = gcmt.identifier # Nodal planes self.catalogue.data['strike1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['strike'] self.catalogue.data['dip1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['dip'] self.catalogue.data['rake1'][iloc] = \ gcmt.nodal_planes.nodal_plane_1['rake'] self.catalogue.data['strike2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['strike'] self.catalogue.data['dip2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['dip'] self.catalogue.data['rake2'][iloc] = \ gcmt.nodal_planes.nodal_plane_2['rake'] # Principal axes self.catalogue.data['eigenvalue_b'][iloc] = \ gcmt.principal_axes.b_axis['eigenvalue'] self.catalogue.data['azimuth_b'][iloc] = \ gcmt.principal_axes.b_axis['azimuth'] self.catalogue.data['plunge_b'][iloc] = \ gcmt.principal_axes.b_axis['plunge'] self.catalogue.data['eigenvalue_p'][iloc] = \ gcmt.principal_axes.p_axis['eigenvalue'] self.catalogue.data['azimuth_p'][iloc] = \ gcmt.principal_axes.p_axis['azimuth'] self.catalogue.data['plunge_p'][iloc] = \ gcmt.principal_axes.p_axis['plunge'] self.catalogue.data['eigenvalue_t'][iloc] = \ gcmt.principal_axes.t_axis['eigenvalue'] self.catalogue.data['azimuth_t'][iloc] = \ gcmt.principal_axes.t_axis['azimuth'] self.catalogue.data['plunge_t'][iloc] = \ gcmt.principal_axes.t_axis['plunge'] return self.catalogue
0.000471
def remove_completer(self): """ Removes current completer. :return: Method success. :rtype: bool """ if self.__completer: LOGGER.debug("> Removing '{0}' completer.".format(self.__completer)) # Signals / Slots. self.__completer.activated.disconnect(self.__insert_completion) self.__completer.deleteLater() self.__completer = None return True
0.006536
def _get_unique_index(self, dropna=False): """ Returns an index containing unique values. Parameters ---------- dropna : bool If True, NaN values are dropped. Returns ------- uniques : index """ if self.is_unique and not dropna: return self values = self.values if not self.is_unique: values = self.unique() if dropna: try: if self.hasnans: values = values[~isna(values)] except NotImplementedError: pass return self._shallow_copy(values)
0.002994
def _read_socket(socket): """ The stdout and stderr data from the container multiplexed into one stream of response from the Docker API. It follows the protocol described here https://docs.docker.com/engine/api/v1.30/#operation/ContainerAttach. The stream starts with a 8 byte header that contains the frame type and also payload size. Follwing that is the actual payload of given size. Once you read off this payload, we are ready to read the next header. This method will follow this protocol to read payload from the stream and return an iterator that returns a tuple containing the frame type and frame data. Callers can handle the data appropriately based on the frame type. Stdout => Frame Type = 1 Stderr => Frame Type = 2 Parameters ---------- socket Socket to read responses from Yields ------- int Type of the stream (1 => stdout, 2 => stderr) str Data in the stream """ # Keep reading the stream until the stream terminates while True: try: payload_type, payload_size = _read_header(socket) if payload_size < 0: # Something is wrong with the data stream. Payload size can't be less than zero break for data in _read_payload(socket, payload_size): yield payload_type, data except timeout: # Timeouts are normal during debug sessions and long running tasks LOG.debug("Ignoring docker socket timeout") except SocketError: # There isn't enough data in the stream. Probably the socket terminated break
0.005338
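`_read_header` and `_read_payload` are not shown in this row. Based on the framing the docstring describes (1 byte for the stream type, 3 padding bytes, then a 4-byte big-endian payload size), a minimal sketch of the header parse might look like the following; the helper names are illustrative, not the library's actual API:

import struct

def _read_exactly(socket, n):
    # recv can return short reads, so accumulate exactly n bytes.
    buf = b''
    while len(buf) < n:
        chunk = socket.recv(n - len(buf))
        if not chunk:
            break
        buf += chunk
    return buf

def _read_header_sketch(socket):
    # 8-byte frame header: stream type, 3 padding bytes, big-endian uint32 size.
    header = _read_exactly(socket, 8)
    if len(header) < 8:
        return 0, -1  # signal a terminated or malformed stream
    payload_type, payload_size = struct.unpack('>BxxxI', header)
    return payload_type, payload_size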
def as_dict(self, default=None): """ Returns a ``SettingDict`` object for this queryset. """ settings = SettingDict(queryset=self, default=default) return settings
0.009852
def _collapse_to_cwl_record_single(data, want_attrs, input_files): """Convert a single sample into a CWL record. """ out = {} for key in want_attrs: key_parts = key.split("__") out[key] = _to_cwl(tz.get_in(key_parts, data), input_files) return out
0.003534
def adapt_package(package): """Adapts ``.epub.Package`` to a ``BinderItem`` and cascades the adaptation downward to ``DocumentItem`` and ``ResourceItem``. The results of this process provide the same interface as ``.models.Binder``, ``.models.Document`` and ``.models.Resource``. """ navigation_item = package.navigation html = etree.parse(navigation_item.data) tree = parse_navigation_html_to_tree(html, navigation_item.name) return _node_to_model(tree, package)
0.001988
def update_metadata(self, key, value): """Set *key* in the metadata to *value*. Returns the previous value of *key*, or None if the key was not previously set. """ old_value = self.contents['metadata'].get(key) self.contents['metadata'][key] = value self._log('Updated metadata: %s=%s' % (key, value)) return old_value
0.005222
def get_logw(ns_run, simulate=False): r"""Calculates the log posterior weights of the samples (using logarithms to avoid overflow errors with very large or small values). Uses the trapezium rule such that the weight of point i is .. math:: w_i = \mathcal{L}_i (X_{i-1} - X_{i+1}) / 2 Parameters ---------- ns_run: dict Nested sampling run dict (see data_processing module docstring for more details). simulate: bool, optional Should log prior volumes logx be simulated from their distribution (if false their expected values are used). Returns ------- logw: 1d numpy array Log posterior masses of points. """ try: # find logX value for each point logx = get_logx(ns_run['nlive_array'], simulate=simulate) logw = np.zeros(ns_run['logl'].shape[0]) # Vectorized trapezium rule: w_i prop to (X_{i-1} - X_{i+1}) / 2 logw[1:-1] = log_subtract(logx[:-2], logx[2:]) - np.log(2) # Assign all prior volume closest to first point X_first to that point: # that is from logx=0 to logx=log((X_first + X_second) / 2) logw[0] = log_subtract(0, scipy.special.logsumexp([logx[0], logx[1]]) - np.log(2)) # Assign all prior volume closest to final point X_last to that point: # that is from logx=log((X_penultimate + X_last) / 2) to logx=-inf logw[-1] = scipy.special.logsumexp([logx[-2], logx[-1]]) - np.log(2) # multiply by likelihood (add in log space) logw += ns_run['logl'] return logw except IndexError: if ns_run['logl'].shape[0] == 1: # If there is only one point in the run then assign all prior # volume X \in (0, 1) to that point, so the weight is just # 1 * logl_0 = logl_0 return copy.deepcopy(ns_run['logl']) else: raise
0.000517
def granule_paths(self, band_id): """Return the path of all granules of a given band.""" band_id = str(band_id).zfill(2) try: assert isinstance(band_id, str) assert band_id in BAND_IDS except AssertionError: raise AttributeError( "band ID not valid: %s" % band_id ) return [ granule.band_path(band_id) for granule in self.granules ]
0.004202
def change_term_title(title): """ only works on unix systems only tested on Ubuntu GNOME changes text on terminal title for identifying debugging tasks. The title will remain until python exists Args: title (str): References: http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484 CommandLine: python -m utool change_term_title echo -en "\033]0;newtitle\a" printf "\e]2;newtitle\a"; echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a" Example: >>> # DISABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> title = 'change title test' >>> result = change_term_title(title) >>> print(result) """ if True: # Disabled return if not WIN32: #print("CHANGE TERM TITLE to %r" % (title,)) if title: #os.environ['PS1'] = os.environ['PS1'] + '''"\e]2;\"''' + title + '''\"\a"''' cmd_str = r'''echo -en "\033]0;''' + title + '''\a"''' os.system(cmd_str)
0.004188
def _process_observations(base_env, policies, batch_builder_pool,
                          active_episodes, unfiltered_obs, rewards, dones,
                          infos, off_policy_actions, horizon, preprocessors,
                          obs_filters, unroll_length, pack, callbacks,
                          soft_horizon):
    """Record new data from the environment and prepare for policy evaluation.

    Returns:
        active_envs: set of non-terminated env ids
        to_eval: map of policy_id to list of agent PolicyEvalData
        outputs: list of metrics and samples to return from the sampler
    """

    active_envs = set()
    to_eval = defaultdict(list)
    outputs = []

    # For each environment
    for env_id, agent_obs in unfiltered_obs.items():
        new_episode = env_id not in active_episodes
        episode = active_episodes[env_id]
        if not new_episode:
            episode.length += 1
            episode.batch_builder.count += 1
            episode._add_agent_rewards(rewards[env_id])

        if (episode.batch_builder.total() > max(1000, unroll_length * 10)
                and log_once("large_batch_warning")):
            logger.warning(
                "More than {} observations for {} env steps ".format(
                    episode.batch_builder.total(),
                    episode.batch_builder.count) + "are buffered in "
                "the sampler. If this is more than you expected, check that "
                "you set a horizon on your environment correctly. Note "
                "that in multi-agent environments, `sample_batch_size` sets "
                "the batch size based on environment steps, not the steps of "
                "individual agents, which can result in unexpectedly large "
                "batches.")

        # Check episode termination conditions
        if dones[env_id]["__all__"] or episode.length >= horizon:
            hit_horizon = (episode.length >= horizon
                           and not dones[env_id]["__all__"])
            all_done = True
            atari_metrics = _fetch_atari_metrics(base_env)
            if atari_metrics is not None:
                for m in atari_metrics:
                    outputs.append(
                        m._replace(custom_metrics=episode.custom_metrics))
            else:
                outputs.append(
                    RolloutMetrics(episode.length, episode.total_reward,
                                   dict(episode.agent_rewards),
                                   episode.custom_metrics, {}))
        else:
            hit_horizon = False
            all_done = False
            active_envs.add(env_id)

        # For each agent in the environment
        for agent_id, raw_obs in agent_obs.items():
            policy_id = episode.policy_for(agent_id)
            prep_obs = _get_or_raise(preprocessors,
                                     policy_id).transform(raw_obs)
            if log_once("prep_obs"):
                logger.info("Preprocessed obs: {}".format(summarize(prep_obs)))

            filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)
            if log_once("filtered_obs"):
                logger.info("Filtered obs: {}".format(summarize(filtered_obs)))

            agent_done = bool(all_done or dones[env_id].get(agent_id))
            if not agent_done:
                to_eval[policy_id].append(
                    PolicyEvalData(env_id, agent_id, filtered_obs,
                                   infos[env_id].get(agent_id, {}),
                                   episode.rnn_state_for(agent_id),
                                   episode.last_action_for(agent_id),
                                   rewards[env_id][agent_id] or 0.0))

            last_observation = episode.last_observation_for(agent_id)
            episode._set_last_observation(agent_id, filtered_obs)
            episode._set_last_raw_obs(agent_id, raw_obs)
            episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))

            # Record transition info if applicable
            if (last_observation is not None and infos[env_id].get(
                    agent_id, {}).get("training_enabled", True)):
                episode.batch_builder.add_values(
                    agent_id,
                    policy_id,
                    t=episode.length - 1,
                    eps_id=episode.episode_id,
                    agent_index=episode._agent_index(agent_id),
                    obs=last_observation,
                    actions=episode.last_action_for(agent_id),
                    rewards=rewards[env_id][agent_id],
                    prev_actions=episode.prev_action_for(agent_id),
                    prev_rewards=episode.prev_reward_for(agent_id),
                    dones=(False if (hit_horizon and soft_horizon) else
                           agent_done),
                    infos=infos[env_id].get(agent_id, {}),
                    new_obs=filtered_obs,
                    **episode.last_pi_info_for(agent_id))

        # Invoke the step callback after the step is logged to the episode
        if callbacks.get("on_episode_step"):
            callbacks["on_episode_step"]({"env": base_env, "episode": episode})

        # Cut the batch if we're not packing multiple episodes into one,
        # or if we've exceeded the requested batch size.
        if episode.batch_builder.has_pending_data():
            if dones[env_id]["__all__"]:
                episode.batch_builder.check_missing_dones()
            if (all_done and not pack) or \
                    episode.batch_builder.count >= unroll_length:
                outputs.append(episode.batch_builder.build_and_reset(episode))
            elif all_done:
                # Make sure postprocessor stays within one episode
                episode.batch_builder.postprocess_batch_so_far(episode)

        if all_done:
            # Handle episode termination
            batch_builder_pool.append(episode.batch_builder)
            if callbacks.get("on_episode_end"):
                callbacks["on_episode_end"]({
                    "env": base_env,
                    "policy": policies,
                    "episode": episode
                })
            if hit_horizon and soft_horizon:
                episode.soft_reset()
                resetted_obs = agent_obs
            else:
                del active_episodes[env_id]
                resetted_obs = base_env.try_reset(env_id)
            if resetted_obs is None:
                # Reset not supported, drop this env from the ready list
                if horizon != float("inf"):
                    raise ValueError(
                        "Setting episode horizon requires reset() support "
                        "from the environment.")
            elif resetted_obs != ASYNC_RESET_RETURN:
                # Creates a new episode if this is not async return
                # If reset is async, we will get its result in some future poll
                episode = active_episodes[env_id]
                for agent_id, raw_obs in resetted_obs.items():
                    policy_id = episode.policy_for(agent_id)
                    policy = _get_or_raise(policies, policy_id)
                    prep_obs = _get_or_raise(preprocessors,
                                             policy_id).transform(raw_obs)
                    filtered_obs = _get_or_raise(obs_filters,
                                                 policy_id)(prep_obs)
                    episode._set_last_observation(agent_id, filtered_obs)
                    to_eval[policy_id].append(
                        PolicyEvalData(
                            env_id, agent_id, filtered_obs,
                            episode.last_info_for(agent_id) or {},
                            episode.rnn_state_for(agent_id),
                            np.zeros_like(
                                _flatten_action(
                                    policy.action_space.sample())),
                            0.0))

    return active_envs, to_eval, outputs
0.000124
def use_with(data, fn, *attrs):
    """Apply a function on the attributes of the data

    :param data: an object
    :param fn: a function
    :param attrs: some attributes of the object
    :returns: an object

    Let's create some data first::

        >>> from collections import namedtuple
        >>> Person = namedtuple('Person', ('name', 'age', 'gender'))
        >>> alice = Person('Alice', 30, 'F')

    Usage::

        >>> make_csv_row = lambda n, a, g: '%s,%d,%s' % (n, a, g)
        >>> use_with(alice, make_csv_row, 'name', 'age', 'gender')
        'Alice,30,F'
    """
    args = [getattr(data, x) for x in attrs]
    return fn(*args)
0.001534
def _image_data(self):
    """Returns the data in image format, with scaling and conversion to uint8 types.

    Returns
    -------
    :obj:`numpy.ndarray` of uint8
        A 3D matrix representing the image. The first dimension is rows, the
        second is columns, and the third is simply the IR entry scaled to
        between 0 and BINARY_IM_MAX_VAL.
    """
    return (self._data * (float(BINARY_IM_MAX_VAL) / MAX_IR)).astype(np.uint8)
0.012793
def num_rows(self):
    """
    Returns the number of rows.

    Returns
    -------
    out : int
        Number of rows in the SFrame.
    """
    if self._is_vertex_frame():
        return self.__graph__.summary()['num_vertices']
    elif self._is_edge_frame():
        return self.__graph__.summary()['num_edges']
0.00554
def RLS_SDR(anchors, W, r, print_out=False):
    """ Range least squares (RLS) using SDR.

    Algorithm cited by A.Beck, P.Stoica in "Approximate and Exact solutions of
    Source Localization Problems".

    :param anchors: anchor points
    :param r: squared distances from anchors to point x.

    :return: estimated position of point x.
    """
    from pylocus.basics import low_rank_approximation, eigendecomp
    from pylocus.mds import x_from_eigendecomp

    m = anchors.shape[0]
    d = anchors.shape[1]

    G = Variable(m + 1, m + 1)
    X = Variable(d + 1, d + 1)
    constraints = [G[m, m] == 1.0,
                   X[d, d] == 1.0,
                   G >> 0, X >> 0,
                   G == G.T, X == X.T]
    for i in range(m):
        Ci = np.eye(d + 1)
        Ci[:-1, -1] = -anchors[i]
        Ci[-1, :-1] = -anchors[i].T
        Ci[-1, -1] = np.linalg.norm(anchors[i])**2
        constraints.append(G[i, i] == trace(Ci * X))

    obj = Minimize(trace(G) - 2 * sum_entries(mul_elemwise(r, G[m, :-1].T)))
    prob = Problem(obj, constraints)

    ## Solution
    total = prob.solve(verbose=True)
    rank_G = np.linalg.matrix_rank(G.value)
    rank_X = np.linalg.matrix_rank(X.value)
    if rank_G > 1:
        u, s, v = np.linalg.svd(G.value, full_matrices=False)
        print('optimal G is not of rank 1!')
        print(s)
    if rank_X > 1:
        u, s, v = np.linalg.svd(X.value, full_matrices=False)
        print('optimal X is not of rank 1!')
        print(s)

    factor, u = eigendecomp(X.value, 1)
    xhat = x_from_eigendecomp(factor, u, 1)
    return xhat
0.001892
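A minimal usage sketch for the RLS_SDR entry above; the anchor coordinates and the unknown point are illustrative, and the call itself is commented out since it needs cvxpy and pylocus installed. Per the entry's docstring, r holds squared anchor-to-point distances.

import numpy as np
anchors = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # m x d anchor points
x_true = np.array([0.3, 0.4])                             # hypothetical true position
r = np.linalg.norm(anchors - x_true, axis=1) ** 2         # squared distances, per docstring
W = np.ones(len(anchors))                                  # placeholder weights
# xhat = RLS_SDR(anchors, W, r)  # expected to lie close to x_true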
def make_statement(self, action, mention):
    """Makes an INDRA statement from a Geneways action and action mention.

    Parameters
    ----------
    action : GenewaysAction
        The mechanism that the Geneways mention maps to. Note that
        several text mentions can correspond to the same action if they are
        referring to the same relationship - there may be multiple
        Geneways action mentions corresponding to each action.
    mention : GenewaysActionMention
        The Geneways action mention object corresponding to a single
        mention of a mechanism in a specific text. We make a new INDRA
        statement corresponding to each action mention.

    Returns
    -------
    statement : indra.statements.Statement
        An INDRA statement corresponding to the provided Geneways action
        mention, or None if the action mention's type does not map onto
        any INDRA statement type in geneways_action_type_mapper.
    """
    (statement_generator, is_direct) = \
        geneways_action_to_indra_statement_type(mention.actiontype, action.plo)

    if statement_generator is None:
        # Geneways statement does not map onto an indra statement
        return None

    # Try to find the full-text sentence
    # Unfortunately, the sentence numbers in the Geneways dataset
    # don't correspond to an obvious sentence segmentation.
    # This code looks for sentences with the subject, object, and verb
    # listed by the Geneways action mention table and only includes
    # it in the evidence if there is exactly one such sentence
    text = None
    if self.get_ft_mention:
        try:
            content, content_type = get_full_text(mention.pmid, 'pmid')
            if content is not None:
                ftm = FullTextMention(mention, content)
                sentences = ftm.find_matching_sentences()
                if len(sentences) == 1:
                    text = sentences[0]
        except Exception:
            logger.warning('Could not fetch full text for PMID ' +
                           mention.pmid)

    # Make an evidence object
    epistemics = dict()
    epistemics['direct'] = is_direct
    annotations = mention.make_annotation()
    annotations['plo'] = action.plo  # plo only in action table
    evidence = Evidence(source_api='geneways',
                        source_id=mention.actionmentionid,
                        pmid=mention.pmid, text=text,
                        epistemics=epistemics,
                        annotations=annotations)

    # Construct the grounded and name standardized agents
    # Note that this involves grounding the agent by
    # converting the Entrez ID listed in the Geneways data with
    # HGNC and UniProt
    upstream_agent = get_agent(mention.upstream, action.up)
    downstream_agent = get_agent(mention.downstream, action.dn)

    # Make the statement
    return statement_generator(upstream_agent, downstream_agent, evidence)
0.000616
def uses_base_tear_down(cls):
    """Checks whether the tearDown method is the BasePlug implementation."""
    this_tear_down = getattr(cls, 'tearDown')
    base_tear_down = getattr(BasePlug, 'tearDown')
    return this_tear_down.__code__ is base_tear_down.__code__
0.003774
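A small illustration of the check above, assuming BasePlug is the plug base class in scope; MyPlug is a hypothetical subclass that overrides tearDown.

class MyPlug(BasePlug):
    def tearDown(self):
        pass  # overrides the base implementation

uses_base_tear_down(BasePlug)  # True: same code object as BasePlug.tearDown
uses_base_tear_down(MyPlug)    # False: tearDown was overridden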
def _variance_scale_term(self):
    """Helper to `_covariance` and `_variance` which computes a shared scale."""
    # Expand back the last dim so the shape of _variance_scale_term matches the
    # shape of self.concentration.
    c0 = self.total_concentration[..., tf.newaxis]
    return tf.sqrt((1. + c0 / self.total_count[..., tf.newaxis]) / (1. + c0))
0.005618
def statsd_middleware_factory(app, handler):
    """Send the application stats to statsd."""

    @coroutine
    def middleware(request):
        """Send stats to statsd."""
        timer = Timer()
        timer.start()
        statsd = yield from app.ps.metrics.client()
        pipe = statsd.pipe()
        pipe.incr('request.method.%s' % request.method)
        try:
            response = yield from handler(request)
            pipe.incr('response.status.%s' % response.status)
            return response
        except HTTPException as exc:
            pipe.incr('response.status.%s' % exc.status_code)
            raise
        except Exception:
            pipe.incr('response.exception')
            raise
        finally:
            timer.stop()
            pipe.timing('response.time', timer.ms)
            pipe.disconnect()

    return middleware
0.001156
def get_tag(self, tagtype):
    ''' Get the first tag of a particular type'''
    for tag in self.__tags:
        if tag.tagtype == tagtype:
            return tag
    return None
0.01005
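A usage sketch for get_tag; the `record` object and the 'title' tag type are illustrative names, not from the source.

tag = record.get_tag('title')
if tag is not None:
    print(tag.tagtype)  # first tag of that type, or None if absent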
def example_repl(self, text, example, start_index, continue_flag):
    """ REPL for interactive tutorials """
    if start_index:
        start_index = start_index + 1
        cmd = ' '.join(text.split()[:start_index])
        example_cli = CommandLineInterface(
            application=self.create_application(
                full_layout=False),
            eventloop=create_eventloop())
        example_cli.buffers['example_line'].reset(
            initial_document=Document(u'{}\n'.format(
                add_new_lines(example)))
        )
        while start_index < len(text.split()):
            if self.default_command:
                cmd = cmd.replace(self.default_command + ' ', '')
            example_cli.buffers[DEFAULT_BUFFER].reset(
                initial_document=Document(
                    u'{}'.format(cmd),
                    cursor_position=len(cmd)))
            example_cli.request_redraw()
            answer = example_cli.run()
            if not answer:
                return "", True
            answer = answer.text
            if answer.strip('\n') == cmd.strip('\n'):
                continue
            else:
                if len(answer.split()) > 1:
                    start_index += 1
                    cmd += " " + answer.split()[-1] + " " +\
                        u' '.join(text.split()[start_index:start_index + 1])
        example_cli.exit()
        del example_cli
    else:
        cmd = text
    return cmd, continue_flag
0.001846
def get_localzone():
    """Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone."""
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = pytz.timezone(get_localzone_name())

    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
0.007143
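A usage sketch: the cached pytz timezone returned above can be passed straight to datetime.

from datetime import datetime

tz = get_localzone()
print(datetime.now(tz).isoformat())  # local wall-clock time with UTC offset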
def get_group(name: str) -> _Group:
    """
    Get a configuration variable group named |name|
    """
    global _groups

    if name in _groups:
        return _groups[name]

    group = _Group(name)
    _groups[name] = group
    return group
0.004065
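A short sketch of the caching behaviour above: asking for the same group name twice returns the same _Group object.

logging_group = get_group("logging")   # group name is illustrative
assert get_group("logging") is logging_group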
def _linux_disks():
    '''
    Return list of disk devices and work out if they are SSD or HDD.
    '''
    ret = {'disks': [], 'SSDs': []}

    for entry in glob.glob('/sys/block/*/queue/rotational'):
        try:
            with salt.utils.files.fopen(entry) as entry_fp:
                device = entry.split('/')[3]
                flag = entry_fp.read(1)
                if flag == '0':
                    ret['SSDs'].append(device)
                    log.trace('Device %s reports itself as an SSD', device)
                elif flag == '1':
                    ret['disks'].append(device)
                    log.trace('Device %s reports itself as an HDD', device)
                else:
                    log.trace(
                        'Unable to identify device %s as an SSD or HDD. It does '
                        'not report 0 or 1', device
                    )
        except IOError:
            pass
    return ret
0.002132
def mofval(value, indent=MOF_INDENT, maxline=MAX_MOF_LINE, line_pos=0,
           end_space=0):
    """
    Low level function that returns the MOF representation of a non-string
    value (i.e. a value that cannot be split into multiple parts, for
    example a numeric or boolean value).

    If the MOF representation of the value does not fit into the remaining
    space of the current line, it is put into a new line, considering the
    specified indentation. If it also does not fit on the remaining space of
    the new line, ValueError is raised.

    Parameters:

      value (:term:`unicode string`): The non-string value. Must not be `None`.

      indent (:term:`integer`): Number of spaces to indent any new lines that
        are generated.

      maxline (:term:`integer`): Maximum line length for the generated MOF.

      line_pos (:term:`integer`): Length of content already on the current
        line.

      end_space (:term:`integer`): Length of space to be left free on the last
        line.

    Returns:

      tuple of
        * :term:`unicode string`: MOF string.
        * new line_pos

    Raises:

      ValueError: The value does not fit onto an entire new line.
    """

    assert isinstance(value, six.text_type)

    # Check for output on current line
    avl_len = maxline - line_pos - end_space
    if len(value) <= avl_len:
        line_pos += len(value)
        return value, line_pos

    # Check for output on new line
    avl_len = maxline - indent - end_space
    if len(value) <= avl_len:
        mof_str = u'\n' + _indent_str(indent) + value
        line_pos = indent + len(value)
        return mof_str, line_pos

    raise ValueError(
        _format("Cannot fit value {0!A} onto new MOF line, missing {1} "
                "characters", value, len(value) - avl_len))
0.000549
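A sketch of mofval's line-wrapping behaviour, assuming _indent_str(n) returns n spaces; the numeric arguments are illustrative.

s, pos = mofval(u'42', indent=3, maxline=15, line_pos=0)
# s == u'42', pos == 2  (value fits on the current line)
s, pos = mofval(u'123456', indent=3, maxline=15, line_pos=12)
# s == u'\n   123456', pos == 9  (moved to a new, indented line)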
def getResourceTypes(self):
    """ Get the list of resource types supported by the HydroShare server

    :return: A set of strings representing the HydroShare resource types

    :raises: HydroShareHTTPException to signal an HTTP error
    """
    url = "{url_base}/resource/types".format(url_base=self.url_base)

    r = self._request('GET', url)
    if r.status_code != 200:
        raise HydroShareHTTPException((url, 'GET', r.status_code))

    resource_types = r.json()
    return set([t['resource_type'] for t in resource_types])
0.003466
def run_command(self, cmd, history=True, new_prompt=True):
    """Run command in interpreter"""
    if not cmd:
        cmd = ''
    else:
        if history:
            self.add_to_history(cmd)
    if not self.multithreaded:
        if 'input' not in cmd:
            self.interpreter.stdin_write.write(
                to_binary_string(cmd + '\n'))
            self.interpreter.run_line()
            self.refresh.emit()
        else:
            self.write(_('In order to use commands like "raw_input" '
                         'or "input" run Spyder with the multithread '
                         'option (--multithread) from a system terminal'),
                       error=True)
    else:
        self.interpreter.stdin_write.write(to_binary_string(cmd + '\n'))
0.002237
def create_connection(self, from_obj, to_obj):
    """
    Creates and returns a connection between the given objects. If a
    connection already exists, that connection will be returned instead.
    """
    self._validate_ctypes(from_obj, to_obj)
    return Connection.objects.get_or_create(relationship_name=self.name,
                                            from_pk=from_obj.pk,
                                            to_pk=to_obj.pk)[0]
0.006912
def return_train_dataset(self):
    """Returns train data set

    Returns:
        X (numpy.ndarray): Features

        y (numpy.ndarray): Labels
    """
    X, y = self.return_main_dataset()

    if self.test_dataset['method'] == 'split_from_main':
        X, X_test, y, y_test = train_test_split(
            X,
            y,
            test_size=self.test_dataset['split_ratio'],
            random_state=self.test_dataset['split_seed'],
            stratify=y
        )

    return X, y
0.003617
def transform(self, image_feature, bigdl_type="float"):
    """
    transform ImageFeature
    """
    callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
    return image_feature
0.013333
def list_handler(HandlerResult="nparray"):
    """Wraps a function to handle list inputs."""
    def decorate(func):
        def wrapper(*args, HandlerResult=HandlerResult, **kwargs):
            """Run through the wrapped function once for each array element.

            :param HandlerResult: output type. Defaults to numpy arrays.
            """
            sequences = []
            enumsUnitCheck = enumerate(args)
            argsList = list(args)
            # This for loop identifies pint unit objects and strips them
            # of their units.
            for num, arg in enumsUnitCheck:
                if type(arg) == type(1 * u.m):
                    argsList[num] = arg.to_base_units().magnitude
            enumsUnitless = enumerate(argsList)
            # This for loop identifies arguments that are sequences and
            # adds their index location to the list 'sequences'.
            for num, arg in enumsUnitless:
                if isinstance(arg, (list, tuple, np.ndarray)):
                    sequences.append(num)
            # If there are no sequences to iterate through, simply return
            # the function.
            if len(sequences) == 0:
                result = func(*args, **kwargs)
            else:
                # iterant keeps track of how many times we've iterated and
                # limiter stops the loop once we've iterated as many times
                # as there are list elements. Without this check, a few
                # erroneous runs will occur, appending the last couple values
                # to the end of the list multiple times.
                #
                # We only care about the length of sequences[0] because this
                # function is recursive, and sequences[0] is always the
                # relevant sequence for any given run.
                limiter = len(argsList[sequences[0]])
                iterant = 0
                result = []
                for num in sequences:
                    for arg in argsList[num]:
                        if iterant >= limiter:
                            break
                        # We can safely replace the entire list argument
                        # with a single element from it because of the looping
                        # we're doing. We redefine the object, but that
                        # definition remains within this namespace and does
                        # not penetrate further up the function.
                        argsList[num] = arg
                        # Here we dive down the rabbit hole. This ends up
                        # creating a multi-dimensional array shaped by the
                        # sizes and shapes of the lists passed.
                        result.append(wrapper(*argsList,
                                              HandlerResult=HandlerResult,
                                              **kwargs))
                        iterant += 1
                # HandlerResult allows the user to specify what type to
                # return the generated sequence as. It defaults to numpy
                # arrays because functions tend to handle them better, but if
                # the user does not wish to import numpy the base Python
                # options are available to them.
                if HandlerResult == "nparray":
                    result = np.array(result)
                elif HandlerResult == "tuple":
                    result = tuple(result)
                elif HandlerResult == "list":
                    result = list(result)
            return result
        return wrapper
    return decorate
0.008477
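A usage sketch for list_handler with the fixes above applied; `double` is an illustrative function. Scalar inputs pass through unchanged, while list inputs are mapped element-wise and returned as a NumPy array (the default HandlerResult).

@list_handler()
def double(x):
    return 2 * x

double(3)          # -> 6
double([1, 2, 3])  # -> array([2, 4, 6])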
def copymode(src, dst):
    """Copy mode bits from src to dst"""
    if hasattr(os, 'chmod'):
        st = os.stat(src)
        mode = stat.S_IMODE(st.st_mode)
        os.chmod(dst, mode)
0.005348
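A usage sketch for copymode: replicate the permission bits of one existing file onto another (paths are illustrative).

copymode('/etc/hosts', '/tmp/hosts.copy')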